# app.py
import streamlit as st
import os
import logging
# --- Streamlit PermissionError Mitigation Attempts (Python level) ---
# These are fallbacks; Dockerfile ENV STREAMLIT_HOME is the primary fix for '/.streamlit'
if "STREAMLIT_CLIENT_GATHER_USAGE_STATS" not in os.environ:
os.environ["STREAMLIT_CLIENT_GATHER_USAGE_STATS"] = "false"
print("INFO: app.py - Disabled Streamlit client usage stats gathering via env var.")
if "STREAMLIT_BROWSER_GATHERUSAGESTATS" not in os.environ: # Another potential flag
os.environ["STREAMLIT_BROWSER_GATHERUSAGESTATS"] = "false"
print("INFO: app.py - Set STREAMLIT_BROWSER_GATHERUSAGESTATS to false.")
# If running in /app and STREAMLIT_HOME isn't set, try to define a writable one.
# The Dockerfile's ENV STREAMLIT_HOME should ideally handle this.
streamlit_home_path_app = "/app/.streamlit_cai_config" # Using a unique name
if "STREAMLIT_HOME" not in os.environ and os.getcwd().startswith("/app"):
os.environ["STREAMLIT_HOME"] = streamlit_home_path_app
try:
os.makedirs(streamlit_home_path_app, exist_ok=True)
# In Docker, this path should already be writable by the app user if Dockerfile is set up correctly.
print(f"INFO: app.py - Set STREAMLIT_HOME to: {streamlit_home_path_app}")
except Exception as e_mkdir_sh:
print(f"WARNING: app.py - Could not create STREAMLIT_HOME '{streamlit_home_path_app}': {e_mkdir_sh}")
from core.gemini_handler import GeminiHandler
from core.visual_engine import VisualEngine
from core.prompt_engineering import (
create_cinematic_treatment_prompt,
construct_dalle_prompt,
construct_text_to_video_prompt_for_gen4,
create_narration_script_prompt_enhanced,
create_scene_regeneration_prompt,
create_visual_regeneration_prompt
)
# --- Page Config & Logging ---
st.set_page_config(page_title="CineGen AI Ultra+", layout="wide", initial_sidebar_state="expanded")
# Configure logging (level can be adjusted for production vs. development)
logging.basicConfig(
level=logging.DEBUG, # DEBUG for development, INFO for production
format='%(asctime)s - %(name)s [%(levelname)s] - %(message)s (%(module)s.%(funcName)s:%(lineno)d)'
)
logger = logging.getLogger(__name__)
# --- Global Definitions ---
SHOT_TYPES_OPTIONS = [ "Director's Choice", "Establishing Shot", "Long Shot", "Full Shot", "Medium Long Shot (Cowboy)", "Medium Shot", "Medium Close-up", "Close-up", "Extreme Close-up", "Point of View (POV)", "Over the Shoulder", "Tracking Shot", "Dolly Zoom", "Crane Shot", "Aerial Shot", "Static Shot", "Dutch Angle", "Whip Pan"]
DEFAULT_SCENE_DURATION_SECS = 5
DEFAULT_SHOT_TYPE = "Director's Choice"
ASSET_TYPE_OPTIONS = ["Auto (Director's Choice)", "Image", "Video Clip"]
# --- API Key Loading ---
def load_api_key(key_name_streamlit, key_name_env, service_name):
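# Resolution order: Streamlit secrets first, then environment variables. Returns None (after
# logging a warning) when neither source provides the key, so dependent services can be disabled gracefully.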
key_value = None
secrets_available = hasattr(st, 'secrets')
try:
if secrets_available and key_name_streamlit in st.secrets:
key_value = st.secrets.get(key_name_streamlit) # Use .get for safety
if key_value: logger.info(f"API Key for {service_name} found in Streamlit secrets.")
except Exception as e_secrets: logger.warning(f"Could not access st.secrets for {key_name_streamlit} ({service_name}): {e_secrets}")
if not key_value and key_name_env in os.environ:
key_value = os.environ.get(key_name_env) # Use .get for safety
if key_value: logger.info(f"API Key for {service_name} found in env var '{key_name_env}'.")
if not key_value: logger.warning(f"API Key for {service_name} (Key: {key_name_streamlit}/{key_name_env}) NOT FOUND. Service may be disabled.")
return key_value
# --- Service Initialization (Singleton Pattern using Session State) ---
if 'services_initialized_flag' not in st.session_state: # Renamed flag for clarity
logger.info("APP_INIT: Initializing services and API keys...")
# Load all keys first
st.session_state.API_KEY_GEMINI = load_api_key("GEMINI_API_KEY", "GEMINI_API_KEY", "Gemini")
st.session_state.API_KEY_OPENAI = load_api_key("OPENAI_API_KEY", "OPENAI_API_KEY", "OpenAI/DALL-E")
st.session_state.API_KEY_ELEVENLABS = load_api_key("ELEVENLABS_API_KEY", "ELEVENLABS_API_KEY", "ElevenLabs")
st.session_state.API_KEY_PEXELS = load_api_key("PEXELS_API_KEY", "PEXELS_API_KEY", "Pexels")
st.session_state.CONFIG_ELEVENLABS_VOICE_ID = load_api_key("ELEVENLABS_VOICE_ID", "ELEVENLABS_VOICE_ID", "ElevenLabs Voice ID")
st.session_state.API_KEY_RUNWAYML = load_api_key("RUNWAY_API_KEY", "RUNWAY_API_KEY", "RunwayML")
# Critical services check
if not st.session_state.API_KEY_GEMINI:
st.error("CRITICAL FAILURE: Gemini API Key is missing! Application cannot function without it."); logger.critical("Gemini API Key missing."); st.stop()
# Initialize Gemini Handler
try:
st.session_state.gemini_service_handler = GeminiHandler(api_key=st.session_state.API_KEY_GEMINI) # Renamed
logger.info("APP_INIT: GeminiHandler component initialized successfully.")
except Exception as e_gem_init:
st.error(f"CRITICAL FAILURE: Failed to initialize GeminiHandler: {e_gem_init}"); logger.critical(f"GeminiHandler init failed: {e_gem_init}", exc_info=True); st.stop()
# Initialize Visual Engine
try:
el_default_voice = "Rachel" # Hardcoded fallback default
el_resolved_voice_id = st.session_state.CONFIG_ELEVENLABS_VOICE_ID or el_default_voice
# <<< THIS IS WHERE VisualEngine IS INSTANTIATED >>>
st.session_state.visual_content_engine = VisualEngine( # Renamed
output_dir="temp_cinegen_media",
default_elevenlabs_voice_id=el_resolved_voice_id
)
# Set keys for individual services within VisualEngine
st.session_state.visual_content_engine.set_openai_api_key(st.session_state.API_KEY_OPENAI)
st.session_state.visual_content_engine.set_elevenlabs_api_key(st.session_state.API_KEY_ELEVENLABS, voice_id_from_secret=st.session_state.CONFIG_ELEVENLABS_VOICE_ID)
st.session_state.visual_content_engine.set_pexels_api_key(st.session_state.API_KEY_PEXELS)
st.session_state.visual_content_engine.set_runway_api_key(st.session_state.API_KEY_RUNWAYML)
logger.info("APP_INIT: VisualEngine component initialized and all sub-service API keys configured.")
except Exception as e_vis_eng_init:
st.error(f"CRITICAL FAILURE: Failed to initialize VisualEngine: {e_vis_eng_init}"); logger.critical(f"VisualEngine init or key setting failed: {e_vis_eng_init}", exc_info=True); st.warning("VisualEngine encountered a critical setup issue. Many features will be disabled or will fail."); st.stop()
st.session_state.services_initialized_flag = True
logger.info("APP_INIT: All services initialized successfully.")
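# API keys and service handles live in st.session_state so they persist across Streamlit reruns;
# the 'services_initialized_flag' guard ensures this block runs only once per session.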
# Initialize project-specific session state variables
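# Per-project state: the Gemini scene treatment, per-scene generation prompts, per-scene asset info
# dicts, the assembled video path, character definitions, global style keywords, and the narration audio path / script text.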
for project_ss_key, project_ss_default_val in [
('project_story_treatment_scenes', []),
('project_scene_generation_prompts_list', []),
('project_generated_assets_info_list', []),
('project_final_video_path', None),
('project_character_definitions_map', {}),
('project_global_style_keywords_str', ""),
('project_overall_narration_audio_path', None),
('project_narration_script_text', "")
]:
if project_ss_key not in st.session_state: st.session_state[project_ss_key] = project_ss_default_val
def initialize_new_project_data_in_session():
st.session_state.project_story_treatment_scenes = []
st.session_state.project_scene_generation_prompts_list = []
st.session_state.project_generated_assets_info_list = []
st.session_state.project_final_video_path = None
st.session_state.project_overall_narration_audio_path = None
st.session_state.project_narration_script_text = ""
logger.info("PROJECT_DATA: New project data initialized (treatment, assets, narration). Character defs and global styles persist.")
# --- Asset Generation Wrapper ---
def generate_asset_for_scene_in_app(scene_idx_num, scene_data_obj, asset_ver_num=1, user_asset_type_choice_ui="Auto (Director's Choice)"):
logger.debug(f"APP: generate_asset_for_scene_in_app called for scene index {scene_idx_num}, version {asset_ver_num}, user type: {user_asset_type_choice_ui}")
final_decision_generate_as_video = False
gemini_suggested_type = scene_data_obj.get('suggested_asset_type_감독', 'image').lower()
if user_asset_type_choice_ui == "Image": final_decision_generate_as_video = False
elif user_asset_type_choice_ui == "Video Clip": final_decision_generate_as_video = True
elif user_asset_type_choice_ui == "Auto (Director's Choice)": final_decision_generate_as_video = (gemini_suggested_type == "video_clip")
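# "Auto" defers to the asset type Gemini suggested in the treatment; an explicit "Image" or "Video Clip" choice from the UI always overrides that suggestion.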
logger.debug(f"APP: Final decision for asset type: {'Video' if final_decision_generate_as_video else 'Image'}")
prompt_for_base_img_gen = construct_dalle_prompt(scene_data_obj, st.session_state.project_character_definitions_map, st.session_state.project_global_style_keywords_str)
prompt_for_runway_motion = ""
if final_decision_generate_as_video:
prompt_for_runway_motion = construct_text_to_video_prompt_for_gen4(scene_data_obj, st.session_state.project_global_style_keywords_str)
if not prompt_for_runway_motion: prompt_for_runway_motion = scene_data_obj.get('video_clip_motion_description_감독', "subtle ambient cinematic motion"); logger.warning(f"S{scene_data_obj.get('scene_number', scene_idx_num+1)}: Empty motion prompt, using default for Runway.")
if not prompt_for_base_img_gen: logger.error(f"Base image prompt construction failed for S{scene_data_obj.get('scene_number', scene_idx_num+1)}"); return False
# Ensure session state lists are long enough
while len(st.session_state.project_scene_generation_prompts_list) <= scene_idx_num: st.session_state.project_scene_generation_prompts_list.append("")
while len(st.session_state.project_generated_assets_info_list) <= scene_idx_num: st.session_state.project_generated_assets_info_list.append(None)
st.session_state.project_scene_generation_prompts_list[scene_idx_num] = prompt_for_runway_motion if final_decision_generate_as_video else prompt_for_base_img_gen
filename_base_for_output_asset = f"scene_{scene_data_obj.get('scene_number', scene_idx_num+1)}_asset_v{asset_ver_num}"
duration_for_rwy_vid = scene_data_obj.get('video_clip_duration_estimate_secs_감독', scene_data_obj.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS))
if duration_for_rwy_vid <= 0 : duration_for_rwy_vid = DEFAULT_SCENE_DURATION_SECS
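# Guard against zero/negative duration estimates so the video clip request always has a usable length.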
generated_asset_info_dict = st.session_state.visual_content_engine.generate_scene_asset(
image_generation_prompt_text=prompt_for_base_img_gen,
motion_prompt_text_for_video=prompt_for_runway_motion,
scene_data_dict=scene_data_obj, # <<< CORRECTED KEYWORD ARGUMENT NAME
scene_identifier_fn_base=filename_base_for_output_asset,
generate_as_video_clip_flag=final_decision_generate_as_video,
runway_target_dur_val=duration_for_rwy_vid
)
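# The returned info dict is expected to carry 'path', 'type', 'error', 'error_message' and 'prompt_used'
# (see how it is consumed below); it is stored per scene for the storyboard UI.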
st.session_state.project_generated_assets_info_list[scene_idx_num] = generated_asset_info_dict
if generated_asset_info_dict and generated_asset_info_dict.get('prompt_used') and \
st.session_state.project_scene_generation_prompts_list[scene_idx_num] != generated_asset_info_dict['prompt_used']:
st.session_state.project_scene_generation_prompts_list[scene_idx_num] = generated_asset_info_dict['prompt_used']
if generated_asset_info_dict and not generated_asset_info_dict['error'] and generated_asset_info_dict.get('path') and os.path.exists(generated_asset_info_dict['path']):
logger.info(f"APP: Asset ({generated_asset_info_dict.get('type')}) generated for S{scene_data_obj.get('scene_number', scene_idx_num+1)}: {os.path.basename(generated_asset_info_dict['path'])}")
return True
else:
error_msg_from_asset_gen = generated_asset_info_dict.get('error_message', 'Unknown error') if generated_asset_info_dict else 'Asset result dictionary is None'
logger.warning(f"APP: Asset gen FAILED for S{scene_data_obj.get('scene_number', scene_idx_num+1)}. Type: {'Video' if final_decision_generate_as_video else 'Image'}. Err: {error_msg_from_asset_gen}")
current_prompt = st.session_state.project_scene_generation_prompts_list[scene_idx_num]
st.session_state.project_generated_assets_info_list[scene_idx_num] = {'path': None, 'type': 'none', 'error': True, 'error_message': error_msg_from_asset_gen, 'prompt_used': current_prompt}
return False
# --- Sidebar UI ---
with st.sidebar:
st.image("assets/logo.png", width=150) # Display logo
st.title("CineGen AI Ultra+")
st.markdown("### Creative Seed")
ui_user_idea = st.text_area("Core Story Idea / Theme:", "A lone wanderer searches for a mythical oasis in a vast, post-apocalyptic desert, haunted by mirages and mechanical scavengers.", height=100, key="ui_user_idea")
ui_genre = st.selectbox("Primary Genre:", ["Cyberpunk", "Sci-Fi", "Fantasy", "Noir", "Thriller", "Western", "Post-Apocalyptic", "Historical Drama", "Surreal"], index=6, key="ui_genre")
ui_mood = st.selectbox("Overall Mood:", ["Hopeful yet Desperate", "Mysterious & Eerie", "Gritty & Tense", "Epic & Awe-Inspiring", "Melancholy & Reflective", "Whimsical & Lighthearted"], index=0, key="ui_mood")
ui_num_scenes = st.slider("Number of Key Scenes:", 1, 10, 1, key="ui_num_scenes")
ui_creative_guidance_map = {"Standard Director": "standard", "Artistic Visionary": "more_artistic", "Experimental Storyteller": "experimental_narrative"}
ui_selected_guidance_key = st.selectbox("AI Creative Director Style:", options=list(ui_creative_guidance_map.keys()), key="ui_creative_guidance")
ui_actual_guidance = ui_creative_guidance_map[ui_selected_guidance_key]
if st.button("Generate Cinematic Treatment", type="primary", key="btn_generate_treatment", use_container_width=True):
initialize_new_project_data_in_session()
if not ui_user_idea.strip(): st.warning("Please provide a story idea.")
else:
with st.status("AI Director is envisioning your masterpiece...", expanded=True) as status_operation:
try:
status_operation.write("Phase 1: Gemini crafting cinematic treatment..."); logger.info("APP_MAIN_FLOW: Phase 1 - Treatment Gen.")
prompt_for_treatment = create_cinematic_treatment_prompt(ui_user_idea, ui_genre, ui_mood, ui_num_scenes, ui_actual_guidance)
list_of_raw_treatment_scenes = st.session_state.gemini_service_handler.generate_story_breakdown(prompt_for_treatment)
if not isinstance(list_of_raw_treatment_scenes, list) or not list_of_raw_treatment_scenes: raise ValueError("Gemini returned invalid scene list format.")
temp_processed_scenes = []
for scene_data_gemini in list_of_raw_treatment_scenes:
gemini_dur_est = scene_data_gemini.get('video_clip_duration_estimate_secs_감독', 0)
scene_data_gemini['user_scene_duration_secs'] = gemini_dur_est if gemini_dur_est > 0 else DEFAULT_SCENE_DURATION_SECS
scene_data_gemini['user_shot_type'] = scene_data_gemini.get('PROACTIVE_camera_work_감독', DEFAULT_SHOT_TYPE)
scene_data_gemini['user_selected_asset_type'] = "Auto (Director's Choice)"
temp_processed_scenes.append(scene_data_gemini)
st.session_state.project_story_treatment_scenes = temp_processed_scenes
num_scenes_generated = len(st.session_state.project_story_treatment_scenes)
st.session_state.project_scene_generation_prompts_list = [""]*num_scenes_generated
st.session_state.project_generated_assets_info_list = [None]*num_scenes_generated
logger.info(f"APP_MAIN_FLOW: Phase 1 complete. {num_scenes_generated} scenes."); status_operation.update(label="Treatment complete! ✅ Generating visual assets...", state="running")
status_operation.write("Phase 2: Creating visual assets..."); logger.info("APP_MAIN_FLOW: Phase 2 - Asset Gen.")
num_successful_assets = 0
for idx, scene_item_data in enumerate(st.session_state.project_story_treatment_scenes):
scene_num_log = scene_item_data.get('scene_number', idx+1)
status_operation.write(f" Processing asset for Scene {scene_num_log}..."); logger.info(f" APP_MAIN_FLOW: Processing asset for Scene {scene_num_log}.")
if generate_asset_for_scene_in_app(idx, scene_item_data, asset_ver_num=1): num_successful_assets += 1
status_label_p2 = "Visual assets generated! "; next_state_p2 = "running"
if num_successful_assets == 0 and num_scenes_generated > 0: logger.error("APP_MAIN_FLOW: Asset gen FAILED for all scenes."); status_label_p2 = "Asset gen FAILED for all scenes."; next_state_p2="error"; status_operation.update(label=status_label_p2, state=next_state_p2, expanded=True); st.stop()
elif num_successful_assets < num_scenes_generated: logger.warning(f"APP_MAIN_FLOW: Assets partially generated ({num_successful_assets}/{num_scenes_generated})."); status_label_p2 = f"Assets partially done ({num_successful_assets}/{num_scenes_generated}). "
status_operation.update(label=f"{status_label_p2}Generating narration...", state=next_state_p2)
if next_state_p2 == "error": st.stop()
status_operation.write("Phase 3: Generating narration script..."); logger.info("APP_MAIN_FLOW: Phase 3 - Narration Script.")
voice_style_narr = st.session_state.get("selected_voice_style_for_generation", "cinematic_trailer")
prompt_narr_script = create_narration_script_prompt_enhanced(st.session_state.project_story_treatment_scenes, ui_mood, ui_genre, voice_style_narr)
st.session_state.project_narration_script_text = st.session_state.gemini_service_handler.generate_image_prompt(prompt_narr_script) # generate_image_prompt for general text
logger.info("APP_MAIN_FLOW: Narration script generated."); status_operation.update(label="Narration script ready! Synthesizing voice...", state="running")
status_operation.write("Phase 4: Synthesizing voice (ElevenLabs)..."); logger.info("APP_MAIN_FLOW: Phase 4 - Voice Synthesis.")
st.session_state.project_overall_narration_audio_path = st.session_state.visual_content_engine.generate_narration_audio(st.session_state.project_narration_script_text)
final_status_msg = "All initial components ready! Review storyboard."; final_op_status_val = "complete"
if not st.session_state.project_overall_narration_audio_path: final_status_msg = f"{status_label_p2}Storyboard ready (Voiceover failed/skipped)."; logger.warning("APP_MAIN_FLOW: Narration audio failed/skipped.")
else: logger.info("APP_MAIN_FLOW: Narration audio generated.")
status_operation.update(label=final_status_msg, state=final_op_status_val, expanded=False)
except ValueError as e_val: logger.error(f"APP_MAIN_FLOW: ValueError: {e_val}", exc_info=True); status_operation.update(label=f"Data/Response Error: {e_val}", state="error", expanded=True);
except TypeError as e_type: logger.error(f"APP_MAIN_FLOW: TypeError: {e_type}", exc_info=True); status_operation.update(label=f"Type Error: {e_type}", state="error", expanded=True);
except Exception as e_gen: logger.error(f"APP_MAIN_FLOW: Unhandled Exception: {e_gen}", exc_info=True); status_operation.update(label=f"Unexpected Error: {e_gen}", state="error", expanded=True);
with st.expander("Define Characters", expanded=False):
# (Keep as before - unique keys for inputs)
sb_char_name_input = st.text_input("Character Name", key="sb_char_name_unique_2"); sb_char_desc_input = st.text_area("Visual Description", key="sb_char_desc_unique_2", height=100, placeholder="e.g., Jax: rugged astronaut...")
if st.button("Save Character", key="sb_add_char_unique_2"):
if sb_char_name_input and sb_char_desc_input: st.session_state.project_character_definitions_map[sb_char_name_input.strip().lower()] = sb_char_desc_input.strip(); st.success(f"Character '{sb_char_name_input.strip()}' saved.")
else: st.warning("Both character name and description are required.")
if st.session_state.project_character_definitions_map: st.caption("Defined Characters:"); [st.markdown(f"**{char_key.title()}:** _{char_val}_") for char_key,char_val in st.session_state.project_character_definitions_map.items()]
with st.expander("Global Style Overrides", expanded=False):
# (Keep as before - unique keys for inputs)
sb_style_presets_map = { "Default (Director's Choice)": "", "Hyper-Realistic Gritty Noir": "hyper-realistic gritty neo-noir, extreme detail...", "Surreal Dreamscape Fantasy": "surreal dreamscape, epic fantasy elements...", "Vintage Analog Sci-Fi": "70s/80s analog sci-fi film aesthetic..."}
sb_selected_preset_key_style = st.selectbox("Base Style Preset:", options=list(sb_style_presets_map.keys()), key="sb_style_preset_unique_2")
sb_custom_keywords_style = st.text_area("Additional Custom Style Keywords:", key="sb_custom_style_unique_2", height=80, placeholder="e.g., 'Dutch angle', 'lens flare'")
sb_current_global_style_display = st.session_state.project_global_style_keywords_str
if st.button("Apply Global Styles", key="sb_apply_styles_unique_2"):
final_style_str_global = sb_style_presets_map[sb_selected_preset_key_style];
if sb_custom_keywords_style.strip(): final_style_str_global = f"{final_style_str_global}, {sb_custom_keywords_style.strip()}" if final_style_str_global else sb_custom_keywords_style.strip()
st.session_state.project_global_style_keywords_str = final_style_str_global.strip(); sb_current_global_style_display = final_style_str_global.strip()
if sb_current_global_style_display: st.success("Global visual styles applied!")
else: st.info("Global visual style additions cleared.")
if sb_current_global_style_display: st.caption(f"Active global styles: \"{sb_current_global_style_display}\"")
with st.expander("Voice & Narration Style", expanded=False):
# (Keep as before - unique keys for inputs)
sb_engine_default_voice_val_narr = "Rachel"
if hasattr(st.session_state, 'visual_content_engine') and st.session_state.visual_content_engine: sb_engine_default_voice_val_narr = st.session_state.visual_content_engine.elevenlabs_voice_id
sb_user_voice_id_input_narr = st.text_input("ElevenLabs Voice ID (override):", value=sb_engine_default_voice_val_narr, key="sb_el_voice_id_override_unique_2", help=f"Defaulting to '{sb_engine_default_voice_val_narr}'.")
sb_narration_styles_map_narr = {"Cinematic Trailer": "cinematic_trailer", "Neutral Documentary": "documentary_neutral", "Character Introspection": "introspective_character"}
sb_selected_narration_style_key_narr = st.selectbox("Narration Script Style:", list(sb_narration_styles_map_narr.keys()), key="sb_narr_style_sel_unique_2", index=0)
if st.button("Set Narrator Voice & Style", key="sb_set_voice_btn_unique_2"):
final_voice_id_to_use_el_narr = sb_user_voice_id_input_narr.strip() or st.session_state.get("CONFIG_ELEVENLABS_VOICE_ID", "Rachel")
if hasattr(st.session_state, 'visual_content_engine'): st.session_state.visual_content_engine.elevenlabs_voice_id = final_voice_id_to_use_el_narr
st.session_state.selected_voice_style_for_generation = sb_narration_styles_map_narr[sb_selected_narration_style_key_narr]
st.success(f"Narrator Voice ID set to: {final_voice_id_to_use_el_narr}. Script Style: {sb_selected_narration_style_key_narr}")
logger.info(f"User updated 11L Voice ID: {final_voice_id_to_use_el_narr}, Narration Script Style: {sb_selected_narration_style_key_narr}")
# --- Main Content Area ---
st.header("🎬 Cinematic Storyboard & Treatment")
if st.session_state.project_narration_script_text:
with st.expander("View Full Narration Script", expanded=False): st.markdown(f"> _{st.session_state.project_narration_script_text}_")
if not st.session_state.project_story_treatment_scenes: st.info("Use the sidebar to generate your cinematic treatment.")
else:
for i_loop_main_display, scene_item_for_display in enumerate(st.session_state.project_story_treatment_scenes):
scene_num_for_display = scene_item_for_display.get('scene_number', i_loop_main_display + 1)
scene_title_for_display_main = scene_item_for_display.get('scene_title', 'Untitled Scene')
key_base_main_area_widgets = f"s{scene_num_for_display}_main_widgets_{i_loop_main_display}"
if "director_note" in scene_item_for_display and scene_item_for_display['director_note']: st.info(f"🎬 Director Note S{scene_num_for_display}: {scene_item_for_display['director_note']}")
st.subheader(f"SCENE {scene_num_for_display}: {scene_title_for_display_main.upper()}"); main_col_treatment_area, main_col_visual_area = st.columns([0.45, 0.55])
with main_col_treatment_area:
with st.expander("π Scene Treatment & Controls", expanded=True):
# (Display textual scene details - as before)
st.markdown(f"**Beat:** {scene_item_for_display.get('emotional_beat', 'N/A')}"); st.markdown(f"**Setting:** {scene_item_for_display.get('setting_description', 'N/A')}"); st.markdown(f"**Chars:** {', '.join(scene_item_for_display.get('characters_involved', ['N/A']))}"); st.markdown(f"**Focus Moment:** _{scene_item_for_display.get('character_focus_moment', 'N/A')}_"); st.markdown(f"**Plot Beat:** {scene_item_for_display.get('key_plot_beat', 'N/A')}"); st.markdown(f"**Dialogue Hook:** `\"{scene_item_for_display.get('suggested_dialogue_hook', '...')}\"`"); st.markdown("---"); st.markdown(f"**Dir. Visual Style:** _{scene_item_for_display.get('PROACTIVE_visual_style_감독', 'N/A')}_"); st.markdown(f"**Dir. Camera:** _{scene_item_for_display.get('PROACTIVE_camera_work_감독', 'N/A')}_"); st.markdown(f"**Dir. Sound:** _{scene_item_for_display.get('PROACTIVE_sound_design_감독', 'N/A')}_"); st.markdown("---")
st.markdown("##### Shot, Pacing & Asset Controls")
# (Shot Type, Scene Duration, Asset Type Override selectboxes - as before, using unique keys)
ui_shot_type_current = st.session_state.project_story_treatment_scenes[i_loop_main_display].get('user_shot_type', DEFAULT_SHOT_TYPE)
try: ui_shot_type_idx_val = SHOT_TYPES_OPTIONS.index(ui_shot_type_current)
except ValueError: ui_shot_type_idx_val = SHOT_TYPES_OPTIONS.index(DEFAULT_SHOT_TYPE)
ui_shot_type_new = st.selectbox("Dominant Shot Type:", options=SHOT_TYPES_OPTIONS, index=ui_shot_type_idx_val, key=f"shot_type_{key_base_main_area_widgets}")
if ui_shot_type_new != ui_shot_type_current: st.session_state.project_story_treatment_scenes[i_loop_main_display]['user_shot_type'] = ui_shot_type_new
ui_duration_current = st.session_state.project_story_treatment_scenes[i_loop_main_display].get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)
ui_duration_new = st.number_input("Scene Duration (seconds):", min_value=1, max_value=300, value=ui_duration_current, step=1, key=f"duration_{key_base_main_area_widgets}")
if ui_duration_new != ui_duration_current: st.session_state.project_story_treatment_scenes[i_loop_main_display]['user_scene_duration_secs'] = ui_duration_new
ui_asset_type_override_current = st.session_state.project_story_treatment_scenes[i_loop_main_display].get('user_selected_asset_type', "Auto (Director's Choice)")
try: ui_asset_type_idx_val = ASSET_TYPE_OPTIONS.index(ui_asset_type_override_current)
except ValueError: ui_asset_type_idx_val = 0
ui_asset_type_override_new = st.selectbox("Asset Type Override:", ASSET_TYPE_OPTIONS, index=ui_asset_type_idx_val, key=f"asset_type_{key_base_main_area_widgets}", help="Choose asset type. 'Auto' uses AI suggestion.")
if ui_asset_type_override_new != ui_asset_type_override_current: st.session_state.project_story_treatment_scenes[i_loop_main_display]['user_selected_asset_type'] = ui_asset_type_override_new
st.markdown("---")
prompt_for_asset_to_display = st.session_state.project_scene_generation_prompts_list[i_loop_main_display] if i_loop_main_display < len(st.session_state.project_scene_generation_prompts_list) else None
if prompt_for_asset_to_display:
with st.popover("👁️ View Asset Generation Prompt"): st.markdown(f"**Prompt used for current asset:**"); st.code(prompt_for_asset_to_display, language='text')
pexels_query_to_display = scene_item_for_display.get('pexels_search_query_감독', None)
if pexels_query_to_display: st.caption(f"Pexels Fallback: `{pexels_query_to_display}`")
with main_col_visual_area:
# (Display logic for different asset types - as before)
current_asset_info_to_display = st.session_state.project_generated_assets_info_list[i_loop_main_display] if i_loop_main_display < len(st.session_state.project_generated_assets_info_list) else None
if current_asset_info_to_display and not current_asset_info_to_display.get('error') and current_asset_info_to_display.get('path') and os.path.exists(current_asset_info_to_display['path']):
path_of_asset_for_display = current_asset_info_to_display['path']; type_of_asset_for_display = current_asset_info_to_display.get('type', 'image')
if type_of_asset_for_display == 'image': st.image(path_of_asset_for_display, caption=f"S{scene_num_for_display} ({type_of_asset_for_display}): {scene_title_for_display_main}")
elif type_of_asset_for_display == 'video':
try:
with open(path_of_asset_for_display, 'rb') as vid_file_obj_read: video_bytes_for_st_video = vid_file_obj_read.read()
st.video(video_bytes_for_st_video, format="video/mp4", start_time=0); st.caption(f"S{scene_num_for_display} ({type_of_asset_for_display}): {scene_title_for_display_main}")
except Exception as e_vid_display_main_loop: st.error(f"Error displaying video {path_of_asset_for_display}: {e_vid_display_main_loop}"); logger.error(f"Error displaying video: {e_vid_display_main_loop}", exc_info=True)
else: st.warning(f"Unknown asset type '{type_of_asset_for_display}' for S{scene_num_for_display}.")
else:
if st.session_state.project_story_treatment_scenes:
error_message_for_asset_display = current_asset_info_to_display.get('error_message', 'Visual pending or failed.') if current_asset_info_to_display else 'Visual pending or failed.'
st.caption(error_message_for_asset_display)
with st.popover(f"✏️ Edit S{scene_num_for_display} Treatment"):
# (Treatment Regeneration Popover - using corrected generate_asset_for_scene_in_app call)
feedback_for_treatment_regen_input = st.text_area("Changes to treatment:", key=f"treat_fb_input_pop_{key_base_main_area_widgets}", height=150)
if st.button(f"Update S{scene_num_for_display} Treatment", key=f"regen_treat_btn_pop_{key_base_main_area_widgets}"):
if feedback_for_treatment_regen_input:
with st.status(f"Updating S{scene_num_for_display} Treatment & Asset...", expanded=True) as status_treatment_update_op_pop:
preserved_user_shot_type_pop = st.session_state.project_story_treatment_scenes[i_loop_main_display]['user_shot_type']
preserved_user_duration_pop = st.session_state.project_story_treatment_scenes[i_loop_main_display]['user_scene_duration_secs']
preserved_user_asset_type_pop = st.session_state.project_story_treatment_scenes[i_loop_main_display]['user_selected_asset_type']
prompt_for_gemini_scene_regen_pop = create_scene_regeneration_prompt(scene_item_for_display, feedback_for_treatment_regen_input, st.session_state.project_story_treatment_scenes)
try:
updated_scene_data_from_gemini_pop = st.session_state.gemini_service_handler.regenerate_scene_script_details(prompt_for_gemini_scene_regen_pop)
final_merged_updated_scene_data_pop = {**updated_scene_data_from_gemini_pop}
final_merged_updated_scene_data_pop['user_shot_type'] = preserved_user_shot_type_pop; final_merged_updated_scene_data_pop['user_scene_duration_secs'] = preserved_user_duration_pop; final_merged_updated_scene_data_pop['user_selected_asset_type'] = preserved_user_asset_type_pop
st.session_state.project_story_treatment_scenes[i_loop_main_display] = final_merged_updated_scene_data_pop
status_treatment_update_op_pop.update(label="Treatment updated! Regenerating asset...", state="running")
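# Version the regenerated asset: parse the trailing '_vN' from the previous filename and increment;
# fall back to v2 on any parse failure (v1 when no prior asset exists).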
version_for_regenerated_asset_pop = 1
if current_asset_info_to_display and current_asset_info_to_display.get('path') and os.path.exists(current_asset_info_to_display['path']):
try: base_fn_asset_regen_pop,_=os.path.splitext(os.path.basename(current_asset_info_to_display['path'])); version_for_regenerated_asset_pop = int(base_fn_asset_regen_pop.split('_v')[-1])+1 if '_v' in base_fn_asset_regen_pop else 2
except: version_for_regenerated_asset_pop = 2
if generate_asset_for_scene_in_app(i_loop_main_display, final_merged_updated_scene_data_pop, asset_ver_num=version_for_regenerated_asset_pop, user_asset_type_choice_ui=preserved_user_asset_type_pop): status_treatment_update_op_pop.update(label="Treatment & Asset Updated!", state="complete", expanded=False)
else: status_treatment_update_op_pop.update(label="Treatment updated, but asset regeneration failed.", state="complete", expanded=False)
st.rerun()
except Exception as e_treatment_regen_op_pop: status_treatment_update_op_pop.update(label=f"Error during treatment regen: {e_treatment_regen_op_pop}", state="error"); logger.error(f"Scene treatment regeneration error: {e_treatment_regen_op_pop}", exc_info=True)
else: st.warning("Please provide feedback to update the treatment.")
with st.popover(f"🎨 Edit S{scene_num_for_display} Visual Prompt/Asset"):
# (Visual Asset Regeneration Popover - using corrected generate_asset_for_scene_in_app call)
prompt_to_edit_display_pop = st.session_state.project_scene_generation_prompts_list[i_loop_main_display] if i_loop_main_display < len(st.session_state.project_scene_generation_prompts_list) else "No prompt available."
st.caption("Current Asset Generation Prompt:"); st.code(prompt_to_edit_display_pop, language='text')
feedback_for_visual_asset_regen_input = st.text_area("Describe changes for the visual asset:", key=f"visual_fb_input_pop_{key_base_main_area_widgets}", height=150)
if st.button(f"Update S{scene_num_for_display} Asset", key=f"regen_visual_btn_pop_{key_base_main_area_widgets}"):
if feedback_for_visual_asset_regen_input:
with st.status(f"Refining prompt & regenerating asset for S{scene_num_for_display}...", expanded=True) as status_visual_asset_regen_op_pop:
user_selected_asset_type_for_regen_pop = st.session_state.project_story_treatment_scenes[i_loop_main_display]['user_selected_asset_type']
is_video_type_for_regen_pop = (user_selected_asset_type_for_regen_pop == "Video Clip") or (user_selected_asset_type_for_regen_pop == "Auto (Director's Choice)" and scene_item_for_display.get('suggested_asset_type_감독') == 'video_clip')
newly_constructed_asset_prompt_regen_pop = ""
if not is_video_type_for_regen_pop: # IMAGE
gemini_refinement_prompt_viz_pop = create_visual_regeneration_prompt(prompt_to_edit_display_pop, feedback_for_visual_asset_regen_input, scene_item_for_display, st.session_state.project_character_definitions_map, st.session_state.project_global_style_keywords_str)
try: newly_constructed_asset_prompt_regen_pop = st.session_state.gemini_service_handler.refine_image_prompt_from_feedback(gemini_refinement_prompt_viz_pop); st.session_state.project_scene_generation_prompts_list[i_loop_main_display] = newly_constructed_asset_prompt_regen_pop; status_visual_asset_regen_op_pop.update(label="Image prompt refined! Regenerating asset...", state="running")
except Exception as e_gemini_refine_viz_pop: status_visual_asset_regen_op_pop.update(label=f"Error refining prompt: {e_gemini_refine_viz_pop}", state="error"); logger.error(f"Visual prompt refinement (Gemini) error: {e_gemini_refine_viz_pop}", exc_info=True); continue
else: # VIDEO
logger.info(f"Reconstructing video motion prompt for S{scene_num_for_display}. Feedback (indirect): {feedback_for_visual_asset_regen_input}")
newly_constructed_asset_prompt_regen_pop = construct_text_to_video_prompt_for_gen4(scene_item_for_display, st.session_state.project_global_style_keywords_str); st.session_state.project_scene_generation_prompts_list[i_loop_main_display] = newly_constructed_asset_prompt_regen_pop; status_visual_asset_regen_op_pop.update(label="Video prompt reconstructed! Regenerating asset...", state="running")
if not newly_constructed_asset_prompt_regen_pop: status_visual_asset_regen_op_pop.update(label="Prompt construction/refinement failed.", state="error"); continue
version_for_regenerated_visual_asset_pop = 1
if current_asset_info_to_display and current_asset_info_to_display.get('path') and os.path.exists(current_asset_info_to_display['path']):
try: base_fn_viz_regen_pop,_=os.path.splitext(os.path.basename(current_asset_info_to_display['path'])); version_for_regenerated_visual_asset_pop = int(base_fn_viz_regen_pop.split('_v')[-1])+1 if '_v' in base_fn_viz_regen_pop else 2
except: version_for_regenerated_visual_asset_pop = 2
if generate_asset_for_scene_in_app(i_loop_main_display, st.session_state.project_story_treatment_scenes[i_loop_main_display], asset_ver_num=version_for_regenerated_visual_asset_pop, user_asset_type_choice_ui=user_selected_asset_type_for_regen_pop): status_visual_asset_regen_op_pop.update(label="Asset Updated!", state="complete", expanded=False)
else: status_visual_asset_regen_op_pop.update(label="Prompt updated, asset regen failed.", state="complete", expanded=False)
st.rerun()
else: st.warning("Please provide feedback for visual asset regeneration.")
st.markdown("---")
# Video Assembly Button Logic
if st.session_state.project_story_treatment_scenes and any(asset_info_item_vid_assembly and not asset_info_item_vid_assembly.get('error') and asset_info_item_vid_assembly.get('path') for asset_info_item_vid_assembly in st.session_state.project_generated_assets_info_list if asset_info_item_vid_assembly is not None):
if st.button("🎬 Assemble Narrated Cinematic Animatic", key="assemble_video_main_area_btn_final_unique", type="primary", use_container_width=True): # Unique key
with st.status("Assembling Ultra Animatic (this may take a few minutes)...", expanded=True) as status_video_assembly_final_op:
assets_for_final_video_assembly_list_main = []
for i_vid_assembly_main_loop, scene_data_for_vid_assembly_main in enumerate(st.session_state.project_story_treatment_scenes):
asset_info_current_scene_for_vid_main = st.session_state.project_generated_assets_info_list[i_vid_assembly_main_loop] if i_vid_assembly_main_loop < len(st.session_state.project_generated_assets_info_list) else None
if asset_info_current_scene_for_vid_main and not asset_info_current_scene_for_vid_main.get('error') and asset_info_current_scene_for_vid_main.get('path') and os.path.exists(asset_info_current_scene_for_vid_main['path']):
assets_for_final_video_assembly_list_main.append({'path': asset_info_current_scene_for_vid_main['path'], 'type': asset_info_current_scene_for_vid_main.get('type', 'image'), 'scene_num': scene_data_for_vid_assembly_main.get('scene_number', i_vid_assembly_main_loop + 1), 'key_action': scene_data_for_vid_assembly_main.get('key_plot_beat', ''), 'duration': scene_data_for_vid_assembly_main.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)})
status_video_assembly_final_op.write(f"Adding S{scene_data_for_vid_assembly_main.get('scene_number', i_vid_assembly_main_loop + 1)} ({asset_info_current_scene_for_vid_main.get('type')}).")
else: logger.warning(f"Skipping S{scene_data_for_vid_assembly_main.get('scene_number', i_vid_assembly_main_loop+1)} for video: No valid asset.")
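# Only scenes with a valid, existing asset file are handed to the assembler; the rest are skipped
# with a warning so a partial animatic can still be produced.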
if assets_for_final_video_assembly_list_main:
status_video_assembly_final_op.write("Calling video engine..."); logger.info("APP: Calling visual_engine.assemble_animatic_from_assets")
st.session_state.project_final_video_path = st.session_state.visual_content_engine.assemble_animatic_from_assets(asset_data_list=assets_for_final_video_assembly_list_main, overall_narration_path=st.session_state.project_overall_narration_audio_path, output_filename="cinegen_ultra_animatic.mp4", fps=24)
if st.session_state.project_final_video_path and os.path.exists(st.session_state.project_final_video_path): status_video_assembly_final_op.update(label="Ultra animatic assembled!", state="complete", expanded=False); st.balloons()
else: status_video_assembly_final_op.update(label="Video assembly failed. Check logs.", state="error", expanded=True); logger.error("APP: Video assembly returned None or file does not exist.")
else: status_video_assembly_final_op.update(label="No valid assets for video assembly.", state="error", expanded=True); logger.warning("APP: No valid assets found for video assembly.")
elif st.session_state.project_story_treatment_scenes: st.info("Generate visual assets for your scenes before attempting to assemble the animatic.")
if st.session_state.project_final_video_path and os.path.exists(st.session_state.project_final_video_path):
st.header("🎬 Generated Cinematic Animatic")
try:
with open(st.session_state.project_final_video_path, 'rb') as final_video_file_obj_display: final_video_bytes_for_display = final_video_file_obj_display.read()
st.video(final_video_bytes_for_display, format="video/mp4")
st.download_button(label="Download Ultra Animatic", data=final_video_bytes_for_display, file_name=os.path.basename(st.session_state.project_final_video_path), mime="video/mp4", use_container_width=True, key="download_video_main_area_btn_final_unique" )
except Exception as e_final_video_display_op_main: st.error(f"Error displaying final animatic video: {e_final_video_display_op_main}"); logger.error(f"Error displaying final animatic video: {e_final_video_display_op_main}", exc_info=True)
st.sidebar.markdown("---"); st.sidebar.caption("CineGen AI Ultra+ | Visionary Cinematic Pre-Production")