mgbam committed on
Commit
ed24a71
·
verified ·
1 Parent(s): 96a2b41

Update app.py

Files changed (1)
  1. app.py +167 -194
app.py CHANGED
@@ -6,45 +6,34 @@ import logging
6
  # --- Streamlit PermissionError Mitigation Attempts ---
7
  if "STREAMLIT_CLIENT_GATHER_USAGE_STATS" not in os.environ:
8
  os.environ["STREAMLIT_CLIENT_GATHER_USAGE_STATS"] = "false"
9
- # print("INFO: app.py - Disabled Streamlit client usage stats gathering via env var.")
10
- if "STREAMLIT_BROWSER_GATHERUSAGESTATS" not in os.environ: # For newer versions
11
  os.environ["STREAMLIT_BROWSER_GATHERUSAGESTATS"] = "false"
12
- # print("INFO: app.py - Set STREAMLIT_BROWSER_GATHERUSAGESTATS to false.")
13
- streamlit_home_path_app = "/app/.streamlit_cai_config_v2" # Changed name slightly just in case
14
  if "STREAMLIT_HOME" not in os.environ and os.getcwd().startswith("/app"):
15
  os.environ["STREAMLIT_HOME"] = streamlit_home_path_app
16
- try:
17
- os.makedirs(streamlit_home_path_app, exist_ok=True)
18
- # print(f"INFO: app.py - Set STREAMLIT_HOME to: {streamlit_home_path_app}")
19
- except Exception as e_mkdir_sh:
20
- print(f"WARNING: app.py - Could not create STREAMLIT_HOME '{streamlit_home_path_app}': {e_mkdir_sh}")
21
 
22
  from core.gemini_handler import GeminiHandler
23
  from core.visual_engine import VisualEngine
24
  from core.prompt_engineering import (
25
- create_cinematic_treatment_prompt,
26
- construct_dalle_prompt,
27
- construct_text_to_video_prompt_for_gen4,
28
- create_narration_script_prompt_enhanced,
29
- create_scene_regeneration_prompt,
30
- create_visual_regeneration_prompt
31
  )
32
 
33
  st.set_page_config(page_title="CineGen AI Ultra+", layout="wide", initial_sidebar_state="expanded")
34
- logging.basicConfig(
35
- level=logging.DEBUG,
36
- format='%(asctime)s - %(name)s [%(levelname)s] - %(message)s (%(module)s.%(funcName)s:%(lineno)d)'
37
- )
38
  logger = logging.getLogger(__name__)
39
 
40
  SHOT_TYPES_OPTIONS = [ "Director's Choice", "Establishing Shot", "Long Shot", "Full Shot", "Medium Long Shot (Cowboy)", "Medium Shot", "Medium Close-up", "Close-up", "Extreme Close-up", "Point of View (POV)", "Over the Shoulder", "Tracking Shot", "Dolly Zoom", "Crane Shot", "Aerial Shot", "Static Shot", "Dutch Angle", "Whip Pan"]
41
  DEFAULT_SCENE_DURATION_SECS = 5; DEFAULT_SHOT_TYPE = "Director's Choice"; ASSET_TYPE_OPTIONS = ["Auto (Director's Choice)", "Image", "Video Clip"]
42
 
43
- def load_api_key(key_name_st, key_name_e, service_n): # Renamed for clarity
44
  key_val = None; secrets_avail = hasattr(st, 'secrets')
45
  try:
46
  if secrets_avail and key_name_st in st.secrets: key_val = st.secrets.get(key_name_st);
47
- if key_val: logger.info(f"API Key for {service_n} found in Streamlit secrets.")
48
  except Exception as e: logger.warning(f"No st.secrets for {key_name_st} ({service_n}): {e}")
49
  if not key_val and key_name_e in os.environ: key_val = os.environ.get(key_name_e);
50
  if key_val: logger.info(f"API Key for {service_n} found in env var '{key_name_e}'.")
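The `load_api_key` helper in this hunk checks `st.secrets` first and only then falls back to an environment variable. A minimal standalone sketch of that lookup order, assuming the same naming convention (the `DEMO_API_KEY` name below is illustrative, not a key this app uses):

```python
import os
import logging

import streamlit as st

logger = logging.getLogger(__name__)

def load_key_with_fallback(secret_name, env_name, service_label):
    """Return an API key from st.secrets if present, else from os.environ, else None."""
    value = None
    try:
        # st.secrets can raise if no secrets file exists, so guard the lookup.
        if hasattr(st, "secrets") and secret_name in st.secrets:
            value = st.secrets.get(secret_name)
            if value:
                logger.info("Key for %s found in Streamlit secrets.", service_label)
    except Exception as exc:
        logger.warning("No st.secrets entry for %s (%s): %s", secret_name, service_label, exc)
    if not value and env_name in os.environ:
        value = os.environ.get(env_name)
        if value:
            logger.info("Key for %s found in env var '%s'.", service_label, env_name)
    return value

# Illustrative call only; "DEMO_API_KEY" is not a key used by this app.
demo_key = load_key_with_fallback("DEMO_API_KEY", "DEMO_API_KEY", "Demo service")
```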
@@ -53,15 +42,17 @@ def load_api_key(key_name_st, key_name_e, service_n): # Renamed for clarity
53
 
54
  if 'services_initialized_flag' not in st.session_state:
55
  logger.info("APP_INIT: Initializing services and API keys...")
 
56
  st.session_state.API_KEY_GEMINI = load_api_key("GEMINI_API_KEY", "GEMINI_API_KEY", "Gemini")
57
  st.session_state.API_KEY_OPENAI = load_api_key("OPENAI_API_KEY", "OPENAI_API_KEY", "OpenAI/DALL-E")
58
  st.session_state.API_KEY_ELEVENLABS = load_api_key("ELEVENLABS_API_KEY", "ELEVENLABS_API_KEY", "ElevenLabs")
59
  st.session_state.API_KEY_PEXELS = load_api_key("PEXELS_API_KEY", "PEXELS_API_KEY", "Pexels")
60
  st.session_state.CONFIG_ELEVENLABS_VOICE_ID = load_api_key("ELEVENLABS_VOICE_ID", "ELEVENLABS_VOICE_ID", "ElevenLabs Voice ID")
61
  st.session_state.API_KEY_RUNWAYML = load_api_key("RUNWAY_API_KEY", "RUNWAY_API_KEY", "RunwayML")
 
62
  if not st.session_state.API_KEY_GEMINI: st.error("CRITICAL: Gemini API Key missing!"); logger.critical("Gemini API Key missing."); st.stop()
63
  try: st.session_state.gemini_service_handler = GeminiHandler(api_key=st.session_state.API_KEY_GEMINI); logger.info("GeminiHandler initialized.")
64
- except Exception as e: st.error(f"CRITICAL: Failed to init GeminiHandler: {e}"); logger.critical(f"GeminiHandler init failed: {e}", exc_info=True); st.stop()
65
  try:
66
  el_def_voice = "Rachel"; el_res_voice_id = st.session_state.CONFIG_ELEVENLABS_VOICE_ID or el_def_voice
67
  st.session_state.visual_content_engine = VisualEngine(output_dir="temp_cinegen_media", default_elevenlabs_voice_id=el_res_voice_id)
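The initialization block in this hunk constructs `GeminiHandler` and `VisualEngine` exactly once per session, guarded by `services_initialized_flag`. A minimal sketch of that guard pattern with a stand-in service (the `DummyService` class is hypothetical, not part of the app):

```python
import streamlit as st

class DummyService:
    """Stand-in for GeminiHandler / VisualEngine; illustrative only."""
    def __init__(self, api_key=None):
        self.api_key = api_key

if "services_initialized_flag" not in st.session_state:
    try:
        # Heavy objects are built once and cached in session state.
        st.session_state.demo_service = DummyService(api_key=st.session_state.get("API_KEY_GEMINI"))
    except Exception as exc:
        st.error(f"CRITICAL: service init failed: {exc}")
        st.stop()
    # Mark initialization done so Streamlit reruns skip this block.
    st.session_state.services_initialized_flag = True
```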
@@ -69,8 +60,8 @@ if 'services_initialized_flag' not in st.session_state:
69
  st.session_state.visual_content_engine.set_elevenlabs_api_key(st.session_state.API_KEY_ELEVENLABS, voice_id_from_secret=st.session_state.CONFIG_ELEVENLABS_VOICE_ID)
70
  st.session_state.visual_content_engine.set_pexels_api_key(st.session_state.API_KEY_PEXELS)
71
  st.session_state.visual_content_engine.set_runway_api_key(st.session_state.API_KEY_RUNWAYML)
72
- logger.info("VisualEngine initialized and API keys set.")
73
- except Exception as e: st.error(f"CRITICAL: Failed to init VisualEngine: {e}"); logger.critical(f"VisualEngine init/key setting failed: {e}", exc_info=True); st.warning("VisualEngine critical setup issue."); st.stop()
74
  st.session_state.services_initialized_flag = True; logger.info("APP_INIT: Service initialization complete.")
75
 
76
  PROJECT_SS_DEFAULTS = {'project_story_treatment_scenes_list':[],'project_scene_generation_prompts_list':[],'project_generated_assets_info_list':[],'project_final_video_path':None,'project_character_definitions_map':{},'project_global_style_keywords_str':"",'project_overall_narration_audio_path':None,'project_narration_script_text':""}
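`PROJECT_SS_DEFAULTS` maps each per-project session key to its empty default. A sketch of how such a mapping is typically seeded into `st.session_state`; the two helpers below are assumptions about how `initialize_new_project_data_in_session` likely behaves, not copies of it:

```python
import copy
import streamlit as st

PROJECT_DEFAULTS = {
    "project_story_treatment_scenes_list": [],
    "project_generated_assets_info_list": [],
    "project_final_video_path": None,
    "project_global_style_keywords_str": "",
}

def ensure_project_defaults():
    """Create any missing project keys without overwriting existing values."""
    for key, default in PROJECT_DEFAULTS.items():
        if key not in st.session_state:
            st.session_state[key] = copy.deepcopy(default)

def reset_project_data():
    """Overwrite every project key with a fresh copy of its default."""
    for key, default in PROJECT_DEFAULTS.items():
        st.session_state[key] = copy.deepcopy(default)
```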
@@ -83,9 +74,9 @@ def initialize_new_project_data_in_session():
83
  logger.info("PROJECT_DATA: New project data re-initialized.")
84
 
85
  def generate_asset_for_scene_in_app(sc_idx, sc_data, asset_v=1, user_asset_type_ui="Auto (Director's Choice)"):
86
- # (This function's internal logic using scene_data_dict should be correct from previous full app.py)
87
  logger.debug(f"APP: generate_asset_for_scene_in_app for S_idx {sc_idx}, ver {asset_v}, user_type: {user_asset_type_ui}")
88
- gen_as_vid_final = False; gemini_sugg_type = sc_data.get('suggested_asset_type_감독','image').lower()
89
  if user_asset_type_ui=="Image": gen_as_vid_final=False
90
  elif user_asset_type_ui=="Video Clip": gen_as_vid_final=True
91
  elif user_asset_type_ui=="Auto (Director's Choice)": gen_as_vid_final=(gemini_sugg_type=="video_clip")
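The three branches above collapse the user's override and Gemini's suggested asset type into a single boolean. A compact restatement of that decision, using the field and option names from the surrounding code:

```python
def resolve_generate_as_video(user_choice, scene_data):
    """Return True if the scene should be rendered as a video clip rather than a still image."""
    suggested = scene_data.get("suggested_asset_type_감독", "image").lower()
    if user_choice == "Image":
        return False
    if user_choice == "Video Clip":
        return True
    # "Auto (Director's Choice)" defers to Gemini's per-scene suggestion.
    return suggested == "video_clip"

# Example: an Auto choice plus a video suggestion resolves to True.
assert resolve_generate_as_video("Auto (Director's Choice)", {"suggested_asset_type_감독": "video_clip"})
```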
@@ -93,13 +84,13 @@ def generate_asset_for_scene_in_app(sc_idx, sc_data, asset_v=1, user_asset_type_
93
  prompt_base_img = construct_dalle_prompt(sc_data,st.session_state.project_character_definitions_map,st.session_state.project_global_style_keywords_str)
94
  prompt_motion_vid = ""
95
  if gen_as_vid_final: prompt_motion_vid=construct_text_to_video_prompt_for_gen4(sc_data,st.session_state.project_global_style_keywords_str) or sc_data.get('video_clip_motion_description_감독',"subtle motion")
96
- if not prompt_base_img: logger.error(f"Base img prompt fail S{sc_data.get('scene_number',sc_idx+1)}"); return False
97
  while len(st.session_state.project_scene_generation_prompts_list)<=sc_idx:st.session_state.project_scene_generation_prompts_list.append("")
98
  while len(st.session_state.project_generated_assets_info_list)<=sc_idx:st.session_state.project_generated_assets_info_list.append(None)
99
  st.session_state.project_scene_generation_prompts_list[sc_idx]=prompt_motion_vid if gen_as_vid_final else prompt_base_img
100
  fn_base_asset=f"scene_{sc_data.get('scene_number',sc_idx+1)}_asset_v{asset_v}"
101
  rwy_dur=sc_data.get('video_clip_duration_estimate_secs_감독',sc_data.get('user_scene_duration_secs',DEFAULT_SCENE_DURATION_SECS));rwy_dur=max(1,rwy_dur)
102
- asset_res_dict=st.session_state.visual_content_engine.generate_scene_asset(image_generation_prompt_text=prompt_base_img,motion_prompt_text_for_video=prompt_motion_vid,scene_data_dict=sc_data,scene_identifier_fn_base=fn_base_asset,generate_as_video_clip_flag=gen_as_vid_final,runway_target_dur_val=rwy_dur)
103
  st.session_state.project_generated_assets_info_list[sc_idx]=asset_res_dict
104
  if asset_res_dict and asset_res_dict.get('prompt_used')and st.session_state.project_scene_generation_prompts_list[sc_idx]!=asset_res_dict['prompt_used']:st.session_state.project_scene_generation_prompts_list[sc_idx]=asset_res_dict['prompt_used']
105
  if asset_res_dict and not asset_res_dict['error']and asset_res_dict.get('path')and os.path.exists(asset_res_dict['path']):logger.info(f"APP: Asset ({asset_res_dict.get('type')}) OK S{sc_data.get('scene_number',sc_idx+1)}:{os.path.basename(asset_res_dict['path'])}");return True
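The `while` loops in this hunk pad the per-scene prompt and asset lists until the current index is addressable, then write the result in place. A small sketch of that pad-then-assign idiom; `record_scene_result` and its generic arguments are illustrative, not names from app.py:

```python
def record_scene_result(prompts, assets, idx, prompt_text, asset_info):
    """Pad both lists to at least idx + 1 entries, then store this scene's data in place."""
    while len(prompts) <= idx:
        prompts.append("")
    while len(assets) <= idx:
        assets.append(None)
    prompts[idx] = prompt_text
    assets[idx] = asset_info

prompts, assets = [], []
record_scene_result(prompts, assets, 2, "wide shot of an oasis", {"type": "image"})
# Indices 0 and 1 hold placeholders; index 2 holds the new scene's data.
print(prompts, assets)
```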
@@ -109,228 +100,210 @@ with st.sidebar: # Sidebar UI
109
  if os.path.exists("assets/logo.png"): st.image("assets/logo.png", width=150)
110
  else: st.sidebar.markdown("## 🎬 CineGen AI Ultra+"); logger.warning("assets/logo.png not found.")
111
  st.markdown("### Creative Seed")
112
- sb_user_idea = st.text_area("Core Idea:", "Lone wanderer, mythical oasis, post-apocalyptic desert, mirages, mechanical scavengers.", height=100, key="sb_idea")
113
- sb_genre = st.selectbox("Genre:", ["Cyberpunk", "Sci-Fi", "Fantasy", "Noir", "Thriller", "Western", "Post-Apocalyptic", "Historical Drama", "Surreal"], index=6, key="sb_genre")
114
- sb_mood = st.selectbox("Mood:", ["Hopeful yet Desperate", "Mysterious & Eerie", "Gritty & Tense", "Epic & Awe-Inspiring", "Melancholy & Reflective", "Whimsical"], index=0, key="sb_mood")
115
- sb_num_scenes = st.slider("Key Scenes:", 1, 10, 1, key="sb_num_scenes")
116
  sb_guidance_opts = {"Standard": "standard", "Artistic": "more_artistic", "Experimental": "experimental_narrative"}
117
- sb_guidance_key = st.selectbox("AI Director Style:", list(sb_guidance_opts.keys()), key="sb_guidance")
118
  sb_actual_guidance = sb_guidance_opts[sb_guidance_key]
119
 
120
- if st.button("🌌 Generate Cinematic Treatment", type="primary", key="sb_btn_gen_treat", use_container_width=True):
121
  initialize_new_project_data_in_session()
122
  if not sb_user_idea.strip(): st.warning("Please provide a story idea.")
123
  else:
124
- # <<< CORRECTED VARIABLE NAME FOR STATUS OBJECT >>>
125
- with st.status("AI Director is envisioning your masterpiece...", expanded=True) as main_status_operation:
126
  try:
127
- main_status_operation.write("Phase 1: Crafting treatment... πŸ“œ"); logger.info("APP: P1 - Treatment Gen.")
128
  prompt_treat = create_cinematic_treatment_prompt(sb_user_idea, sb_genre, sb_mood, sb_num_scenes, sb_actual_guidance)
129
  raw_treat_list = st.session_state.gemini_service_handler.generate_story_breakdown(prompt_treat)
130
  if not isinstance(raw_treat_list, list) or not raw_treat_list: raise ValueError("Gemini invalid scene list.")
131
-
132
  init_scenes = []
133
  for scene_gemini in raw_treat_list:
134
- gem_dur = scene_gemini.get('video_clip_duration_estimate_secs_감독', 0)
135
- scene_gemini['user_scene_duration_secs'] = gem_dur if gem_dur > 0 else DEFAULT_SCENE_DURATION_SECS
136
- scene_gemini['user_shot_type'] = scene_gemini.get('PROACTIVE_camera_work_감독', DEFAULT_SHOT_TYPE)
137
- scene_gemini['user_selected_asset_type'] = "Auto (Director's Choice)"
138
- init_scenes.append(scene_gemini)
139
  st.session_state.project_story_treatment_scenes_list = init_scenes
140
-
141
- num_gen_sc = len(init_scenes)
142
- st.session_state.project_scene_generation_prompts_list = [""]*num_gen_sc
143
- st.session_state.project_generated_assets_info_list = [None]*num_gen_sc
144
- logger.info(f"APP: P1 done. {num_gen_sc} scenes."); main_status_operation.update(label="Treatment complete! βœ… Generating assets...", state="running")
145
-
146
- main_status_operation.write("Phase 2: Creating assets..."); logger.info("APP: P2 - Asset Gen.")
147
  success_assets = 0
148
  for i, scene_item in enumerate(st.session_state.project_story_treatment_scenes_list):
149
- sc_num_log = scene_item.get('scene_number', i+1)
150
- main_status_operation.write(f" Asset for Scene {sc_num_log}..."); logger.info(f" APP: Asset S{sc_num_log}.")
151
  if generate_asset_for_scene_in_app(i, scene_item, asset_v=1): success_assets += 1
152
-
153
  lbl_p2 = "Assets generated! "; next_state = "running"
154
- if success_assets == 0 and num_gen_sc > 0: logger.error("APP: Asset gen FAIL all."); lbl_p2 = "Asset gen FAIL all."; next_state="error"; main_status_operation.update(label=lbl_p2, state=next_state, expanded=True); st.stop()
155
  elif success_assets < num_gen_sc: logger.warning(f"APP: Assets partial ({success_assets}/{num_gen_sc})."); lbl_p2 = f"Assets partial ({success_assets}/{num_gen_sc}). "
156
- main_status_operation.update(label=f"{lbl_p2}Generating narration...", state=next_state) # Corrected var
157
  if next_state == "error": st.stop()
158
-
159
- main_status_operation.write("Phase 3: Narration script..."); logger.info("APP: P3 - Narration Script.") # Corrected var
160
  voice_style = st.session_state.get("selected_voice_style_for_generation", "cinematic_trailer")
161
  prompt_narr = create_narration_script_prompt_enhanced(st.session_state.project_story_treatment_scenes_list, sb_mood, sb_genre, voice_style)
162
  st.session_state.project_narration_script_text = st.session_state.gemini_service_handler.generate_image_prompt(prompt_narr)
163
- logger.info("APP: Narration script OK."); main_status_operation.update(label="Narration ready! Synthesizing voice...", state="running") # Corrected var
164
-
165
- main_status_operation.write("Phase 4: Synthesizing voice..."); logger.info("APP: P4 - Voice Synth.") # <<< CORRECTED VARIABLE NAME >>>
166
  st.session_state.project_overall_narration_audio_path = st.session_state.visual_content_engine.generate_narration_audio(st.session_state.project_narration_script_text)
167
-
168
  final_lbl = "All components ready! Review storyboard. πŸš€"; final_state = "complete"
169
  if not st.session_state.project_overall_narration_audio_path: final_lbl = f"{lbl_p2}Storyboard ready (Voiceover failed)."; logger.warning("APP: Narration audio fail.")
170
  else: logger.info("APP: Narration audio OK.")
171
- main_status_operation.update(label=final_lbl, state=final_state, expanded=False) # Corrected var
172
-
173
- except ValueError as e_val_main: logger.error(f"APP: ValueError: {e_val_main}", exc_info=True); main_status_operation.update(label=f"Data/Response Error: {e_val_main}", state="error", expanded=True); # Corrected var
174
- except TypeError as e_type_main: logger.error(f"APP: TypeError: {e_type_main}", exc_info=True); main_status_operation.update(label=f"Type Error: {e_type_main}", state="error", expanded=True); # Corrected var
175
- except Exception as e_unhandled_main_flow: # Renamed e_gen
176
- logger.error(f"APP_MAIN_FLOW: Unhandled Exception: {e_unhandled_main_flow}", exc_info=True)
177
- main_status_operation.update(label=f"Unexpected Error: {e_unhandled_main_flow}", state="error", expanded=True) # <<< CORRECTED VARIABLE NAME >>>
178
 
179
- # (Sidebar Fine-Tuning Options - ensure unique keys and correct session state access)
180
- # ... (Characters, Global Style, Voice sections - use project_character_definitions_map, etc.)
181
  with st.expander("Define Characters", expanded=False):
182
- sb_char_name_input = st.text_input("Character Name", key="sb_char_name_unique_char"); sb_char_desc_input = st.text_area("Visual Description", key="sb_char_desc_unique_char", height=100)
183
- if st.button("Save Character", key="sb_add_char_unique_char"):
184
- if sb_char_name_input and sb_char_desc_input: st.session_state.project_character_definitions_map[sb_char_name_input.strip().lower()] = sb_char_desc_input.strip(); st.success(f"Char '{sb_char_name_input.strip()}' saved.")
185
  else: st.warning("Name and description needed.")
186
  if st.session_state.project_character_definitions_map: st.caption("Defined Characters:"); [st.markdown(f"**{k.title()}:** _{v}_") for k,v in st.session_state.project_character_definitions_map.items()]
187
  with st.expander("Global Style Overrides", expanded=False):
188
- sb_style_presets_map_global = { "Default": "", "Noir": "gritty neo-noir...", "Fantasy": "epic fantasy...", "Sci-Fi": "analog sci-fi..."}
189
- sb_selected_preset_key_global = st.selectbox("Base Style Preset:", options=list(sb_style_presets_map_global.keys()), key="sb_style_preset_unique_global")
190
- sb_custom_keywords_global = st.text_area("Additional Custom Keywords:", key="sb_custom_style_unique_global", height=80)
191
- sb_current_global_style_val = st.session_state.project_global_style_keywords_str
192
- if st.button("Apply Global Styles", key="sb_apply_styles_unique_global"):
193
- final_style_str_global_val = sb_style_presets_map_global[sb_selected_preset_key_global];
194
- if sb_custom_keywords_global.strip(): final_style_str_global_val = f"{final_style_str_global_val}, {sb_custom_keywords_global.strip()}" if final_style_str_global_val else sb_custom_keywords_global.strip()
195
- st.session_state.project_global_style_keywords_str = final_style_str_global_val.strip(); sb_current_global_style_val = final_style_str_global_val.strip()
196
- if sb_current_global_style_val: st.success("Global styles applied!")
197
  else: st.info("Global styles cleared.")
198
- if sb_current_global_style_val: st.caption(f"Active: \"{sb_current_global_style_val}\"")
199
  with st.expander("Voice & Narration Style", expanded=False):
200
- sb_engine_default_voice_val = "Rachel"
201
- if hasattr(st.session_state, 'visual_content_engine') and st.session_state.visual_content_engine: sb_engine_default_voice_val = st.session_state.visual_content_engine.elevenlabs_voice_id
202
- sb_user_voice_id_input_val = st.text_input("11L Voice ID (override):", value=sb_engine_default_voice_val, key="sb_el_voice_id_override_unique_global")
203
- sb_narration_styles_map_val = {"Cinematic Trailer": "cinematic_trailer", "Neutral Documentary": "documentary_neutral", "Character Introspection": "introspective_character"}
204
- sb_selected_narration_style_key_val = st.selectbox("Narration Script Style:", list(sb_narration_styles_map_val.keys()), key="sb_narr_style_sel_unique_global", index=0)
205
- if st.button("Set Narrator Voice & Style", key="sb_set_voice_btn_unique_global"):
206
- final_voice_id_to_use_el_val = sb_user_voice_id_input_val.strip() or st.session_state.get("CONFIG_ELEVENLABS_VOICE_ID", "Rachel")
207
- if hasattr(st.session_state, 'visual_content_engine'): st.session_state.visual_content_engine.elevenlabs_voice_id = final_voice_id_to_use_el_val
208
- st.session_state.selected_voice_style_for_generation = sb_narration_styles_map_val[sb_selected_narration_style_key_val]
209
- st.success(f"Narrator Voice ID: {final_voice_id_to_use_el_val}. Script Style: {sb_selected_narration_style_key_val}")
210
- logger.info(f"User updated 11L Voice ID: {final_voice_id_to_use_el_val}, Narration Script Style: {sb_selected_narration_style_key_val}")
211
 
212
- # --- Main Content Area ---
213
  st.header("🎬 Cinematic Storyboard & Treatment")
214
  if st.session_state.project_narration_script_text:
215
  with st.expander("πŸ“œ View Full Narration Script", expanded=False): st.markdown(f"> _{st.session_state.project_narration_script_text}_")
216
 
217
  if not st.session_state.project_story_treatment_scenes_list: st.info("Use the sidebar to generate your cinematic treatment.")
218
  else:
219
- for i_main_display, scene_data_to_display in enumerate(st.session_state.project_story_treatment_scenes_list):
220
- # (Ensure all UI elements and logic within this loop use corrected session state keys and unique widget keys)
221
- # ... (The rest of your main content display and regeneration logic, ensuring correct variable names) ...
222
- scene_num_disp = scene_data_to_display.get('scene_number', i_main_display + 1)
223
- scene_title_disp = scene_data_to_display.get('scene_title', 'Untitled Scene')
224
- key_base_main = f"s{scene_num_disp}_main_area_loop_{i_main_display}"
225
- if "director_note" in scene_data_to_display and scene_data_to_display['director_note']: st.info(f"🎬 Director Note S{scene_num_disp}: {scene_data_to_display['director_note']}")
226
- st.subheader(f"SCENE {scene_num_disp}: {scene_title_disp.upper()}"); col_treat_disp, col_vis_disp = st.columns([0.45, 0.55])
227
- with col_treat_disp:
 
 
 
228
  with st.expander("πŸ“ Scene Treatment & Controls", expanded=True):
229
- # (Display scene textual details)
230
- st.markdown(f"**Beat:** {scene_data_to_display.get('emotional_beat', 'N/A')}"); st.markdown(f"**Setting:** {scene_data_to_display.get('setting_description', 'N/A')}"); st.markdown(f"**Chars:** {', '.join(scene_data_to_display.get('characters_involved', ['N/A']))}"); st.markdown(f"**Focus Moment:** _{scene_data_to_display.get('character_focus_moment', 'N/A')}_"); st.markdown(f"**Plot Beat:** {scene_data_to_display.get('key_plot_beat', 'N/A')}"); st.markdown(f"**Dialogue Hook:** `\"{scene_data_to_display.get('suggested_dialogue_hook', '...')}\"`"); st.markdown("---"); st.markdown(f"**Dir. Visual Style:** _{scene_data_to_display.get('PROACTIVE_visual_style_감독', 'N/A')}_"); st.markdown(f"**Dir. Camera:** _{scene_data_to_display.get('PROACTIVE_camera_work_감독', 'N/A')}_"); st.markdown(f"**Dir. Sound:** _{scene_data_to_display.get('PROACTIVE_sound_design_감독', 'N/A')}_"); st.markdown("---")
231
  st.markdown("##### Shot, Pacing & Asset Controls")
232
- # (Widgets for shot type, duration, asset type override)
233
- curr_shot_type_ui = st.session_state.project_story_treatment_scenes_list[i_main_display].get('user_shot_type', DEFAULT_SHOT_TYPE)
234
- try: idx_shot_type = SHOT_TYPES_OPTIONS.index(curr_shot_type_ui)
235
- except ValueError: idx_shot_type = SHOT_TYPES_OPTIONS.index(DEFAULT_SHOT_TYPE)
236
- new_shot_type_ui = st.selectbox("Dominant Shot Type:", options=SHOT_TYPES_OPTIONS, index=idx_shot_type, key=f"shot_type_{key_base_main}")
237
- if new_shot_type_ui != curr_shot_type_ui: st.session_state.project_story_treatment_scenes_list[i_main_display]['user_shot_type'] = new_shot_type_ui
238
- curr_duration_ui = st.session_state.project_story_treatment_scenes_list[i_main_display].get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)
239
- new_duration_ui = st.number_input("Scene Duration (seconds):", min_value=1, max_value=300, value=curr_duration_ui, step=1, key=f"duration_{key_base_main}")
240
- if new_duration_ui != curr_duration_ui: st.session_state.project_story_treatment_scenes_list[i_main_display]['user_scene_duration_secs'] = new_duration_ui
241
- curr_asset_type_ui = st.session_state.project_story_treatment_scenes_list[i_main_display].get('user_selected_asset_type', "Auto (Director's Choice)")
242
- try: idx_asset_type = ASSET_TYPE_OPTIONS.index(curr_asset_type_ui)
243
- except ValueError: idx_asset_type = 0
244
- new_asset_type_ui = st.selectbox("Asset Type Override:", ASSET_TYPE_OPTIONS, index=idx_asset_type, key=f"asset_type_{key_base_main}")
245
- if new_asset_type_ui != curr_asset_type_ui: st.session_state.project_story_treatment_scenes_list[i_main_display]['user_selected_asset_type'] = new_asset_type_ui
246
  st.markdown("---")
247
- prompt_asset_disp = st.session_state.project_scene_generation_prompts_list[i_main_display] if i_main_display < len(st.session_state.project_scene_generation_prompts_list) else None
248
- if prompt_asset_disp:
249
- with st.popover("πŸ‘οΈ View Asset Generation Prompt"): st.markdown(f"**Prompt used:**"); st.code(prompt_asset_disp, language='text')
250
- px_query_disp = scene_data_to_display.get('pexels_search_query_감독', None)
251
- if px_query_disp: st.caption(f"Pexels Fallback: `{px_query_disp}`")
252
- with main_col_visual_area:
253
- # (Display logic for asset image/video)
254
- asset_info_disp = st.session_state.project_generated_assets_info_list[i_main_display] if i_main_display < len(st.session_state.project_generated_assets_info_list) else None
255
- if asset_info_disp and not asset_info_disp.get('error') and asset_info_disp.get('path') and os.path.exists(asset_info_disp['path']):
256
- asset_path_disp = asset_info_disp['path']; asset_type_disp = asset_info_disp.get('type','image')
257
- if asset_type_disp == 'image': st.image(asset_path_disp, caption=f"S{scene_num_disp} ({asset_type_disp}): {scene_title_disp}")
258
- elif asset_type_disp == 'video':
259
  try:
260
- with open(asset_path_disp,'rb') as vid_f: vid_bytes = vid_f.read()
261
- st.video(vid_bytes, format="video/mp4", start_time=0); st.caption(f"S{scene_num_disp} ({asset_type_disp}): {scene_title_disp}")
262
- except Exception as e: st.error(f"Error displaying video {asset_path_disp}: {e}"); logger.error(f"Display video error: {e}", exc_info=True)
263
- else: st.warning(f"Unknown asset type '{asset_type_disp}' S{scene_num_disp}.")
264
  else:
265
  if st.session_state.project_story_treatment_scenes_list:
266
- err_msg_disp = asset_info_disp.get('error_message', 'Visual pending/failed.') if asset_info_disp else 'Visual pending/failed.'
267
- st.caption(err_msg_disp)
268
- with st.popover(f"✏️ Edit S{scene_num_disp} Treatment"):
269
- # (Treatment Regeneration Popover logic - ensure correct session state keys and function calls)
270
- feedback_treat_regen = st.text_area("Changes to treatment:", key=f"treat_fb_pop_{key_base_main}", height=150)
271
- if st.button(f"πŸ”„ Update S{scene_num_disp} Treatment", key=f"regen_treat_btn_pop_{key_base_main}"):
272
- if feedback_treat_regen:
273
- with st.status(f"Updating S{scene_num_disp} Treatment & Asset...", expanded=True) as status_treat_upd_pop:
274
- user_shot_pref = st.session_state.project_story_treatment_scenes_list[i_main_display]['user_shot_type']
275
- user_dur_pref = st.session_state.project_story_treatment_scenes_list[i_main_display]['user_scene_duration_secs']
276
- user_asset_pref = st.session_state.project_story_treatment_scenes_list[i_main_display]['user_selected_asset_type']
277
- prompt_gemini_regen = create_scene_regeneration_prompt(scene_data_to_display, feedback_treat_regen, st.session_state.project_story_treatment_scenes_list)
278
  try:
279
- updated_scene_gemini = st.session_state.gemini_service_handler.regenerate_scene_script_details(prompt_gemini_regen)
280
- final_updated_scene = {**updated_scene_gemini}
281
- final_updated_scene['user_shot_type']=user_shot_pref; final_updated_scene['user_scene_duration_secs']=user_dur_pref; final_updated_scene['user_selected_asset_type']=user_asset_pref
282
- st.session_state.project_story_treatment_scenes_list[i_main_display] = final_updated_scene
283
- status_treat_upd_pop.update(label="Treatment updated! Regenerating asset...", state="running")
284
- ver_asset_regen = 1
285
- if asset_info_disp and asset_info_disp.get('path') and os.path.exists(asset_info_disp['path']):
286
- try: base_fn_regen,_=os.path.splitext(os.path.basename(asset_info_disp['path'])); ver_asset_regen = int(base_fn_regen.split('_v')[-1])+1 if '_v' in base_fn_regen else 2
287
- except: ver_asset_regen = 2
288
- if generate_asset_for_scene_in_app(i_main_display, final_updated_scene, asset_ver_num=ver_asset_regen, user_selected_asset_type_override=user_asset_pref): status_treat_upd_pop.update(label="Treatment & Asset Updated! 🎉", state="complete", expanded=False)
289
- else: status_treat_upd_pop.update(label="Treatment updated, asset regen failed.", state="complete", expanded=False)
290
  st.rerun()
291
- except Exception as e_treat_regen_main_loop: status_treat_upd_pop.update(label=f"Error: {e_treat_regen_main_loop}", state="error"); logger.error(f"Scene treatment regen error: {e_treat_regen_main_loop}", exc_info=True)
292
- else: st.warning("Please provide feedback for treatment.")
293
- with st.popover(f"🎨 Edit S{scene_num_disp} Visual Prompt/Asset"):
294
- # (Visual Asset Regeneration Popover logic - ensure correct session state keys and function calls)
295
- prompt_edit_disp_pop = st.session_state.project_scene_generation_prompts_list[i_main_display] if i_main_display < len(st.session_state.project_scene_generation_prompts_list) else "No prompt."
296
- st.caption("Current Asset Generation Prompt:"); st.code(prompt_edit_disp_pop, language='text')
297
- feedback_vis_asset_regen = st.text_area("Describe changes for visual asset:", key=f"visual_fb_input_pop_{key_base_main}", height=150)
298
- if st.button(f"πŸ”„ Update S{scene_num_disp} Asset", key=f"regen_visual_btn_pop_{key_base_main}"):
299
- if feedback_vis_asset_regen:
300
- with st.status(f"Refining prompt & asset for S{scene_num_disp}...", expanded=True) as status_vis_asset_regen_pop:
301
- user_asset_type_choice_pop = st.session_state.project_story_treatment_scenes_list[i_main_display]['user_selected_asset_type']
302
- is_video_for_regen_pop = (user_asset_type_choice_pop == "Video Clip") or (user_asset_type_choice_pop == "Auto (Director's Choice)" and scene_data_to_display.get('suggested_asset_type_감독') == 'video_clip')
303
- new_prompt_asset_regen_pop = ""
304
- if not is_video_for_regen_pop:
305
- gemini_refine_prompt_pop = create_visual_regeneration_prompt(prompt_edit_disp_pop, feedback_vis_asset_regen, scene_data_to_display, st.session_state.project_character_definitions_map, st.session_state.project_global_style_keywords_str)
306
- try: new_prompt_asset_regen_pop = st.session_state.gemini_service_handler.refine_image_prompt_from_feedback(gemini_refine_prompt_pop); st.session_state.project_scene_generation_prompts_list[i_main_display] = new_prompt_asset_regen_pop; status_vis_asset_regen_pop.update(label="Image prompt refined! Regenerating asset...", state="running")
307
- except Exception as e_gem_refine_pop: status_vis_asset_regen_pop.update(label=f"Error refining prompt: {e_gem_refine_pop}", state="error"); logger.error(f"Visual prompt refinement error: {e_gem_refine_pop}", exc_info=True); continue
308
  else:
309
- new_prompt_asset_regen_pop = construct_text_to_video_prompt_for_gen4(scene_data_to_display, st.session_state.project_global_style_keywords_str); st.session_state.project_scene_generation_prompts_list[i_main_display] = new_prompt_asset_regen_pop; status_vis_asset_regen_pop.update(label="Video prompt reconstructed! Regenerating asset...", state="running")
310
- if not new_prompt_asset_regen_pop: status_vis_asset_regen_pop.update(label="Prompt construction failed.", state="error"); continue
311
- ver_vis_asset_regen_pop = 1
312
- if asset_info_disp and asset_info_disp.get('path') and os.path.exists(asset_info_disp['path']):
313
- try: base_fn_viz_pop,_=os.path.splitext(os.path.basename(asset_info_disp['path'])); ver_vis_asset_regen_pop = int(base_fn_viz_pop.split('_v')[-1])+1 if '_v' in base_fn_viz_pop else 2
314
- except: ver_vis_asset_regen_pop = 2
315
- if generate_asset_for_scene_in_app(i_main_display, st.session_state.project_story_treatment_scenes_list[i_main_display], asset_ver_num=ver_vis_asset_regen_pop, user_selected_asset_type_override=user_asset_type_choice_pop): status_vis_asset_regen_pop.update(label="Asset Updated! 🎉", state="complete", expanded=False)
316
- else: status_vis_asset_regen_pop.update(label="Prompt updated, asset regen failed.", state="complete", expanded=False)
317
  st.rerun()
318
  else: st.warning("Please provide feedback for visual asset regeneration.")
319
  st.markdown("---")
320
 
321
- if st.session_state.project_story_treatment_scenes_list and any(asset_info_item_vid and not asset_info_item_vid.get('error') and asset_info_item_vid.get('path') for asset_info_item_vid in st.session_state.project_generated_assets_info_list if asset_info_item_vid is not None):
322
  if st.button("🎬 Assemble Narrated Cinematic Animatic", key="assemble_video_main_area_btn_final_unique_3", type="primary", use_container_width=True):
323
  with st.status("Assembling Ultra Animatic...", expanded=True) as status_video_assembly_final_op_main:
324
- assets_for_final_vid_assembly_list = []
325
- for i_vid_assembly_main, scene_data_for_vid_main in enumerate(st.session_state.project_story_treatment_scenes_list):
326
- asset_info_curr_scene_vid_main = st.session_state.project_generated_assets_info_list[i_vid_assembly_main] if i_vid_assembly_main < len(st.session_state.project_generated_assets_info_list) else None
327
- if asset_info_curr_scene_vid_main and not asset_info_curr_scene_vid_main.get('error') and asset_info_curr_scene_vid_main.get('path') and os.path.exists(asset_info_curr_scene_vid_main['path']):
328
- assets_for_final_vid_assembly_list.append({'path': asset_info_curr_scene_vid_main['path'], 'type': asset_info_curr_scene_vid_main.get('type', 'image'), 'scene_num': scene_data_for_vid_main.get('scene_number', i_vid_assembly_main + 1), 'key_action': scene_data_for_vid_main.get('key_plot_beat', ''), 'duration': scene_data_for_vid_main.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)})
329
- status_video_assembly_final_op_main.write(f"Adding S{scene_data_for_vid_main.get('scene_number', i_vid_assembly_main + 1)} ({asset_info_curr_scene_vid_main.get('type')}).")
330
- else: logger.warning(f"Skipping S{scene_data_for_vid_main.get('scene_number', i_vid_assembly_main+1)} for video: No valid asset.")
331
- if assets_for_final_video_assembly_list:
332
  status_video_assembly_final_op_main.write("Calling video engine..."); logger.info("APP: Calling visual_engine.assemble_animatic_from_assets")
333
- st.session_state.project_final_video_path = st.session_state.visual_content_engine.assemble_animatic_from_assets(asset_data_list=assets_for_final_video_assembly_list, overall_narration_path=st.session_state.project_overall_narration_audio_path, output_filename="cinegen_ultra_animatic.mp4", fps=24)
334
if st.session_state.project_final_video_path and os.path.exists(st.session_state.project_final_video_path): status_video_assembly_final_op_main.update(label="Ultra animatic assembled! 🎉", state="complete", expanded=False); st.balloons()
335
  else: status_video_assembly_final_op_main.update(label="Video assembly failed. Check logs.", state="error", expanded=True); logger.error("APP: Video assembly returned None or file does not exist.")
336
  else: status_video_assembly_final_op_main.update(label="No valid assets for video assembly.", state="error", expanded=True); logger.warning("APP: No valid assets found for video assembly.")
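The assembly block above filters the per-scene asset records down to entries whose file still exists on disk before handing them to `assemble_animatic_from_assets`. A hedged sketch of that filtering step (dictionary keys mirror the ones used above; the engine call itself is omitted):

```python
import os

DEFAULT_SCENE_DURATION_SECS = 5

def collect_assets_for_assembly(scenes, asset_infos):
    """Return one dict per scene whose asset file exists, in the shape the video engine expects."""
    usable = []
    for i, scene in enumerate(scenes):
        info = asset_infos[i] if i < len(asset_infos) else None
        if info and not info.get("error") and info.get("path") and os.path.exists(info["path"]):
            usable.append({
                "path": info["path"],
                "type": info.get("type", "image"),
                "scene_num": scene.get("scene_number", i + 1),
                "key_action": scene.get("key_plot_beat", ""),
                "duration": scene.get("user_scene_duration_secs", DEFAULT_SCENE_DURATION_SECS),
            })
    return usable
```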
 
6
  # --- Streamlit PermissionError Mitigation Attempts ---
7
  if "STREAMLIT_CLIENT_GATHER_USAGE_STATS" not in os.environ:
8
  os.environ["STREAMLIT_CLIENT_GATHER_USAGE_STATS"] = "false"
9
+ if "STREAMLIT_BROWSER_GATHERUSAGESTATS" not in os.environ:
 
10
  os.environ["STREAMLIT_BROWSER_GATHERUSAGESTATS"] = "false"
11
+ streamlit_home_path_app = "/app/.streamlit_cai_config_v2"
 
12
  if "STREAMLIT_HOME" not in os.environ and os.getcwd().startswith("/app"):
13
  os.environ["STREAMLIT_HOME"] = streamlit_home_path_app
14
+ try: os.makedirs(streamlit_home_path_app, exist_ok=True)
15
+ except Exception: pass # Ignore if fails, Dockerfile ENV is primary
 
 
 
16
 
17
  from core.gemini_handler import GeminiHandler
18
  from core.visual_engine import VisualEngine
19
  from core.prompt_engineering import (
20
+ create_cinematic_treatment_prompt, construct_dalle_prompt,
21
+ construct_text_to_video_prompt_for_gen4, create_narration_script_prompt_enhanced,
22
+ create_scene_regeneration_prompt, create_visual_regeneration_prompt
 
 
 
23
  )
24
 
25
  st.set_page_config(page_title="CineGen AI Ultra+", layout="wide", initial_sidebar_state="expanded")
26
+ logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s [%(levelname)s] - %(message)s (%(module)s.%(funcName)s:%(lineno)d)')
 
 
 
27
  logger = logging.getLogger(__name__)
28
 
29
  SHOT_TYPES_OPTIONS = [ "Director's Choice", "Establishing Shot", "Long Shot", "Full Shot", "Medium Long Shot (Cowboy)", "Medium Shot", "Medium Close-up", "Close-up", "Extreme Close-up", "Point of View (POV)", "Over the Shoulder", "Tracking Shot", "Dolly Zoom", "Crane Shot", "Aerial Shot", "Static Shot", "Dutch Angle", "Whip Pan"]
30
  DEFAULT_SCENE_DURATION_SECS = 5; DEFAULT_SHOT_TYPE = "Director's Choice"; ASSET_TYPE_OPTIONS = ["Auto (Director's Choice)", "Image", "Video Clip"]
31
 
32
+ def load_api_key(key_name_st, key_name_e, service_n):
33
  key_val = None; secrets_avail = hasattr(st, 'secrets')
34
  try:
35
  if secrets_avail and key_name_st in st.secrets: key_val = st.secrets.get(key_name_st);
36
+ if key_val: logger.info(f"API Key for {service_n} found in St secrets.")
37
  except Exception as e: logger.warning(f"No st.secrets for {key_name_st} ({service_n}): {e}")
38
  if not key_val and key_name_e in os.environ: key_val = os.environ.get(key_name_e);
39
  if key_val: logger.info(f"API Key for {service_n} found in env var '{key_name_e}'.")
 
42
 
43
  if 'services_initialized_flag' not in st.session_state:
44
  logger.info("APP_INIT: Initializing services and API keys...")
45
+ # (API Key Loading as before)
46
  st.session_state.API_KEY_GEMINI = load_api_key("GEMINI_API_KEY", "GEMINI_API_KEY", "Gemini")
47
  st.session_state.API_KEY_OPENAI = load_api_key("OPENAI_API_KEY", "OPENAI_API_KEY", "OpenAI/DALL-E")
48
  st.session_state.API_KEY_ELEVENLABS = load_api_key("ELEVENLABS_API_KEY", "ELEVENLABS_API_KEY", "ElevenLabs")
49
  st.session_state.API_KEY_PEXELS = load_api_key("PEXELS_API_KEY", "PEXELS_API_KEY", "Pexels")
50
  st.session_state.CONFIG_ELEVENLABS_VOICE_ID = load_api_key("ELEVENLABS_VOICE_ID", "ELEVENLABS_VOICE_ID", "ElevenLabs Voice ID")
51
  st.session_state.API_KEY_RUNWAYML = load_api_key("RUNWAY_API_KEY", "RUNWAY_API_KEY", "RunwayML")
52
+
53
  if not st.session_state.API_KEY_GEMINI: st.error("CRITICAL: Gemini API Key missing!"); logger.critical("Gemini API Key missing."); st.stop()
54
  try: st.session_state.gemini_service_handler = GeminiHandler(api_key=st.session_state.API_KEY_GEMINI); logger.info("GeminiHandler initialized.")
55
+ except Exception as e: st.error(f"CRITICAL: GeminiHandler init fail: {e}"); logger.critical(f"GeminiHandler init fail: {e}", exc_info=True); st.stop()
56
  try:
57
  el_def_voice = "Rachel"; el_res_voice_id = st.session_state.CONFIG_ELEVENLABS_VOICE_ID or el_def_voice
58
  st.session_state.visual_content_engine = VisualEngine(output_dir="temp_cinegen_media", default_elevenlabs_voice_id=el_res_voice_id)
 
60
  st.session_state.visual_content_engine.set_elevenlabs_api_key(st.session_state.API_KEY_ELEVENLABS, voice_id_from_secret=st.session_state.CONFIG_ELEVENLABS_VOICE_ID)
61
  st.session_state.visual_content_engine.set_pexels_api_key(st.session_state.API_KEY_PEXELS)
62
  st.session_state.visual_content_engine.set_runway_api_key(st.session_state.API_KEY_RUNWAYML)
63
+ logger.info("VisualEngine initialized and keys set.")
64
+ except Exception as e: st.error(f"CRITICAL: VisualEngine init fail: {e}"); logger.critical(f"VisualEngine init fail: {e}", exc_info=True); st.warning("VisualEngine critical setup issue."); st.stop()
65
  st.session_state.services_initialized_flag = True; logger.info("APP_INIT: Service initialization complete.")
66
 
67
  PROJECT_SS_DEFAULTS = {'project_story_treatment_scenes_list':[],'project_scene_generation_prompts_list':[],'project_generated_assets_info_list':[],'project_final_video_path':None,'project_character_definitions_map':{},'project_global_style_keywords_str':"",'project_overall_narration_audio_path':None,'project_narration_script_text':""}
 
74
  logger.info("PROJECT_DATA: New project data re-initialized.")
75
 
76
  def generate_asset_for_scene_in_app(sc_idx, sc_data, asset_v=1, user_asset_type_ui="Auto (Director's Choice)"):
77
+ # (Logic as in previous app.py, ensuring it uses scene_data_dict for visual_engine call)
78
  logger.debug(f"APP: generate_asset_for_scene_in_app for S_idx {sc_idx}, ver {asset_v}, user_type: {user_asset_type_ui}")
79
+ gen_as_vid_final = False; gemini_sugg_type = sc_data.get('suggested_asset_type_감독', 'image').lower()
80
  if user_asset_type_ui=="Image": gen_as_vid_final=False
81
  elif user_asset_type_ui=="Video Clip": gen_as_vid_final=True
82
  elif user_asset_type_ui=="Auto (Director's Choice)": gen_as_vid_final=(gemini_sugg_type=="video_clip")
 
84
  prompt_base_img = construct_dalle_prompt(sc_data,st.session_state.project_character_definitions_map,st.session_state.project_global_style_keywords_str)
85
  prompt_motion_vid = ""
86
  if gen_as_vid_final: prompt_motion_vid=construct_text_to_video_prompt_for_gen4(sc_data,st.session_state.project_global_style_keywords_str) or sc_data.get('video_clip_motion_description_감독',"subtle motion")
87
+ if not prompt_base_img: logger.error(f"Base image prompt construction failed for S{sc_data.get('scene_number',sc_idx+1)}"); return False
88
  while len(st.session_state.project_scene_generation_prompts_list)<=sc_idx:st.session_state.project_scene_generation_prompts_list.append("")
89
  while len(st.session_state.project_generated_assets_info_list)<=sc_idx:st.session_state.project_generated_assets_info_list.append(None)
90
  st.session_state.project_scene_generation_prompts_list[sc_idx]=prompt_motion_vid if gen_as_vid_final else prompt_base_img
91
  fn_base_asset=f"scene_{sc_data.get('scene_number',sc_idx+1)}_asset_v{asset_v}"
92
  rwy_dur=sc_data.get('video_clip_duration_estimate_secs_감독',sc_data.get('user_scene_duration_secs',DEFAULT_SCENE_DURATION_SECS));rwy_dur=max(1,rwy_dur)
93
+ asset_res_dict=st.session_state.visual_content_engine.generate_scene_asset(image_generation_prompt_text=prompt_base_img,motion_prompt_text_for_video=prompt_motion_vid,scene_data_dict=sc_data,scene_identifier_fn_base=fn_base_asset,generate_as_video_clip_flag=gen_as_vid_final,runway_target_dur_val=rwy_dur) # Uses scene_data_dict
94
  st.session_state.project_generated_assets_info_list[sc_idx]=asset_res_dict
95
  if asset_res_dict and asset_res_dict.get('prompt_used')and st.session_state.project_scene_generation_prompts_list[sc_idx]!=asset_res_dict['prompt_used']:st.session_state.project_scene_generation_prompts_list[sc_idx]=asset_res_dict['prompt_used']
96
  if asset_res_dict and not asset_res_dict['error']and asset_res_dict.get('path')and os.path.exists(asset_res_dict['path']):logger.info(f"APP: Asset ({asset_res_dict.get('type')}) OK S{sc_data.get('scene_number',sc_idx+1)}:{os.path.basename(asset_res_dict['path'])}");return True
 
100
  if os.path.exists("assets/logo.png"): st.image("assets/logo.png", width=150)
101
  else: st.sidebar.markdown("## 🎬 CineGen AI Ultra+"); logger.warning("assets/logo.png not found.")
102
  st.markdown("### Creative Seed")
103
+ sb_user_idea = st.text_area("Core Idea:", "Lone wanderer, mythical oasis, post-apocalyptic desert, mirages, mechanical scavengers.", height=100, key="sb_user_idea_unique")
104
+ sb_genre = st.selectbox("Genre:", ["Cyberpunk", "Sci-Fi", "Fantasy", "Noir", "Thriller", "Western", "Post-Apocalyptic", "Historical Drama", "Surreal"], index=6, key="sb_genre_unique")
105
+ sb_mood = st.selectbox("Mood:", ["Hopeful yet Desperate", "Mysterious & Eerie", "Gritty & Tense", "Epic & Awe-Inspiring", "Melancholy & Reflective", "Whimsical"], index=0, key="sb_mood_unique")
106
+ sb_num_scenes = st.slider("Key Scenes:", 1, 10, 1, key="sb_num_scenes_unique")
107
  sb_guidance_opts = {"Standard": "standard", "Artistic": "more_artistic", "Experimental": "experimental_narrative"}
108
+ sb_guidance_key = st.selectbox("AI Director Style:", list(sb_guidance_opts.keys()), key="sb_guidance_unique")
109
  sb_actual_guidance = sb_guidance_opts[sb_guidance_key]
110
 
111
+ if st.button("🌌 Generate Cinematic Treatment", type="primary", key="sb_btn_gen_treat_unique", use_container_width=True):
112
  initialize_new_project_data_in_session()
113
  if not sb_user_idea.strip(): st.warning("Please provide a story idea.")
114
  else:
115
+ with st.status("AI Director is envisioning your masterpiece...", expanded=True) as main_status_op: # Renamed this for clarity
 
116
  try:
117
+ main_status_op.write("Phase 1: Crafting treatment... πŸ“œ"); logger.info("APP: P1 - Treatment Gen.")
118
  prompt_treat = create_cinematic_treatment_prompt(sb_user_idea, sb_genre, sb_mood, sb_num_scenes, sb_actual_guidance)
119
  raw_treat_list = st.session_state.gemini_service_handler.generate_story_breakdown(prompt_treat)
120
  if not isinstance(raw_treat_list, list) or not raw_treat_list: raise ValueError("Gemini invalid scene list.")
 
121
  init_scenes = []
122
  for scene_gemini in raw_treat_list:
123
+ gem_dur = scene_gemini.get('video_clip_duration_estimate_secs_감독', 0); scene_gemini['user_scene_duration_secs'] = gem_dur if gem_dur > 0 else DEFAULT_SCENE_DURATION_SECS
124
+ scene_gemini['user_shot_type'] = scene_gemini.get('PROACTIVE_camera_work_감독', DEFAULT_SHOT_TYPE); scene_gemini['user_selected_asset_type'] = "Auto (Director's Choice)"; init_scenes.append(scene_gemini)
 
 
 
125
  st.session_state.project_story_treatment_scenes_list = init_scenes
126
+ num_gen_sc = len(init_scenes); st.session_state.project_scene_generation_prompts_list = [""]*num_gen_sc; st.session_state.project_generated_assets_info_list = [None]*num_gen_sc
127
+ logger.info(f"APP: P1 done. {num_gen_sc} scenes."); main_status_op.update(label="Treatment complete! βœ… Generating assets...", state="running")
128
+ main_status_op.write("Phase 2: Creating assets..."); logger.info("APP: P2 - Asset Gen.")
 
 
 
 
129
  success_assets = 0
130
  for i, scene_item in enumerate(st.session_state.project_story_treatment_scenes_list):
131
+ sc_num_log = scene_item.get('scene_number', i+1); main_status_op.write(f" Asset for Scene {sc_num_log}..."); logger.info(f" APP: Asset S{sc_num_log}.")
 
132
  if generate_asset_for_scene_in_app(i, scene_item, asset_v=1): success_assets += 1
 
133
  lbl_p2 = "Assets generated! "; next_state = "running"
134
+ if success_assets == 0 and num_gen_sc > 0: logger.error("APP: Asset gen FAIL all."); lbl_p2 = "Asset gen FAIL all."; next_state="error"; main_status_op.update(label=lbl_p2, state=next_state, expanded=True); st.stop()
135
  elif success_assets < num_gen_sc: logger.warning(f"APP: Assets partial ({success_assets}/{num_gen_sc})."); lbl_p2 = f"Assets partial ({success_assets}/{num_gen_sc}). "
136
+ main_status_op.update(label=f"{lbl_p2}Generating narration...", state=next_state)
137
  if next_state == "error": st.stop()
138
+ main_status_op.write("Phase 3: Narration script..."); logger.info("APP: P3 - Narration Script.")
 
139
  voice_style = st.session_state.get("selected_voice_style_for_generation", "cinematic_trailer")
140
  prompt_narr = create_narration_script_prompt_enhanced(st.session_state.project_story_treatment_scenes_list, sb_mood, sb_genre, voice_style)
141
  st.session_state.project_narration_script_text = st.session_state.gemini_service_handler.generate_image_prompt(prompt_narr)
142
+ logger.info("APP: Narration script OK."); main_status_op.update(label="Narration ready! Synthesizing voice...", state="running")
143
+ main_status_op.write("Phase 4: Synthesizing voice..."); logger.info("APP: P4 - Voice Synth.")
 
144
  st.session_state.project_overall_narration_audio_path = st.session_state.visual_content_engine.generate_narration_audio(st.session_state.project_narration_script_text)
 
145
  final_lbl = "All components ready! Review storyboard. πŸš€"; final_state = "complete"
146
  if not st.session_state.project_overall_narration_audio_path: final_lbl = f"{lbl_p2}Storyboard ready (Voiceover failed)."; logger.warning("APP: Narration audio fail.")
147
  else: logger.info("APP: Narration audio OK.")
148
+ main_status_op.update(label=final_lbl, state=final_state, expanded=False)
149
+ except ValueError as e_val_main: logger.error(f"APP: ValueError: {e_val_main}", exc_info=True); main_status_op.update(label=f"Data/Response Error: {e_val_main}", state="error", expanded=True);
150
+ except TypeError as e_type_main: logger.error(f"APP: TypeError: {e_type_main}", exc_info=True); main_status_op.update(label=f"Type Error: {e_type_main}", state="error", expanded=True);
151
+ except Exception as e_unhandled_main_flow: logger.error(f"APP_MAIN_FLOW: Unhandled Exception: {e_unhandled_main_flow}", exc_info=True); main_status_op.update(label=f"Unexpected Error: {e_unhandled_main_flow}", state="error", expanded=True);
 
 
 
152
 
 
 
153
  with st.expander("Define Characters", expanded=False):
154
+ sb_char_name = st.text_input("Character Name", key="sb_char_name_unique_char_main"); sb_char_desc = st.text_area("Visual Description", key="sb_char_desc_unique_char_main", height=100)
155
+ if st.button("Save Character", key="sb_add_char_unique_char_main"):
156
+ if sb_char_name and sb_char_desc: st.session_state.project_character_definitions_map[sb_char_name.strip().lower()] = sb_char_desc.strip(); st.success(f"Char '{sb_char_name.strip()}' saved.")
157
  else: st.warning("Name and description needed.")
158
  if st.session_state.project_character_definitions_map: st.caption("Defined Characters:"); [st.markdown(f"**{k.title()}:** _{v}_") for k,v in st.session_state.project_character_definitions_map.items()]
159
  with st.expander("Global Style Overrides", expanded=False):
160
+ sb_style_presets = { "Default": "", "Noir": "gritty neo-noir...", "Fantasy": "epic fantasy...", "Sci-Fi": "analog sci-fi..."}
161
+ sb_selected_preset = st.selectbox("Base Style Preset:", list(sb_style_presets.keys()), key="sb_style_preset_unique_global_main")
162
+ sb_custom_keywords = st.text_area("Additional Custom Keywords:", key="sb_custom_style_unique_global_main", height=80)
163
+ sb_current_global_style = st.session_state.project_global_style_keywords_str
164
+ if st.button("Apply Global Styles", key="sb_apply_styles_unique_global_main"):
165
+ final_style = sb_style_presets[sb_selected_preset];
166
+ if sb_custom_keywords.strip(): final_style = f"{final_style}, {sb_custom_keywords.strip()}" if final_style else sb_custom_keywords.strip()
167
+ st.session_state.project_global_style_keywords_str = final_style.strip(); sb_current_global_style = final_style.strip()
168
+ if sb_current_global_style: st.success("Global styles applied!")
169
  else: st.info("Global styles cleared.")
170
+ if sb_current_global_style: st.caption(f"Active: \"{sb_current_global_style}\"")
171
  with st.expander("Voice & Narration Style", expanded=False):
172
+ sb_engine_default_voice = "Rachel"
173
+ if hasattr(st.session_state, 'visual_content_engine'): sb_engine_default_voice = st.session_state.visual_content_engine.elevenlabs_voice_id
174
+ sb_user_voice_id = st.text_input("11L Voice ID (override):", value=sb_engine_default_voice, key="sb_el_voice_id_override_unique_global_main")
175
+ sb_narration_styles = {"Cinematic Trailer": "cinematic_trailer", "Neutral Documentary": "documentary_neutral", "Character Introspection": "introspective_character"}
176
+ sb_selected_narr_style = st.selectbox("Narration Script Style:", list(sb_narration_styles.keys()), key="sb_narr_style_sel_unique_global_main", index=0)
177
+ if st.button("Set Narrator Voice & Style", key="sb_set_voice_btn_unique_global_main"):
178
+ final_el_voice_id = sb_user_voice_id.strip() or st.session_state.get("CONFIG_ELEVENLABS_VOICE_ID", "Rachel")
179
+ if hasattr(st.session_state, 'visual_content_engine'): st.session_state.visual_content_engine.elevenlabs_voice_id = final_el_voice_id
180
+ st.session_state.selected_voice_style_for_generation = sb_narration_styles[sb_selected_narr_style]
181
+ st.success(f"Narrator Voice: {final_el_voice_id}. Script Style: {sb_selected_narr_style}")
182
+ logger.info(f"User updated 11L Voice ID: {final_el_voice_id}, Narr Style: {sb_selected_narr_style}")
183
 
 
184
  st.header("🎬 Cinematic Storyboard & Treatment")
185
  if st.session_state.project_narration_script_text:
186
  with st.expander("πŸ“œ View Full Narration Script", expanded=False): st.markdown(f"> _{st.session_state.project_narration_script_text}_")
187
 
188
  if not st.session_state.project_story_treatment_scenes_list: st.info("Use the sidebar to generate your cinematic treatment.")
189
  else:
190
+ for i_main_loop_content, scene_content_item_display in enumerate(st.session_state.project_story_treatment_scenes_list):
191
+ scene_num_for_display = scene_content_item_display.get('scene_number', i_main_loop_content + 1)
192
+ scene_title_for_display_main = scene_content_item_display.get('scene_title', 'Untitled Scene')
193
+ key_base_main_area_widgets = f"s{scene_num_for_display}_main_widgets_loop_v2_{i_main_loop_content}" # Ensure unique keys
194
+
195
+ if "director_note" in scene_content_item_display and scene_content_item_display['director_note']: st.info(f"🎬 Director Note S{scene_num_for_display}: {scene_content_item_display['director_note']}")
196
+ st.subheader(f"SCENE {scene_num_for_display}: {scene_title_for_display_main.upper()}")
197
+
198
+ # <<< CORRECTED COLUMN VARIABLE NAMES >>>
199
+ treatment_display_col, visual_display_col = st.columns([0.45, 0.55])
200
+
201
+ with treatment_display_col: # Use the correctly defined variable
202
  with st.expander("πŸ“ Scene Treatment & Controls", expanded=True):
203
+ # (Display textual scene details)
204
+ st.markdown(f"**Beat:** {scene_content_item_display.get('emotional_beat', 'N/A')}"); st.markdown(f"**Setting:** {scene_content_item_display.get('setting_description', 'N/A')}"); st.markdown(f"**Chars:** {', '.join(scene_content_item_display.get('characters_involved', ['N/A']))}"); st.markdown(f"**Focus Moment:** _{scene_content_item_display.get('character_focus_moment', 'N/A')}_"); st.markdown(f"**Plot Beat:** {scene_content_item_display.get('key_plot_beat', 'N/A')}"); st.markdown(f"**Dialogue Hook:** `\"{scene_content_item_display.get('suggested_dialogue_hook', '...')}\"`"); st.markdown("---"); st.markdown(f"**Dir. Visual Style:** _{scene_content_item_display.get('PROACTIVE_visual_style_감독', 'N/A')}_"); st.markdown(f"**Dir. Camera:** _{scene_content_item_display.get('PROACTIVE_camera_work_감독', 'N/A')}_"); st.markdown(f"**Dir. Sound:** _{scene_content_item_display.get('PROACTIVE_sound_design_감독', 'N/A')}_"); st.markdown("---")
205
  st.markdown("##### Shot, Pacing & Asset Controls")
206
+ ui_shot_type_current = st.session_state.project_story_treatment_scenes_list[i_main_loop_content].get('user_shot_type', DEFAULT_SHOT_TYPE)
207
+ try: ui_shot_type_idx_val = SHOT_TYPES_OPTIONS.index(ui_shot_type_current)
208
+ except ValueError: ui_shot_type_idx_val = SHOT_TYPES_OPTIONS.index(DEFAULT_SHOT_TYPE)
209
+ ui_shot_type_new = st.selectbox("Dominant Shot Type:", options=SHOT_TYPES_OPTIONS, index=ui_shot_type_idx_val, key=f"shot_type_{key_base_main_area_widgets}")
210
+ if ui_shot_type_new != ui_shot_type_current: st.session_state.project_story_treatment_scenes_list[i_main_loop_content]['user_shot_type'] = ui_shot_type_new
211
+ ui_duration_current = st.session_state.project_story_treatment_scenes_list[i_main_loop_content].get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)
212
+ ui_duration_new = st.number_input("Scene Duration (seconds):", min_value=1, max_value=300, value=ui_duration_current, step=1, key=f"duration_{key_base_main_area_widgets}")
213
+ if ui_duration_new != ui_duration_current: st.session_state.project_story_treatment_scenes_list[i_main_loop_content]['user_scene_duration_secs'] = ui_duration_new
214
+ ui_asset_type_override_current = st.session_state.project_story_treatment_scenes_list[i_main_loop_content].get('user_selected_asset_type', "Auto (Director's Choice)")
215
+ try: ui_asset_type_idx_val = ASSET_TYPE_OPTIONS.index(ui_asset_type_override_current)
216
+ except ValueError: ui_asset_type_idx_val = 0
217
+ ui_asset_type_override_new = st.selectbox("Asset Type Override:", ASSET_TYPE_OPTIONS, index=ui_asset_type_idx_val, key=f"asset_type_{key_base_main_area_widgets}", help="Choose asset type. 'Auto' uses AI suggestion.")
218
+ if ui_asset_type_override_new != ui_asset_type_override_current: st.session_state.project_story_treatment_scenes_list[i_main_loop_content]['user_selected_asset_type'] = ui_asset_type_override_new
 
219
  st.markdown("---")
220
+ prompt_for_asset_to_display = st.session_state.project_scene_generation_prompts_list[i_main_loop_content] if i_main_loop_content < len(st.session_state.project_scene_generation_prompts_list) else None
221
+ if prompt_for_asset_to_display:
222
+ with st.popover("πŸ‘οΈ View Asset Generation Prompt"): st.markdown(f"**Prompt used for current asset:**"); st.code(prompt_for_asset_to_display, language='text')
223
+ pexels_query_to_display = scene_content_item_display.get('pexels_search_query_감독', None)
224
+ if pexels_query_to_display: st.caption(f"Pexels Fallback: `{pexels_query_to_display}`")
225
+
226
+ with visual_display_col: # <<< CORRECTED: Use the correctly defined variable name >>>
227
+ current_asset_info_to_display = st.session_state.project_generated_assets_info_list[i_main_loop_content] if i_main_loop_content < len(st.session_state.project_generated_assets_info_list) else None
228
+ if current_asset_info_to_display and not current_asset_info_to_display.get('error') and current_asset_info_to_display.get('path') and os.path.exists(current_asset_info_to_display['path']):
+ path_of_asset_for_display = current_asset_info_to_display['path']; type_of_asset_for_display = current_asset_info_to_display.get('type', 'image')
+ if type_of_asset_for_display == 'image': st.image(path_of_asset_for_display, caption=f"S{scene_num_for_display} ({type_of_asset_for_display}): {scene_title_for_display_main}")
+ elif type_of_asset_for_display == 'video':
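+ # Video clips are read from disk and handed to st.video as raw bytes.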
  try:
+ with open(path_of_asset_for_display, 'rb') as vid_file_obj_read: video_bytes_for_st_video = vid_file_obj_read.read()
+ st.video(video_bytes_for_st_video, format="video/mp4", start_time=0); st.caption(f"S{scene_num_for_display} ({type_of_asset_for_display}): {scene_title_for_display_main}")
+ except Exception as e_vid_display_main_loop: st.error(f"Error displaying video {path_of_asset_for_display}: {e_vid_display_main_loop}"); logger.error(f"Error displaying video: {e_vid_display_main_loop}", exc_info=True)
+ else: st.warning(f"Unknown asset type '{type_of_asset_for_display}' for S{scene_num_for_display}.")
  else:
  if st.session_state.project_story_treatment_scenes_list:
+ error_msg_for_asset_display = current_asset_info_to_display.get('error_message', 'Visual pending or failed.') if current_asset_info_to_display else 'Visual pending or failed.'
+ st.caption(error_msg_for_asset_display)
+
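+ # Treatment edit flow: preserve the user's shot/duration/asset-type overrides, let Gemini rewrite
+ # the scene treatment from the feedback (create_scene_regeneration_prompt), merge the overrides
+ # back in, then regenerate the asset at a bumped version number via generate_asset_for_scene_in_app
+ # (helper assumed to be defined earlier in this file).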
+ with st.popover(f"✏️ Edit S{scene_num_for_display} Treatment"):
+ feedback_input_for_treatment_regen = st.text_area("Changes to treatment:", key=f"treat_fb_input_pop_{key_base_main_area_widgets}", height=150)
+ if st.button(f"πŸ”„ Update S{scene_num_for_display} Treatment", key=f"regen_treat_btn_pop_{key_base_main_area_widgets}"):
+ if feedback_input_for_treatment_regen:
+ with st.status(f"Updating S{scene_num_for_display} Treatment & Asset...", expanded=True) as status_treatment_update_op_pop:
+ preserved_user_shot_type = st.session_state.project_story_treatment_scenes_list[i_main_loop_content]['user_shot_type']
+ preserved_user_duration = st.session_state.project_story_treatment_scenes_list[i_main_loop_content]['user_scene_duration_secs']
+ preserved_user_asset_type = st.session_state.project_story_treatment_scenes_list[i_main_loop_content]['user_selected_asset_type']
+ prompt_for_gemini_scene_regen_pop = create_scene_regeneration_prompt(scene_item_for_display, feedback_input_for_treatment_regen, st.session_state.project_story_treatment_scenes_list)
  try:
+ updated_scene_data_from_gemini_pop = st.session_state.gemini_service_handler.regenerate_scene_script_details(prompt_for_gemini_scene_regen_pop)
+ final_merged_updated_scene_data_pop = {**updated_scene_data_from_gemini_pop}
+ final_merged_updated_scene_data_pop['user_shot_type'] = preserved_user_shot_type; final_merged_updated_scene_data_pop['user_scene_duration_secs'] = preserved_user_duration; final_merged_updated_scene_data_pop['user_selected_asset_type'] = preserved_user_asset_type
+ st.session_state.project_story_treatment_scenes_list[i_main_loop_content] = final_merged_updated_scene_data_pop
+ status_treatment_update_op_pop.update(label="Treatment updated! Regenerating asset...", state="running")
+ version_for_regenerated_asset_pop = 1
+ if current_asset_info_to_display and current_asset_info_to_display.get('path') and os.path.exists(current_asset_info_to_display['path']):
+ try: base_fn_asset_regen_pop,_=os.path.splitext(os.path.basename(current_asset_info_to_display['path'])); version_for_regenerated_asset_pop = int(base_fn_asset_regen_pop.split('_v')[-1])+1 if '_v' in base_fn_asset_regen_pop else 2
+ except Exception: version_for_regenerated_asset_pop = 2
+ if generate_asset_for_scene_in_app(i_main_loop_content, final_merged_updated_scene_data_pop, asset_ver_num=version_for_regenerated_asset_pop, user_asset_type_ui=preserved_user_asset_type): status_treatment_update_op_pop.update(label="Treatment & Asset Updated! πŸŽ‰", state="complete", expanded=False)
+ else: status_treatment_update_op_pop.update(label="Treatment updated, but asset regeneration failed.", state="complete", expanded=False)
  st.rerun()
+ except Exception as e_treatment_regen_op_pop: status_treatment_update_op_pop.update(label=f"Error during treatment regen: {e_treatment_regen_op_pop}", state="error"); logger.error(f"Scene treatment regeneration error: {e_treatment_regen_op_pop}", exc_info=True)
+ else: st.warning("Please provide feedback to update the treatment.")
+
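+ # Visual edit flow: image prompts are refined by Gemini from the user's feedback
+ # (create_visual_regeneration_prompt), while video prompts are rebuilt from the treatment with
+ # construct_text_to_video_prompt_for_gen4; the asset is then regenerated with the updated prompt.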
+ with st.popover(f"🎨 Edit S{scene_num_for_display} Visual Prompt/Asset"):
+ prompt_to_edit_display_pop = st.session_state.project_scene_generation_prompts_list[i_main_loop_content] if i_main_loop_content < len(st.session_state.project_scene_generation_prompts_list) else "No prompt."
+ st.caption("Current Asset Generation Prompt:"); st.code(prompt_to_edit_display_pop, language='text')
+ feedback_for_visual_asset_regen_input = st.text_area("Describe changes for visual asset:", key=f"visual_fb_input_pop_{key_base_main_area_widgets}", height=150)
+ if st.button(f"πŸ”„ Update S{scene_num_for_display} Asset", key=f"regen_visual_btn_pop_{key_base_main_area_widgets}"):
+ if feedback_for_visual_asset_regen_input:
+ with st.status(f"Refining prompt & asset for S{scene_num_for_display}...", expanded=True) as status_visual_asset_regen_op_pop:
+ user_selected_asset_type_for_regen_pop = st.session_state.project_story_treatment_scenes_list[i_main_loop_content]['user_selected_asset_type']
+ is_video_type_for_regen_pop = (user_selected_asset_type_for_regen_pop == "Video Clip") or (user_selected_asset_type_for_regen_pop == "Auto (Director's Choice)" and scene_item_for_display.get('suggested_asset_type_감독') == 'video_clip')
+ newly_constructed_asset_prompt_regen_pop = ""
+ if not is_video_type_for_regen_pop:
+ gemini_refinement_prompt_viz_pop = create_visual_regeneration_prompt(prompt_to_edit_display_pop, feedback_for_visual_asset_regen_input, scene_item_for_display, st.session_state.project_character_definitions_map, st.session_state.project_global_style_keywords_str)
+ try: newly_constructed_asset_prompt_regen_pop = st.session_state.gemini_service_handler.refine_image_prompt_from_feedback(gemini_refinement_prompt_viz_pop); st.session_state.project_scene_generation_prompts_list[i_main_loop_content] = newly_constructed_asset_prompt_regen_pop; status_visual_asset_regen_op_pop.update(label="Image prompt refined! Regenerating asset...", state="running")
+ except Exception as e_gemini_refine_viz_pop: status_visual_asset_regen_op_pop.update(label=f"Error refining prompt: {e_gemini_refine_viz_pop}", state="error"); logger.error(f"Visual prompt refinement error: {e_gemini_refine_viz_pop}", exc_info=True); continue
  else:
+ newly_constructed_asset_prompt_regen_pop = construct_text_to_video_prompt_for_gen4(scene_item_for_display, st.session_state.project_global_style_keywords_str); st.session_state.project_scene_generation_prompts_list[i_main_loop_content] = newly_constructed_asset_prompt_regen_pop; status_visual_asset_regen_op_pop.update(label="Video prompt reconstructed! Regenerating asset...", state="running")
+ if not newly_constructed_asset_prompt_regen_pop: status_visual_asset_regen_op_pop.update(label="Prompt construction failed.", state="error"); continue
+ version_for_regenerated_visual_asset_pop = 1
+ if current_asset_info_to_display and current_asset_info_to_display.get('path') and os.path.exists(current_asset_info_to_display['path']):
+ try: base_fn_viz_regen_pop,_=os.path.splitext(os.path.basename(current_asset_info_to_display['path'])); version_for_regenerated_visual_asset_pop = int(base_fn_viz_regen_pop.split('_v')[-1])+1 if '_v' in base_fn_viz_regen_pop else 2
+ except Exception: version_for_regenerated_visual_asset_pop = 2
+ if generate_asset_for_scene_in_app(i_main_loop_content, st.session_state.project_story_treatment_scenes_list[i_main_loop_content], asset_ver_num=version_for_regenerated_visual_asset_pop, user_asset_type_ui=user_selected_asset_type_for_regen_pop): status_visual_asset_regen_op_pop.update(label="Asset Updated! πŸŽ‰", state="complete", expanded=False)
+ else: status_visual_asset_regen_op_pop.update(label="Prompt updated, asset regen failed.", state="complete", expanded=False)
  st.rerun()
  else: st.warning("Please provide feedback for visual asset regeneration.")
  st.markdown("---")
 
+ if st.session_state.project_story_treatment_scenes_list and any(asset_info_item_vid_assembly and not asset_info_item_vid_assembly.get('error') and asset_info_item_vid_assembly.get('path') for asset_info_item_vid_assembly in st.session_state.project_generated_assets_info_list if asset_info_item_vid_assembly is not None):
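+ # Final assembly: shown once at least one scene has a usable asset. Each valid asset is collected
+ # with its scene number, key plot beat and user-set duration, then passed together with the
+ # narration track to the visual engine's assemble_animatic_from_assets.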
  if st.button("🎬 Assemble Narrated Cinematic Animatic", key="assemble_video_main_area_btn_final_unique_3", type="primary", use_container_width=True):
  with st.status("Assembling Ultra Animatic...", expanded=True) as status_video_assembly_final_op_main:
+ assets_for_final_video_assembly_list_main = []
+ for i_vid_assembly_main_loop, scene_data_for_vid_assembly_main in enumerate(st.session_state.project_story_treatment_scenes_list):
+ asset_info_current_scene_for_vid_main = st.session_state.project_generated_assets_info_list[i_vid_assembly_main_loop] if i_vid_assembly_main_loop < len(st.session_state.project_generated_assets_info_list) else None
+ if asset_info_current_scene_for_vid_main and not asset_info_current_scene_for_vid_main.get('error') and asset_info_current_scene_for_vid_main.get('path') and os.path.exists(asset_info_current_scene_for_vid_main['path']):
+ assets_for_final_video_assembly_list_main.append({'path': asset_info_current_scene_for_vid_main['path'], 'type': asset_info_current_scene_for_vid_main.get('type', 'image'), 'scene_num': scene_data_for_vid_assembly_main.get('scene_number', i_vid_assembly_main_loop + 1), 'key_action': scene_data_for_vid_assembly_main.get('key_plot_beat', ''), 'duration': scene_data_for_vid_assembly_main.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)})
+ status_video_assembly_final_op_main.write(f"Adding S{scene_data_for_vid_assembly_main.get('scene_number', i_vid_assembly_main_loop + 1)} ({asset_info_current_scene_for_vid_main.get('type')}).")
+ else: logger.warning(f"Skipping S{scene_data_for_vid_assembly_main.get('scene_number', i_vid_assembly_main_loop+1)} for video: No valid asset.")
+ if assets_for_final_video_assembly_list_main:
  status_video_assembly_final_op_main.write("Calling video engine..."); logger.info("APP: Calling visual_engine.assemble_animatic_from_assets")
+ st.session_state.project_final_video_path = st.session_state.visual_content_engine.assemble_animatic_from_assets(asset_data_list=assets_for_final_video_assembly_list_main, overall_narration_path=st.session_state.project_overall_narration_audio_path, output_filename="cinegen_ultra_animatic.mp4", fps=24)
  if st.session_state.project_final_video_path and os.path.exists(st.session_state.project_final_video_path): status_video_assembly_final_op_main.update(label="Ultra animatic assembled! πŸŽ‰", state="complete", expanded=False); st.balloons()
  else: status_video_assembly_final_op_main.update(label="Video assembly failed. Check logs.", state="error", expanded=True); logger.error("APP: Video assembly returned None or file does not exist.")
  else: status_video_assembly_final_op_main.update(label="No valid assets for video assembly.", state="error", expanded=True); logger.warning("APP: No valid assets found for video assembly.")