mgbam committed on
Commit
3903b53
·
verified ·
1 Parent(s): 7ff521a

Update app.py

Files changed (1)
  1. app.py +228 -311
app.py CHANGED
@@ -5,19 +5,31 @@ from core.visual_engine import VisualEngine
5
  from core.prompt_engineering import (
6
  create_cinematic_treatment_prompt,
7
  construct_dalle_prompt,
8
- construct_text_to_video_prompt_for_gen4, # <<< USE THIS FOR RUNWAY
9
  create_narration_script_prompt_enhanced,
10
  create_scene_regeneration_prompt,
11
- create_visual_regeneration_prompt # This is for DALL-E image prompt refinement
12
  )
13
  import os
14
  import logging
15
 
16
  # --- Configuration & Initialization ---
17
  st.set_page_config(page_title="CineGen AI Ultra+", layout="wide", initial_sidebar_state="expanded")
18
- # Configure logging to be more verbose for debugging if needed
19
  logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
20
- logger = logging.getLogger(__name__) # Get logger for this module
21
 
22
  # --- Global Definitions ---
23
  SHOT_TYPES_OPTIONS = [ "Director's Choice", "Establishing Shot", "Long Shot", "Full Shot", "Medium Long Shot (Cowboy)", "Medium Shot", "Medium Close-up", "Close-up", "Extreme Close-up", "Point of View (POV)", "Over the Shoulder", "Tracking Shot", "Dolly Zoom", "Crane Shot", "Aerial Shot", "Static Shot", "Dutch Angle", "Whip Pan"]
@@ -25,10 +37,7 @@ DEFAULT_SCENE_DURATION_SECS = 5
25
  DEFAULT_SHOT_TYPE = "Director's Choice"
26
  ASSET_TYPE_OPTIONS = ["Auto (Director's Choice)", "Image", "Video Clip"]
27
 
28
-
29
- # --- Global State Variables & API Key Setup ---
30
  def load_api_key(key_name_streamlit, key_name_env, service_name):
31
- # (Keep this function as it was - robust)
32
  key = None; secrets_available = hasattr(st, 'secrets')
33
  try:
34
  if secrets_available and key_name_streamlit in st.secrets:
@@ -38,7 +47,7 @@ def load_api_key(key_name_streamlit, key_name_env, service_name):
38
  if not key and key_name_env in os.environ:
39
  key = os.environ[key_name_env]
40
  if key: logger.info(f"{service_name} API Key found in environment variable.")
41
- if not key: logger.warning(f"{service_name} API Key NOT FOUND. Related features may be disabled or use fallbacks.")
42
  return key
43
 
44
  if 'services_initialized' not in st.session_state:
@@ -51,214 +60,166 @@ if 'services_initialized' not in st.session_state:
51
  st.session_state.RUNWAY_API_KEY = load_api_key("RUNWAY_API_KEY", "RUNWAY_API_KEY", "RunwayML")
52
 
53
  if not st.session_state.GEMINI_API_KEY: st.error("CRITICAL: Gemini API Key missing!"); logger.critical("Gemini API Key missing."); st.stop()
54
-
55
  try: st.session_state.gemini_handler = GeminiHandler(api_key=st.session_state.GEMINI_API_KEY); logger.info("GeminiHandler initialized.")
56
  except Exception as e: st.error(f"Failed to init GeminiHandler: {e}"); logger.critical(f"GeminiHandler init failed: {e}", exc_info=True); st.stop()
57
-
58
  try:
59
- default_voice_id_el = "Rachel" # Fallback
60
- configured_voice_id_el = st.session_state.ELEVENLABS_VOICE_ID_CONFIG or default_voice_id_el
61
  st.session_state.visual_engine = VisualEngine(output_dir="temp_cinegen_media", default_elevenlabs_voice_id=configured_voice_id_el)
62
  st.session_state.visual_engine.set_openai_api_key(st.session_state.OPENAI_API_KEY)
63
  st.session_state.visual_engine.set_elevenlabs_api_key(st.session_state.ELEVENLABS_API_KEY, voice_id_from_secret=st.session_state.ELEVENLABS_VOICE_ID_CONFIG)
64
  st.session_state.visual_engine.set_pexels_api_key(st.session_state.PEXELS_API_KEY)
65
- st.session_state.visual_engine.set_runway_api_key(st.session_state.RUNWAY_API_KEY) # Pass Runway key
66
  logger.info("VisualEngine initialized and API keys set.")
67
  except Exception as e: st.error(f"Failed to init VisualEngine: {e}"); logger.critical(f"VisualEngine init/key setting failed: {e}", exc_info=True); st.warning("VisualEngine critical setup issue.")
68
  st.session_state.services_initialized = True; logger.info("Service initialization complete.")
69
 
70
- # Initialize other session state variables
71
- for key_ss, default_val_ss in [ # Renamed loop vars
72
- ('story_treatment_scenes', []), ('scene_generation_prompts', []), ('generated_scene_assets_info', []), # Stores full asset info dicts
73
- ('video_path', None), ('character_definitions', {}), ('global_style_additions', ""),
74
- ('overall_narration_audio_path', None), ('narration_script_display', "")
75
- ]:
76
  if key_ss not in st.session_state: st.session_state[key_ss] = default_val_ss
77
 
78
- def initialize_new_project_state(): # Renamed
79
- st.session_state.story_treatment_scenes = []
80
- st.session_state.scene_generation_prompts = [] # Stores the prompt used for DALL-E or Runway
81
- st.session_state.generated_scene_assets_info = [] # Stores dicts {'path':..., 'type':..., 'error':..., 'prompt_used':...}
82
  st.session_state.video_path, st.session_state.overall_narration_audio_path, st.session_state.narration_script_display = None, None, ""
83
  logger.info("New project state initialized.")
84
 
85
- def generate_asset_for_scene_wrapper(scene_idx, scene_dict_data, version_num=1, user_selected_asset_type_override="Auto (Director's Choice)"): # Renamed
86
- # Determine if video clip is desired based on user override or Gemini's suggestion
87
  generate_as_video_clip_final = False
88
- gemini_suggested_asset_type = scene_dict_data.get('suggested_asset_type_감독', 'image').lower()
89
-
90
- if user_selected_asset_type_override == "Image":
91
- generate_as_video_clip_final = False
92
- elif user_selected_asset_type_override == "Video Clip":
93
- generate_as_video_clip_final = True
94
- elif user_selected_asset_type_override == "Auto (Director's Choice)": # Default
95
- generate_as_video_clip_final = (gemini_suggested_asset_type == "video_clip")
96
-
97
- # Prompt for base image generation (DALL-E or Pexels fallback)
98
- image_gen_prompt_text = construct_dalle_prompt(scene_dict_data, st.session_state.character_definitions, st.session_state.global_style_additions)
99
 
100
- # Prompt for video motion (Runway Gen-4) - only if generating video
101
  motion_gen_prompt_text = ""
102
  if generate_as_video_clip_final:
103
- motion_gen_prompt_text = construct_text_to_video_prompt_for_gen4(scene_dict_data, st.session_state.global_style_additions)
104
- if not motion_gen_prompt_text: # Fallback if specific motion prompt is empty
105
- logger.warning(f"S{scene_dict_data.get('scene_number', scene_idx+1)}: Motion prompt empty, using generic for Runway.")
106
- motion_gen_prompt_text = scene_dict_data.get('video_clip_motion_description_감독', "subtle ambient motion")
107
-
108
 
109
- if not image_gen_prompt_text: # Base image prompt is always needed
110
- logger.error(f"Base image prompt construction failed for S{scene_dict_data.get('scene_number', scene_idx+1)}"); return False
111
-
112
- # Ensure session state lists are adequate
113
  while len(st.session_state.scene_generation_prompts) <= scene_idx: st.session_state.scene_generation_prompts.append("")
114
  while len(st.session_state.generated_scene_assets_info) <= scene_idx: st.session_state.generated_scene_assets_info.append(None)
115
-
116
- # Store the relevant prompt (DALL-E for image, motion for video)
117
- # The generate_scene_asset method will return the actual prompt it used if different internally.
118
  st.session_state.scene_generation_prompts[scene_idx] = motion_gen_prompt_text if generate_as_video_clip_final else image_gen_prompt_text
119
 
120
- filename_base_for_asset = f"scene_{scene_dict_data.get('scene_number', scene_idx+1)}_asset_v{version_num}" # Renamed
121
- runway_dur_for_scene = scene_dict_data.get('video_clip_duration_estimate_secs_감독', scene_dict_data.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS))
122
  if runway_dur_for_scene <= 0 : runway_dur_for_scene = DEFAULT_SCENE_DURATION_SECS
123
 
124
  asset_result_dict = st.session_state.visual_engine.generate_scene_asset(
125
- image_generation_prompt_text=image_gen_prompt_text, # For base DALL-E/Pexels image
126
- motion_prompt_text_for_video=motion_gen_prompt_text, # For Runway motion
127
- scene_data=scene_dict_data,
128
- scene_identifier_filename_base=filename_base_for_asset,
129
- generate_as_video_clip=generate_as_video_clip_final,
130
- runway_target_duration=runway_dur_for_scene
131
  )
132
-
133
  st.session_state.generated_scene_assets_info[scene_idx] = asset_result_dict
134
- # Update the stored prompt with what was actually used by the engine, if available from result
135
- if asset_result_dict and asset_result_dict.get('prompt_used'):
136
- st.session_state.scene_generation_prompts[scene_idx] = asset_result_dict['prompt_used']
137
-
138
 
139
  if asset_result_dict and not asset_result_dict['error'] and asset_result_dict.get('path') and os.path.exists(asset_result_dict['path']):
140
- logger.info(f"Asset ({asset_result_dict.get('type')}) generated for S{scene_dict_data.get('scene_number', scene_idx+1)}: {os.path.basename(asset_result_dict['path'])}")
141
  return True
142
  else:
143
  err_msg_asset = asset_result_dict.get('error_message', 'Unknown error') if asset_result_dict else 'Asset result is None'
144
- logger.warning(f"Asset gen FAILED for S{scene_dict_data.get('scene_number', scene_idx+1)}. Type attempted: {'Video' if generate_as_video_clip_final else 'Image'}. Error: {err_msg_asset}")
145
- # Store a more detailed failure state if not already
146
  if not st.session_state.generated_scene_assets_info[scene_idx] or not st.session_state.generated_scene_assets_info[scene_idx]['error']:
147
  st.session_state.generated_scene_assets_info[scene_idx] = {'path': None, 'type': 'none', 'error': True, 'error_message': err_msg_asset, 'prompt_used': st.session_state.scene_generation_prompts[scene_idx]}
148
  return False
149
 
150
  # --- UI Sidebar ---
151
  with st.sidebar:
152
- # ... (Sidebar UI code as before, no changes needed for this fix) ...
153
  st.title("🎬 CineGen AI Ultra+")
154
  st.markdown("### Creative Seed")
155
- user_idea = st.text_area("Core Story Idea / Theme:", "A lone wanderer searches for a mythical oasis in a vast, post-apocalyptic desert, haunted by mirages and mechanical scavengers.", height=120, key="user_idea_main_v5")
156
- genre = st.selectbox("Primary Genre:", ["Cyberpunk", "Sci-Fi", "Fantasy", "Noir", "Thriller", "Western", "Post-Apocalyptic", "Historical Drama", "Surreal"], index=6, key="genre_main_v5")
157
- mood = st.selectbox("Overall Mood:", ["Hopeful yet Desperate", "Mysterious & Eerie", "Gritty & Tense", "Epic & Awe-Inspiring", "Melancholy & Reflective", "Whimsical & Lighthearted"], index=0, key="mood_main_v5")
158
- num_scenes = st.slider("Number of Key Scenes:", 1, 10, 2, key="num_scenes_main_v5")
159
- creative_guidance_options = {"Standard Director": "standard", "Artistic Visionary": "more_artistic", "Experimental Storyteller": "experimental_narrative"}
160
- selected_creative_guidance_key = st.selectbox("AI Creative Director Style:", options=list(creative_guidance_options.keys()), key="creative_guidance_select_v5")
161
- actual_creative_guidance = creative_guidance_options[selected_creative_guidance_key]
162
-
163
- if st.button("🌌 Generate Cinematic Treatment", type="primary", key="generate_treatment_btn_v5", use_container_width=True):
164
- initialize_new_project_state() # Use renamed function
165
- if not user_idea.strip(): st.warning("Please provide a story idea.")
166
  else:
167
- with st.status("AI Director is envisioning your masterpiece...", expanded=True) as status_op: # Renamed
168
  try:
169
- status_op.write("Phase 1: Gemini crafting cinematic treatment... 📜"); logger.info("Phase 1: Cinematic Treatment Gen.")
170
- treatment_gen_prompt = create_cinematic_treatment_prompt(user_idea, genre, mood, num_scenes, actual_creative_guidance) # Renamed
171
- raw_treatment_result = st.session_state.gemini_handler.generate_story_breakdown(treatment_gen_prompt) # Renamed
172
- if not isinstance(raw_treatment_result, list) or not raw_treatment_result: raise ValueError("Gemini returned invalid scene list format.")
173
-
174
- processed_scene_list = [] # Renamed
175
- for scene_from_gemini in raw_treatment_result: # Renamed
176
- scene_from_gemini['user_shot_type'] = scene_from_gemini.get('PROACTIVE_camera_work_감독', DEFAULT_SHOT_TYPE)
177
- # Use Gemini's video duration estimate if available for video clips, else default scene duration
178
- gemini_dur_est = scene_from_gemini.get('video_clip_duration_estimate_secs_감독', 0)
179
- scene_from_gemini['user_scene_duration_secs'] = gemini_dur_est if gemini_dur_est > 0 else DEFAULT_SCENE_DURATION_SECS
180
- scene_from_gemini['user_selected_asset_type'] = "Auto (Director's Choice)" # UI default
181
- processed_scene_list.append(scene_from_gemini)
182
- st.session_state.story_treatment_scenes = processed_scene_list
183
-
184
- num_generated_scenes = len(st.session_state.story_treatment_scenes) # Renamed
185
- st.session_state.scene_generation_prompts = [""]*num_generated_scenes
186
- st.session_state.generated_scene_assets_info = [None]*num_generated_scenes
187
- logger.info(f"Phase 1 complete. {num_generated_scenes} scenes."); status_op.update(label="Treatment complete! ✅ Generating visuals...", state="running")
188
-
189
- status_op.write("Phase 2: Creating visual assets (Image/Video)... 🖼️🎬"); logger.info("Phase 2: Visual Asset Gen.")
190
- successful_asset_count = 0 # Renamed
191
- for i_scene, scene_data_item in enumerate(st.session_state.story_treatment_scenes): # Renamed
192
- scene_num_display = scene_data_item.get('scene_number', i_scene+1) # Renamed
193
- status_op.write(f" Asset for Scene {scene_num_display}..."); logger.info(f" Processing asset for Scene {scene_num_display}.")
194
- if generate_asset_for_scene_wrapper(i_scene, scene_data_item, version_num=1): # Pass default 'Auto' for initial gen
195
- successful_asset_count += 1
196
 
197
- status_label_phase2 = "Visual assets ready! " # Renamed
198
- next_op_state = "running" # Renamed
199
- if successful_asset_count == 0 and num_generated_scenes > 0:
200
- logger.error("Asset gen failed for all scenes."); status_label_phase2 = "Asset gen FAILED for all scenes."; next_op_state="error";
201
- status_op.update(label=status_label_phase2, state=next_op_state, expanded=True); st.stop()
202
- elif successful_asset_count < num_generated_scenes:
203
- logger.warning(f"Assets partially generated ({successful_asset_count}/{num_generated_scenes})."); status_label_phase2 = f"Assets partially generated ({successful_asset_count}/{num_generated_scenes}). "
204
- status_op.update(label=f"{status_label_phase2}Generating narration script...", state=next_op_state)
205
- if next_op_state == "error": st.stop()
206
-
207
- status_op.write("Phase 3: Generating narration script..."); logger.info("Phase 3: Narration Script Gen.")
208
- voice_style_for_narration_prompt = st.session_state.get("selected_voice_style_for_generation", "cinematic_trailer") # Renamed
209
- narration_gen_prompt = create_narration_script_prompt_enhanced(st.session_state.story_treatment_scenes, mood, genre, voice_style_for_narration_prompt) # Renamed
210
- st.session_state.narration_script_display = st.session_state.gemini_handler.generate_image_prompt(narration_gen_prompt) # generate_image_prompt returns string
211
- logger.info("Narration script generated."); status_op.update(label="Narration script ready! Synthesizing voice...", state="running")
212
-
213
- status_op.write("Phase 4: Synthesizing voice (ElevenLabs)... 🔊"); logger.info("Phase 4: Voice Synthesis.")
214
  st.session_state.overall_narration_audio_path = st.session_state.visual_engine.generate_narration_audio(st.session_state.narration_script_display)
215
-
216
- final_status_label = "All components ready! Storyboard below. 🚀" # Renamed
217
- final_op_state = "complete" # Renamed
218
- if not st.session_state.overall_narration_audio_path:
219
- final_status_label = f"{status_label_phase2}Storyboard ready (Voiceover skipped or failed)."
220
- logger.warning("Voiceover generation was skipped or failed.")
221
  else: logger.info("Voiceover generated successfully.")
222
- status_op.update(label=final_status_label, state=final_op_state, expanded=False)
223
-
224
- except ValueError as ve_err: logger.error(f"ValueError in main generation: {ve_err}", exc_info=True); status_op.update(label=f"Input or Gemini response error: {ve_err}", state="error", expanded=True); # Renamed
225
- except Exception as e_unhandled: logger.error(f"Unhandled Exception in main generation: {e_unhandled}", exc_info=True); status_op.update(label=f"An unexpected error: {e_unhandled}", state="error", expanded=True); # Renamed
226
 
227
- # --- Sidebar Fine-Tuning Options (Characters, Global Style, Voice) ---
228
- # (Keep these sections as they were in the previous correct version)
229
  with st.expander("Define Characters", expanded=False):
230
- char_name_input = st.text_input("Character Name", key="char_name_adv_ultra_v5_sb"); char_desc_input = st.text_area("Visual Description", key="char_desc_adv_ultra_v5_sb", height=100, placeholder="e.g., Jax: rugged male astronaut...")
231
- if st.button("Save Character", key="add_char_adv_ultra_v5_sb"):
232
- if char_name_input and char_desc_input: st.session_state.character_definitions[char_name_input.strip().lower()] = char_desc_input.strip(); st.success(f"Char '{char_name_input.strip()}' saved.")
 
233
  else: st.warning("Name and description needed.")
234
  if st.session_state.character_definitions: st.caption("Current Characters:"); [st.markdown(f"**{k.title()}:** _{v}_") for k,v in st.session_state.character_definitions.items()]
235
-
236
  with st.expander("Global Style Overrides", expanded=False):
237
- style_presets_dict = { "Default (Director's Choice)": "", "Hyper-Realistic Gritty Noir": "hyper-realistic gritty neo-noir...", "Surreal Dreamscape Fantasy": "surreal dreamscape, epic fantasy...", "Vintage Analog Sci-Fi": "70s/80s analog sci-fi..."} # Truncated for brevity
238
- selected_style_preset_key = st.selectbox("Base Style Preset:", options=list(style_presets_dict.keys()), key="style_preset_adv_ultra_v5_sb")
239
- custom_style_keywords_input = st.text_area("Additional Custom Style Keywords:", key="custom_style_adv_ultra_v5_sb", height=80, placeholder="e.g., 'Dutch angle'")
240
- current_global_style = st.session_state.global_style_additions
241
- if st.button("Apply Global Styles", key="apply_styles_adv_ultra_v5_sb"):
242
- final_style_str = style_presets_dict[selected_style_preset_key];
243
- if custom_style_keywords_input.strip(): final_style_str = f"{final_style_str}, {custom_style_keywords_input.strip()}" if final_style_str else custom_style_keywords_input.strip()
244
- st.session_state.global_style_additions = final_style_str.strip(); current_global_style = final_style_str.strip() # Update local var for immediate display
245
- if current_global_style: st.success("Global styles applied!")
 
246
  else: st.info("Global style additions cleared.")
247
- if current_global_style: st.caption(f"Active global styles: \"{current_global_style}\"")
248
-
249
  with st.expander("Voice & Narration Style", expanded=False):
250
- engine_default_voice = "Rachel"
251
- if hasattr(st.session_state, 'visual_engine') and st.session_state.visual_engine: engine_default_voice = st.session_state.visual_engine.elevenlabs_voice_id
252
- user_voice_id_input = st.text_input("ElevenLabs Voice ID (override):", value=engine_default_voice, key="el_voice_id_override_v5_sb", help=f"Defaulting to '{engine_default_voice}'.")
253
- narration_prompt_styles_dict = {"Cinematic Trailer": "cinematic_trailer", "Neutral Documentary": "documentary_neutral", "Character Introspection": "introspective_character"}
254
- selected_narration_style_key = st.selectbox("Narration Script Style:", list(narration_prompt_styles_dict.keys()), key="narr_style_sel_v5_sb", index=0)
255
- if st.button("Set Narrator Voice & Style", key="set_voice_btn_ultra_v5_sb"):
256
- final_voice_id_to_use_el = user_voice_id_input.strip() or st.session_state.get("ELEVENLABS_VOICE_ID_CONFIG", "Rachel") # Fallback
257
- if hasattr(st.session_state, 'visual_engine'): st.session_state.visual_engine.elevenlabs_voice_id = final_voice_id_to_use_el
258
- st.session_state.selected_voice_style_for_generation = narration_prompt_styles_dict[selected_narration_style_key]
259
- st.success(f"Narrator Voice ID: {final_voice_id_to_use_el}. Script Style: {selected_narration_style_key}")
260
- logger.info(f"User updated 11L Voice ID: {final_voice_id_to_use_el}, Script Style: {selected_narration_style_key}")
261
-
262
 
263
  # --- Main Content Area ---
264
  st.header("🎬 Cinematic Storyboard & Treatment")
@@ -267,178 +228,134 @@ if st.session_state.narration_script_display:
267
 
268
  if not st.session_state.story_treatment_scenes: st.info("Use the sidebar to generate your cinematic treatment.")
269
  else:
270
- for i_main_loop, scene_content_item in enumerate(st.session_state.story_treatment_scenes): # Renamed
271
- scene_num_val = scene_content_item.get('scene_number', i_main_loop + 1) # Renamed
272
- scene_title_val = scene_content_item.get('scene_title', 'Untitled Scene') # Renamed
273
- # Ensure unique keys for widgets within the loop
274
- key_base_for_scene = f"s{scene_num_val}_{''.join(filter(str.isalnum, scene_title_val[:10]))}_main_{i_main_loop}" # Renamed
275
 
276
- if "director_note" in scene_content_item and scene_content_item['director_note']: st.info(f"🎬 Director Note S{scene_num_val}: {scene_content_item['director_note']}")
277
- st.subheader(f"SCENE {scene_num_val}: {scene_title_val.upper()}"); col_treatment, col_visual = st.columns([0.45, 0.55]) # Renamed
278
 
279
- with col_treatment: # Treatment and Controls Column
280
  with st.expander("📝 Scene Treatment & Controls", expanded=True):
281
- # ... (Display textual scene details - beat, setting, chars, etc. - as before) ...
282
- st.markdown(f"**Beat:** {scene_content_item.get('emotional_beat', 'N/A')}"); st.markdown(f"**Setting:** {scene_content_item.get('setting_description', 'N/A')}"); st.markdown(f"**Chars:** {', '.join(scene_content_item.get('characters_involved', ['N/A']))}"); st.markdown(f"**Focus Moment:** _{scene_content_item.get('character_focus_moment', 'N/A')}_"); st.markdown(f"**Plot Beat:** {scene_content_item.get('key_plot_beat', 'N/A')}"); st.markdown(f"**Dialogue Hook:** `\"{scene_content_item.get('suggested_dialogue_hook', '...')}\"`"); st.markdown("---"); st.markdown(f"**Dir. Visual Style:** _{scene_content_item.get('PROACTIVE_visual_style_감독', 'N/A')}_"); st.markdown(f"**Dir. Camera:** _{scene_content_item.get('PROACTIVE_camera_work_감독', 'N/A')}_"); st.markdown(f"**Dir. Sound:** _{scene_content_item.get('PROACTIVE_sound_design_감독', 'N/A')}_"); st.markdown("---")
283
-
284
  st.markdown("##### Shot, Pacing & Asset Controls")
285
- # User Shot Type (Camera Angle)
286
- current_ui_shot_type = st.session_state.story_treatment_scenes[i_main_loop].get('user_shot_type', DEFAULT_SHOT_TYPE) # Renamed
287
- try: shot_type_idx_val = SHOT_TYPES_OPTIONS.index(current_ui_shot_type) # Renamed
288
- except ValueError: shot_type_idx_val = SHOT_TYPES_OPTIONS.index(DEFAULT_SHOT_TYPE)
289
- new_ui_shot_type = st.selectbox("Dominant Shot Type:", options=SHOT_TYPES_OPTIONS, index=shot_type_idx_val, key=f"shot_type_widget_{key_base_for_scene}") # Renamed
290
- if new_ui_shot_type != current_ui_shot_type: st.session_state.story_treatment_scenes[i_main_loop]['user_shot_type'] = new_ui_shot_type
291
-
292
- # User Scene Duration
293
- current_ui_duration = st.session_state.story_treatment_scenes[i_main_loop].get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS) # Renamed
294
- new_ui_duration = st.number_input("Scene Duration (seconds):", min_value=1, max_value=300, value=current_ui_duration, step=1, key=f"duration_widget_{key_base_for_scene}") # Renamed
295
- if new_ui_duration != current_ui_duration: st.session_state.story_treatment_scenes[i_main_loop]['user_scene_duration_secs'] = new_ui_duration
296
-
297
- # User Asset Type Selection
298
- current_ui_asset_type = st.session_state.story_treatment_scenes[i_main_loop].get('user_selected_asset_type', "Auto (Director's Choice)") # Renamed
299
- try: asset_type_idx_val = ASSET_TYPE_OPTIONS.index(current_ui_asset_type) # Renamed
300
- except ValueError: asset_type_idx_val = 0
301
- new_ui_asset_type = st.selectbox("Asset Type Override:", ASSET_TYPE_OPTIONS, index=asset_type_idx_val, key=f"asset_type_sel_{key_base_for_scene}", help="Choose 'Image' or 'Video Clip'. 'Auto' uses Gemini's suggestion.") # Renamed
302
- if new_ui_asset_type != current_ui_asset_type: st.session_state.story_treatment_scenes[i_main_loop]['user_selected_asset_type'] = new_ui_asset_type
303
  st.markdown("---")
304
-
305
- # Display generated prompt for the asset
306
- prompt_for_current_asset = st.session_state.scene_generation_prompts[i_main_loop] if i_main_loop < len(st.session_state.scene_generation_prompts) else None # Renamed
307
- if prompt_for_current_asset:
308
- with st.popover("👁️ View Asset Generation Prompt"):
309
- st.markdown(f"**Prompt used for current asset:**"); st.code(prompt_for_current_asset, language='text')
310
-
311
- pexels_query_val = scene_content_item.get('pexels_search_query_감독', None) # Renamed
312
- if pexels_query_val: st.caption(f"Pexels Fallback Query: `{pexels_query_val}`")
313
-
314
- with col_visual: # Visuals Column
315
- asset_info_for_scene = st.session_state.generated_scene_assets_info[i_main_loop] if i_main_loop < len(st.session_state.generated_scene_assets_info) else None # Renamed
316
- if asset_info_for_scene and not asset_info_for_scene.get('error') and asset_info_for_scene.get('path') and os.path.exists(asset_info_for_scene['path']):
317
- path_to_asset_file = asset_info_for_scene['path'] # Renamed
318
- type_of_asset_file = asset_info_for_scene.get('type', 'image') # Renamed
319
- if type_of_asset_file == 'image': st.image(path_to_asset_file, caption=f"Scene {scene_num_val} ({type_of_asset_file}): {scene_title_val}")
320
- elif type_of_asset_file == 'video':
321
  try:
322
- with open(path_to_asset_file, 'rb') as vf_read: video_bytes_data = vf_read.read() # Renamed
323
- st.video(video_bytes_data, format="video/mp4", start_time=0); st.caption(f"Scene {scene_num_val} ({type_of_asset_file}): {scene_title_val}")
324
- except Exception as e_vid_display: st.error(f"Error displaying video {path_to_asset_file}: {e_vid_display}"); logger.error(f"Error displaying video: {e_vid_display}", exc_info=True) # Renamed
325
- else: st.warning(f"Unknown asset type '{type_of_asset_file}' for Scene {scene_num_val}.")
326
  else:
327
  if st.session_state.story_treatment_scenes:
328
- error_message_display = asset_info_for_scene.get('error_message', 'Visual pending or failed.') if asset_info_for_scene else 'Visual pending or failed.' # Renamed
329
- st.caption(error_message_display)
330
 
331
- # --- Popovers for Editing Scene Treatment & Visual Prompt ---
332
- with st.popover(f"✏️ Edit S{scene_num_val} Treatment"):
333
- feedback_for_treatment = st.text_area("Changes to treatment:", key=f"treat_fb_{key_base_for_scene}", height=150) # Renamed
334
- if st.button(f"🔄 Update S{scene_num_val} Treatment", key=f"regen_treat_btn_{key_base_for_scene}"):
335
- if feedback_for_treatment:
336
- with st.status(f"Updating S{scene_num_val} Treatment & Asset...", expanded=True) as status_treatment_regen: # Renamed
337
- user_shot_type_pref = st.session_state.story_treatment_scenes[i_main_loop]['user_shot_type'] # Renamed
338
- user_duration_pref = st.session_state.story_treatment_scenes[i_main_loop]['user_scene_duration_secs'] # Renamed
339
- user_asset_type_pref = st.session_state.story_treatment_scenes[i_main_loop]['user_selected_asset_type'] # Renamed
340
-
341
- regen_prompt_for_gemini = create_scene_regeneration_prompt(scene_content_item, feedback_for_treatment, st.session_state.story_treatment_scenes) # Renamed
342
  try:
343
- updated_scene_data_gemini = st.session_state.gemini_handler.regenerate_scene_script_details(regen_prompt_for_gemini) # Renamed
344
- final_updated_scene_data = {**updated_scene_data_gemini} # Renamed
345
- final_updated_scene_data['user_shot_type'] = user_shot_type_pref
346
- final_updated_scene_data['user_scene_duration_secs'] = user_duration_pref
347
- final_updated_scene_data['user_selected_asset_type'] = user_asset_type_pref
348
- st.session_state.story_treatment_scenes[i_main_loop] = final_updated_scene_data
349
- status_treatment_regen.update(label="Treatment updated! Regenerating asset...", state="running")
350
-
351
- version_num_asset = 1 # Renamed
352
- if asset_info_for_scene and asset_info_for_scene.get('path') and os.path.exists(asset_info_for_scene['path']):
353
- try: base_fn_asset,_=os.path.splitext(os.path.basename(asset_info_for_scene['path'])); version_num_asset = int(base_fn_asset.split('_v')[-1])+1 if '_v' in base_fn_asset else 2 # Renamed
354
- except: version_num_asset = 2
355
-
356
- if generate_asset_for_scene_wrapper(i_main_loop, final_updated_scene_data, version_num=version_num_asset, user_selected_asset_type_override=user_asset_type_pref):
357
- status_treatment_regen.update(label="Treatment & Asset Updated! 🎉", state="complete", expanded=False)
358
- else: status_treatment_regen.update(label="Treatment updated, asset failed.", state="complete", expanded=False)
359
  st.rerun()
360
- except Exception as e_treat_regen_main: status_treatment_regen.update(label=f"Error: {e_treat_regen_main}", state="error"); logger.error(f"Scene treatment regen error: {e_treat_regen_main}", exc_info=True) # Renamed
361
  else: st.warning("Please provide feedback for treatment.")
362
 
363
- with st.popover(f"🎨 Edit S{scene_num_val} Visual Prompt/Asset"):
364
- current_gen_prompt_display = st.session_state.scene_generation_prompts[i_main_loop] if i_main_loop < len(st.session_state.scene_generation_prompts) else "No prompt generated yet." # Renamed
365
- st.caption("Current Asset Generation Prompt:"); st.code(current_gen_prompt_display, language='text')
366
- feedback_for_visual = st.text_area("Describe changes for the visual asset:", key=f"visual_fb_{key_base_for_scene}", height=150) # Renamed
367
- if st.button(f"🔄 Update S{scene_num_val} Asset", key=f"regen_visual_btn_{key_base_for_scene}"):
368
- if feedback_for_visual:
369
- with st.status(f"Refining prompt & regenerating asset for S{scene_num_val}...", expanded=True) as status_visual_regen: # Renamed
370
- user_asset_type_choice_visual = st.session_state.story_treatment_scenes[i_main_loop]['user_selected_asset_type'] # Renamed
371
- is_video_asset_type = (user_asset_type_choice_visual == "Video Clip") or \
372
- (user_asset_type_choice_visual == "Auto (Director's Choice)" and scene_content_item.get('suggested_asset_type_감독') == 'video_clip')
373
-
374
- newly_constructed_asset_prompt = "" # Renamed
375
- if not is_video_asset_type: # Refining an IMAGE prompt
376
- gemini_refinement_prompt = create_visual_regeneration_prompt(current_gen_prompt_display, feedback_for_visual, scene_content_item, st.session_state.character_definitions, st.session_state.global_style_additions) # Renamed
377
- try:
378
- newly_constructed_asset_prompt = st.session_state.gemini_handler.refine_image_prompt_from_feedback(gemini_refinement_prompt)
379
- st.session_state.scene_generation_prompts[i_main_loop] = newly_constructed_asset_prompt
380
- status_visual_regen.update(label="Image prompt refined by Gemini! Regenerating asset...", state="running")
381
- except Exception as e_gemini_prompt_refine: status_visual_regen.update(label=f"Error refining prompt: {e_gemini_prompt_refine}", state="error"); logger.error(f"Visual prompt refinement error: {e_gemini_prompt_refine}", exc_info=True); continue # Skip asset gen
382
- else: # For VIDEO, reconstruct the motion prompt based on current scene data and feedback (feedback isn't directly used by construct_text_to_video_prompt_for_gen4 here, but scene_data might have changed)
383
- # For video, feedback should ideally modify scene_content_item's motion description first, then reconstruct.
384
- # Simple reconstruction for now:
385
- logger.info(f"Reconstructing video motion prompt for S{scene_num_val} based on feedback (indirectly via scene_data). Feedback was: {feedback_for_visual}")
386
- newly_constructed_asset_prompt = construct_text_to_video_prompt_for_gen4(scene_content_item, st.session_state.global_style_additions)
387
- st.session_state.scene_generation_prompts[i_main_loop] = newly_constructed_asset_prompt
388
- status_visual_regen.update(label="Video motion prompt reconstructed! Regenerating asset...", state="running")
389
-
390
- if not newly_constructed_asset_prompt: status_visual_regen.update(label="Prompt construction failed.", state="error"); continue
391
-
392
- version_num_visual_asset = 1 # Renamed
393
- if asset_info_for_scene and asset_info_for_scene.get('path') and os.path.exists(asset_info_for_scene['path']):
394
- try: base_fn_viz_asset,_=os.path.splitext(os.path.basename(asset_info_for_scene['path'])); version_num_visual_asset = int(base_fn_viz_asset.split('_v')[-1])+1 if '_v' in base_fn_viz_asset else 2 # Renamed
395
- except: version_num_visual_asset = 2
396
-
397
- if generate_asset_for_scene_wrapper(i_main_loop, st.session_state.story_treatment_scenes[i_main_loop], version_num=version_num_visual_asset, user_selected_asset_type_override=user_asset_type_choice_visual):
398
- status_visual_regen.update(label="Asset Updated! 🎉", state="complete", expanded=False)
399
- else: status_visual_regen.update(label="Prompt updated, asset regeneration failed.", state="complete", expanded=False)
400
  st.rerun()
401
  else: st.warning("Please provide feedback for visual asset.")
402
  st.markdown("---")
403
 
404
- # Video Assembly Button
405
  if st.session_state.story_treatment_scenes and any(asset_info_item_loop and not asset_info_item_loop.get('error') and asset_info_item_loop.get('path') for asset_info_item_loop in st.session_state.generated_scene_assets_info if asset_info_item_loop is not None):
406
- if st.button("🎬 Assemble Narrated Cinematic Animatic", key="assemble_ultra_video_btn_v5_main", type="primary", use_container_width=True): # Unique key
407
- with st.status("Assembling Ultra Animatic...", expanded=True) as status_video_assembly: # Renamed
408
- assets_for_final_video = [] # Renamed
409
- for i_vid_assembly, scene_data_for_vid in enumerate(st.session_state.story_treatment_scenes): # Renamed
410
- asset_info_current_scene = st.session_state.generated_scene_assets_info[i_vid_assembly] if i_vid_assembly < len(st.session_state.generated_scene_assets_info) else None # Renamed
411
- if asset_info_current_scene and not asset_info_current_scene.get('error') and asset_info_current_scene.get('path') and os.path.exists(asset_info_current_scene['path']):
412
- assets_for_final_video.append({
413
- 'path': asset_info_current_scene['path'],
414
- 'type': asset_info_current_scene.get('type', 'image'),
415
- 'scene_num': scene_data_for_vid.get('scene_number', i_vid_assembly + 1),
416
- 'key_action': scene_data_for_vid.get('key_plot_beat', ''),
417
- 'duration': scene_data_for_vid.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)
418
- })
419
- status_video_assembly.write(f"Adding S{scene_data_for_vid.get('scene_number', i_vid_assembly + 1)} ({asset_info_current_scene.get('type')}).")
420
- else: logger.warning(f"Skipping S{scene_data_for_vid.get('scene_number', i_vid_assembly+1)} for video: No valid asset.")
421
-
422
- if assets_for_final_video:
423
- status_video_assembly.write("Calling video engine...");
424
- st.session_state.video_path = st.session_state.visual_engine.assemble_animatic_from_assets(
425
- asset_data_list=assets_for_final_video,
426
- overall_narration_path=st.session_state.overall_narration_audio_path,
427
- output_filename="cinegen_ultra_animatic.mp4", fps=24
428
- )
429
- if st.session_state.video_path and os.path.exists(st.session_state.video_path):
430
- status_video_assembly.update(label="Ultra animatic assembled! 🎉", state="complete", expanded=False); st.balloons()
431
- else: status_video_assembly.update(label="Video assembly failed. Check logs.", state="error", expanded=False); logger.error("Video assembly returned None or file does not exist.")
432
- else: status_video_assembly.update(label="No valid assets for video assembly.", state="error", expanded=False); logger.warning("No valid assets found for video assembly.")
433
  elif st.session_state.story_treatment_scenes: st.info("Generate visual assets before assembling the animatic.")
434
 
435
  if st.session_state.video_path and os.path.exists(st.session_state.video_path):
436
  st.header("🎬 Generated Cinematic Animatic");
437
  try:
438
- with open(st.session_state.video_path, 'rb') as vf_obj_read: video_bytes_content = vf_obj_read.read() # Renamed
439
- st.video(video_bytes_content, format="video/mp4")
440
- st.download_button(label="Download Ultra Animatic", data=video_bytes_content, file_name=os.path.basename(st.session_state.video_path), mime="video/mp4", use_container_width=True, key="download_ultra_video_btn_v5_main_dl" ) # Unique key
441
- except Exception as e_vid_final_display: st.error(f"Error displaying final video: {e_vid_final_display}"); logger.error(f"Error displaying final video: {e_vid_final_display}", exc_info=True) # Renamed
442
 
443
- # --- Footer ---
444
  st.sidebar.markdown("---"); st.sidebar.caption("CineGen AI Ultra+ | Visionary Cinematic Pre-Production")
 
5
  from core.prompt_engineering import (
6
  create_cinematic_treatment_prompt,
7
  construct_dalle_prompt,
8
+ construct_text_to_video_prompt_for_gen4,
9
  create_narration_script_prompt_enhanced,
10
  create_scene_regeneration_prompt,
11
+ create_visual_regeneration_prompt
12
  )
13
  import os
14
  import logging
15
 
16
+ # --- Mitigate Streamlit PermissionError on Hugging Face Spaces ---
17
+ # Streamlit tries to write to ~/.streamlit or /.streamlit.
18
+ # By setting a global STREAMLIT_CONFIG_DIR or specific telemetry flags,
19
+ # we can sometimes avoid this. A common fix is to disable telemetry.
20
+ # However, the error specifically mentions '/.streamlit', which is root.
21
+ # Setting STREAMLIT_HOME in Dockerfile is a more robust fix for this specific path.
22
+ # If that's not enough, disabling usage stats collection is a good fallback.
23
+ if "STREAMLIT_CLIENT_GATHER_USAGE_STATS" not in os.environ:
24
+ os.environ["STREAMLIT_CLIENT_GATHER_USAGE_STATS"] = "false"
25
+ # Also, ensure Streamlit doesn't try to create a global config dir if HOME is weird
26
+ if "STREAMLIT_HOME" not in os.environ and os.getcwd().startswith("/app"): # Common for HF
27
+ os.environ["STREAMLIT_HOME"] = "/app/.streamlit_config" # Use a writable path within the app dir
28
+
29
  # --- Configuration & Initialization ---
30
  st.set_page_config(page_title="CineGen AI Ultra+", layout="wide", initial_sidebar_state="expanded")
 
31
  logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
32
+ logger = logging.getLogger(__name__)
33
 
34
  # --- Global Definitions ---
35
  SHOT_TYPES_OPTIONS = [ "Director's Choice", "Establishing Shot", "Long Shot", "Full Shot", "Medium Long Shot (Cowboy)", "Medium Shot", "Medium Close-up", "Close-up", "Extreme Close-up", "Point of View (POV)", "Over the Shoulder", "Tracking Shot", "Dolly Zoom", "Crane Shot", "Aerial Shot", "Static Shot", "Dutch Angle", "Whip Pan"]
 
37
  DEFAULT_SHOT_TYPE = "Director's Choice"
38
  ASSET_TYPE_OPTIONS = ["Auto (Director's Choice)", "Image", "Video Clip"]
39
 
40
  def load_api_key(key_name_streamlit, key_name_env, service_name):
 
41
  key = None; secrets_available = hasattr(st, 'secrets')
42
  try:
43
  if secrets_available and key_name_streamlit in st.secrets:
 
47
  if not key and key_name_env in os.environ:
48
  key = os.environ[key_name_env]
49
  if key: logger.info(f"{service_name} API Key found in environment variable.")
50
+ if not key: logger.warning(f"{service_name} API Key NOT FOUND for {service_name}. Related features may be disabled or use fallbacks.")
51
  return key
52
 
53
  if 'services_initialized' not in st.session_state:
 
60
  st.session_state.RUNWAY_API_KEY = load_api_key("RUNWAY_API_KEY", "RUNWAY_API_KEY", "RunwayML")
61
 
62
  if not st.session_state.GEMINI_API_KEY: st.error("CRITICAL: Gemini API Key missing!"); logger.critical("Gemini API Key missing."); st.stop()
 
63
  try: st.session_state.gemini_handler = GeminiHandler(api_key=st.session_state.GEMINI_API_KEY); logger.info("GeminiHandler initialized.")
64
  except Exception as e: st.error(f"Failed to init GeminiHandler: {e}"); logger.critical(f"GeminiHandler init failed: {e}", exc_info=True); st.stop()
 
65
  try:
66
+ default_voice_id_el = "Rachel"; configured_voice_id_el = st.session_state.ELEVENLABS_VOICE_ID_CONFIG or default_voice_id_el
 
67
  st.session_state.visual_engine = VisualEngine(output_dir="temp_cinegen_media", default_elevenlabs_voice_id=configured_voice_id_el)
68
  st.session_state.visual_engine.set_openai_api_key(st.session_state.OPENAI_API_KEY)
69
  st.session_state.visual_engine.set_elevenlabs_api_key(st.session_state.ELEVENLABS_API_KEY, voice_id_from_secret=st.session_state.ELEVENLABS_VOICE_ID_CONFIG)
70
  st.session_state.visual_engine.set_pexels_api_key(st.session_state.PEXELS_API_KEY)
71
+ st.session_state.visual_engine.set_runway_api_key(st.session_state.RUNWAY_API_KEY)
72
  logger.info("VisualEngine initialized and API keys set.")
73
  except Exception as e: st.error(f"Failed to init VisualEngine: {e}"); logger.critical(f"VisualEngine init/key setting failed: {e}", exc_info=True); st.warning("VisualEngine critical setup issue.")
74
  st.session_state.services_initialized = True; logger.info("Service initialization complete.")
75
 
76
+ for key_ss, default_val_ss in [ ('story_treatment_scenes', []), ('scene_generation_prompts', []), ('generated_scene_assets_info', []), ('video_path', None), ('character_definitions', {}), ('global_style_additions', ""), ('overall_narration_audio_path', None), ('narration_script_display', "")]:
77
  if key_ss not in st.session_state: st.session_state[key_ss] = default_val_ss
78
 
79
+ def initialize_new_project_state():
80
+ st.session_state.story_treatment_scenes = []; st.session_state.scene_generation_prompts = []; st.session_state.generated_scene_assets_info = []
81
  st.session_state.video_path, st.session_state.overall_narration_audio_path, st.session_state.narration_script_display = None, None, ""
82
  logger.info("New project state initialized.")
83
 
84
+ def generate_asset_for_scene_wrapper(scene_idx, scene_data_item_for_asset, version_num=1, user_selected_asset_type_override="Auto (Director's Choice)"): # Renamed scene_data
 
85
  generate_as_video_clip_final = False
86
+ gemini_suggested_asset_type = scene_data_item_for_asset.get('suggested_asset_type_감독', 'image').lower()
87
+ if user_selected_asset_type_override == "Image": generate_as_video_clip_final = False
88
+ elif user_selected_asset_type_override == "Video Clip": generate_as_video_clip_final = True
89
+ elif user_selected_asset_type_override == "Auto (Director's Choice)": generate_as_video_clip_final = (gemini_suggested_asset_type == "video_clip")
90
 
91
+ image_gen_prompt_text = construct_dalle_prompt(scene_data_item_for_asset, st.session_state.character_definitions, st.session_state.global_style_additions)
92
  motion_gen_prompt_text = ""
93
  if generate_as_video_clip_final:
94
+ motion_gen_prompt_text = construct_text_to_video_prompt_for_gen4(scene_data_item_for_asset, st.session_state.global_style_additions)
95
+ if not motion_gen_prompt_text: motion_gen_prompt_text = scene_data_item_for_asset.get('video_clip_motion_description_감독', "subtle ambient motion"); logger.warning(f"S{scene_data_item_for_asset.get('scene_number', scene_idx+1)}: Empty motion prompt, using default.")
96
 
97
+ if not image_gen_prompt_text: logger.error(f"Base image prompt construction failed for S{scene_data_item_for_asset.get('scene_number', scene_idx+1)}"); return False
98
  while len(st.session_state.scene_generation_prompts) <= scene_idx: st.session_state.scene_generation_prompts.append("")
99
  while len(st.session_state.generated_scene_assets_info) <= scene_idx: st.session_state.generated_scene_assets_info.append(None)
100
  st.session_state.scene_generation_prompts[scene_idx] = motion_gen_prompt_text if generate_as_video_clip_final else image_gen_prompt_text
101
 
102
+ filename_base_for_asset = f"scene_{scene_data_item_for_asset.get('scene_number', scene_idx+1)}_asset_v{version_num}"
103
+ runway_dur_for_scene = scene_data_item_for_asset.get('video_clip_duration_estimate_secs_감독', scene_data_item_for_asset.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS))
104
  if runway_dur_for_scene <= 0 : runway_dur_for_scene = DEFAULT_SCENE_DURATION_SECS
105
 
106
  asset_result_dict = st.session_state.visual_engine.generate_scene_asset(
107
+ image_generation_prompt_text=image_gen_prompt_text,
108
+ motion_prompt_text_for_video=motion_gen_prompt_text,
109
+ scene_data_dict=scene_data_item_for_asset, # <<< CHANGED 'scene_data' TO 'scene_data_dict'
110
+ scene_identifier_fn_base=filename_base_for_asset,
111
+ generate_as_video_clip_flag=generate_as_video_clip_final, # Renamed for clarity
112
+ runway_target_dur_val=runway_dur_for_scene # Renamed for clarity
113
  )
 
114
  st.session_state.generated_scene_assets_info[scene_idx] = asset_result_dict
115
+ if asset_result_dict and asset_result_dict.get('prompt_used'): st.session_state.scene_generation_prompts[scene_idx] = asset_result_dict['prompt_used']
116
 
117
  if asset_result_dict and not asset_result_dict['error'] and asset_result_dict.get('path') and os.path.exists(asset_result_dict['path']):
118
+ logger.info(f"Asset ({asset_result_dict.get('type')}) generated for S{scene_data_item_for_asset.get('scene_number', scene_idx+1)}: {os.path.basename(asset_result_dict['path'])}")
119
  return True
120
  else:
121
  err_msg_asset = asset_result_dict.get('error_message', 'Unknown error') if asset_result_dict else 'Asset result is None'
122
+ logger.warning(f"Asset gen FAILED for S{scene_data_item_for_asset.get('scene_number', scene_idx+1)}. Type: {'Video' if generate_as_video_clip_final else 'Image'}. Err: {err_msg_asset}")
 
123
  if not st.session_state.generated_scene_assets_info[scene_idx] or not st.session_state.generated_scene_assets_info[scene_idx]['error']:
124
  st.session_state.generated_scene_assets_info[scene_idx] = {'path': None, 'type': 'none', 'error': True, 'error_message': err_msg_asset, 'prompt_used': st.session_state.scene_generation_prompts[scene_idx]}
125
  return False
126
 
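The keyword renames in the generate_scene_asset call above (scene_data_dict, scene_identifier_fn_base, generate_as_video_clip_flag, runway_target_dur_val) only bind if core/visual_engine.py declares parameters with exactly those names; mismatched keyword arguments raise a TypeError at call time. That file is not part of this diff, so the sketch below only illustrates a compatible signature and the result-dict keys the wrapper reads back, not the actual VisualEngine implementation.

class VisualEngine:
    # Hypothetical signature compatible with the keyword arguments used in app.py above.
    def generate_scene_asset(self, image_generation_prompt_text, motion_prompt_text_for_video,
                             scene_data_dict, scene_identifier_fn_base,
                             generate_as_video_clip_flag=False, runway_target_dur_val=5):
        # The real method would return a dict the wrapper consumes, e.g.
        # {'path': ..., 'type': 'image' or 'video', 'error': bool,
        #  'error_message': ..., 'prompt_used': ...}.
        raise NotImplementedError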
127
  # --- UI Sidebar ---
128
  with st.sidebar:
 
129
  st.title("🎬 CineGen AI Ultra+")
130
  st.markdown("### Creative Seed")
131
+ user_idea_input = st.text_area("Core Story Idea / Theme:", "A lone wanderer searches for a mythical oasis in a vast, post-apocalyptic desert, haunted by mirages and mechanical scavengers.", height=120, key="user_idea_main_v5_sb_unique")
132
+ genre_selection = st.selectbox("Primary Genre:", ["Cyberpunk", "Sci-Fi", "Fantasy", "Noir", "Thriller", "Western", "Post-Apocalyptic", "Historical Drama", "Surreal"], index=6, key="genre_main_v5_sb_unique")
133
+ mood_selection = st.selectbox("Overall Mood:", ["Hopeful yet Desperate", "Mysterious & Eerie", "Gritty & Tense", "Epic & Awe-Inspiring", "Melancholy & Reflective", "Whimsical & Lighthearted"], index=0, key="mood_main_v5_sb_unique")
134
+ num_scenes_slider = st.slider("Number of Key Scenes:", 1, 10, 1, key="num_scenes_main_v5_sb_unique") # Default to 1 for faster testing
135
+ creative_guidance_options_map = {"Standard Director": "standard", "Artistic Visionary": "more_artistic", "Experimental Storyteller": "experimental_narrative"}
136
+ selected_creative_guidance_key_sb = st.selectbox("AI Creative Director Style:", options=list(creative_guidance_options_map.keys()), key="creative_guidance_select_v5_sb_unique")
137
+ actual_creative_guidance_val = creative_guidance_options_map[selected_creative_guidance_key_sb]
138
+
139
+ if st.button("🌌 Generate Cinematic Treatment", type="primary", key="generate_treatment_btn_v5_sb_unique", use_container_width=True):
140
+ initialize_new_project_state()
141
+ if not user_idea_input.strip(): st.warning("Please provide a story idea.")
142
  else:
143
+ with st.status("AI Director is envisioning your masterpiece...", expanded=True) as status_op_main:
144
  try:
145
+ status_op_main.write("Phase 1: Gemini crafting cinematic treatment... 📜"); logger.info("Phase 1: Cinematic Treatment Gen.")
146
+ treatment_gen_prompt_gemini = create_cinematic_treatment_prompt(user_idea_input, genre_selection, mood_selection, num_scenes_slider, actual_creative_guidance_val)
147
+ raw_treatment_result_list = st.session_state.gemini_handler.generate_story_breakdown(treatment_gen_prompt_gemini)
148
+ if not isinstance(raw_treatment_result_list, list) or not raw_treatment_result_list: raise ValueError("Gemini returned invalid scene list format.")
149
+ processed_scene_list_init = []
150
+ for scene_from_gemini_init in raw_treatment_result_list:
151
+ scene_from_gemini_init['user_shot_type'] = scene_from_gemini_init.get('PROACTIVE_camera_work_감독', DEFAULT_SHOT_TYPE)
152
+ gemini_dur_est_init = scene_from_gemini_init.get('video_clip_duration_estimate_secs_감독', 0)
153
+ scene_from_gemini_init['user_scene_duration_secs'] = gemini_dur_est_init if gemini_dur_est_init > 0 else DEFAULT_SCENE_DURATION_SECS
154
+ scene_from_gemini_init['user_selected_asset_type'] = "Auto (Director's Choice)"
155
+ processed_scene_list_init.append(scene_from_gemini_init)
156
+ st.session_state.story_treatment_scenes = processed_scene_list_init
157
+ num_generated_scenes_val = len(st.session_state.story_treatment_scenes)
158
+ st.session_state.scene_generation_prompts = [""]*num_generated_scenes_val; st.session_state.generated_scene_assets_info = [None]*num_generated_scenes_val
159
+ logger.info(f"Phase 1 complete. {num_generated_scenes_val} scenes."); status_op_main.update(label="Treatment complete! ✅ Generating visuals...", state="running")
160
+
161
+ status_op_main.write("Phase 2: Creating visual assets (Image/Video)... 🖼️🎬"); logger.info("Phase 2: Visual Asset Gen.")
162
+ successful_asset_count_val = 0
163
+ for i_scene_init, scene_data_item_init in enumerate(st.session_state.story_treatment_scenes):
164
+ scene_num_display_init = scene_data_item_init.get('scene_number', i_scene_init+1)
165
+ status_op_main.write(f" Asset for Scene {scene_num_display_init}..."); logger.info(f" Processing asset for Scene {scene_num_display_init}.")
166
+ if generate_asset_for_scene_wrapper(i_scene_init, scene_data_item_init, version_num=1): successful_asset_count_val += 1
167
 
168
+ status_label_phase2_val = "Visual assets ready! "; next_op_state_val = "running"
169
+ if successful_asset_count_val == 0 and num_generated_scenes_val > 0:
170
+ logger.error("Asset gen failed for all scenes."); status_label_phase2_val = "Asset gen FAILED for all scenes."; next_op_state_val="error";
171
+ status_op_main.update(label=status_label_phase2_val, state=next_op_state_val, expanded=True); st.stop()
172
+ elif successful_asset_count_val < num_generated_scenes_val: logger.warning(f"Assets partially generated ({successful_asset_count_val}/{num_generated_scenes_val})."); status_label_phase2_val = f"Assets partially generated ({successful_asset_count_val}/{num_generated_scenes_val}). "
173
+ status_op_main.update(label=f"{status_label_phase2_val}Generating narration script...", state=next_op_state_val)
174
+ if next_op_state_val == "error": st.stop()
175
+
176
+ status_op_main.write("Phase 3: Generating narration script..."); logger.info("Phase 3: Narration Script Gen.")
177
+ voice_style_for_narration_prompt_val = st.session_state.get("selected_voice_style_for_generation", "cinematic_trailer")
178
+ narration_gen_prompt_val = create_narration_script_prompt_enhanced(st.session_state.story_treatment_scenes, mood_selection, genre_selection, voice_style_for_narration_prompt_val)
179
+ st.session_state.narration_script_display = st.session_state.gemini_handler.generate_image_prompt(narration_gen_prompt_val)
180
+ logger.info("Narration script generated."); status_op_main.update(label="Narration script ready! Synthesizing voice...", state="running")
181
+ status_op_main.write("Phase 4: Synthesizing voice (ElevenLabs)... 🔊"); logger.info("Phase 4: Voice Synthesis.")
182
  st.session_state.overall_narration_audio_path = st.session_state.visual_engine.generate_narration_audio(st.session_state.narration_script_display)
183
+ final_status_label_val = "All components ready! Storyboard below. 🚀"; final_op_state_val = "complete"
184
+ if not st.session_state.overall_narration_audio_path: final_status_label_val = f"{status_label_phase2_val}Storyboard ready (Voiceover skipped or failed)."; logger.warning("Voiceover generation skipped/failed.")
185
  else: logger.info("Voiceover generated successfully.")
186
+ status_op_main.update(label=final_status_label_val, state=final_op_state_val, expanded=False)
187
+ except ValueError as ve_err_main: logger.error(f"ValueError in main gen: {ve_err_main}", exc_info=True); status_op_main.update(label=f"Input/Gemini response error: {ve_err_main}", state="error", expanded=True);
188
+ except Exception as e_unhandled_main: logger.error(f"Unhandled Exception in main gen: {e_unhandled_main}", exc_info=True); status_op_main.update(label=f"Unexpected error: {e_unhandled_main}", state="error", expanded=True);
 
189
 
190
  with st.expander("Define Characters", expanded=False):
191
+ # ... (Keep as before)
192
+ char_name_input_sidebar = st.text_input("Character Name", key="char_name_sidebar_unique"); char_desc_input_sidebar = st.text_area("Visual Description", key="char_desc_sidebar_unique", height=100)
193
+ if st.button("Save Character", key="add_char_sidebar_unique"):
194
+ if char_name_input_sidebar and char_desc_input_sidebar: st.session_state.character_definitions[char_name_input_sidebar.strip().lower()] = char_desc_input_sidebar.strip(); st.success(f"Char '{char_name_input_sidebar.strip()}' saved.")
195
  else: st.warning("Name and description needed.")
196
  if st.session_state.character_definitions: st.caption("Current Characters:"); [st.markdown(f"**{k.title()}:** _{v}_") for k,v in st.session_state.character_definitions.items()]
 
197
  with st.expander("Global Style Overrides", expanded=False):
198
+ # ... (Keep as before)
199
+ style_presets_dict_sidebar = { "Default (Director's Choice)": "", "Hyper-Realistic Gritty Noir": "hyper-realistic gritty neo-noir...", "Surreal Dreamscape Fantasy": "surreal dreamscape...", "Vintage Analog Sci-Fi": "70s/80s analog sci-fi..."}
200
+ selected_style_key_sidebar = st.selectbox("Base Style Preset:", options=list(style_presets_dict_sidebar.keys()), key="style_preset_sidebar_unique")
201
+ custom_kw_input_sidebar = st.text_area("Additional Custom Style Keywords:", key="custom_style_sidebar_unique", height=80)
202
+ current_global_style_val_sidebar = st.session_state.global_style_additions
203
+ if st.button("Apply Global Styles", key="apply_styles_sidebar_unique"):
204
+ final_style_str_sidebar = style_presets_dict_sidebar[selected_style_key_sidebar];
205
+ if custom_kw_input_sidebar.strip(): final_style_str_sidebar = f"{final_style_str_sidebar}, {custom_kw_input_sidebar.strip()}" if final_style_str_sidebar else custom_kw_input_sidebar.strip()
206
+ st.session_state.global_style_additions = final_style_str_sidebar.strip(); current_global_style_val_sidebar = final_style_str_sidebar.strip()
207
+ if current_global_style_val_sidebar: st.success("Global styles applied!")
208
  else: st.info("Global style additions cleared.")
209
+ if current_global_style_val_sidebar: st.caption(f"Active: \"{current_global_style_val_sidebar}\"")
 
210
  with st.expander("Voice & Narration Style", expanded=False):
+ # Maps UI labels to internal narration style keys; the chosen key is stored in st.session_state.selected_voice_style_for_generation.
+ engine_default_voice_val = "Rachel"
+ if hasattr(st.session_state, 'visual_engine') and st.session_state.visual_engine: engine_default_voice_val = st.session_state.visual_engine.elevenlabs_voice_id
+ user_voice_id_override_input = st.text_input("ElevenLabs Voice ID (override):", value=engine_default_voice_val, key="el_voice_id_sidebar_unique")
+ narration_styles_map_sidebar = {"Cinematic Trailer": "cinematic_trailer", "Neutral Documentary": "documentary_neutral", "Character Introspection": "introspective_character"}
+ selected_narration_style_key_sidebar = st.selectbox("Narration Script Style:", list(narration_styles_map_sidebar.keys()), key="narr_style_sidebar_unique", index=0)
+ if st.button("Set Narrator Voice & Style", key="set_voice_btn_sidebar_unique"):
+ final_voice_id_el_sidebar = user_voice_id_override_input.strip() or st.session_state.get("ELEVENLABS_VOICE_ID_CONFIG", "Rachel")
+ if hasattr(st.session_state, 'visual_engine'): st.session_state.visual_engine.elevenlabs_voice_id = final_voice_id_el_sidebar
+ st.session_state.selected_voice_style_for_generation = narration_styles_map_sidebar[selected_narration_style_key_sidebar]
+ st.success(f"Narrator Voice ID: {final_voice_id_el_sidebar}. Script Style: {selected_narration_style_key_sidebar}")
+ logger.info(f"User updated 11L Voice ID: {final_voice_id_el_sidebar}, Script Style: {selected_narration_style_key_sidebar}")

  # --- Main Content Area ---
  st.header("🎬 Cinematic Storyboard & Treatment")

  if not st.session_state.story_treatment_scenes: st.info("Use the sidebar to generate your cinematic treatment.")
  else:
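+ # Main storyboard loop: one card per scene, with treatment details on the left and the generated asset on the right.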
+ for i_main_loop_content, scene_content_item_display in enumerate(st.session_state.story_treatment_scenes):
+ scene_num_display_val = scene_content_item_display.get('scene_number', i_main_loop_content + 1)
+ scene_title_display_val = scene_content_item_display.get('scene_title', 'Untitled Scene')
+ key_base_for_widgets_main = f"s{scene_num_display_val}_{''.join(filter(str.isalnum, scene_title_display_val[:10]))}_main_content_{i_main_loop_content}" # unique widget key prefix for this scene

+ if "director_note" in scene_content_item_display and scene_content_item_display['director_note']: st.info(f"🎬 Director Note S{scene_num_display_val}: {scene_content_item_display['director_note']}")
+ st.subheader(f"SCENE {scene_num_display_val}: {scene_title_display_val.upper()}"); col_treatment_main, col_visual_main = st.columns([0.45, 0.55])

+ with col_treatment_main:
  with st.expander("📝 Scene Treatment & Controls", expanded=True):
+ # Textual scene details generated by Gemini for this scene's treatment.
+ st.markdown(f"**Beat:** {scene_content_item_display.get('emotional_beat', 'N/A')}"); st.markdown(f"**Setting:** {scene_content_item_display.get('setting_description', 'N/A')}"); st.markdown(f"**Chars:** {', '.join(scene_content_item_display.get('characters_involved', ['N/A']))}"); st.markdown(f"**Focus Moment:** _{scene_content_item_display.get('character_focus_moment', 'N/A')}_"); st.markdown(f"**Plot Beat:** {scene_content_item_display.get('key_plot_beat', 'N/A')}"); st.markdown(f"**Dialogue Hook:** `\"{scene_content_item_display.get('suggested_dialogue_hook', '...')}\"`"); st.markdown("---"); st.markdown(f"**Dir. Visual Style:** _{scene_content_item_display.get('PROACTIVE_visual_style_감독', 'N/A')}_"); st.markdown(f"**Dir. Camera:** _{scene_content_item_display.get('PROACTIVE_camera_work_감독', 'N/A')}_"); st.markdown(f"**Dir. Sound:** _{scene_content_item_display.get('PROACTIVE_sound_design_감독', 'N/A')}_"); st.markdown("---")

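+ # Per-scene overrides below (shot type, duration, asset type) persist in st.session_state.story_treatment_scenes and are respected on regeneration.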
  st.markdown("##### Shot, Pacing & Asset Controls")
+ # Selectbox indices are resolved defensively: unknown stored values fall back to the defaults.
+ current_user_shot_type_widget = st.session_state.story_treatment_scenes[i_main_loop_content].get('user_shot_type', DEFAULT_SHOT_TYPE)
+ try: shot_type_idx_widget = SHOT_TYPES_OPTIONS.index(current_user_shot_type_widget)
+ except ValueError: shot_type_idx_widget = SHOT_TYPES_OPTIONS.index(DEFAULT_SHOT_TYPE)
+ new_user_shot_type_widget = st.selectbox("Dominant Shot Type:", options=SHOT_TYPES_OPTIONS, index=shot_type_idx_widget, key=f"shot_type_widget_{key_base_for_widgets_main}")
+ if new_user_shot_type_widget != current_user_shot_type_widget: st.session_state.story_treatment_scenes[i_main_loop_content]['user_shot_type'] = new_user_shot_type_widget
+ current_user_duration_widget = st.session_state.story_treatment_scenes[i_main_loop_content].get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)
+ new_user_duration_widget = st.number_input("Scene Duration (seconds):", min_value=1, max_value=300, value=current_user_duration_widget, step=1, key=f"duration_widget_{key_base_for_widgets_main}")
+ if new_user_duration_widget != current_user_duration_widget: st.session_state.story_treatment_scenes[i_main_loop_content]['user_scene_duration_secs'] = new_user_duration_widget
+ current_user_asset_type_widget = st.session_state.story_treatment_scenes[i_main_loop_content].get('user_selected_asset_type', "Auto (Director's Choice)")
+ try: asset_type_idx_widget = ASSET_TYPE_OPTIONS.index(current_user_asset_type_widget)
+ except ValueError: asset_type_idx_widget = 0
+ new_user_asset_type_widget = st.selectbox("Asset Type Override:", ASSET_TYPE_OPTIONS, index=asset_type_idx_widget, key=f"asset_type_sel_{key_base_for_widgets_main}", help="Choose asset type. 'Auto' uses AI suggestion.")
+ if new_user_asset_type_widget != current_user_asset_type_widget: st.session_state.story_treatment_scenes[i_main_loop_content]['user_selected_asset_type'] = new_user_asset_type_widget

  st.markdown("---")
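+ # Surface the exact prompt used for this scene's asset, plus the Pexels fallback query if the treatment suggested one.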
+ prompt_for_current_asset_display = st.session_state.scene_generation_prompts[i_main_loop_content] if i_main_loop_content < len(st.session_state.scene_generation_prompts) else None
+ if prompt_for_current_asset_display:
+ with st.popover("👁️ View Asset Generation Prompt"): st.markdown(f"**Prompt used:**"); st.code(prompt_for_current_asset_display, language='text')
+ pexels_query_display_val = scene_content_item_display.get('pexels_search_query_감독', None)
+ if pexels_query_display_val: st.caption(f"Pexels Fallback: `{pexels_query_display_val}`")
+
+ with col_visual_main:
+ # Right-hand column: render whatever asset (image or video clip) was generated for this scene.
+ asset_info_for_display = st.session_state.generated_scene_assets_info[i_main_loop_content] if i_main_loop_content < len(st.session_state.generated_scene_assets_info) else None
+ if asset_info_for_display and not asset_info_for_display.get('error') and asset_info_for_display.get('path') and os.path.exists(asset_info_for_display['path']):
+ path_to_asset_display = asset_info_for_display['path']; type_of_asset_display = asset_info_for_display.get('type', 'image')
+ if type_of_asset_display == 'image': st.image(path_to_asset_display, caption=f"S{scene_num_display_val} ({type_of_asset_display}): {scene_title_display_val}")
+ elif type_of_asset_display == 'video':

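+ # Video files are read into memory so st.video can render the clip inline.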
  try:
+ with open(path_to_asset_display, 'rb') as vf_read_main: video_bytes_content_main = vf_read_main.read()
+ st.video(video_bytes_content_main, format="video/mp4", start_time=0); st.caption(f"S{scene_num_display_val} ({type_of_asset_display}): {scene_title_display_val}")
+ except Exception as e_vid_disp_main: st.error(f"Error displaying video {path_to_asset_display}: {e_vid_disp_main}"); logger.error(f"Error displaying video: {e_vid_disp_main}", exc_info=True)
+ else: st.warning(f"Unknown asset type '{type_of_asset_display}' for S{scene_num_display_val}.")
  else:
  if st.session_state.story_treatment_scenes:
+ error_msg_display_main = asset_info_for_display.get('error_message', 'Visual pending/failed.') if asset_info_for_display else 'Visual pending/failed.'
+ st.caption(error_msg_display_main)

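+ # Treatment edit flow: Gemini regenerates the scene dict from feedback, user overrides are re-applied, then the asset is rebuilt at a bumped version number.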
+ with st.popover(f"✏️ Edit S{scene_num_display_val} Treatment"):
+ feedback_for_treatment_input = st.text_area("Changes to treatment:", key=f"treat_fb_{key_base_for_widgets_main}", height=150)
+ if st.button(f"🔄 Update S{scene_num_display_val} Treatment", key=f"regen_treat_btn_{key_base_for_widgets_main}"):
+ if feedback_for_treatment_input:
+ with st.status(f"Updating S{scene_num_display_val} Treatment & Asset...", expanded=True) as status_treat_regen_main:
+ user_shot_type_pref_treat = st.session_state.story_treatment_scenes[i_main_loop_content]['user_shot_type']
+ user_duration_pref_treat = st.session_state.story_treatment_scenes[i_main_loop_content]['user_scene_duration_secs']
+ user_asset_type_pref_treat = st.session_state.story_treatment_scenes[i_main_loop_content]['user_selected_asset_type']
+ regen_prompt_gemini_treat = create_scene_regeneration_prompt(scene_content_item_display, feedback_for_treatment_input, st.session_state.story_treatment_scenes)

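+ # Any Gemini or asset-generation failure is caught below and surfaced through the status element.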
  try:
+ updated_scene_data_from_gemini_treat = st.session_state.gemini_handler.regenerate_scene_script_details(regen_prompt_gemini_treat)
+ final_updated_scene_data_treat = {**updated_scene_data_from_gemini_treat}
+ final_updated_scene_data_treat['user_shot_type'] = user_shot_type_pref_treat; final_updated_scene_data_treat['user_scene_duration_secs'] = user_duration_pref_treat; final_updated_scene_data_treat['user_selected_asset_type'] = user_asset_type_pref_treat
+ st.session_state.story_treatment_scenes[i_main_loop_content] = final_updated_scene_data_treat
+ status_treat_regen_main.update(label="Treatment updated! Regenerating asset...", state="running")
+ version_num_asset_treat = 1
+ if asset_info_for_display and asset_info_for_display.get('path') and os.path.exists(asset_info_for_display['path']):
+ try: base_fn_treat,_=os.path.splitext(os.path.basename(asset_info_for_display['path'])); version_num_asset_treat = int(base_fn_treat.split('_v')[-1])+1 if '_v' in base_fn_treat else 2
+ except Exception: version_num_asset_treat = 2
+ if generate_asset_for_scene_wrapper(i_main_loop_content, final_updated_scene_data_treat, version_num=version_num_asset_treat, user_selected_asset_type_override=user_asset_type_pref_treat): status_treat_regen_main.update(label="Treatment & Asset Updated! 🎉", state="complete", expanded=False)
+ else: status_treat_regen_main.update(label="Treatment updated, asset failed.", state="complete", expanded=False)

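+ # Rerun so the refreshed treatment and regenerated asset are rendered immediately.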
  st.rerun()
+ except Exception as e_treat_regen_loop: status_treat_regen_main.update(label=f"Error: {e_treat_regen_loop}", state="error"); logger.error(f"Scene treatment regen error: {e_treat_regen_loop}", exc_info=True)
  else: st.warning("Please provide feedback for treatment.")

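+ # Visual-only edit flow: image prompts are refined via Gemini feedback, while video-clip prompts are rebuilt with construct_text_to_video_prompt_for_gen4 (feedback is logged but not injected).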
+ with st.popover(f"🎨 Edit S{scene_num_display_val} Visual Prompt/Asset"):
+ current_gen_prompt_edit_display = st.session_state.scene_generation_prompts[i_main_loop_content] if i_main_loop_content < len(st.session_state.scene_generation_prompts) else "No prompt."
+ st.caption("Current Asset Generation Prompt:"); st.code(current_gen_prompt_edit_display, language='text')
+ feedback_for_visual_input = st.text_area("Describe changes for the visual asset:", key=f"visual_fb_{key_base_for_widgets_main}", height=150)
+ if st.button(f"🔄 Update S{scene_num_display_val} Asset", key=f"regen_visual_btn_{key_base_for_widgets_main}"):
+ if feedback_for_visual_input:
+ with st.status(f"Refining prompt & asset for S{scene_num_display_val}...", expanded=True) as status_visual_regen_main:
+ user_asset_type_choice_viz = st.session_state.story_treatment_scenes[i_main_loop_content]['user_selected_asset_type']
+ is_video_type_for_regen = (user_asset_type_choice_viz == "Video Clip") or (user_asset_type_choice_viz == "Auto (Director's Choice)" and scene_content_item_display.get('suggested_asset_type_감독') == 'video_clip')
+ newly_constructed_asset_prompt_regen = ""
+ if not is_video_type_for_regen:
+ gemini_refinement_prompt_viz = create_visual_regeneration_prompt(current_gen_prompt_edit_display, feedback_for_visual_input, scene_content_item_display, st.session_state.character_definitions, st.session_state.global_style_additions)
+ try: newly_constructed_asset_prompt_regen = st.session_state.gemini_handler.refine_image_prompt_from_feedback(gemini_refinement_prompt_viz); st.session_state.scene_generation_prompts[i_main_loop_content] = newly_constructed_asset_prompt_regen; status_visual_regen_main.update(label="Image prompt refined! Regenerating asset...", state="running")
+ except Exception as e_gemini_refine_viz: status_visual_regen_main.update(label=f"Error refining prompt: {e_gemini_refine_viz}", state="error"); logger.error(f"Visual prompt refinement error: {e_gemini_refine_viz}", exc_info=True); continue
  else:
+ logger.info(f"Reconstructing video motion prompt for S{scene_num_display_val}. Feedback (indirect): {feedback_for_visual_input}")
+ newly_constructed_asset_prompt_regen = construct_text_to_video_prompt_for_gen4(scene_content_item_display, st.session_state.global_style_additions); st.session_state.scene_generation_prompts[i_main_loop_content] = newly_constructed_asset_prompt_regen; status_visual_regen_main.update(label="Video prompt reconstructed! Regenerating asset...", state="running")
+ if not newly_constructed_asset_prompt_regen: status_visual_regen_main.update(label="Prompt construction failed.", state="error"); continue
+ version_num_viz_asset_regen = 1
+ if asset_info_for_display and asset_info_for_display.get('path') and os.path.exists(asset_info_for_display['path']):
+ try: base_fn_viz_regen,_=os.path.splitext(os.path.basename(asset_info_for_display['path'])); version_num_viz_asset_regen = int(base_fn_viz_regen.split('_v')[-1])+1 if '_v' in base_fn_viz_regen else 2
+ except Exception: version_num_viz_asset_regen = 2
+ if generate_asset_for_scene_wrapper(i_main_loop_content, st.session_state.story_treatment_scenes[i_main_loop_content], version_num=version_num_viz_asset_regen, user_selected_asset_type_override=user_asset_type_choice_viz): status_visual_regen_main.update(label="Asset Updated! 🎉", state="complete", expanded=False)
+ else: status_visual_regen_main.update(label="Prompt updated, asset regen failed.", state="complete", expanded=False)

  st.rerun()
  else: st.warning("Please provide feedback for visual asset.")
  st.markdown("---")

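+ # Animatic assembly is only offered once at least one scene has a successfully generated asset.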
  if st.session_state.story_treatment_scenes and any(asset_info_item_loop and not asset_info_item_loop.get('error') and asset_info_item_loop.get('path') for asset_info_item_loop in st.session_state.generated_scene_assets_info if asset_info_item_loop is not None):
+ if st.button("🎬 Assemble Narrated Cinematic Animatic", key="assemble_video_main_btn_unique", type="primary", use_container_width=True):
+ with st.status("Assembling Ultra Animatic...", expanded=True) as status_vid_assembly_main:
+ assets_for_final_vid_assembly = []
+ # Collect only scenes whose asset generated successfully; each entry carries path, type, key action, and the user-set duration.
+ for i_vid_assembly_loop, scene_data_for_vid_loop in enumerate(st.session_state.story_treatment_scenes):
+ asset_info_curr_scene_vid = st.session_state.generated_scene_assets_info[i_vid_assembly_loop] if i_vid_assembly_loop < len(st.session_state.generated_scene_assets_info) else None
+ if asset_info_curr_scene_vid and not asset_info_curr_scene_vid.get('error') and asset_info_curr_scene_vid.get('path') and os.path.exists(asset_info_curr_scene_vid['path']):
+ assets_for_final_vid_assembly.append({'path': asset_info_curr_scene_vid['path'], 'type': asset_info_curr_scene_vid.get('type', 'image'), 'scene_num': scene_data_for_vid_loop.get('scene_number', i_vid_assembly_loop + 1), 'key_action': scene_data_for_vid_loop.get('key_plot_beat', ''), 'duration': scene_data_for_vid_loop.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)})
+ status_vid_assembly_main.write(f"Adding S{scene_data_for_vid_loop.get('scene_number', i_vid_assembly_loop + 1)} ({asset_info_curr_scene_vid.get('type')}).")
+ else: logger.warning(f"Skipping S{scene_data_for_vid_loop.get('scene_number', i_vid_assembly_loop+1)} for video: No valid asset.")
+ if assets_for_final_vid_assembly:
+ status_vid_assembly_main.write("Calling video engine...")
+ st.session_state.video_path = st.session_state.visual_engine.assemble_animatic_from_assets(asset_data_list=assets_for_final_vid_assembly, overall_narration_path=st.session_state.overall_narration_audio_path, output_filename="cinegen_ultra_animatic.mp4", fps=24)
+ if st.session_state.video_path and os.path.exists(st.session_state.video_path): status_vid_assembly_main.update(label="Ultra animatic assembled! 🎉", state="complete", expanded=False); st.balloons()
+ else: status_vid_assembly_main.update(label="Video assembly failed. Check logs.", state="error", expanded=False); logger.error("Video assembly returned None or file does not exist.")
+ else: status_vid_assembly_main.update(label="No valid assets for video assembly.", state="error", expanded=False); logger.warning("No valid assets found for video assembly.")

  elif st.session_state.story_treatment_scenes: st.info("Generate visual assets before assembling the animatic.")

  if st.session_state.video_path and os.path.exists(st.session_state.video_path):
  st.header("🎬 Generated Cinematic Animatic");
  try:
+ with open(st.session_state.video_path, 'rb') as vf_obj_read_final: video_bytes_final_content = vf_obj_read_final.read()
+ st.video(video_bytes_final_content, format="video/mp4")
+ st.download_button(label="Download Ultra Animatic", data=video_bytes_final_content, file_name=os.path.basename(st.session_state.video_path), mime="video/mp4", use_container_width=True, key="download_video_main_btn_unique" )
+ except Exception as e_vid_final_disp_main: st.error(f"Error displaying final video: {e_vid_final_disp_main}"); logger.error(f"Error displaying final video: {e_vid_final_disp_main}", exc_info=True)

  st.sidebar.markdown("---"); st.sidebar.caption("CineGen AI Ultra+ | Visionary Cinematic Pre-Production")