mgbam committed
Commit 8a6537e · verified · 1 Parent(s): b1b7840

Update app.py

Files changed (1)
  1. app.py +368 -508
app.py CHANGED
@@ -1,518 +1,378 @@
1
- # app.py
2
- import streamlit as st
3
- from core.gemini_handler import GeminiHandler
4
- from core.visual_engine import VisualEngine
5
- from core.prompt_engineering import (
6
- create_cinematic_treatment_prompt,
7
- construct_dalle_prompt,
8
- construct_text_to_video_prompt, # Import new function
9
- create_narration_script_prompt_enhanced,
10
- create_scene_regeneration_prompt,
11
- create_visual_regeneration_prompt
12
- )
13
  import os
14
  import logging
15
 
16
- # --- Configuration & Initialization ---
17
- st.set_page_config(page_title="CineGen AI Ultra+", layout="wide", initial_sidebar_state="expanded")
18
- logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
19
  logger = logging.getLogger(__name__)
20
 
21
- # --- Global Definitions for New Features ---
22
- SHOT_TYPES_OPTIONS = [
23
- "Director's Choice", "Establishing Shot", "Long Shot", "Full Shot",
24
- "Medium Long Shot (Cowboy)", "Medium Shot", "Medium Close-up",
25
- "Close-up", "Extreme Close-up", "Point of View (POV)",
26
- "Over the Shoulder", "Tracking Shot", "Dolly Zoom", "Crane Shot",
27
- "Aerial Shot", "Static Shot", "Dutch Angle", "Whip Pan"
28
- ]
29
- DEFAULT_SCENE_DURATION_SECS = 5
30
- DEFAULT_SHOT_TYPE = "Director's Choice"
31
- ASSET_TYPE_OPTIONS = ["Auto (Director's Choice)", "Image", "Video Clip"] # For user selection
32
-
33
-
34
- # --- Global State Variables & API Key Setup ---
35
- def load_api_key(key_name_streamlit, key_name_env, service_name):
36
- key = None; secrets_available = hasattr(st, 'secrets')
37
- try:
38
- if secrets_available and key_name_streamlit in st.secrets:
39
- key = st.secrets[key_name_streamlit]
40
- if key: logger.info(f"{service_name} API Key found in Streamlit secrets.")
41
- except Exception as e: logger.warning(f"Could not access st.secrets for {key_name_streamlit}: {e}")
42
- if not key and key_name_env in os.environ:
43
- key = os.environ[key_name_env]
44
- if key: logger.info(f"{service_name} API Key found in environment variable.")
45
- if not key: logger.warning(f"{service_name} API Key NOT FOUND. Related features may be disabled or use fallbacks.")
46
- return key
47
-
48
- if 'services_initialized' not in st.session_state:
49
- logger.info("Initializing services and API keys for the first time this session...")
50
- st.session_state.GEMINI_API_KEY = load_api_key("GEMINI_API_KEY", "GEMINI_API_KEY", "Gemini")
51
- st.session_state.OPENAI_API_KEY = load_api_key("OPENAI_API_KEY", "OPENAI_API_KEY", "OpenAI/DALL-E")
52
- st.session_state.ELEVENLABS_API_KEY = load_api_key("ELEVENLABS_API_KEY", "ELEVENLABS_API_KEY", "ElevenLabs")
53
- st.session_state.PEXELS_API_KEY = load_api_key("PEXELS_API_KEY", "PEXELS_API_KEY", "Pexels")
54
- st.session_state.ELEVENLABS_VOICE_ID_CONFIG = load_api_key("ELEVENLABS_VOICE_ID", "ELEVENLABS_VOICE_ID", "ElevenLabs Voice ID")
55
- st.session_state.RUNWAY_API_KEY = load_api_key("RUNWAY_API_KEY", "RUNWAY_API_KEY", "RunwayML") # Load Runway Key
56
-
57
- if not st.session_state.GEMINI_API_KEY:
58
- st.error("CRITICAL: Gemini API Key is essential and missing!"); logger.critical("Gemini API Key missing. Halting."); st.stop()
59
-
60
- try:
61
- st.session_state.gemini_handler = GeminiHandler(api_key=st.session_state.GEMINI_API_KEY)
62
- logger.info("GeminiHandler initialized successfully.")
63
- except Exception as e: st.error(f"Failed to init GeminiHandler: {e}"); logger.critical(f"GeminiHandler init failed: {e}", exc_info=True); st.stop()
64
-
65
- try:
66
- default_voice_id = "Rachel"
67
- configured_voice_id = st.session_state.ELEVENLABS_VOICE_ID_CONFIG or default_voice_id
68
- st.session_state.visual_engine = VisualEngine(
69
- output_dir="temp_cinegen_media",
70
- default_elevenlabs_voice_id=configured_voice_id
71
- )
72
- st.session_state.visual_engine.set_openai_api_key(st.session_state.OPENAI_API_KEY)
73
- st.session_state.visual_engine.set_elevenlabs_api_key(st.session_state.ELEVENLABS_API_KEY, voice_id_from_secret=st.session_state.ELEVENLABS_VOICE_ID_CONFIG)
74
- st.session_state.visual_engine.set_pexels_api_key(st.session_state.PEXELS_API_KEY)
75
- st.session_state.visual_engine.set_runway_api_key(st.session_state.RUNWAY_API_KEY) # Set Runway Key
76
- logger.info("VisualEngine initialized and API keys set (or attempted).")
77
- except Exception as e:
78
- st.error(f"Failed to init VisualEngine or set its API keys: {e}"); logger.critical(f"VisualEngine init/key setting failed: {e}", exc_info=True)
79
- st.warning("VisualEngine critical setup issue. Some features will be disabled.")
80
- st.session_state.services_initialized = True; logger.info("Service initialization sequence complete.")
81
-
82
- # Initialize other session state variables
83
- # <<< MODIFIED START >>> : Renamed generated_visual_paths to generated_scene_assets
84
- for key, default_val in [
85
- ('story_treatment_scenes', []), ('scene_prompts', []), ('generated_scene_assets', []), # Stores dicts: {'path':..., 'type':...}
86
- ('video_path', None), ('character_definitions', {}), ('global_style_additions', ""),
87
- ('overall_narration_audio_path', None), ('narration_script_display', "")
88
- ]:
89
- if key not in st.session_state: st.session_state[key] = default_val
90
-
91
- def initialize_new_project():
92
- st.session_state.story_treatment_scenes = []
93
- st.session_state.scene_prompts = [] # Stores DALL-E or Text-to-Video prompts
94
- st.session_state.generated_scene_assets = [] # Stores dicts {'path': ..., 'type': ..., 'error': ...}
95
- st.session_state.video_path, st.session_state.overall_narration_audio_path, st.session_state.narration_script_display = None, None, ""
96
- logger.info("New project initialized.")
97
- # <<< MODIFIED END >>>
98
-
99
- # <<< MODIFIED START >>> : Updated function to use generate_scene_asset
100
- def generate_asset_for_scene_core(scene_index, scene_data, version=1, user_selected_asset_type="Auto (Director's Choice)"):
101
- """
102
- Generates a visual asset (image or video clip) for a scene.
103
- Returns True on success, False on failure.
104
- """
105
- # Determine asset type: user override > Gemini suggestion > default to image
106
- final_asset_type_decision = "image" # Default
107
- gemini_suggested_type = scene_data.get('suggested_asset_type_감독', 'image').lower()
108
-
109
- if user_selected_asset_type == "Image":
110
- final_asset_type_decision = "image"
111
- elif user_selected_asset_type == "Video Clip":
112
- final_asset_type_decision = "video_clip"
113
- elif user_selected_asset_type == "Auto (Director's Choice)":
114
- final_asset_type_decision = gemini_suggested_type if gemini_suggested_type == "video_clip" else "image"
115
-
116
- generate_as_video = (final_asset_type_decision == "video_clip")
117
- prompt_text_for_visual = ""
118
-
119
- if generate_as_video:
120
- # Construct prompt for text-to-video (e.g., RunwayML)
121
- prompt_text_for_visual = construct_text_to_video_prompt(scene_data, st.session_state.character_definitions, st.session_state.global_style_additions)
122
- # Note: seed_image_path could be an enhancement if DALL-E image is generated first
123
- else:
124
- # Construct prompt for DALL-E (image)
125
- prompt_text_for_visual = construct_dalle_prompt(scene_data, st.session_state.character_definitions, st.session_state.global_style_additions)
126
-
127
- if not prompt_text_for_visual:
128
- logger.error(f"Visual prompt construction failed for scene {scene_data.get('scene_number', scene_index+1)} (Type: {final_asset_type_decision})")
129
- return False
130
-
131
- # Ensure session state lists are long enough
132
- while len(st.session_state.scene_prompts) <= scene_index: st.session_state.scene_prompts.append("")
133
- while len(st.session_state.generated_scene_assets) <= scene_index: st.session_state.generated_scene_assets.append(None)
134
-
135
- st.session_state.scene_prompts[scene_index] = prompt_text_for_visual
136
-
137
- # Filename base (extension will be added by visual_engine)
138
- filename_base = f"scene_{scene_data.get('scene_number', scene_index+1)}_asset_v{version}"
139
- runway_duration = scene_data.get('video_clip_duration_estimate_secs_감독', DEFAULT_SCENE_DURATION_SECS)
140
- if runway_duration <= 0 : runway_duration = DEFAULT_SCENE_DURATION_SECS # Ensure positive duration
141
-
142
- asset_result = st.session_state.visual_engine.generate_scene_asset(
143
- image_prompt_text=prompt_text_for_visual, # This is generic, used for DALL-E or T2V
144
- scene_data=scene_data,
145
- scene_identifier_filename_base=filename_base,
146
- generate_as_video_clip=generate_as_video,
147
- runway_target_duration=runway_duration
148
- # input_image_for_runway=None # TODO: Could be an enhancement
149
- )
150
-
151
- st.session_state.generated_scene_assets[scene_index] = asset_result # Store the whole dict
152
-
153
- if asset_result and not asset_result['error'] and asset_result.get('path') and os.path.exists(asset_result['path']):
154
- logger.info(f"Asset ({asset_result.get('type')}) generated for Scene {scene_data.get('scene_number', scene_index+1)}: {os.path.basename(asset_result['path'])}")
155
- return True
156
- else:
157
- err_msg = asset_result.get('error_message', 'Unknown error') if asset_result else 'Asset result is None'
158
- logger.warning(f"Asset generation FAILED for Scene {scene_data.get('scene_number', scene_index+1)}. Type attempted: {final_asset_type_decision}. Path was: {asset_result.get('path') if asset_result else 'N/A'}. Error: {err_msg}")
159
- # Store a failure state
160
- st.session_state.generated_scene_assets[scene_index] = {'path': None, 'type': 'none', 'error': True, 'error_message': err_msg, 'prompt_used': prompt_text_for_visual}
161
- return False
162
- # <<< MODIFIED END >>>
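The asset-type decision at the top of this function applies a fixed precedence: explicit user override first, then Gemini's suggestion, then an image default. A minimal sketch of that rule in isolation (the helper name `decide_asset_type` is hypothetical, not part of the app):

    # Hypothetical extraction of the precedence rule in generate_asset_for_scene_core.
    def decide_asset_type(user_choice: str, gemini_suggestion: str) -> str:
        if user_choice == "Image":
            return "image"
        if user_choice == "Video Clip":
            return "video_clip"
        # "Auto (Director's Choice)": defer to Gemini, but only trust an explicit video suggestion.
        return "video_clip" if gemini_suggestion.lower() == "video_clip" else "image"

    assert decide_asset_type("Auto (Director's Choice)", "video_clip") == "video_clip"
    assert decide_asset_type("Image", "video_clip") == "image"  # user override wins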
163
-
164
- # --- UI Sidebar ---
165
- with st.sidebar:
166
- st.title("🎬 CineGen AI Ultra+")
167
- st.markdown("### Creative Seed")
168
- user_idea = st.text_area("Core Story Idea / Theme:", "A lone wanderer searches for a mythical oasis in a vast, post-apocalyptic desert, haunted by mirages and mechanical scavengers.", height=120, key="user_idea_main_v5")
169
- genre = st.selectbox("Primary Genre:", ["Cyberpunk", "Sci-Fi", "Fantasy", "Noir", "Thriller", "Western", "Post-Apocalyptic", "Historical Drama", "Surreal"], index=6, key="genre_main_v5")
170
- mood = st.selectbox("Overall Mood:", ["Hopeful yet Desperate", "Mysterious & Eerie", "Gritty & Tense", "Epic & Awe-Inspiring", "Melancholy & Reflective", "Whimsical & Lighthearted"], index=0, key="mood_main_v5")
171
- num_scenes = st.slider("Number of Key Scenes:", 1, 10, 2, key="num_scenes_main_v5")
172
- creative_guidance_options = {"Standard Director": "standard", "Artistic Visionary": "more_artistic", "Experimental Storyteller": "experimental_narrative"}
173
- selected_creative_guidance_key = st.selectbox("AI Creative Director Style:", options=list(creative_guidance_options.keys()), key="creative_guidance_select_v5")
174
- actual_creative_guidance = creative_guidance_options[selected_creative_guidance_key]
175
-
176
- if st.button("🌌 Generate Cinematic Treatment", type="primary", key="generate_treatment_btn_v5", use_container_width=True):
177
- initialize_new_project()
178
- if not user_idea.strip(): st.warning("Please provide a story idea.")
179
- else:
180
- with st.status("AI Director is envisioning your masterpiece...", expanded=True) as status:
181
  try:
182
- status.write("Phase 1: Gemini crafting cinematic treatment... 📜"); logger.info("Phase 1: Cinematic Treatment Gen.")
183
- treatment_prompt = create_cinematic_treatment_prompt(user_idea, genre, mood, num_scenes, actual_creative_guidance)
184
- treatment_result_json_raw = st.session_state.gemini_handler.generate_story_breakdown(treatment_prompt) # Expect list of dicts
185
- if not isinstance(treatment_result_json_raw, list) or not treatment_result_json_raw: raise ValueError("Gemini returned invalid scene list format.")
186
-
187
- processed_scenes = []
188
- for scene_data_from_gemini in treatment_result_json_raw:
189
- scene_data_from_gemini['user_shot_type'] = scene_data_from_gemini.get('PROACTIVE_camera_work_감독', DEFAULT_SHOT_TYPE) # Default from Gemini's suggestion
190
- scene_data_from_gemini['user_scene_duration_secs'] = scene_data_from_gemini.get('video_clip_duration_estimate_secs_감독', DEFAULT_SCENE_DURATION_SECS)
191
- if scene_data_from_gemini['user_scene_duration_secs'] <=0: scene_data_from_gemini['user_scene_duration_secs'] = DEFAULT_SCENE_DURATION_SECS
192
- scene_data_from_gemini['user_selected_asset_type'] = "Auto (Director's Choice)" # Default for UI
193
- processed_scenes.append(scene_data_from_gemini)
194
- st.session_state.story_treatment_scenes = processed_scenes
195
-
196
- num_gen_scenes = len(st.session_state.story_treatment_scenes)
197
- # <<< MODIFIED START >>>
198
- st.session_state.scene_prompts = [""]*num_gen_scenes
199
- st.session_state.generated_scene_assets = [None]*num_gen_scenes # Initialize list for asset dicts
200
- # <<< MODIFIED END >>>
201
- logger.info(f"Phase 1 complete. {num_gen_scenes} scenes."); status.update(label="Treatment complete! Generating visuals...", state="running")
202
-
203
- status.write("Phase 2: Creating visual assets (Image/Video)... 🖼️🎬"); logger.info("Phase 2: Visual Asset Gen.")
204
- visual_successes = 0
205
- for i, sc_data in enumerate(st.session_state.story_treatment_scenes):
206
- sc_num_log = sc_data.get('scene_number', i+1)
207
- status.write(f" Asset for Scene {sc_num_log}..."); logger.info(f" Processing asset for Scene {sc_num_log}.")
208
- # <<< MODIFIED START >>> : Calling new function
209
- if generate_asset_for_scene_core(i, sc_data, version=1): # Default to 'Auto' asset type for initial gen
210
- visual_successes += 1
211
- # <<< MODIFIED END >>>
212
-
213
- current_status_label_ph2 = "Visual assets ready! "
214
- next_step_state = "running"
215
- if visual_successes == 0 and num_gen_scenes > 0:
216
- logger.error("Visual asset gen failed for all scenes."); current_status_label_ph2 = "Asset gen FAILED for all scenes."; next_step_state="error";
217
- status.update(label=current_status_label_ph2, state=next_step_state, expanded=True); st.stop()
218
- elif visual_successes < num_gen_scenes:
219
- logger.warning(f"Assets partially generated ({visual_successes}/{num_gen_scenes})."); current_status_label_ph2 = f"Assets partially generated ({visual_successes}/{num_gen_scenes}). "
220
- status.update(label=f"{current_status_label_ph2}Generating narration script...", state=next_step_state)
221
- if next_step_state == "error": st.stop()
222
-
223
- status.write("Phase 3: Generating narration script..."); logger.info("Phase 3: Narration Script Gen.")
224
- voice_style_for_prompt = st.session_state.get("selected_voice_style_for_generation", "cinematic_trailer")
225
- narr_prompt = create_narration_script_prompt_enhanced(st.session_state.story_treatment_scenes, mood, genre, voice_style_for_prompt)
226
- st.session_state.narration_script_display = st.session_state.gemini_handler.generate_image_prompt(narr_prompt) # This generates a string
227
- logger.info("Narration script generated."); status.update(label="Narration script ready! Synthesizing voice...", state="running")
228
-
229
- status.write("Phase 4: Synthesizing voice (ElevenLabs)... 🔊"); logger.info("Phase 4: Voice Synthesis.")
230
- st.session_state.overall_narration_audio_path = st.session_state.visual_engine.generate_narration_audio(st.session_state.narration_script_display)
231
-
232
- final_label = "All components ready! Storyboard below. 🚀"
233
- final_state_val = "complete"
234
- if not st.session_state.overall_narration_audio_path:
235
- final_label = f"{current_status_label_ph2}Storyboard ready (Voiceover skipped or failed)."
236
- logger.warning("Voiceover was skipped or failed.")
237
- else: logger.info("Voiceover generated successfully.")
238
- status.update(label=final_label, state=final_state_val, expanded=False)
239
-
240
- except ValueError as ve: logger.error(f"ValueError: {ve}", exc_info=True); status.update(label=f"Input or Gemini response error: {ve}", state="error", expanded=True);
241
- except Exception as e: logger.error(f"Unhandled Exception: {e}", exc_info=True); status.update(label=f"An unexpected error occurred: {e}", state="error", expanded=True);
242
-
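The generation button above drives a four-phase pipeline (treatment, visual assets, narration script, voice) through a single `st.status` container, updating its label and state between phases. A minimal sketch of the same pattern, with placeholder phase bodies:

    import streamlit as st

    with st.status("AI Director is working...", expanded=True) as status:
        try:
            status.write("Phase 1: treatment...")      # long-running Gemini call here
            status.update(label="Treatment complete! Generating visuals...", state="running")
            status.write("Phase 2: visual assets...")  # per-scene asset generation here
            status.update(label="All components ready!", state="complete", expanded=False)
        except Exception as e:
            status.update(label=f"An unexpected error occurred: {e}", state="error", expanded=True)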
243
- st.markdown("---"); st.markdown("### Fine-Tuning Options")
244
- # ... (Character, Global Style, Voice expanders - no changes needed here for this fix) ...
245
- with st.expander("Define Characters", expanded=False):
246
- char_name = st.text_input("Character Name", key="char_name_adv_ultra_v5"); char_desc = st.text_area("Visual Description", key="char_desc_adv_ultra_v5", height=100, placeholder="e.g., Jax: rugged male astronaut...")
247
- if st.button("Save Character", key="add_char_adv_ultra_v5"):
248
- if char_name and char_desc: st.session_state.character_definitions[char_name.strip().lower()] = char_desc.strip(); st.success(f"Char '{char_name.strip()}' saved.")
249
- else: st.warning("Name and description needed.")
250
- if st.session_state.character_definitions: st.caption("Current Characters:"); [st.markdown(f"**{k.title()}:** _{v}_") for k,v in st.session_state.character_definitions.items()]
251
-
252
- with st.expander("Global Style Overrides", expanded=False):
253
- presets = { "Default (Director's Choice)": "", "Hyper-Realistic Gritty Noir": "hyper-realistic gritty neo-noir, extreme detail, deep dynamic shadows, complex reflections on wet surfaces, cinematic film grain, desaturated palette with isolated vibrant neon accents (e.g. red, cyan), anamorphic lens distortion, atmospheric haze.", "Surreal Dreamscape Fantasy": "surreal dreamscape, epic fantasy elements, painterly with photorealistic details, impossible architecture, bioluminescent flora, otherworldly color palette (e.g., magenta skies, turquoise rivers), style of Roger Dean meets Zdzisław Beksiński.", "Vintage Analog Sci-Fi": "70s/80s analog sci-fi film aesthetic, tangible practical effects look, subtle light leaks, lens flares, warm filmic tones mixed with cool blues, detailed retro-futuristic technology with chunky buttons and CRT screens."}
254
- sel_preset = st.selectbox("Base Style Preset:", options=list(presets.keys()), key="style_preset_adv_ultra_v5")
255
- custom_kw = st.text_area("Additional Custom Style Keywords:", key="custom_style_adv_ultra_v5", height=80, placeholder="e.g., 'Dutch angle'")
256
- cur_style = st.session_state.global_style_additions
257
- if st.button("Apply Global Styles", key="apply_styles_adv_ultra_v5"):
258
- final_s = presets[sel_preset];
259
- if custom_kw.strip(): final_s = f"{final_s}, {custom_kw.strip()}" if final_s else custom_kw.strip()
260
- st.session_state.global_style_additions = final_s.strip(); cur_style = final_s.strip()
261
- if cur_style: st.success("Global styles applied!")
262
- else: st.info("Global style additions cleared.")
263
- if cur_style: st.caption(f"Active global styles: \"{cur_style}\"")
264
-
265
- with st.expander("Voice & Narration Style", expanded=False):
266
- default_voice_from_engine = "Rachel"
267
- if hasattr(st.session_state, 'visual_engine') and st.session_state.visual_engine:
268
- default_voice_from_engine = st.session_state.visual_engine.elevenlabs_voice_id
269
-
270
- user_voice_id_override = st.text_input(
271
- "ElevenLabs Voice ID (optional override):",
272
- value=default_voice_from_engine,
273
- key="el_voice_id_override_v5",
274
- help=f"Defaulting to '{default_voice_from_engine}' from secrets/config. Enter a specific Voice ID from your ElevenLabs account to override."
275
- )
276
- prompt_v_styles = {"Cinematic Trailer": "cinematic_trailer", "Neutral Documentary": "documentary_neutral", "Character Introspection": "introspective_character"}
277
- sel_prompt_v_style_key = st.selectbox("Narration Script Style:", list(prompt_v_styles.keys()), key="narr_style_sel_v5", index=0)
278
-
279
- if st.button("Set Narrator Voice & Style", key="set_voice_btn_ultra_v5"):
280
- final_voice_id_to_use = user_voice_id_override.strip()
281
- if not final_voice_id_to_use:
282
- final_voice_id_to_use = st.session_state.get("ELEVENLABS_VOICE_ID_CONFIG", "Rachel")
283
-
284
- if hasattr(st.session_state, 'visual_engine'):
285
- st.session_state.visual_engine.elevenlabs_voice_id = final_voice_id_to_use
286
- st.session_state.selected_voice_style_for_generation = prompt_v_styles[sel_prompt_v_style_key]
287
- st.success(f"Narrator Voice ID set to: {final_voice_id_to_use}. Script Style: {sel_prompt_v_style_key}")
288
- logger.info(f"User updated ElevenLabs Voice ID to: {final_voice_id_to_use}, Script Style: {sel_prompt_v_style_key}")
289
-
290
-
291
- # --- Main Content Area ---
292
- st.header("🎬 Cinematic Storyboard & Treatment")
293
- if st.session_state.narration_script_display:
294
- with st.expander("📜 View Full Narration Script", expanded=False): st.markdown(f"> _{st.session_state.narration_script_display}_")
295
-
296
- if not st.session_state.story_treatment_scenes: st.info("Use the sidebar to generate your cinematic treatment.")
297
- else:
298
- for i_main, scene_content_display in enumerate(st.session_state.story_treatment_scenes):
299
- scene_n = scene_content_display.get('scene_number', i_main + 1); scene_t = scene_content_display.get('scene_title', 'Untitled')
300
- key_base = f"s{scene_n}_{''.join(filter(str.isalnum, scene_t[:10]))}_v5_{i_main}"
301
- if "director_note" in scene_content_display and scene_content_display['director_note']: st.info(f"🎬 Director Note S{scene_n}: {scene_content_display['director_note']}")
302
- st.subheader(f"SCENE {scene_n}: {scene_t.upper()}"); col_d, col_v = st.columns([0.45, 0.55])
303
-
304
- with col_d: # Treatment and Controls Column
305
- with st.expander("📝 Scene Treatment & Controls", expanded=True):
306
- # Display scene textual details (emotional_beat, setting, etc.)
307
- st.markdown(f"**Beat:** {scene_content_display.get('emotional_beat', 'N/A')}")
308
- st.markdown(f"**Setting:** {scene_content_display.get('setting_description', 'N/A')}")
309
- st.markdown(f"**Chars:** {', '.join(scene_content_display.get('characters_involved', ['N/A']))}")
310
- st.markdown(f"**Focus Moment:** _{scene_content_display.get('character_focus_moment', 'N/A')}_")
311
- st.markdown(f"**Plot Beat:** {scene_content_display.get('key_plot_beat', 'N/A')}")
312
- st.markdown(f"**Dialogue Hook:** `\"{scene_content_display.get('suggested_dialogue_hook', '...')}\"`")
313
- st.markdown("---")
314
- st.markdown(f"**Dir. Visual Style:** _{scene_content_display.get('PROACTIVE_visual_style_감독', 'N/A')}_")
315
- st.markdown(f"**Dir. Camera:** _{scene_content_display.get('PROACTIVE_camera_work_감독', 'N/A')}_")
316
- st.markdown(f"**Dir. Sound:** _{scene_content_display.get('PROACTIVE_sound_design_감독', 'N/A')}_")
317
- st.markdown("---")
318
- st.markdown("##### Shot, Pacing & Asset Controls")
319
-
320
- # User Shot Type (Camera Angle)
321
- current_shot_type = st.session_state.story_treatment_scenes[i_main].get('user_shot_type', DEFAULT_SHOT_TYPE)
322
- try: shot_type_index = SHOT_TYPES_OPTIONS.index(current_shot_type)
323
- except ValueError: shot_type_index = SHOT_TYPES_OPTIONS.index(DEFAULT_SHOT_TYPE)
324
- new_shot_type = st.selectbox("Dominant Shot Type:", options=SHOT_TYPES_OPTIONS, index=shot_type_index, key=f"shot_type_widget_{key_base}")
325
- if new_shot_type != current_shot_type:
326
- st.session_state.story_treatment_scenes[i_main]['user_shot_type'] = new_shot_type
327
- # Consider if a re-run is needed or if DALL-E prompt should be updated based on this
328
-
329
- # User Scene Duration
330
- current_duration = st.session_state.story_treatment_scenes[i_main].get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)
331
- new_duration = st.number_input("Scene Duration (seconds):", min_value=1, max_value=300, value=current_duration, step=1, key=f"duration_widget_{key_base}")
332
- if new_duration != current_duration:
333
- st.session_state.story_treatment_scenes[i_main]['user_scene_duration_secs'] = new_duration
334
-
335
- # <<< MODIFIED START >>> : User Asset Type Selection
336
- current_user_asset_type = st.session_state.story_treatment_scenes[i_main].get('user_selected_asset_type', "Auto (Director's Choice)")
337
- try: asset_type_idx = ASSET_TYPE_OPTIONS.index(current_user_asset_type)
338
- except ValueError: asset_type_idx = 0 # Default to Auto
339
- new_user_asset_type = st.selectbox("Asset Type Override:", ASSET_TYPE_OPTIONS, index=asset_type_idx, key=f"asset_type_sel_{key_base}",
340
- help="Choose 'Image' or 'Video Clip'. 'Auto' uses Gemini's suggestion.")
341
- if new_user_asset_type != current_user_asset_type:
342
- st.session_state.story_treatment_scenes[i_main]['user_selected_asset_type'] = new_user_asset_type
343
- # This change will be picked up by regeneration buttons
344
- # <<< MODIFIED END >>>
345
-
346
- st.markdown("---")
347
- # Display generated prompt for the asset
348
- current_prompt_for_asset = st.session_state.scene_prompts[i_main] if i_main < len(st.session_state.scene_prompts) else None
349
- if current_prompt_for_asset:
350
- with st.popover("👁️ View Asset Generation Prompt"):
351
- st.markdown(f"**Prompt used for current asset:**"); st.code(current_prompt_for_asset, language='text')
352
-
353
- pexels_q = scene_content_display.get('pexels_search_query_감독', None)
354
- if pexels_q: st.caption(f"Pexels Fallback Query: `{pexels_q}`")
355
-
356
- with col_v: # Visuals Column
357
- # <<< MODIFIED START >>> : Display logic for different asset types
358
- current_asset_data = st.session_state.generated_scene_assets[i_main] if i_main < len(st.session_state.generated_scene_assets) else None
359
- if current_asset_data and not current_asset_data.get('error') and current_asset_data.get('path') and os.path.exists(current_asset_data['path']):
360
- asset_path = current_asset_data['path']
361
- asset_type = current_asset_data.get('type', 'image') # Default to image if type missing
362
  if asset_type == 'image':
363
- st.image(asset_path, caption=f"Scene {scene_n} ({asset_type}): {scene_t}")
364
  elif asset_type == 'video':
365
  try:
366
- with open(asset_path, 'rb') as vf: video_bytes = vf.read()
367
- st.video(video_bytes, format="video/mp4", start_time=0)
368
- st.caption(f"Scene {scene_n} ({asset_type}): {scene_t}")
369
- except Exception as e_vid:
370
- st.error(f"Error displaying video {asset_path}: {e_vid}")
371
- logger.error(f"Error displaying video {asset_path}: {e_vid}", exc_info=True)
372
- else:
373
- st.warning(f"Unknown asset type '{asset_type}' for Scene {scene_n}.")
374
- else: # No asset, or error during generation
375
- if st.session_state.story_treatment_scenes: # Check if treatment exists
376
- error_msg = current_asset_data.get('error_message', 'Visual pending or failed.') if current_asset_data else 'Visual pending or failed.'
377
- st.caption(error_msg)
378
- # <<< MODIFIED END >>>
379
-
380
- with st.popover(f"✏️ Edit Scene {scene_n} Treatment"):
381
- fb_script = st.text_area("Changes to treatment:", key=f"treat_fb_{key_base}", height=150)
382
- if st.button(f"🔄 Update Scene {scene_n} Treatment", key=f"regen_treat_btn_{key_base}"):
383
- if fb_script:
384
- with st.status(f"Updating Scene {scene_n} Treatment & Asset...", expanded=True) as s_treat_regen:
385
- # Preserve user's shot type, duration, and asset type choices
386
- user_shot_type = st.session_state.story_treatment_scenes[i_main]['user_shot_type']
387
- user_duration = st.session_state.story_treatment_scenes[i_main]['user_scene_duration_secs']
388
- user_asset_type_choice = st.session_state.story_treatment_scenes[i_main]['user_selected_asset_type']
389
-
390
- prompt_text = create_scene_regeneration_prompt(scene_content_display, fb_script, st.session_state.story_treatment_scenes)
391
- try:
392
- updated_sc_data_from_gemini = st.session_state.gemini_handler.regenerate_scene_script_details(prompt_text)
393
- # Merge, but prioritize user's UI choices for duration/shot/asset type
394
- updated_sc_data = {**updated_sc_data_from_gemini} # Start with Gemini's new script
395
- updated_sc_data['user_shot_type'] = user_shot_type
396
- updated_sc_data['user_scene_duration_secs'] = user_duration
397
- updated_sc_data['user_selected_asset_type'] = user_asset_type_choice
398
- # Gemini might re-suggest asset type/duration, but user's direct settings take precedence for next gen
399
- # We can log if Gemini's suggestion differs from user's explicit choice.
400
- if updated_sc_data.get('suggested_asset_type_감독') != user_asset_type_choice and user_asset_type_choice != "Auto (Director's Choice)":
401
- logger.info(f"Scene {scene_n}: User asset choice '{user_asset_type_choice}' overrides Gemini suggestion '{updated_sc_data.get('suggested_asset_type_감독')}'.")
402
-
403
-
404
- st.session_state.story_treatment_scenes[i_main] = updated_sc_data
405
- s_treat_regen.update(label="Treatment updated! Regenerating asset...", state="running")
406
-
407
- v_num = 1
408
- if current_asset_data and current_asset_data.get('path') and os.path.exists(current_asset_data['path']):
409
- try: b,_=os.path.splitext(os.path.basename(current_asset_data['path'])); v_num = int(b.split('_v')[-1])+1 if '_v' in b else 2
410
- except: v_num = 2
411
- else: v_num = 1
412
- # <<< MODIFIED START >>> : Call new function, pass user_selected_asset_type
413
- if generate_asset_for_scene_core(i_main, updated_sc_data, version=v_num, user_selected_asset_type=user_asset_type_choice):
414
- s_treat_regen.update(label="Treatment & Asset Updated! 🎉", state="complete", expanded=False)
415
- else: s_treat_regen.update(label="Treatment updated, asset failed.", state="complete", expanded=False)
416
- # <<< MODIFIED END >>>
417
- st.rerun()
418
- except Exception as e_regen: s_treat_regen.update(label=f"Error: {e_regen}", state="error"); logger.error(f"Scene treatment regen error: {e_regen}", exc_info=True)
419
- else: st.warning("Please provide feedback.")
420
-
421
- with st.popover(f"🎨 Edit Scene {scene_n} Visual Prompt"):
422
- prompt_to_edit = st.session_state.scene_prompts[i_main] if i_main < len(st.session_state.scene_prompts) else "No prompt generated yet."
423
- st.caption("Current Asset Generation Prompt:"); st.code(prompt_to_edit, language='text')
424
- fb_visual = st.text_area("Changes for asset generation prompt:", key=f"visual_fb_{key_base}", height=150)
425
- if st.button(f"🔄 Update Scene {scene_n} Asset", key=f"regen_visual_btn_{key_base}"):
426
- if fb_visual:
427
- with st.status(f"Refining prompt & asset for Scene {scene_n}...", expanded=True) as s_visual_regen:
428
- user_asset_type_choice = st.session_state.story_treatment_scenes[i_main]['user_selected_asset_type']
429
- is_video_prompt = (user_asset_type_choice == "Video Clip") or \
430
- (user_asset_type_choice == "Auto (Director's Choice)" and \
431
- scene_content_display.get('suggested_asset_type_감독') == 'video_clip')
432
-
433
- # Note: Visual regeneration prompt is primarily for DALL-E (images).
434
- # For video, we might need a different refinement strategy or just regenerate with the same prompt construction.
435
- # For simplicity here, if it's a video, we'll regenerate the prompt using standard construction.
436
- # If it's an image, we use Gemini to refine the DALL-E prompt.
437
- new_asset_gen_prompt = ""
438
- if not is_video_prompt : # Refining an image prompt
439
- ref_req_prompt_for_gemini = create_visual_regeneration_prompt(prompt_to_edit, fb_visual, scene_content_display,
440
- st.session_state.character_definitions, st.session_state.global_style_additions)
441
- try:
442
- new_asset_gen_prompt = st.session_state.gemini_handler.refine_image_prompt_from_feedback(ref_req_prompt_for_gemini)
443
- st.session_state.scene_prompts[i_main] = new_asset_gen_prompt
444
- s_visual_regen.update(label="Image prompt refined by Gemini! Regenerating asset...", state="running")
445
- except Exception as e_gemini_refine:
446
- s_visual_regen.update(label=f"Error refining prompt: {e_gemini_refine}", state="error");
447
- logger.error(f"Visual prompt refinement error: {e_gemini_refine}", exc_info=True)
448
- continue # Skip asset generation if prompt refinement failed
449
- else: # For video, or auto choosing video, reconstruct the prompt
450
- new_asset_gen_prompt = construct_text_to_video_prompt(scene_content_display, st.session_state.character_definitions, st.session_state.global_style_additions)
451
- st.session_state.scene_prompts[i_main] = new_asset_gen_prompt
452
- s_visual_regen.update(label="Video prompt reconstructed! Regenerating asset...", state="running")
453
-
454
-
455
- v_num = 1
456
- if current_asset_data and current_asset_data.get('path') and os.path.exists(current_asset_data['path']):
457
- try: b,_=os.path.splitext(os.path.basename(current_asset_data['path'])); v_num = int(b.split('_v')[-1])+1 if '_v' in b else 2
458
- except: v_num=2
459
- else: v_num = 1
460
-
461
- # <<< MODIFIED START >>> : Call new function
462
- # Pass the current scene_content_display as its prompt might have changed.
463
- # User asset type choice from the scene data for consistency
464
- if generate_asset_for_scene_core(i_main, st.session_state.story_treatment_scenes[i_main], version=v_num, user_selected_asset_type=user_asset_type_choice):
465
- s_visual_regen.update(label="Asset Updated! 🎉", state="complete", expanded=False)
466
- else: s_visual_regen.update(label="Prompt updated, asset regeneration failed.", state="complete", expanded=False)
467
- # <<< MODIFIED END >>>
468
- st.rerun()
469
- else: st.warning("Please provide feedback.")
470
- st.markdown("---")
471
-
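Both regeneration popovers above duplicate the same filename parsing to pick the next asset version (a `_v<N>` suffix, defaulting to 2 on any parse failure). A hypothetical helper that centralizes that logic:

    import os
    from typing import Optional

    def next_asset_version(prev_path: Optional[str]) -> int:
        # Mirrors the inline try/except in both regeneration buttons:
        # bump "_v<N>" if present, else 2; fresh assets start at 1.
        if not prev_path or not os.path.exists(prev_path):
            return 1
        base, _ = os.path.splitext(os.path.basename(prev_path))
        try:
            return int(base.split('_v')[-1]) + 1 if '_v' in base else 2
        except ValueError:
            return 2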
472
- # Video Assembly Button
473
- # <<< MODIFIED START >>> : Check generated_scene_assets and use its data
474
- if st.session_state.story_treatment_scenes and any(asset_info and not asset_info.get('error') and asset_info.get('path') for asset_info in st.session_state.generated_scene_assets if asset_info is not None):
475
- if st.button("🎬 Assemble Narrated Cinematic Animatic", key="assemble_ultra_video_btn_v5", type="primary", use_container_width=True):
476
- with st.status("Assembling Ultra Animatic...", expanded=True) as status_vid:
477
- assets_for_video_assembly = []
478
- for i_v, sc_c in enumerate(st.session_state.story_treatment_scenes):
479
- asset_info = st.session_state.generated_scene_assets[i_v] if i_v < len(st.session_state.generated_scene_assets) else None
480
- if asset_info and not asset_info.get('error') and asset_info.get('path') and os.path.exists(asset_info['path']):
481
- assets_for_video_assembly.append({
482
- 'path': asset_info['path'],
483
- 'type': asset_info.get('type', 'image'), # Default to image if type missing
484
- 'scene_num': sc_c.get('scene_number', i_v + 1),
485
- 'key_action': sc_c.get('key_plot_beat', ''),
486
- 'duration': sc_c.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS) # Use user-set duration
487
- })
488
- status_vid.write(f"Adding Scene {sc_c.get('scene_number', i_v + 1)} ({asset_info.get('type')}).")
489
- else:
490
- logger.warning(f"Skipping Scene {sc_c.get('scene_number', i_v+1)} for video: No valid asset.")
491
-
492
- if assets_for_video_assembly:
493
- status_vid.write("Calling video engine...");
494
- st.session_state.video_path = st.session_state.visual_engine.assemble_animatic_from_assets( # Changed method name
495
- asset_data_list=assets_for_video_assembly, # Pass the list of asset dicts
496
- overall_narration_path=st.session_state.overall_narration_audio_path,
497
- output_filename="cinegen_ultra_animatic.mp4",
498
- fps=24
499
- )
500
- if st.session_state.video_path and os.path.exists(st.session_state.video_path):
501
- status_vid.update(label="Ultra animatic assembled! 🎉", state="complete", expanded=False); st.balloons()
502
- else:
503
- status_vid.update(label="Video assembly failed. Check logs.", state="error", expanded=False); logger.error("Video assembly returned None or file does not exist.")
504
- else:
505
- status_vid.update(label="No valid assets for video assembly.", state="error", expanded=False); logger.warning("No valid assets found for video assembly.")
506
- elif st.session_state.story_treatment_scenes: st.info("Generate visual assets before assembling video.")
507
- # <<< MODIFIED END >>>
508
-
509
- if st.session_state.video_path and os.path.exists(st.session_state.video_path):
510
- st.header("🎬 Generated Cinematic Animatic");
511
  try:
512
- with open(st.session_state.video_path, 'rb') as vf_obj: video_bytes = vf_obj.read()
513
- st.video(video_bytes, format="video/mp4")
514
- st.download_button(label="Download Ultra Animatic", data=video_bytes, file_name=os.path.basename(st.session_state.video_path), mime="video/mp4", use_container_width=True, key="download_ultra_video_btn_v5" )
515
- except Exception as e: st.error(f"Error displaying video: {e}"); logger.error(f"Error displaying video: {e}", exc_info=True)
516
-
517
- # --- Footer ---
518
- st.sidebar.markdown("---"); st.sidebar.caption("CineGen AI Ultra+ | Visionary Cinematic Pre-Production")
1
+ # core/visual_engine.py
2
+ from PIL import Image, ImageDraw, ImageFont, ImageOps
3
+ # --- MONKEY PATCH FOR Image.ANTIALIAS ---
4
+ try:
5
+ if hasattr(Image, 'Resampling') and hasattr(Image.Resampling, 'LANCZOS'): # Pillow 9+
6
+ if not hasattr(Image, 'ANTIALIAS'): Image.ANTIALIAS = Image.Resampling.LANCZOS
7
+ elif hasattr(Image, 'LANCZOS'): # Pillow 8
8
+ if not hasattr(Image, 'ANTIALIAS'): Image.ANTIALIAS = Image.LANCZOS
9
+ elif not hasattr(Image, 'ANTIALIAS'):
10
+ print("WARNING: Pillow version lacks common Resampling attributes or ANTIALIAS. Video effects might fail.")
11
+ except Exception as e_mp: print(f"WARNING: ANTIALIAS monkey-patch error: {e_mp}")
12
+ # --- END MONKEY PATCH ---
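The shim above restores `Image.ANTIALIAS` for downstream code (e.g., older MoviePy resize effects) that still references the removed constant. New code can instead resolve the filter explicitly; a version-agnostic sketch:

    from PIL import Image

    def lanczos_filter():
        # Pillow >= 9.1 exposes the Image.Resampling enum; older releases
        # only have the module-level constants.
        if hasattr(Image, "Resampling"):
            return Image.Resampling.LANCZOS
        return Image.LANCZOS

    # usage: img = img.resize(new_size, lanczos_filter())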
13
+
14
+ from moviepy.editor import (ImageClip, VideoFileClip, concatenate_videoclips, TextClip,
15
+ CompositeVideoClip, AudioFileClip)
16
+ import moviepy.video.fx.all as vfx
17
+ import numpy as np
18
  import os
19
+ import openai
20
+ import requests
+ import json
21
+ import io
22
+ import time
23
+ import random
24
  import logging
25

26
  logger = logging.getLogger(__name__)
27
+ logger.setLevel(logging.INFO)
28
+
29
+ # --- ElevenLabs Client Import ---
30
+ ELEVENLABS_CLIENT_IMPORTED = False; ElevenLabsAPIClient = None; Voice = None; VoiceSettings = None
31
+ try:
32
+ from elevenlabs.client import ElevenLabs as ImportedElevenLabsClient
33
+ from elevenlabs import Voice as ImportedVoice, VoiceSettings as ImportedVoiceSettings
34
+ ElevenLabsAPIClient = ImportedElevenLabsClient; Voice = ImportedVoice; VoiceSettings = ImportedVoiceSettings
35
+ ELEVENLABS_CLIENT_IMPORTED = True; logger.info("ElevenLabs client components imported.")
36
+ except Exception as e_eleven: logger.warning(f"ElevenLabs client import failed: {e_eleven}. Audio disabled.")
37
+
38
+ # --- RunwayML Client Import (Placeholder) ---
39
+ RUNWAYML_SDK_IMPORTED = False; RunwayMLClient = None
40
+ try:
41
+ logger.info("RunwayML SDK import is a placeholder.")
42
+ except ImportError: logger.warning("RunwayML SDK (placeholder) not found. RunwayML disabled.")
43
+ except Exception as e_runway_sdk: logger.warning(f"Error importing RunwayML SDK (placeholder): {e_runway_sdk}. RunwayML disabled.")
44
+
45
+
46
+ class VisualEngine:
47
+ def __init__(self, output_dir="temp_cinegen_media", default_elevenlabs_voice_id="Rachel"):
48
+ self.output_dir = output_dir
49
+ os.makedirs(self.output_dir, exist_ok=True)
50
+ self.font_filename = "DejaVuSans-Bold.ttf"
51
+ font_paths_to_try = [
52
+ self.font_filename,
53
+ f"/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf",
54
+ f"/usr/share/fonts/truetype/liberation/LiberationSans-Bold.ttf",
55
+ f"/System/Library/Fonts/Supplemental/Arial.ttf", f"C:/Windows/Fonts/arial.ttf",
56
+ f"/usr/local/share/fonts/truetype/mycustomfonts/arial.ttf"
57
+ ]
58
+ self.font_path_pil = next((p for p in font_paths_to_try if os.path.exists(p)), None)
59
+ self.font_size_pil = 20
60
+ self.video_overlay_font_size = 30
61
+ self.video_overlay_font_color = 'white'
62
+ self.video_overlay_font = 'DejaVu-Sans-Bold'
63
 
64
+ try:
65
+ self.font = ImageFont.truetype(self.font_path_pil, self.font_size_pil) if self.font_path_pil else ImageFont.load_default()
66
+ if self.font_path_pil: logger.info(f"Pillow font loaded: {self.font_path_pil}.")
67
+ else: logger.warning("Using default Pillow font."); self.font_size_pil = 10
68
+ except IOError as e_font: logger.error(f"Pillow font loading IOError: {e_font}. Using default."); self.font = ImageFont.load_default(); self.font_size_pil = 10
69
+
70
+ self.openai_api_key = None; self.USE_AI_IMAGE_GENERATION = False
71
+ self.dalle_model = "dall-e-3"; self.image_size_dalle3 = "1792x1024"
72
+ self.video_frame_size = (1280, 720)
73
+ self.elevenlabs_api_key = None; self.USE_ELEVENLABS = False; self.elevenlabs_client = None
74
+ self.elevenlabs_voice_id = default_elevenlabs_voice_id
75
+ if VoiceSettings and ELEVENLABS_CLIENT_IMPORTED: self.elevenlabs_voice_settings = VoiceSettings(stability=0.60, similarity_boost=0.80, style=0.15, use_speaker_boost=True)
76
+ else: self.elevenlabs_voice_settings = None
77
+ self.pexels_api_key = None; self.USE_PEXELS = False
78
+ self.runway_api_key = None; self.USE_RUNWAYML = False; self.runway_client = None
79
+ logger.info("VisualEngine initialized.")
80
+
81
+ def set_openai_api_key(self,k): self.openai_api_key=k; self.USE_AI_IMAGE_GENERATION=bool(k); logger.info(f"DALL-E ({self.dalle_model}) {'Ready.' if k else 'Disabled.'}")
82
+ def set_elevenlabs_api_key(self,api_key, voice_id_from_secret=None):
83
+ self.elevenlabs_api_key=api_key
84
+ if voice_id_from_secret: self.elevenlabs_voice_id = voice_id_from_secret
85
+ if api_key and ELEVENLABS_CLIENT_IMPORTED and ElevenLabsAPIClient:
86
+ try: self.elevenlabs_client = ElevenLabsAPIClient(api_key=api_key); self.USE_ELEVENLABS=bool(self.elevenlabs_client); logger.info(f"ElevenLabs Client {'Ready' if self.USE_ELEVENLABS else 'Failed Init'} (Voice ID: {self.elevenlabs_voice_id}).")
87
+ except Exception as e: logger.error(f"ElevenLabs client init error: {e}. Disabled.", exc_info=True); self.USE_ELEVENLABS=False
88
+ else: self.USE_ELEVENLABS=False; logger.info("ElevenLabs Disabled (no key or SDK).")
89
+ def set_pexels_api_key(self,k): self.pexels_api_key=k; self.USE_PEXELS=bool(k); logger.info(f"Pexels Search {'Ready.' if k else 'Disabled.'}")
90
+ def set_runway_api_key(self, k):
91
+ self.runway_api_key = k
92
+ if k and RUNWAYML_SDK_IMPORTED and RunwayMLClient:
93
+ try: self.USE_RUNWAYML = True; logger.info(f"RunwayML Client (Placeholder SDK) {'Ready.' if self.USE_RUNWAYML else 'Failed Init.'}")
94
+ except Exception as e: logger.error(f"RunwayML client (Placeholder SDK) init error: {e}. Disabled.", exc_info=True); self.USE_RUNWAYML = False
95
+ elif k: self.USE_RUNWAYML = True; logger.info("RunwayML API Key set (direct API or placeholder).")
96
+ else: self.USE_RUNWAYML = False; logger.info("RunwayML Disabled (no API key).")
97
+
98
+ def _get_text_dimensions(self, text_content, font_obj):
99
+ default_line_height = getattr(font_obj, 'size', self.font_size_pil)
100
+ if not text_content: return 0, default_line_height
101
+ try:
102
+ if hasattr(font_obj, 'getbbox'):
103
+ bbox = font_obj.getbbox(text_content); width = bbox[2] - bbox[0]; height = bbox[3] - bbox[1]
104
+ return width, height if height > 0 else default_line_height
105
+ elif hasattr(font_obj, 'getsize'):
106
+ width, height = font_obj.getsize(text_content)
107
+ return width, height if height > 0 else default_line_height
108
+ else: return int(len(text_content) * default_line_height * 0.6), int(default_line_height * 1.2)
109
+ except Exception as e: logger.warning(f"Error in _get_text_dimensions for '{text_content[:20]}...': {e}"); return int(len(text_content) * self.font_size_pil * 0.6),int(self.font_size_pil * 1.2)
110
+
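`_get_text_dimensions` prefers `getbbox` and falls back to `getsize`, which covers both new and old Pillow releases (`getsize` was removed in Pillow 10). A small usage sketch of the same probing, assuming a loaded font object:

    from PIL import ImageFont

    font = ImageFont.load_default()
    if hasattr(font, "getbbox"):
        left, top, right, bottom = font.getbbox("Hello")
        w, h = right - left, bottom - top
    else:
        w, h = font.getsize("Hello")  # pre-Pillow-10 API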
111
+ def _create_placeholder_image_content(self, text_description, filename, size=None):
112
+ if size is None: size = self.video_frame_size
113
+ img = Image.new('RGB', size, color=(20, 20, 40)); draw = ImageDraw.Draw(img)
114
+ padding = 25; max_text_width = size[0] - (2 * padding); lines = []
115
+ if not text_description: text_description = "(Placeholder: No text description provided)"
116
+ words = text_description.split(); current_line = ""
117
+ for word in words:
118
+ test_line = current_line + word + " "; line_width_test, _ = self._get_text_dimensions(test_line.strip(), self.font)
119
+ if line_width_test <= max_text_width: current_line = test_line
120
+ else:
121
+ if current_line.strip(): lines.append(current_line.strip())
122
+ word_width, _ = self._get_text_dimensions(word, self.font)
123
+ if word_width > max_text_width:
124
+ avg_char_w = self._get_text_dimensions("A", self.font)[0] or 10
125
+ chars_that_fit = int(max_text_width / avg_char_w)
126
+ lines.append(word[:chars_that_fit-3] + "..." if len(word) > chars_that_fit else word) # Corrected line
127
+ current_line = ""
128
+ else: current_line = word + " "
129
+ if current_line.strip(): lines.append(current_line.strip())
130
+ if not lines and text_description:
131
+ avg_char_w = self._get_text_dimensions("A", self.font)[0] or 10; chars_that_fit = int(max_text_width / avg_char_w)
132
+ lines.append(text_description[:chars_that_fit-3] + "..." if len(text_description) > chars_that_fit else text_description)
133
+ elif not lines: lines.append("(Placeholder Text Error)")
134
+ _, single_line_height = self._get_text_dimensions("Ay", self.font); single_line_height = single_line_height if single_line_height > 0 else (self.font_size_pil + 2)
135
+ line_spacing = 2; max_lines_to_display = min(len(lines), (size[1]-(2*padding))//(single_line_height+line_spacing)) if single_line_height > 0 else 1
136
+ if max_lines_to_display <= 0: max_lines_to_display = 1
137
+ total_text_block_height = max_lines_to_display * single_line_height + (max_lines_to_display-1)*line_spacing
138
+ y_text_start = padding + (size[1]-(2*padding)-total_text_block_height)/2.0; current_y = y_text_start
139
+ for i in range(max_lines_to_display):
140
+ line_content = lines[i]; line_width_actual, _ = self._get_text_dimensions(line_content, self.font)
141
+ x_text = max(padding, (size[0]-line_width_actual)/2.0)
142
+ draw.text((x_text, current_y), line_content, font=self.font, fill=(200,200,180)); current_y += single_line_height + line_spacing
143
+ if i==6 and max_lines_to_display > 7 and len(lines) > max_lines_to_display:
144
+ ellipsis_width, _ = self._get_text_dimensions("...",self.font); x_ellipsis = max(padding, (size[0]-ellipsis_width)/2.0)
145
+ draw.text((x_ellipsis, current_y), "...", font=self.font, fill=(200,200,180)); break
146
+ filepath = os.path.join(self.output_dir, filename)
147
+ try: img.save(filepath); return filepath
148
+ except Exception as e: logger.error(f"Error saving placeholder image {filepath}: {e}", exc_info=True); return None
149
+
150
+ def _search_pexels_image(self, query, output_filename_base):
151
+ if not self.USE_PEXELS or not self.pexels_api_key: return None
152
+ headers = {"Authorization": self.pexels_api_key}
153
+ params = {"query": query, "per_page": 1, "orientation": "landscape", "size": "large2x"}
154
+ base_name, _ = os.path.splitext(output_filename_base)
155
+ pexels_filename = base_name + f"_pexels_{random.randint(1000,9999)}.jpg" # Use base_name
156
+ filepath = os.path.join(self.output_dir, pexels_filename)
157
+ try:
158
+ logger.info(f"Pexels search: '{query}'")
159
+ effective_query = " ".join(query.split()[:5])
160
+ params["query"] = effective_query
161
+ response = requests.get("https://api.pexels.com/v1/search", headers=headers, params=params, timeout=20)
162
+ response.raise_for_status()
163
+ data = response.json() # This line and subsequent ones are now correctly in the try block
164
+ if data.get("photos") and len(data["photos"]) > 0:
165
+ photo_details = data["photos"][0]
166
+ photo_url = photo_details["src"]["large2x"]
167
+ logger.info(f"Downloading Pexels image from: {photo_url}")
168
+ image_response = requests.get(photo_url, timeout=60)
169
+ image_response.raise_for_status()
170
+ img_data = Image.open(io.BytesIO(image_response.content))
171
+ if img_data.mode != 'RGB':
172
+ logger.debug(f"Pexels image mode is {img_data.mode}, converting to RGB.")
173
+ img_data = img_data.convert('RGB')
174
+ img_data.save(filepath)
175
+ logger.info(f"Pexels image saved successfully: {filepath}")
176
+ return filepath
177
+ else:
178
+ logger.info(f"No photos found on Pexels for query: '{effective_query}'")
179
+ return None
180
+ except requests.exceptions.RequestException as e_req: logger.error(f"Pexels request error for query '{query}': {e_req}", exc_info=True)
181
+ except json.JSONDecodeError as e_json: logger.error(f"Pexels JSON decode error for query '{query}': {e_json}", exc_info=True)
182
+ except Exception as e: logger.error(f"General Pexels error for query '{query}': {e}", exc_info=True)
183
+ return None
184
+
185
+ def _generate_video_clip_with_runwayml(self, pt, iip, sifnb, tds=5): # Renamed for clarity
186
+ if not self.USE_RUNWAYML or not self.runway_api_key: logger.warning("RunwayML disabled."); return None
187
+ if not iip or not os.path.exists(iip): logger.error(f"Runway Gen-4 needs input image. Path invalid: {iip}"); return None
188
+ runway_dur = 10 if tds > 7 else 5
189
+ ovfn = f"{sifnb}_runway_gen4_d{runway_dur}s.mp4" # sifnb is a base name with no extension
190
+ ovfp = os.path.join(self.output_dir, ovfn)
191
+ logger.info(f"Runway Gen-4 (Placeholder) img: {os.path.basename(iip)}, motion: '{pt[:100]}...', dur: {runway_dur}s")
192
+ logger.warning("Using PLACEHOLDER video for Runway Gen-4.")
193
+ img_clip=None; txt_c=None; final_ph_clip=None
194
+ try:
195
+ img_clip = ImageClip(iip).set_duration(runway_dur)
196
+ txt = f"Runway Gen-4 Placeholder\nInput: {os.path.basename(iip)}\nMotion: {pt[:50]}..."
197
+ txt_c = TextClip(txt, fontsize=24,color='white',font=self.video_overlay_font,bg_color='rgba(0,0,0,0.5)',size=(self.video_frame_size[0]*0.8,None),method='caption').set_duration(runway_dur).set_position('center')
198
+ final_ph_clip = CompositeVideoClip([img_clip, txt_c], size=img_clip.size)
199
+ final_ph_clip.write_videofile(ovfp,fps=24,codec='libx264',preset='ultrafast',logger=None,threads=2)
200
+ logger.info(f"Runway Gen-4 placeholder video: {ovfp}"); return ovfp
201
+ except Exception as e: logger.error(f"Runway Gen-4 placeholder error: {e}",exc_info=True); return None
202
+ finally:
203
+ if img_clip and hasattr(img_clip,'close'): img_clip.close()
204
+ if txt_c and hasattr(txt_c,'close'): txt_c.close()
205
+ if final_ph_clip and hasattr(final_ph_clip,'close'): final_ph_clip.close()
206
+
207
+ def _create_placeholder_video_content(self, td, fn, dur=4, sz=None):
+ if sz is None: sz = self.video_frame_size
+ fp = os.path.join(self.output_dir, fn); tc = None
+ try:
+ tc = TextClip(td, fontsize=50, color='white', font=self.video_overlay_font, bg_color='black', size=sz, method='caption').set_duration(dur)
+ tc.write_videofile(fp, fps=24, codec='libx264', preset='ultrafast', logger=None, threads=2)
+ logger.info(f"Generic placeholder video: {fp}"); return fp
+ except Exception as e: logger.error(f"Generic placeholder error {fp}: {e}", exc_info=True); return None
+ finally:
+ if tc and hasattr(tc, 'close'): tc.close()
214
+
215
+ def generate_scene_asset(self, image_generation_prompt_text, motion_prompt_text_for_video,
216
+ scene_data, scene_identifier_filename_base, # This is base_name, no ext
217
+ generate_as_video_clip=False, runway_target_duration=5):
218
+ base_name = scene_identifier_filename_base # Already a base name
219
+ asset_info = {'path': None, 'type': 'none', 'error': True, 'prompt_used': image_generation_prompt_text, 'error_message': 'Generation not attempted'}
220
+ input_image_for_runway_path = None
221
+ image_filename_for_base = base_name + "_base_image.png" # Specific name for base image file
222
+ temp_image_asset_info = {'error': True, 'prompt_used': image_generation_prompt_text, 'error_message': 'Base image generation not attempted'}
223
+
224
+ if self.USE_AI_IMAGE_GENERATION and self.openai_api_key:
225
+ max_r, att_n = 2, 0
226
+ for att_n in range(max_r):
227
  try:
228
+ img_fp_dalle = os.path.join(self.output_dir, image_filename_for_base)
229
+ logger.info(f"Attempt {att_n+1} DALL-E (base img): {image_generation_prompt_text[:100]}...")
230
+ cl = openai.OpenAI(api_key=self.openai_api_key, timeout=90.0)
231
+ r = cl.images.generate(model=self.dalle_model, prompt=image_generation_prompt_text, n=1, size=self.image_size_dalle3, quality="hd", response_format="url", style="vivid")
232
+ iu = r.data[0].url; rp = getattr(r.data[0], 'revised_prompt', None)
233
+ if rp: logger.info(f"DALL-E revised: {rp[:100]}...")
234
+ ir = requests.get(iu, timeout=120); ir.raise_for_status()
235
+ id_img = Image.open(io.BytesIO(ir.content));
236
+ if id_img.mode != 'RGB': id_img = id_img.convert('RGB')
237
+ id_img.save(img_fp_dalle); logger.info(f"DALL-E base image: {img_fp_dalle}");
238
+ input_image_for_runway_path = img_fp_dalle
239
+ temp_image_asset_info = {'path': img_fp_dalle, 'type': 'image', 'error': False, 'prompt_used': image_generation_prompt_text, 'revised_prompt': rp}
240
+ break
241
+ except openai.RateLimitError as e: logger.warning(f"OpenAI Rate Limit {att_n+1}: {e}. Retry..."); time.sleep(5*(att_n+1)); temp_image_asset_info['error_message']=str(e)
242
+ except Exception as e: logger.error(f"DALL-E error: {e}", exc_info=True); temp_image_asset_info['error_message']=str(e); break
243
+ if temp_image_asset_info['error']: logger.warning(f"DALL-E failed after {att_n+1} attempts for base image.")
244
+
245
+ if temp_image_asset_info['error'] and self.USE_PEXELS:
246
+ pqt = scene_data.get('pexels_search_query_감독', f"{scene_data.get('emotional_beat','')} {scene_data.get('setting_description','')}")
247
+ pp = self._search_pexels_image(pqt, image_filename_for_base) # Use base name for pexels
248
+ if pp: input_image_for_runway_path = pp; temp_image_asset_info = {'path': pp, 'type': 'image', 'error': False, 'prompt_used': f"Pexels: {pqt}"}
249
+ else: current_em = temp_image_asset_info.get('error_message',""); temp_image_asset_info['error_message']=(current_em + " Pexels failed.").strip()
250
+
251
+ if temp_image_asset_info['error']:
252
+ logger.warning("Base image (DALL-E/Pexels) failed. Placeholder base image.")
253
+ ppt = temp_image_asset_info.get('prompt_used', image_generation_prompt_text)
254
+ php = self._create_placeholder_image_content(f"[Base Img Placeholder] {ppt[:100]}...", image_filename_for_base)
255
+ if php: input_image_for_runway_path = php; temp_image_asset_info = {'path': php, 'type': 'image', 'error': False, 'prompt_used': ppt}
256
+ else: current_em=temp_image_asset_info.get('error_message',"");temp_image_asset_info['error_message']=(current_em + " Base placeholder failed.").strip()
257
+
258
+ if generate_as_video_clip:
259
+ if self.USE_RUNWAYML and input_image_for_runway_path:
260
+ video_path = self._generate_video_clip_with_runwayml(motion_prompt_text_for_video, input_image_for_runway_path, base_name, runway_target_duration) # Pass base_name
261
+ if video_path and os.path.exists(video_path):
262
+ return {'path': video_path, 'type': 'video', 'error': False, 'prompt_used': motion_prompt_text_for_video, 'base_image_path': input_image_for_runway_path}
263
+ else: asset_info = temp_image_asset_info; asset_info['error'] = True; asset_info['error_message'] = "RunwayML video gen failed; using base image."; asset_info['type'] = 'image'; return asset_info
264
+ elif not self.USE_RUNWAYML: asset_info = temp_image_asset_info; asset_info['error_message'] = "RunwayML disabled; using base image."; asset_info['type'] = 'image'; return asset_info
265
+ else: asset_info = temp_image_asset_info; asset_info['error_message'] = (asset_info.get('error_message',"") + " Base image failed, Runway video not attempted.").strip(); asset_info['type'] = 'image'; return asset_info
266
+ else: return temp_image_asset_info
267
+
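`generate_scene_asset` is effectively a provider chain: DALL-E first, then Pexels, then a locally drawn placeholder, with the winning base image optionally handed to the (placeholder) RunwayML step. A schematic sketch of that control flow, with hypothetical provider callables returning the same asset-dict shape:

    def first_successful(providers, prompt):
        # Try each provider in order; return the first asset dict without an error.
        last_error = None
        for provider in providers:
            result = provider(prompt)  # each returns {'path', 'type', 'error', ...}
            if result and not result.get('error'):
                return result
            last_error = (result or {}).get('error_message')
        return {'path': None, 'type': 'none', 'error': True, 'error_message': last_error}

    # asset = first_successful([dalle_provider, pexels_provider, placeholder_provider], prompt_text)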
268
+ def generate_narration_audio(self, ttn, ofn="narration_overall.mp3"):
+ if not self.USE_ELEVENLABS or not self.elevenlabs_client or not ttn: logger.info("11L skip."); return None
+ afp = os.path.join(self.output_dir, ofn)
+ try:
+ logger.info(f"11L audio (Voice:{self.elevenlabs_voice_id}): {ttn[:70]}..."); asm = None
+ if hasattr(self.elevenlabs_client, 'text_to_speech') and hasattr(self.elevenlabs_client.text_to_speech, 'stream'): asm = self.elevenlabs_client.text_to_speech.stream; logger.info("Using 11L .text_to_speech.stream()")
+ elif hasattr(self.elevenlabs_client, 'generate_stream'): asm = self.elevenlabs_client.generate_stream; logger.info("Using 11L .generate_stream()")
+ elif hasattr(self.elevenlabs_client, 'generate'):
+ logger.info("Using 11L .generate()")
+ vp = Voice(voice_id=str(self.elevenlabs_voice_id), settings=self.elevenlabs_voice_settings) if Voice and self.elevenlabs_voice_settings else str(self.elevenlabs_voice_id)
+ ab = self.elevenlabs_client.generate(text=ttn, voice=vp, model="eleven_multilingual_v2")
+ with open(afp, "wb") as f: f.write(ab)
+ logger.info(f"11L audio (non-stream): {afp}"); return afp
+ else: logger.error("No 11L audio method."); return None
+ if asm:
+ vps = {"voice_id": str(self.elevenlabs_voice_id)}
+ if self.elevenlabs_voice_settings:
+ if hasattr(self.elevenlabs_voice_settings, 'model_dump'): vps["voice_settings"] = self.elevenlabs_voice_settings.model_dump()
+ elif hasattr(self.elevenlabs_voice_settings, 'dict'): vps["voice_settings"] = self.elevenlabs_voice_settings.dict()
+ else: vps["voice_settings"] = self.elevenlabs_voice_settings
+ adi = asm(text=ttn, model_id="eleven_multilingual_v2", **vps)
+ with open(afp, "wb") as f:
+ for c in adi:
+ if c: f.write(c)
+ logger.info(f"11L audio (stream): {afp}"); return afp
+ except Exception as e: logger.error(f"11L audio error: {e}", exc_info=True); return None
287
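+    # Usage sketch (illustrative names only): the returned path feeds straight
+    # into animatic assembly.
+    #
+    #   narration_path = engine.generate_narration_audio("A storm gathers over the city.")
+    #   if narration_path:
+    #       engine.assemble_animatic_from_assets(assets, overall_narration_path=narration_path)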
+
+    def assemble_animatic_from_assets(self, asset_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24):
+        if not asset_data_list:
+            logger.warning("No assets for animatic."); return None
+        processed_clips, narration_clip, final_clip = [], None, None
+        logger.info(f"Assembling from {len(asset_data_list)} assets. Frame: {self.video_frame_size}.")
+
+        for i, asset_info in enumerate(asset_data_list):
+            asset_path, asset_type, scene_dur = asset_info.get('path'), asset_info.get('type'), asset_info.get('duration', 4.5)
+            scene_num, key_action = asset_info.get('scene_num', i + 1), asset_info.get('key_action', '')
+            logger.info(f"S{scene_num}: Path='{asset_path}', Type='{asset_type}', Dur='{scene_dur}'s")
+
+            if not (asset_path and os.path.exists(asset_path)):
+                logger.warning(f"S{scene_num}: Not found '{asset_path}'. Skipping."); continue
+            if scene_dur <= 0:
+                logger.warning(f"S{scene_num}: Invalid duration ({scene_dur}s). Skipping."); continue
+
+            current_scene_mvpy_clip = None
+            try:
                 if asset_type == 'image':
+                    # Letterbox the image onto the target frame and flatten to RGB.
+                    pil_img = Image.open(asset_path)
+                    logger.debug(f"S{scene_num}: Loaded img. Mode:{pil_img.mode}, Size:{pil_img.size}")
+                    img_rgba = pil_img.convert('RGBA') if pil_img.mode != 'RGBA' else pil_img.copy()
+                    thumb = img_rgba.copy()
+                    rf = Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.BILINEAR
+                    thumb.thumbnail(self.video_frame_size, rf)
+                    cv_rgba = Image.new('RGBA', self.video_frame_size, (0, 0, 0, 0))
+                    xo, yo = (self.video_frame_size[0] - thumb.width) // 2, (self.video_frame_size[1] - thumb.height) // 2
+                    cv_rgba.paste(thumb, (xo, yo), thumb)
+                    final_rgb_pil = Image.new("RGB", self.video_frame_size, (0, 0, 0))
+                    final_rgb_pil.paste(cv_rgba, mask=cv_rgba.split()[3])  # alpha-composite onto black
+                    dbg_path = os.path.join(self.output_dir, f"debug_PRE_NUMPY_S{scene_num}.png")
+                    final_rgb_pil.save(dbg_path); logger.info(f"DEBUG: Saved PRE_NUMPY_S{scene_num} to {dbg_path}")
+                    frame_np = np.array(final_rgb_pil, dtype=np.uint8)
+                    if not frame_np.flags['C_CONTIGUOUS']:
+                        frame_np = np.ascontiguousarray(frame_np, dtype=np.uint8)
+                    logger.debug(f"S{scene_num}: NumPy for MoviePy. Shape:{frame_np.shape}, DType:{frame_np.dtype}, C-Contig:{frame_np.flags['C_CONTIGUOUS']}")
+                    if frame_np.size == 0 or frame_np.ndim != 3 or frame_np.shape[2] != 3:
+                        logger.error(f"S{scene_num}: Invalid NumPy frame. Skipping."); continue
+                    clip_base = ImageClip(frame_np, transparent=False).set_duration(scene_dur)
+                    mvpy_dbg_path = os.path.join(self.output_dir, f"debug_MOVIEPY_FRAME_S{scene_num}.png")
+                    clip_base.save_frame(mvpy_dbg_path, t=0.1); logger.info(f"DEBUG: Saved MOVIEPY_FRAME_S{scene_num} to {mvpy_dbg_path}")
+                    clip_fx = clip_base
+                    try:
+                        # Gentle Ken Burns zoom; default args bind this scene's values
+                        # eagerly, since MoviePy evaluates the lambda at render time.
+                        es = random.uniform(1.03, 1.08)
+                        clip_fx = clip_base.fx(vfx.resize, lambda t, s=es, d=scene_dur: 1 + (s - 1) * (t / d) if d > 0 else 1).set_position('center')
+                    except Exception as e:
+                        logger.error(f"S{scene_num} Ken Burns error: {e}", exc_info=False)
+                    current_scene_mvpy_clip = clip_fx
                 elif asset_type == 'video':
+                    src_clip = None
                     try:
+                        src_clip = VideoFileClip(asset_path, target_resolution=(self.video_frame_size[1], self.video_frame_size[0]) if self.video_frame_size else None, audio=False)
+                        tmp_clip = src_clip
+                        if src_clip.duration != scene_dur:
+                            if src_clip.duration > scene_dur:
+                                tmp_clip = src_clip.subclip(0, scene_dur)
+                            elif scene_dur / src_clip.duration > 1.5 and src_clip.duration > 0.1:
+                                # Much shorter than target: loop it up to the scene duration.
+                                tmp_clip = src_clip.loop(duration=scene_dur)
+                            else:
+                                # Slightly shorter: keep its natural length rather than
+                                # stretching set_duration past the available frames.
+                                logger.info(f"S{scene_num} Video clip ({src_clip.duration:.2f}s) shorter than target ({scene_dur:.2f}s).")
+                        current_scene_mvpy_clip = tmp_clip.set_duration(min(scene_dur, tmp_clip.duration))
+                        if current_scene_mvpy_clip.size != list(self.video_frame_size):
+                            current_scene_mvpy_clip = current_scene_mvpy_clip.resize(self.video_frame_size)
+                    except Exception as e:
+                        logger.error(f"S{scene_num} Video load error '{asset_path}': {e}", exc_info=True); continue
+                    finally:
+                        if src_clip and src_clip is not current_scene_mvpy_clip and hasattr(src_clip, 'close'):
+                            src_clip.close()
+                else:
+                    logger.warning(f"S{scene_num} Unknown asset type '{asset_type}'. Skipping."); continue
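+                # Optional caption overlay: burn the scene number and key action into
+                # the lower part of the frame so reviewers can follow the beat sheet.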
+                if current_scene_mvpy_clip and key_action:
+                    try:
+                        to_dur = min(current_scene_mvpy_clip.duration - 0.5, current_scene_mvpy_clip.duration * 0.8) if current_scene_mvpy_clip.duration > 0.5 else current_scene_mvpy_clip.duration
+                        to_start = 0.25
+                        txt_c = TextClip(f"Scene {scene_num}\n{key_action}",
+                                         fontsize=self.video_overlay_font_size, color=self.video_overlay_font_color,
+                                         font=self.video_overlay_font, bg_color='rgba(10,10,20,0.7)',
+                                         method='caption', align='West', size=(self.video_frame_size[0] * 0.9, None),
+                                         kerning=-1, stroke_color='black', stroke_width=1.5
+                                         ).set_duration(to_dur).set_start(to_start).set_position(('center', 0.92), relative=True)
+                        current_scene_mvpy_clip = CompositeVideoClip([current_scene_mvpy_clip, txt_c], size=self.video_frame_size, use_bgclip=True)
+                    except Exception as e:
+                        logger.error(f"S{scene_num} TextClip error: {e}. Proceeding without text.", exc_info=True)
+                if current_scene_mvpy_clip:
+                    processed_clips.append(current_scene_mvpy_clip)
+                    logger.info(f"S{scene_num} Processed. Dur:{current_scene_mvpy_clip.duration:.2f}s.")
+            except Exception as e:
+                logger.error(f"MAJOR Error S{scene_num} ({asset_path}): {e}", exc_info=True)
+            finally:
+                # Close only clips that did not make it into processed_clips; appended
+                # clips must stay open until the final render, then close in the outer
+                # finally block below.
+                if current_scene_mvpy_clip and current_scene_mvpy_clip not in processed_clips and hasattr(current_scene_mvpy_clip, 'close'):
+                    try: current_scene_mvpy_clip.close()
+                    except Exception: pass
+
+        if not processed_clips:
+            logger.warning("No clips processed. Aborting."); return None
+        td = 0.75  # transition duration in seconds
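+        # A negative `padding` in concatenate_videoclips overlaps consecutive clips by
+        # `td` seconds; method="compose" stacks them during the overlap. (A true
+        # cross-fade would additionally need crossfadein applied to each clip.)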
         try:
+            logger.info(f"Concatenating {len(processed_clips)} clips.")
+            if len(processed_clips) > 1:
+                final_clip = concatenate_videoclips(processed_clips, padding=-td if td > 0 else 0, method="compose")
+            elif processed_clips:
+                final_clip = processed_clips[0]
+            if not final_clip:
+                logger.error("Concatenation failed."); return None
+            logger.info(f"Concatenated dur:{final_clip.duration:.2f}s")
+            if td > 0 and final_clip.duration > 0:
+                if final_clip.duration > td * 2:
+                    final_clip = final_clip.fx(vfx.fadein, td).fx(vfx.fadeout, td)
+                else:
+                    final_clip = final_clip.fx(vfx.fadein, min(td, final_clip.duration / 2.0))
+            if overall_narration_path and os.path.exists(overall_narration_path) and final_clip.duration > 0:
+                try:
+                    narration_clip = AudioFileClip(overall_narration_path)
+                    final_clip = final_clip.set_audio(narration_clip)
+                    logger.info("Narration added.")
+                except Exception as e:
+                    logger.error(f"Narration add error: {e}", exc_info=True)
+            elif final_clip.duration <= 0:
+                logger.warning("Final video has zero duration; skipping audio.")
+            if final_clip and final_clip.duration > 0:
+                op = os.path.join(self.output_dir, output_filename)
+                logger.info(f"Writing video: {op} (Dur:{final_clip.duration:.2f}s)")
+                # yuv420p keeps the H.264 output playable in common players.
+                final_clip.write_videofile(op, fps=fps, codec='libx264', preset='medium', audio_codec='aac',
+                                           temp_audiofile=os.path.join(self.output_dir, f'temp-audio-{os.urandom(4).hex()}.m4a'),
+                                           remove_temp=True, threads=os.cpu_count() or 2, logger='bar',
+                                           bitrate="5000k", ffmpeg_params=["-pix_fmt", "yuv420p"])
+                logger.info(f"Video created: {op}"); return op
+            else:
+                logger.error("Final clip invalid. Nothing written."); return None
+        except Exception as e:
+            logger.error(f"Video write error: {e}", exc_info=True); return None
+        finally:
+            logger.debug("Closing all MoviePy clips in `assemble_animatic_from_assets` finally block.")
+            clips_to_close = processed_clips + ([narration_clip] if narration_clip else []) + ([final_clip] if final_clip else [])
+            for clip_obj in clips_to_close:
+                if clip_obj and hasattr(clip_obj, 'close'):
+                    try: clip_obj.close()
+                    except Exception as e_close: logger.warning(f"Ignoring error while closing a clip: {e_close}")
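+
+# Minimal end-to-end sketch of how an app layer might drive these methods.
+# Illustrative only: `engine`, `treatment`, `scene_number`, `key_plot_beat`, and
+# `full_narration_script` are assumed names, not part of this module.
+#
+#   assets = []
+#   for scene in treatment['scenes']:
+#       a = engine.generate_scene_asset(...)  # returns the asset-info dict per scene
+#       a.update({'scene_num': scene.get('scene_number'),
+#                 'duration': scene.get('duration', 4.5),
+#                 'key_action': scene.get('key_plot_beat', '')})
+#       assets.append(a)
+#   narration = engine.generate_narration_audio(full_narration_script)
+#   final_path = engine.assemble_animatic_from_assets(assets, overall_narration_path=narration)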