mgbam committed on
Commit a48cea9 · verified · 1 Parent(s): d1bb1cc

Update app.py

Files changed (1)
  1. app.py +437 -371
app.py CHANGED
@@ -1,378 +1,444 @@
- # core/visual_engine.py
- from PIL import Image, ImageDraw, ImageFont, ImageOps
- # --- MONKEY PATCH FOR Image.ANTIALIAS ---
- try:
-     if hasattr(Image, 'Resampling') and hasattr(Image.Resampling, 'LANCZOS'): # Pillow 9+
-         if not hasattr(Image, 'ANTIALIAS'): Image.ANTIALIAS = Image.Resampling.LANCZOS
-     elif hasattr(Image, 'LANCZOS'): # Pillow 8
-         if not hasattr(Image, 'ANTIALIAS'): Image.ANTIALIAS = Image.LANCZOS
-     elif not hasattr(Image, 'ANTIALIAS'):
-         print("WARNING: Pillow version lacks common Resampling attributes or ANTIALIAS. Video effects might fail.")
- except Exception as e_mp: print(f"WARNING: ANTIALIAS monkey-patch error: {e_mp}")
- # --- END MONKEY PATCH ---
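For context on the shim being removed above: Pillow 9 deprecated `Image.ANTIALIAS` and Pillow 10 removed it, while MoviePy 1.x's resize effect still references the old name, so re-attaching it keeps the video effects working. A minimal standalone sketch of the same idea (assuming Pillow ≥ 9.1, where `Image.Resampling` exists):

```python
from PIL import Image

# Pillow >= 9.1 exposes Image.Resampling; Pillow 10 dropped Image.ANTIALIAS.
# Re-attach the old name so legacy callers (e.g. moviepy 1.x) keep working.
if not hasattr(Image, "ANTIALIAS"):
    Image.ANTIALIAS = Image.Resampling.LANCZOS

img = Image.new("RGB", (200, 100), "navy")
thumb = img.resize((100, 50), Image.ANTIALIAS)  # works on old and new Pillow
print(thumb.size)  # -> (100, 50)
```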
-
- from moviepy.editor import (ImageClip, VideoFileClip, concatenate_videoclips, TextClip,
-                             CompositeVideoClip, AudioFileClip)
- import moviepy.video.fx.all as vfx
- import numpy as np
  import os
- import openai
- import requests
- import io
- import time
- import random
- import json  # needed for json.JSONDecodeError in the Pexels handler below
  import logging
 
- logger = logging.getLogger(__name__)
- logger.setLevel(logging.INFO)
-
- # --- ElevenLabs Client Import ---
- ELEVENLABS_CLIENT_IMPORTED = False; ElevenLabsAPIClient = None; Voice = None; VoiceSettings = None
- try:
-     from elevenlabs.client import ElevenLabs as ImportedElevenLabsClient
-     from elevenlabs import Voice as ImportedVoice, VoiceSettings as ImportedVoiceSettings
-     ElevenLabsAPIClient = ImportedElevenLabsClient; Voice = ImportedVoice; VoiceSettings = ImportedVoiceSettings
-     ELEVENLABS_CLIENT_IMPORTED = True; logger.info("ElevenLabs client components imported.")
- except Exception as e_eleven: logger.warning(f"ElevenLabs client import failed: {e_eleven}. Audio disabled.")
-
- # --- RunwayML Client Import (Placeholder) ---
- RUNWAYML_SDK_IMPORTED = False; RunwayMLClient = None
- try:
-     logger.info("RunwayML SDK import is a placeholder.")
- except ImportError: logger.warning("RunwayML SDK (placeholder) not found. RunwayML disabled.")
- except Exception as e_runway_sdk: logger.warning(f"Error importing RunwayML SDK (placeholder): {e_runway_sdk}. RunwayML disabled.")
-
-
- class VisualEngine:
-     def __init__(self, output_dir="temp_cinegen_media", default_elevenlabs_voice_id="Rachel"):
-         self.output_dir = output_dir
-         os.makedirs(self.output_dir, exist_ok=True)
-         self.font_filename = "DejaVuSans-Bold.ttf"
-         font_paths_to_try = [
-             self.font_filename,
-             f"/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf",
-             f"/usr/share/fonts/truetype/liberation/LiberationSans-Bold.ttf",
-             f"/System/Library/Fonts/Supplemental/Arial.ttf", f"C:/Windows/Fonts/arial.ttf",
-             f"/usr/local/share/fonts/truetype/mycustomfonts/arial.ttf"
-         ]
-         self.font_path_pil = next((p for p in font_paths_to_try if os.path.exists(p)), None)
-         self.font_size_pil = 20
-         self.video_overlay_font_size = 30
-         self.video_overlay_font_color = 'white'
-         self.video_overlay_font = 'DejaVu-Sans-Bold'
 
-         try:
-             self.font = ImageFont.truetype(self.font_path_pil, self.font_size_pil) if self.font_path_pil else ImageFont.load_default()
-             if self.font_path_pil: logger.info(f"Pillow font loaded: {self.font_path_pil}.")
-             else: logger.warning("Using default Pillow font."); self.font_size_pil = 10
-         except IOError as e_font: logger.error(f"Pillow font loading IOError: {e_font}. Using default."); self.font = ImageFont.load_default(); self.font_size_pil = 10
-
-         self.openai_api_key = None; self.USE_AI_IMAGE_GENERATION = False
-         self.dalle_model = "dall-e-3"; self.image_size_dalle3 = "1792x1024"
-         self.video_frame_size = (1280, 720)
-         self.elevenlabs_api_key = None; self.USE_ELEVENLABS = False; self.elevenlabs_client = None
-         self.elevenlabs_voice_id = default_elevenlabs_voice_id
-         if VoiceSettings and ELEVENLABS_CLIENT_IMPORTED: self.elevenlabs_voice_settings = VoiceSettings(stability=0.60, similarity_boost=0.80, style=0.15, use_speaker_boost=True)
-         else: self.elevenlabs_voice_settings = None
-         self.pexels_api_key = None; self.USE_PEXELS = False
-         self.runway_api_key = None; self.USE_RUNWAYML = False; self.runway_client = None
-         logger.info("VisualEngine initialized.")
-
-     def set_openai_api_key(self,k): self.openai_api_key=k; self.USE_AI_IMAGE_GENERATION=bool(k); logger.info(f"DALL-E ({self.dalle_model}) {'Ready.' if k else 'Disabled.'}")
-     def set_elevenlabs_api_key(self,api_key, voice_id_from_secret=None):
-         self.elevenlabs_api_key=api_key
-         if voice_id_from_secret: self.elevenlabs_voice_id = voice_id_from_secret
-         if api_key and ELEVENLABS_CLIENT_IMPORTED and ElevenLabsAPIClient:
-             try: self.elevenlabs_client = ElevenLabsAPIClient(api_key=api_key); self.USE_ELEVENLABS=bool(self.elevenlabs_client); logger.info(f"ElevenLabs Client {'Ready' if self.USE_ELEVENLABS else 'Failed Init'} (Voice ID: {self.elevenlabs_voice_id}).")
-             except Exception as e: logger.error(f"ElevenLabs client init error: {e}. Disabled.", exc_info=True); self.USE_ELEVENLABS=False
-         else: self.USE_ELEVENLABS=False; logger.info("ElevenLabs Disabled (no key or SDK).")
-     def set_pexels_api_key(self,k): self.pexels_api_key=k; self.USE_PEXELS=bool(k); logger.info(f"Pexels Search {'Ready.' if k else 'Disabled.'}")
-     def set_runway_api_key(self, k):
-         self.runway_api_key = k
-         if k and RUNWAYML_SDK_IMPORTED and RunwayMLClient:
-             try: self.USE_RUNWAYML = True; logger.info(f"RunwayML Client (Placeholder SDK) {'Ready.' if self.USE_RUNWAYML else 'Failed Init.'}")
-             except Exception as e: logger.error(f"RunwayML client (Placeholder SDK) init error: {e}. Disabled.", exc_info=True); self.USE_RUNWAYML = False
-         elif k: self.USE_RUNWAYML = True; logger.info("RunwayML API Key set (direct API or placeholder).")
-         else: self.USE_RUNWAYML = False; logger.info("RunwayML Disabled (no API key).")
-
-     def _get_text_dimensions(self, text_content, font_obj):
-         default_line_height = getattr(font_obj, 'size', self.font_size_pil)
-         if not text_content: return 0, default_line_height
-         try:
-             if hasattr(font_obj, 'getbbox'):
-                 bbox = font_obj.getbbox(text_content); width = bbox[2] - bbox[0]; height = bbox[3] - bbox[1]
-                 return width, height if height > 0 else default_line_height
-             elif hasattr(font_obj, 'getsize'):
-                 width, height = font_obj.getsize(text_content)
-                 return width, height if height > 0 else default_line_height
-             else: return int(len(text_content) * default_line_height * 0.6), int(default_line_height * 1.2)
-         except Exception as e: logger.warning(f"Error in _get_text_dimensions for '{text_content[:20]}...': {e}"); return int(len(text_content) * self.font_size_pil * 0.6),int(self.font_size_pil * 1.2)
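`_get_text_dimensions` is another compatibility fallback: `getsize` was removed in Pillow 10 and `getbbox` is its replacement, so the method probes for whichever exists. The same fallback, sketched in isolation:

```python
from PIL import ImageFont

def text_dimensions(font, text):
    """Measure rendered text across Pillow versions (sketch)."""
    if hasattr(font, "getbbox"):        # Pillow >= 8.0
        left, top, right, bottom = font.getbbox(text)
        return right - left, bottom - top
    if hasattr(font, "getsize"):        # removed in Pillow 10
        return font.getsize(text)
    # Last resort: rough estimate from the nominal font size.
    size = getattr(font, "size", 10)
    return int(len(text) * size * 0.6), int(size * 1.2)

print(text_dimensions(ImageFont.load_default(), "Hello"))
```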
-
-     def _create_placeholder_image_content(self, text_description, filename, size=None):
-         if size is None: size = self.video_frame_size
-         img = Image.new('RGB', size, color=(20, 20, 40)); draw = ImageDraw.Draw(img)
-         padding = 25; max_text_width = size[0] - (2 * padding); lines = []
-         if not text_description: text_description = "(Placeholder: No text description provided)"
-         words = text_description.split(); current_line = ""
-         for word in words:
-             test_line = current_line + word + " "; line_width_test, _ = self._get_text_dimensions(test_line.strip(), self.font)
-             if line_width_test <= max_text_width: current_line = test_line
-             else:
-                 if current_line.strip(): lines.append(current_line.strip())
-                 word_width, _ = self._get_text_dimensions(word, self.font)
-                 if word_width > max_text_width:
-                     avg_char_w = self._get_text_dimensions("A", self.font)[0] or 10
-                     chars_that_fit = int(max_text_width / avg_char_w)
-                     lines.append(word[:chars_that_fit-3] + "..." if len(word) > chars_that_fit else word) # Corrected line
-                     current_line = ""
-                 else: current_line = word + " "
-         if current_line.strip(): lines.append(current_line.strip())
-         if not lines and text_description:
-             avg_char_w = self._get_text_dimensions("A", self.font)[0] or 10; chars_that_fit = int(max_text_width / avg_char_w)
-             lines.append(text_description[:chars_that_fit-3] + "..." if len(text_description) > chars_that_fit else text_description)
-         elif not lines: lines.append("(Placeholder Text Error)")
-         _, single_line_height = self._get_text_dimensions("Ay", self.font); single_line_height = single_line_height if single_line_height > 0 else (self.font_size_pil + 2)
-         line_spacing = 2; max_lines_to_display = min(len(lines), (size[1]-(2*padding))//(single_line_height+line_spacing)) if single_line_height > 0 else 1
-         if max_lines_to_display <= 0: max_lines_to_display = 1
-         total_text_block_height = max_lines_to_display * single_line_height + (max_lines_to_display-1)*line_spacing
-         y_text_start = padding + (size[1]-(2*padding)-total_text_block_height)/2.0; current_y = y_text_start
-         for i in range(max_lines_to_display):
-             line_content = lines[i]; line_width_actual, _ = self._get_text_dimensions(line_content, self.font)
-             x_text = max(padding, (size[0]-line_width_actual)/2.0)
-             draw.text((x_text, current_y), line_content, font=self.font, fill=(200,200,180)); current_y += single_line_height + line_spacing
-             if i==6 and max_lines_to_display > 7 and len(lines) > max_lines_to_display:
-                 ellipsis_width, _ = self._get_text_dimensions("...",self.font); x_ellipsis = max(padding, (size[0]-ellipsis_width)/2.0)
-                 draw.text((x_ellipsis, current_y), "...", font=self.font, fill=(200,200,180)); break
-         filepath = os.path.join(self.output_dir, filename)
-         try: img.save(filepath); return filepath
-         except Exception as e: logger.error(f"Error saving placeholder image {filepath}: {e}", exc_info=True); return None
-
-     def _search_pexels_image(self, query, output_filename_base):
-         if not self.USE_PEXELS or not self.pexels_api_key: return None
-         headers = {"Authorization": self.pexels_api_key}
-         params = {"query": query, "per_page": 1, "orientation": "landscape", "size": "large2x"}
-         base_name, _ = os.path.splitext(output_filename_base)
-         pexels_filename = base_name + f"_pexels_{random.randint(1000,9999)}.jpg" # Use base_name
-         filepath = os.path.join(self.output_dir, pexels_filename)
-         try:
-             logger.info(f"Pexels search: '{query}'")
-             effective_query = " ".join(query.split()[:5])
-             params["query"] = effective_query
-             response = requests.get("https://api.pexels.com/v1/search", headers=headers, params=params, timeout=20)
-             response.raise_for_status()
-             data = response.json() # This line and subsequent ones are now correctly in the try block
-             if data.get("photos") and len(data["photos"]) > 0:
-                 photo_details = data["photos"][0]
-                 photo_url = photo_details["src"]["large2x"]
-                 logger.info(f"Downloading Pexels image from: {photo_url}")
-                 image_response = requests.get(photo_url, timeout=60)
-                 image_response.raise_for_status()
-                 img_data = Image.open(io.BytesIO(image_response.content))
-                 if img_data.mode != 'RGB':
-                     logger.debug(f"Pexels image mode is {img_data.mode}, converting to RGB.")
-                     img_data = img_data.convert('RGB')
-                 img_data.save(filepath)
-                 logger.info(f"Pexels image saved successfully: {filepath}")
-                 return filepath
-             else:
-                 logger.info(f"No photos found on Pexels for query: '{effective_query}'")
-                 return None
-         except requests.exceptions.RequestException as e_req: logger.error(f"Pexels request error for query '{query}': {e_req}", exc_info=True)
-         except json.JSONDecodeError as e_json: logger.error(f"Pexels JSON decode error for query '{query}': {e_json}", exc_info=True)
-         except Exception as e: logger.error(f"General Pexels error for query '{query}': {e}", exc_info=True)
-         return None
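The Pexels call above trims the query to its first five words and requests a single landscape `large2x` photo. A trimmed-down sketch of just that request, with the endpoint and parameters taken from the code (the API key is a placeholder):

```python
import requests

def pexels_first_photo_url(query: str, api_key: str):
    """Return the large2x URL of the top Pexels hit, or None (sketch)."""
    resp = requests.get(
        "https://api.pexels.com/v1/search",
        headers={"Authorization": api_key},
        params={"query": " ".join(query.split()[:5]),  # same 5-word trim as above
                "per_page": 1, "orientation": "landscape", "size": "large2x"},
        timeout=20,
    )
    resp.raise_for_status()
    photos = resp.json().get("photos", [])
    return photos[0]["src"]["large2x"] if photos else None

# Usage (requires a real key):
# print(pexels_first_photo_url("post-apocalyptic desert oasis mirage", "YOUR_PEXELS_KEY"))
```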
-
-     def _generate_video_clip_with_runwayml(self, pt, iip, sifnb, tds=5): # Renamed for clarity
-         if not self.USE_RUNWAYML or not self.runway_api_key: logger.warning("RunwayML disabled."); return None
-         if not iip or not os.path.exists(iip): logger.error(f"Runway Gen-4 needs input image. Path invalid: {iip}"); return None
-         runway_dur = 10 if tds > 7 else 5
-         ovfn = sifnb.replace(".png", f"_runway_gen4_d{runway_dur}s.mp4") # sifnb should be base name
-         ovfp = os.path.join(self.output_dir, ovfn)
-         logger.info(f"Runway Gen-4 (Placeholder) img: {os.path.basename(iip)}, motion: '{pt[:100]}...', dur: {runway_dur}s")
-         logger.warning("Using PLACEHOLDER video for Runway Gen-4.")
-         img_clip=None; txt_c=None; final_ph_clip=None
-         try:
-             img_clip = ImageClip(iip).set_duration(runway_dur)
-             txt = f"Runway Gen-4 Placeholder\nInput: {os.path.basename(iip)}\nMotion: {pt[:50]}..."
-             txt_c = TextClip(txt, fontsize=24,color='white',font=self.video_overlay_font,bg_color='rgba(0,0,0,0.5)',size=(self.video_frame_size[0]*0.8,None),method='caption').set_duration(runway_dur).set_position('center')
-             final_ph_clip = CompositeVideoClip([img_clip, txt_c], size=img_clip.size)
-             final_ph_clip.write_videofile(ovfp,fps=24,codec='libx264',preset='ultrafast',logger=None,threads=2)
-             logger.info(f"Runway Gen-4 placeholder video: {ovfp}"); return ovfp
-         except Exception as e: logger.error(f"Runway Gen-4 placeholder error: {e}",exc_info=True); return None
-         finally:
-             if img_clip and hasattr(img_clip,'close'): img_clip.close()
-             if txt_c and hasattr(txt_c,'close'): txt_c.close()
-             if final_ph_clip and hasattr(final_ph_clip,'close'): final_ph_clip.close()
-
-     def _create_placeholder_video_content(self, td, fn, dur=4, sz=None):
-         if sz is None: sz = self.video_frame_size
-         fp = os.path.join(self.output_dir, fn); tc = None  # always defined, not only when sz is None
-         try:
-             tc = TextClip(td, fontsize=50,color='white',font=self.video_overlay_font,bg_color='black',size=sz,method='caption').set_duration(dur)
-             tc.write_videofile(fp,fps=24,codec='libx264',preset='ultrafast',logger=None,threads=2); logger.info(f"Generic placeholder video: {fp}"); return fp
-         except Exception as e: logger.error(f"Generic placeholder error {fp}: {e}",exc_info=True); return None
-         finally:
-             if tc and hasattr(tc,'close'): tc.close()
-
-     def generate_scene_asset(self, image_generation_prompt_text, motion_prompt_text_for_video,
-                              scene_data, scene_identifier_filename_base, # This is base_name, no ext
-                              generate_as_video_clip=False, runway_target_duration=5):
-         base_name = scene_identifier_filename_base # Already a base name
-         asset_info = {'path': None, 'type': 'none', 'error': True, 'prompt_used': image_generation_prompt_text, 'error_message': 'Generation not attempted'}
-         input_image_for_runway_path = None
-         image_filename_for_base = base_name + "_base_image.png" # Specific name for base image file
-         temp_image_asset_info = {'error': True, 'prompt_used': image_generation_prompt_text, 'error_message': 'Base image generation not attempted'}
-
-         if self.USE_AI_IMAGE_GENERATION and self.openai_api_key:
-             max_r, att_n = 2, 0
-             for att_n in range(max_r):
  try:
-                     img_fp_dalle = os.path.join(self.output_dir, image_filename_for_base)
-                     logger.info(f"Attempt {att_n+1} DALL-E (base img): {image_generation_prompt_text[:100]}...")
-                     cl = openai.OpenAI(api_key=self.openai_api_key, timeout=90.0)
-                     r = cl.images.generate(model=self.dalle_model, prompt=image_generation_prompt_text, n=1, size=self.image_size_dalle3, quality="hd", response_format="url", style="vivid")
-                     iu = r.data[0].url; rp = getattr(r.data[0], 'revised_prompt', None)
-                     if rp: logger.info(f"DALL-E revised: {rp[:100]}...")
-                     ir = requests.get(iu, timeout=120); ir.raise_for_status()
-                     id_img = Image.open(io.BytesIO(ir.content))
-                     if id_img.mode != 'RGB': id_img = id_img.convert('RGB')
-                     id_img.save(img_fp_dalle); logger.info(f"DALL-E base image: {img_fp_dalle}")
-                     input_image_for_runway_path = img_fp_dalle
-                     temp_image_asset_info = {'path': img_fp_dalle, 'type': 'image', 'error': False, 'prompt_used': image_generation_prompt_text, 'revised_prompt': rp}
-                     break
-                 except openai.RateLimitError as e: logger.warning(f"OpenAI Rate Limit {att_n+1}: {e}. Retry..."); time.sleep(5*(att_n+1)); temp_image_asset_info['error_message']=str(e)
-                 except Exception as e: logger.error(f"DALL-E error: {e}", exc_info=True); temp_image_asset_info['error_message']=str(e); break
-             if temp_image_asset_info['error']: logger.warning(f"DALL-E failed after {att_n+1} attempts for base image.")
-
-         if temp_image_asset_info['error'] and self.USE_PEXELS:
-             pqt = scene_data.get('pexels_search_query_감독', f"{scene_data.get('emotional_beat','')} {scene_data.get('setting_description','')}")
-             pp = self._search_pexels_image(pqt, image_filename_for_base) # Use base name for pexels
-             if pp: input_image_for_runway_path = pp; temp_image_asset_info = {'path': pp, 'type': 'image', 'error': False, 'prompt_used': f"Pexels: {pqt}"}
-             else: current_em = temp_image_asset_info.get('error_message',""); temp_image_asset_info['error_message']=(current_em + " Pexels failed.").strip()
-
-         if temp_image_asset_info['error']:
-             logger.warning("Base image (DALL-E/Pexels) failed. Placeholder base image.")
-             ppt = temp_image_asset_info.get('prompt_used', image_generation_prompt_text)
-             php = self._create_placeholder_image_content(f"[Base Img Placeholder] {ppt[:100]}...", image_filename_for_base)
-             if php: input_image_for_runway_path = php; temp_image_asset_info = {'path': php, 'type': 'image', 'error': False, 'prompt_used': ppt}
-             else: current_em=temp_image_asset_info.get('error_message',""); temp_image_asset_info['error_message']=(current_em + " Base placeholder failed.").strip()
-
-         if generate_as_video_clip:
-             if self.USE_RUNWAYML and input_image_for_runway_path:
-                 video_path = self._generate_video_clip_with_runwayml(motion_prompt_text_for_video, input_image_for_runway_path, base_name, runway_target_duration) # Pass base_name
-                 if video_path and os.path.exists(video_path):
-                     return {'path': video_path, 'type': 'video', 'error': False, 'prompt_used': motion_prompt_text_for_video, 'base_image_path': input_image_for_runway_path}
-                 else: asset_info = temp_image_asset_info; asset_info['error'] = True; asset_info['error_message'] = "RunwayML video gen failed; using base image."; asset_info['type'] = 'image'; return asset_info
-             elif not self.USE_RUNWAYML: asset_info = temp_image_asset_info; asset_info['error_message'] = "RunwayML disabled; using base image."; asset_info['type'] = 'image'; return asset_info
-             else: asset_info = temp_image_asset_info; asset_info['error_message'] = (asset_info.get('error_message',"") + " Base image failed, Runway video not attempted.").strip(); asset_info['type'] = 'image'; return asset_info
-         else: return temp_image_asset_info
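Whichever branch wins (DALL-E, Pexels fallback, placeholder, or Runway), `generate_scene_asset` funnels the outcome into one dict shape: `path`, `type`, `error`, `prompt_used`, plus `error_message` on failure and `base_image_path` for video clips. A small sketch of a consumer of that contract (the example dicts are illustrative, not real outputs):

```python
def describe_asset(asset: dict) -> str:
    """Summarize one asset-info dict in the shape generate_scene_asset() returns."""
    if asset.get("error") or not asset.get("path"):
        return f"FAILED: {asset.get('error_message', 'unknown error')}"
    kind = asset.get("type", "image")
    suffix = f" (base image: {asset['base_image_path']})" if asset.get("base_image_path") else ""
    return f"{kind} ready at {asset['path']}{suffix}"

# Illustrative dicts in the two shapes seen above:
video_ok = {"path": "scene_1_asset_v1_runway_gen4_d5s.mp4", "type": "video", "error": False,
            "prompt_used": "slow push-in...", "base_image_path": "scene_1_asset_v1_base_image.png"}
failed = {"path": None, "type": "none", "error": True,
          "error_message": "RunwayML disabled; using base image."}
print(describe_asset(video_ok))
print(describe_asset(failed))
```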
-
-     def generate_narration_audio(self, ttn, ofn="narration_overall.mp3"):
-         if not self.USE_ELEVENLABS or not self.elevenlabs_client or not ttn:
-             logger.info("11L skip."); return None
-         afp=os.path.join(self.output_dir,ofn)  # was unreachable dead code after `return None` on the same line
-         try:
-             logger.info(f"11L audio (Voice:{self.elevenlabs_voice_id}): {ttn[:70]}..."); asm=None
-             if hasattr(self.elevenlabs_client,'text_to_speech') and hasattr(self.elevenlabs_client.text_to_speech,'stream'): asm=self.elevenlabs_client.text_to_speech.stream; logger.info("Using 11L .text_to_speech.stream()")
-             elif hasattr(self.elevenlabs_client,'generate_stream'): asm=self.elevenlabs_client.generate_stream; logger.info("Using 11L .generate_stream()")
-             elif hasattr(self.elevenlabs_client,'generate'):
-                 logger.info("Using 11L .generate()")
-                 vp = Voice(voice_id=str(self.elevenlabs_voice_id), settings=self.elevenlabs_voice_settings) if Voice and self.elevenlabs_voice_settings else str(self.elevenlabs_voice_id)
-                 ab = self.elevenlabs_client.generate(text=ttn, voice=vp, model="eleven_multilingual_v2")
-                 with open(afp,"wb") as f: f.write(ab)
-                 logger.info(f"11L audio (non-stream): {afp}"); return afp
-             else: logger.error("No 11L audio method."); return None
-             if asm:
-                 vps={"voice_id":str(self.elevenlabs_voice_id)}
-                 if self.elevenlabs_voice_settings:
-                     if hasattr(self.elevenlabs_voice_settings,'model_dump'): vps["voice_settings"]=self.elevenlabs_voice_settings.model_dump()
-                     elif hasattr(self.elevenlabs_voice_settings,'dict'): vps["voice_settings"]=self.elevenlabs_voice_settings.dict()
-                     else: vps["voice_settings"]=self.elevenlabs_voice_settings
-                 adi=asm(text=ttn, model_id="eleven_multilingual_v2", **vps)
-                 with open(afp,"wb") as f:
-                     for c in adi:
-                         if c: f.write(c)
-                 logger.info(f"11L audio (stream): {afp}"); return afp
-         except Exception as e: logger.error(f"11L audio error: {e}",exc_info=True); return None
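The narration method probes three possible ElevenLabs client surfaces (newer `text_to_speech.stream`, older `generate_stream`, non-streaming `generate`) because the SDK's entry points moved between releases. A dependency-free sketch of that capability-probing pattern (`FakeOldClient` is a hypothetical stand-in, not part of the SDK):

```python
def pick_tts_method(client):
    """Return (name, callable) for the first supported synthesis entry point."""
    if hasattr(client, "text_to_speech") and hasattr(client.text_to_speech, "stream"):
        return "text_to_speech.stream", client.text_to_speech.stream
    if hasattr(client, "generate_stream"):
        return "generate_stream", client.generate_stream
    if hasattr(client, "generate"):
        return "generate", client.generate
    raise AttributeError("no known ElevenLabs synthesis method on this client")

class FakeOldClient:  # stands in for an older SDK build
    def generate(self, **kwargs): return b"mp3-bytes"

name, method = pick_tts_method(FakeOldClient())
print("dispatching via", name)  # -> generate
```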
-
-     def assemble_animatic_from_assets(self, asset_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24):
-         if not asset_data_list: logger.warning("No assets for animatic."); return None
-         processed_clips = []; narration_clip = None; final_clip = None
-         logger.info(f"Assembling from {len(asset_data_list)} assets. Frame: {self.video_frame_size}.")
-
-         for i, asset_info in enumerate(asset_data_list):
-             asset_path, asset_type, scene_dur = asset_info.get('path'), asset_info.get('type'), asset_info.get('duration', 4.5)
-             scene_num, key_action = asset_info.get('scene_num', i + 1), asset_info.get('key_action', '')
-             logger.info(f"S{scene_num}: Path='{asset_path}', Type='{asset_type}', Dur='{scene_dur}'s")
-
-             if not (asset_path and os.path.exists(asset_path)): logger.warning(f"S{scene_num}: Not found '{asset_path}'. Skip."); continue
-             if scene_dur <= 0: logger.warning(f"S{scene_num}: Invalid duration ({scene_dur}s). Skip."); continue
-
-             current_scene_mvpy_clip = None
-             try:
-                 if asset_type == 'image':
-                     pil_img = Image.open(asset_path); logger.debug(f"S{scene_num}: Loaded img. Mode:{pil_img.mode}, Size:{pil_img.size}")
-                     img_rgba = pil_img.convert('RGBA') if pil_img.mode != 'RGBA' else pil_img.copy()
-                     thumb = img_rgba.copy(); rf = Image.Resampling.LANCZOS if hasattr(Image.Resampling,'LANCZOS') else Image.BILINEAR; thumb.thumbnail(self.video_frame_size,rf)
-                     cv_rgba = Image.new('RGBA',self.video_frame_size,(0,0,0,0)); xo,yo=(self.video_frame_size[0]-thumb.width)//2,(self.video_frame_size[1]-thumb.height)//2
-                     cv_rgba.paste(thumb,(xo,yo),thumb)
-                     final_rgb_pil = Image.new("RGB",self.video_frame_size,(0,0,0)); final_rgb_pil.paste(cv_rgba,mask=cv_rgba.split()[3])
-                     dbg_path = os.path.join(self.output_dir,f"debug_PRE_NUMPY_S{scene_num}.png"); final_rgb_pil.save(dbg_path); logger.info(f"DEBUG: Saved PRE_NUMPY_S{scene_num} to {dbg_path}")
-                     frame_np = np.array(final_rgb_pil,dtype=np.uint8)
-                     if not frame_np.flags['C_CONTIGUOUS']: frame_np=np.ascontiguousarray(frame_np,dtype=np.uint8)
-                     logger.debug(f"S{scene_num}: NumPy for MoviePy. Shape:{frame_np.shape}, DType:{frame_np.dtype}, C-Contig:{frame_np.flags['C_CONTIGUOUS']}")
-                     if frame_np.size==0 or frame_np.ndim!=3 or frame_np.shape[2]!=3: logger.error(f"S{scene_num}: Invalid NumPy. Skip."); continue
-                     clip_base = ImageClip(frame_np,transparent=False).set_duration(scene_dur)
-                     mvpy_dbg_path=os.path.join(self.output_dir,f"debug_MOVIEPY_FRAME_S{scene_num}.png"); clip_base.save_frame(mvpy_dbg_path,t=0.1); logger.info(f"DEBUG: Saved MOVIEPY_FRAME_S{scene_num} to {mvpy_dbg_path}")
-                     clip_fx = clip_base
-                     try: es=random.uniform(1.03,1.08); clip_fx=clip_base.fx(vfx.resize,lambda t:1+(es-1)*(t/scene_dur) if scene_dur>0 else 1).set_position('center')
-                     except Exception as e: logger.error(f"S{scene_num} Ken Burns error: {e}",exc_info=False)
-                     current_scene_mvpy_clip = clip_fx
-                 elif asset_type == 'video':
-                     src_clip=None
-                     try:
-                         src_clip=VideoFileClip(asset_path,target_resolution=(self.video_frame_size[1],self.video_frame_size[0])if self.video_frame_size else None, audio=False)
-                         tmp_clip=src_clip
-                         if src_clip.duration!=scene_dur:
-                             if src_clip.duration>scene_dur:tmp_clip=src_clip.subclip(0,scene_dur)
-                             else:
-                                 if scene_dur/src_clip.duration > 1.5 and src_clip.duration>0.1:tmp_clip=src_clip.loop(duration=scene_dur)
-                                 else:tmp_clip=src_clip.set_duration(src_clip.duration);logger.info(f"S{scene_num} Video clip ({src_clip.duration:.2f}s) shorter than target ({scene_dur:.2f}s).")
-                         current_scene_mvpy_clip=tmp_clip.set_duration(scene_dur)
-                         if current_scene_mvpy_clip.size!=list(self.video_frame_size):current_scene_mvpy_clip=current_scene_mvpy_clip.resize(self.video_frame_size)
-                     except Exception as e:logger.error(f"S{scene_num} Video load error '{asset_path}':{e}",exc_info=True);continue
-                     finally:
-                         if src_clip and src_clip is not current_scene_mvpy_clip and hasattr(src_clip,'close'):src_clip.close()
-                 else: logger.warning(f"S{scene_num} Unknown asset type '{asset_type}'. Skip."); continue
-                 if current_scene_mvpy_clip and key_action:
  try:
-                         to_dur=min(current_scene_mvpy_clip.duration-0.5,current_scene_mvpy_clip.duration*0.8)if current_scene_mvpy_clip.duration>0.5 else current_scene_mvpy_clip.duration
-                         to_start=0.25
-                         txt_c=TextClip(f"Scene {scene_num}\n{key_action}",fontsize=self.video_overlay_font_size,color=self.video_overlay_font_color,font=self.video_overlay_font,bg_color='rgba(10,10,20,0.7)',method='caption',align='West',size=(self.video_frame_size[0]*0.9,None),kerning=-1,stroke_color='black',stroke_width=1.5).set_duration(to_dur).set_start(to_start).set_position(('center',0.92),relative=True)
-                         current_scene_mvpy_clip=CompositeVideoClip([current_scene_mvpy_clip,txt_c],size=self.video_frame_size,use_bgclip=True)
-                     except Exception as e:logger.error(f"S{scene_num} TextClip error:{e}. No text.",exc_info=True)
-                 if current_scene_mvpy_clip:processed_clips.append(current_scene_mvpy_clip);logger.info(f"S{scene_num} Processed. Dur:{current_scene_mvpy_clip.duration:.2f}s.")
-             except Exception as e:logger.error(f"MAJOR Error S{scene_num} ({asset_path}):{e}",exc_info=True)
-             finally:
-                 if current_scene_mvpy_clip and hasattr(current_scene_mvpy_clip,'close'):
-                     try: current_scene_mvpy_clip.close()
-                     except: pass
-
-         if not processed_clips:logger.warning("No clips processed. Abort.");return None
-         td=0.75
  try:
-             logger.info(f"Concatenating {len(processed_clips)} clips.")
-             if len(processed_clips)>1:final_clip=concatenate_videoclips(processed_clips,padding=-td if td>0 else 0,method="compose")
-             elif processed_clips:final_clip=processed_clips[0]
-             if not final_clip:logger.error("Concatenation failed.");return None
-             logger.info(f"Concatenated dur:{final_clip.duration:.2f}s")
-             if td>0 and final_clip.duration>0:
-                 if final_clip.duration>td*2:final_clip=final_clip.fx(vfx.fadein,td).fx(vfx.fadeout,td)
-                 else:final_clip=final_clip.fx(vfx.fadein,min(td,final_clip.duration/2.0))
-             if overall_narration_path and os.path.exists(overall_narration_path) and final_clip.duration>0:
-                 try:narration_clip=AudioFileClip(overall_narration_path);final_clip=final_clip.set_audio(narration_clip);logger.info("Narration added.")
-                 except Exception as e:logger.error(f"Narration add error:{e}",exc_info=True)
-             elif final_clip.duration<=0:logger.warning("Video no duration. No audio.")
-             if final_clip and final_clip.duration>0:
-                 op=os.path.join(self.output_dir,output_filename);logger.info(f"Writing video:{op} (Dur:{final_clip.duration:.2f}s)")
-                 final_clip.write_videofile(op,fps=fps,codec='libx264',preset='medium',audio_codec='aac',temp_audiofile=os.path.join(self.output_dir,f'temp-audio-{os.urandom(4).hex()}.m4a'),remove_temp=True,threads=os.cpu_count()or 2,logger='bar',bitrate="5000k",ffmpeg_params=["-pix_fmt", "yuv420p"])
-                 logger.info(f"Video created:{op}");return op
-             else:logger.error("Final clip invalid. No write.");return None
-         except Exception as e:logger.error(f"Video write error:{e}",exc_info=True);return None
-         finally:
-             logger.debug("Closing all MoviePy clips in `assemble_animatic_from_assets` finally block.")
-             clips_to_close = processed_clips + ([narration_clip] if narration_clip else []) + ([final_clip] if final_clip else [])
-             for clip_obj in clips_to_close:
-                 if clip_obj and hasattr(clip_obj, 'close'):
-                     try: clip_obj.close()
-                     except Exception as e_close: logger.warning(f"Ignoring error while closing a clip: {e_close}")
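One detail worth isolating from the assembly code above: the Ken Burns effect is a linear zoom from scale 1.0 to a random end scale in [1.03, 1.08] over the clip's duration, passed to `vfx.resize` as a function of time. A dependency-free sketch of that scale curve (`ken_burns_scale` is an illustrative name, not from the repo):

```python
import random
from typing import Optional

def ken_burns_scale(duration_s: float, end_scale: Optional[float] = None):
    """Return f(t): the zoom factor at time t, as fed to vfx.resize above."""
    es = end_scale if end_scale is not None else random.uniform(1.03, 1.08)
    return lambda t: 1 + (es - 1) * (t / duration_s) if duration_s > 0 else 1

scale = ken_burns_scale(4.5, end_scale=1.06)
for t in (0.0, 2.25, 4.5):
    print(f"t={t:>4}s -> scale {scale(t):.3f}")  # 1.000, 1.030, 1.060
```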
 
+ # app.py
+ import streamlit as st
+ from core.gemini_handler import GeminiHandler
+ from core.visual_engine import VisualEngine
+ from core.prompt_engineering import (
+     create_cinematic_treatment_prompt,
+     construct_dalle_prompt,
+     construct_text_to_video_prompt_for_gen4, # <<< USE THIS FOR RUNWAY
+     create_narration_script_prompt_enhanced,
+     create_scene_regeneration_prompt,
+     create_visual_regeneration_prompt # This is for DALL-E image prompt refinement
+ )
  import os
  import logging
 
+ # --- Configuration & Initialization ---
+ st.set_page_config(page_title="CineGen AI Ultra+", layout="wide", initial_sidebar_state="expanded")
+ # Configure logging to be more verbose for debugging if needed
+ logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ logger = logging.getLogger(__name__) # Get logger for this module
 
+ # --- Global Definitions ---
+ SHOT_TYPES_OPTIONS = ["Director's Choice", "Establishing Shot", "Long Shot", "Full Shot", "Medium Long Shot (Cowboy)", "Medium Shot", "Medium Close-up", "Close-up", "Extreme Close-up", "Point of View (POV)", "Over the Shoulder", "Tracking Shot", "Dolly Zoom", "Crane Shot", "Aerial Shot", "Static Shot", "Dutch Angle", "Whip Pan"]
+ DEFAULT_SCENE_DURATION_SECS = 5
+ DEFAULT_SHOT_TYPE = "Director's Choice"
+ ASSET_TYPE_OPTIONS = ["Auto (Director's Choice)", "Image", "Video Clip"]
+
+
+ # --- Global State Variables & API Key Setup ---
+ def load_api_key(key_name_streamlit, key_name_env, service_name):
+     # (Keep this function as it was - robust)
+     key = None; secrets_available = hasattr(st, 'secrets')
+     try:
+         if secrets_available and key_name_streamlit in st.secrets:
+             key = st.secrets[key_name_streamlit]
+             if key: logger.info(f"{service_name} API Key found in Streamlit secrets.")
+     except Exception as e: logger.warning(f"Could not access st.secrets for {key_name_streamlit}: {e}")
+     if not key and key_name_env in os.environ:
+         key = os.environ[key_name_env]
+         if key: logger.info(f"{service_name} API Key found in environment variable.")
+     if not key: logger.warning(f"{service_name} API Key NOT FOUND. Related features may be disabled or use fallbacks.")
+     return key
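The lookup above checks Streamlit secrets first and falls back to the environment, warning when neither is set. A minimal sketch of the same precedence without Streamlit (`resolve_key` and the plain dict stand in for `load_api_key` and `st.secrets`):

```python
import os
from typing import Optional

def resolve_key(name: str, secrets: dict) -> Optional[str]:
    """Secrets-store value wins; fall back to the environment; else None."""
    key = secrets.get(name) or os.environ.get(name)
    if not key:
        print(f"{name} NOT FOUND; related features disabled or use fallbacks.")
    return key

fake_secrets = {"GEMINI_API_KEY": "from-secrets"}
os.environ["PEXELS_API_KEY"] = "from-env"
print(resolve_key("GEMINI_API_KEY", fake_secrets))  # -> from-secrets
print(resolve_key("PEXELS_API_KEY", fake_secrets))  # -> from-env
print(resolve_key("RUNWAY_API_KEY", fake_secrets))  # -> None (with a warning)
```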
+
+ if 'services_initialized' not in st.session_state:
+     logger.info("Initializing services and API keys for the first time this session...")
+     st.session_state.GEMINI_API_KEY = load_api_key("GEMINI_API_KEY", "GEMINI_API_KEY", "Gemini")
+     st.session_state.OPENAI_API_KEY = load_api_key("OPENAI_API_KEY", "OPENAI_API_KEY", "OpenAI/DALL-E")
+     st.session_state.ELEVENLABS_API_KEY = load_api_key("ELEVENLABS_API_KEY", "ELEVENLABS_API_KEY", "ElevenLabs")
+     st.session_state.PEXELS_API_KEY = load_api_key("PEXELS_API_KEY", "PEXELS_API_KEY", "Pexels")
+     st.session_state.ELEVENLABS_VOICE_ID_CONFIG = load_api_key("ELEVENLABS_VOICE_ID", "ELEVENLABS_VOICE_ID", "ElevenLabs Voice ID")
+     st.session_state.RUNWAY_API_KEY = load_api_key("RUNWAY_API_KEY", "RUNWAY_API_KEY", "RunwayML")
+
+     if not st.session_state.GEMINI_API_KEY: st.error("CRITICAL: Gemini API Key missing!"); logger.critical("Gemini API Key missing."); st.stop()
+
+     try: st.session_state.gemini_handler = GeminiHandler(api_key=st.session_state.GEMINI_API_KEY); logger.info("GeminiHandler initialized.")
+     except Exception as e: st.error(f"Failed to init GeminiHandler: {e}"); logger.critical(f"GeminiHandler init failed: {e}", exc_info=True); st.stop()
+
+     try:
+         default_voice_id_el = "Rachel" # Fallback
+         configured_voice_id_el = st.session_state.ELEVENLABS_VOICE_ID_CONFIG or default_voice_id_el
+         st.session_state.visual_engine = VisualEngine(output_dir="temp_cinegen_media", default_elevenlabs_voice_id=configured_voice_id_el)
+         st.session_state.visual_engine.set_openai_api_key(st.session_state.OPENAI_API_KEY)
+         st.session_state.visual_engine.set_elevenlabs_api_key(st.session_state.ELEVENLABS_API_KEY, voice_id_from_secret=st.session_state.ELEVENLABS_VOICE_ID_CONFIG)
+         st.session_state.visual_engine.set_pexels_api_key(st.session_state.PEXELS_API_KEY)
+         st.session_state.visual_engine.set_runway_api_key(st.session_state.RUNWAY_API_KEY) # Pass Runway key
+         logger.info("VisualEngine initialized and API keys set.")
+     except Exception as e: st.error(f"Failed to init VisualEngine: {e}"); logger.critical(f"VisualEngine init/key setting failed: {e}", exc_info=True); st.warning("VisualEngine critical setup issue.")
+     st.session_state.services_initialized = True; logger.info("Service initialization complete.")
+
+ # Initialize other session state variables
+ for key_ss, default_val_ss in [ # Renamed loop vars
+     ('story_treatment_scenes', []), ('scene_generation_prompts', []), ('generated_scene_assets_info', []), # Stores full asset info dicts
+     ('video_path', None), ('character_definitions', {}), ('global_style_additions', ""),
+     ('overall_narration_audio_path', None), ('narration_script_display', "")
+ ]:
+     if key_ss not in st.session_state: st.session_state[key_ss] = default_val_ss
+
+ def initialize_new_project_state(): # Renamed
+     st.session_state.story_treatment_scenes = []
+     st.session_state.scene_generation_prompts = [] # Stores the prompt used for DALL-E or Runway
+     st.session_state.generated_scene_assets_info = [] # Stores dicts {'path':..., 'type':..., 'error':..., 'prompt_used':...}
+     st.session_state.video_path, st.session_state.overall_narration_audio_path, st.session_state.narration_script_display = None, None, ""
+     logger.info("New project state initialized.")
+
+ def generate_asset_for_scene_wrapper(scene_idx, scene_dict_data, version_num=1, user_selected_asset_type_override="Auto (Director's Choice)"): # Renamed
+     # Determine if video clip is desired based on user override or Gemini's suggestion
+     generate_as_video_clip_final = False
+     gemini_suggested_asset_type = scene_dict_data.get('suggested_asset_type_감독', 'image').lower()
+
+     if user_selected_asset_type_override == "Image":
+         generate_as_video_clip_final = False
+     elif user_selected_asset_type_override == "Video Clip":
+         generate_as_video_clip_final = True
+     elif user_selected_asset_type_override == "Auto (Director's Choice)": # Default
+         generate_as_video_clip_final = (gemini_suggested_asset_type == "video_clip")
+
+     # Prompt for base image generation (DALL-E or Pexels fallback)
+     image_gen_prompt_text = construct_dalle_prompt(scene_dict_data, st.session_state.character_definitions, st.session_state.global_style_additions)
+
+     # Prompt for video motion (Runway Gen-4) - only if generating video
+     motion_gen_prompt_text = ""
+     if generate_as_video_clip_final:
+         motion_gen_prompt_text = construct_text_to_video_prompt_for_gen4(scene_dict_data, st.session_state.global_style_additions)
+         if not motion_gen_prompt_text: # Fallback if specific motion prompt is empty
+             logger.warning(f"S{scene_dict_data.get('scene_number', scene_idx+1)}: Motion prompt empty, using generic for Runway.")
+             motion_gen_prompt_text = scene_dict_data.get('video_clip_motion_description_감독', "subtle ambient motion")
+
+     if not image_gen_prompt_text: # Base image prompt is always needed
+         logger.error(f"Base image prompt construction failed for S{scene_dict_data.get('scene_number', scene_idx+1)}"); return False
+
+     # Ensure session state lists are adequate
+     while len(st.session_state.scene_generation_prompts) <= scene_idx: st.session_state.scene_generation_prompts.append("")
+     while len(st.session_state.generated_scene_assets_info) <= scene_idx: st.session_state.generated_scene_assets_info.append(None)
+
+     # Store the relevant prompt (DALL-E for image, motion for video).
+     # The generate_scene_asset method will return the actual prompt it used if different internally.
+     st.session_state.scene_generation_prompts[scene_idx] = motion_gen_prompt_text if generate_as_video_clip_final else image_gen_prompt_text
+
+     filename_base_for_asset = f"scene_{scene_dict_data.get('scene_number', scene_idx+1)}_asset_v{version_num}" # Renamed
+     runway_dur_for_scene = scene_dict_data.get('video_clip_duration_estimate_secs_감독', scene_dict_data.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS))
+     if runway_dur_for_scene <= 0: runway_dur_for_scene = DEFAULT_SCENE_DURATION_SECS
+
+     asset_result_dict = st.session_state.visual_engine.generate_scene_asset(
+         image_generation_prompt_text=image_gen_prompt_text, # For base DALL-E/Pexels image
+         motion_prompt_text_for_video=motion_gen_prompt_text, # For Runway motion
+         scene_data=scene_dict_data,
+         scene_identifier_filename_base=filename_base_for_asset,
+         generate_as_video_clip=generate_as_video_clip_final,
+         runway_target_duration=runway_dur_for_scene
+     )
+
+     st.session_state.generated_scene_assets_info[scene_idx] = asset_result_dict
+     # Update the stored prompt with what was actually used by the engine, if available from result
+     if asset_result_dict and asset_result_dict.get('prompt_used'):
+         st.session_state.scene_generation_prompts[scene_idx] = asset_result_dict['prompt_used']
+
+     if asset_result_dict and not asset_result_dict['error'] and asset_result_dict.get('path') and os.path.exists(asset_result_dict['path']):
+         logger.info(f"Asset ({asset_result_dict.get('type')}) generated for S{scene_dict_data.get('scene_number', scene_idx+1)}: {os.path.basename(asset_result_dict['path'])}")
+         return True
+     else:
+         err_msg_asset = asset_result_dict.get('error_message', 'Unknown error') if asset_result_dict else 'Asset result is None'
+         logger.warning(f"Asset gen FAILED for S{scene_dict_data.get('scene_number', scene_idx+1)}. Type attempted: {'Video' if generate_as_video_clip_final else 'Image'}. Error: {err_msg_asset}")
+         # Store a more detailed failure state if not already
+         if not st.session_state.generated_scene_assets_info[scene_idx] or not st.session_state.generated_scene_assets_info[scene_idx]['error']:
+             st.session_state.generated_scene_assets_info[scene_idx] = {'path': None, 'type': 'none', 'error': True, 'error_message': err_msg_asset, 'prompt_used': st.session_state.scene_generation_prompts[scene_idx]}
+         return False
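The first branch of the wrapper above resolves the asset type as a three-way switch: an explicit "Image" or "Video Clip" override wins, while "Auto (Director's Choice)" defers to Gemini's `suggested_asset_type_감독` field. The same decision, reduced to a sketch (`wants_video` is an illustrative name):

```python
def wants_video(user_override: str, gemini_suggestion: str) -> bool:
    """Mirror of the asset-type resolution in generate_asset_for_scene_wrapper()."""
    if user_override == "Image":
        return False
    if user_override == "Video Clip":
        return True
    # "Auto (Director's Choice)": follow Gemini's per-scene suggestion
    return gemini_suggestion.lower() == "video_clip"

for override in ("Image", "Video Clip", "Auto (Director's Choice)"):
    print(override, "->", "video" if wants_video(override, "video_clip") else "image")
```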
+
+ # --- UI Sidebar ---
+ with st.sidebar:
+     # ... (Sidebar UI code as before, no changes needed for this fix) ...
+     st.title("🎬 CineGen AI Ultra+")
+     st.markdown("### Creative Seed")
+     user_idea = st.text_area("Core Story Idea / Theme:", "A lone wanderer searches for a mythical oasis in a vast, post-apocalyptic desert, haunted by mirages and mechanical scavengers.", height=120, key="user_idea_main_v5")
+     genre = st.selectbox("Primary Genre:", ["Cyberpunk", "Sci-Fi", "Fantasy", "Noir", "Thriller", "Western", "Post-Apocalyptic", "Historical Drama", "Surreal"], index=6, key="genre_main_v5")
+     mood = st.selectbox("Overall Mood:", ["Hopeful yet Desperate", "Mysterious & Eerie", "Gritty & Tense", "Epic & Awe-Inspiring", "Melancholy & Reflective", "Whimsical & Lighthearted"], index=0, key="mood_main_v5")
+     num_scenes = st.slider("Number of Key Scenes:", 1, 10, 2, key="num_scenes_main_v5")
+     creative_guidance_options = {"Standard Director": "standard", "Artistic Visionary": "more_artistic", "Experimental Storyteller": "experimental_narrative"}
+     selected_creative_guidance_key = st.selectbox("AI Creative Director Style:", options=list(creative_guidance_options.keys()), key="creative_guidance_select_v5")
+     actual_creative_guidance = creative_guidance_options[selected_creative_guidance_key]
+
+     if st.button("🌌 Generate Cinematic Treatment", type="primary", key="generate_treatment_btn_v5", use_container_width=True):
+         initialize_new_project_state() # Use renamed function
+         if not user_idea.strip(): st.warning("Please provide a story idea.")
+         else:
+             with st.status("AI Director is envisioning your masterpiece...", expanded=True) as status_op: # Renamed
  try:
+                     status_op.write("Phase 1: Gemini crafting cinematic treatment... 📜"); logger.info("Phase 1: Cinematic Treatment Gen.")
+                     treatment_gen_prompt = create_cinematic_treatment_prompt(user_idea, genre, mood, num_scenes, actual_creative_guidance) # Renamed
+                     raw_treatment_result = st.session_state.gemini_handler.generate_story_breakdown(treatment_gen_prompt) # Renamed
+                     if not isinstance(raw_treatment_result, list) or not raw_treatment_result: raise ValueError("Gemini returned invalid scene list format.")
+
+                     processed_scene_list = [] # Renamed
+                     for scene_from_gemini in raw_treatment_result: # Renamed
+                         scene_from_gemini['user_shot_type'] = scene_from_gemini.get('PROACTIVE_camera_work_감독', DEFAULT_SHOT_TYPE)
+                         # Use Gemini's video duration estimate if available for video clips, else default scene duration
+                         gemini_dur_est = scene_from_gemini.get('video_clip_duration_estimate_secs_감독', 0)
+                         scene_from_gemini['user_scene_duration_secs'] = gemini_dur_est if gemini_dur_est > 0 else DEFAULT_SCENE_DURATION_SECS
+                         scene_from_gemini['user_selected_asset_type'] = "Auto (Director's Choice)" # UI default
+                         processed_scene_list.append(scene_from_gemini)
+                     st.session_state.story_treatment_scenes = processed_scene_list
+
+                     num_generated_scenes = len(st.session_state.story_treatment_scenes) # Renamed
+                     st.session_state.scene_generation_prompts = [""]*num_generated_scenes
+                     st.session_state.generated_scene_assets_info = [None]*num_generated_scenes
+                     logger.info(f"Phase 1 complete. {num_generated_scenes} scenes."); status_op.update(label="Treatment complete! ✅ Generating visuals...", state="running")
+
+                     status_op.write("Phase 2: Creating visual assets (Image/Video)... 🖼️🎬"); logger.info("Phase 2: Visual Asset Gen.")
+                     successful_asset_count = 0 # Renamed
+                     for i_scene, scene_data_item in enumerate(st.session_state.story_treatment_scenes): # Renamed
+                         scene_num_display = scene_data_item.get('scene_number', i_scene+1) # Renamed
+                         status_op.write(f" Asset for Scene {scene_num_display}..."); logger.info(f" Processing asset for Scene {scene_num_display}.")
+                         if generate_asset_for_scene_wrapper(i_scene, scene_data_item, version_num=1): # Pass default 'Auto' for initial gen
+                             successful_asset_count += 1
+
+                     status_label_phase2 = "Visual assets ready! " # Renamed
+                     next_op_state = "running" # Renamed
+                     if successful_asset_count == 0 and num_generated_scenes > 0:
+                         logger.error("Asset gen failed for all scenes."); status_label_phase2 = "Asset gen FAILED for all scenes."; next_op_state="error"
+                         status_op.update(label=status_label_phase2, state=next_op_state, expanded=True); st.stop()
+                     elif successful_asset_count < num_generated_scenes:
+                         logger.warning(f"Assets partially generated ({successful_asset_count}/{num_generated_scenes})."); status_label_phase2 = f"Assets partially generated ({successful_asset_count}/{num_generated_scenes}). "
+                     status_op.update(label=f"{status_label_phase2}Generating narration script...", state=next_op_state)
+                     if next_op_state == "error": st.stop()
+
+                     status_op.write("Phase 3: Generating narration script..."); logger.info("Phase 3: Narration Script Gen.")
+                     voice_style_for_narration_prompt = st.session_state.get("selected_voice_style_for_generation", "cinematic_trailer") # Renamed
+                     narration_gen_prompt = create_narration_script_prompt_enhanced(st.session_state.story_treatment_scenes, mood, genre, voice_style_for_narration_prompt) # Renamed
+                     st.session_state.narration_script_display = st.session_state.gemini_handler.generate_image_prompt(narration_gen_prompt) # generate_image_prompt returns string
+                     logger.info("Narration script generated."); status_op.update(label="Narration script ready! Synthesizing voice...", state="running")
+
+                     status_op.write("Phase 4: Synthesizing voice (ElevenLabs)... 🔊"); logger.info("Phase 4: Voice Synthesis.")
+                     st.session_state.overall_narration_audio_path = st.session_state.visual_engine.generate_narration_audio(st.session_state.narration_script_display)
+
+                     final_status_label = "All components ready! Storyboard below. 🚀" # Renamed
+                     final_op_state = "complete" # Renamed
+                     if not st.session_state.overall_narration_audio_path:
+                         final_status_label = f"{status_label_phase2}Storyboard ready (Voiceover skipped or failed)."
+                         logger.warning("Voiceover generation was skipped or failed.")
+                     else: logger.info("Voiceover generated successfully.")
+                     status_op.update(label=final_status_label, state=final_op_state, expanded=False)
+
+                 except ValueError as ve_err: logger.error(f"ValueError in main generation: {ve_err}", exc_info=True); status_op.update(label=f"Input or Gemini response error: {ve_err}", state="error", expanded=True) # Renamed
+                 except Exception as e_unhandled: logger.error(f"Unhandled Exception in main generation: {e_unhandled}", exc_info=True); status_op.update(label=f"An unexpected error: {e_unhandled}", state="error", expanded=True) # Renamed
+
+     # --- Sidebar Fine-Tuning Options (Characters, Global Style, Voice) ---
+     # (Keep these sections as they were in the previous correct version)
+     with st.expander("Define Characters", expanded=False):
+         char_name_input = st.text_input("Character Name", key="char_name_adv_ultra_v5_sb"); char_desc_input = st.text_area("Visual Description", key="char_desc_adv_ultra_v5_sb", height=100, placeholder="e.g., Jax: rugged male astronaut...")
+         if st.button("Save Character", key="add_char_adv_ultra_v5_sb"):
+             if char_name_input and char_desc_input: st.session_state.character_definitions[char_name_input.strip().lower()] = char_desc_input.strip(); st.success(f"Char '{char_name_input.strip()}' saved.")
+             else: st.warning("Name and description needed.")
+         if st.session_state.character_definitions: st.caption("Current Characters:"); [st.markdown(f"**{k.title()}:** _{v}_") for k,v in st.session_state.character_definitions.items()]
+
+     with st.expander("Global Style Overrides", expanded=False):
+         style_presets_dict = { "Default (Director's Choice)": "", "Hyper-Realistic Gritty Noir": "hyper-realistic gritty neo-noir...", "Surreal Dreamscape Fantasy": "surreal dreamscape, epic fantasy...", "Vintage Analog Sci-Fi": "70s/80s analog sci-fi..."} # Truncated for brevity
+         selected_style_preset_key = st.selectbox("Base Style Preset:", options=list(style_presets_dict.keys()), key="style_preset_adv_ultra_v5_sb")
+         custom_style_keywords_input = st.text_area("Additional Custom Style Keywords:", key="custom_style_adv_ultra_v5_sb", height=80, placeholder="e.g., 'Dutch angle'")
+         current_global_style = st.session_state.global_style_additions
+         if st.button("Apply Global Styles", key="apply_styles_adv_ultra_v5_sb"):
+             final_style_str = style_presets_dict[selected_style_preset_key]
+             if custom_style_keywords_input.strip(): final_style_str = f"{final_style_str}, {custom_style_keywords_input.strip()}" if final_style_str else custom_style_keywords_input.strip()
+             st.session_state.global_style_additions = final_style_str.strip(); current_global_style = final_style_str.strip() # Update local var for immediate display
+             if current_global_style: st.success("Global styles applied!")
+             else: st.info("Global style additions cleared.")
+         if current_global_style: st.caption(f"Active global styles: \"{current_global_style}\"")
+
+     with st.expander("Voice & Narration Style", expanded=False):
+         engine_default_voice = "Rachel"
+         if hasattr(st.session_state, 'visual_engine') and st.session_state.visual_engine: engine_default_voice = st.session_state.visual_engine.elevenlabs_voice_id
+         user_voice_id_input = st.text_input("ElevenLabs Voice ID (override):", value=engine_default_voice, key="el_voice_id_override_v5_sb", help=f"Defaulting to '{engine_default_voice}'.")
+         narration_prompt_styles_dict = {"Cinematic Trailer": "cinematic_trailer", "Neutral Documentary": "documentary_neutral", "Character Introspection": "introspective_character"}
+         selected_narration_style_key = st.selectbox("Narration Script Style:", list(narration_prompt_styles_dict.keys()), key="narr_style_sel_v5_sb", index=0)
+         if st.button("Set Narrator Voice & Style", key="set_voice_btn_ultra_v5_sb"):
+             final_voice_id_to_use_el = user_voice_id_input.strip() or st.session_state.get("ELEVENLABS_VOICE_ID_CONFIG", "Rachel") # Fallback
+             if hasattr(st.session_state, 'visual_engine'): st.session_state.visual_engine.elevenlabs_voice_id = final_voice_id_to_use_el
+             st.session_state.selected_voice_style_for_generation = narration_prompt_styles_dict[selected_narration_style_key]
+             st.success(f"Narrator Voice ID: {final_voice_id_to_use_el}. Script Style: {selected_narration_style_key}")
+             logger.info(f"User updated 11L Voice ID: {final_voice_id_to_use_el}, Script Style: {selected_narration_style_key}")
+
+
+ # --- Main Content Area ---
264
+ st.header("🎬 Cinematic Storyboard & Treatment")
265
+ if st.session_state.narration_script_display:
266
+ with st.expander("πŸ“œ View Full Narration Script", expanded=False): st.markdown(f"> _{st.session_state.narration_script_display}_")
267
+
268
+ if not st.session_state.story_treatment_scenes: st.info("Use the sidebar to generate your cinematic treatment.")
269
+ else:
270
+ for i_main_loop, scene_content_item in enumerate(st.session_state.story_treatment_scenes): # Renamed
271
+ scene_num_val = scene_content_item.get('scene_number', i_main_loop + 1) # Renamed
272
+ scene_title_val = scene_content_item.get('scene_title', 'Untitled Scene') # Renamed
273
+ # Ensure unique keys for widgets within the loop
274
+ key_base_for_scene = f"s{scene_num_val}_{''.join(filter(str.isalnum, scene_title_val[:10]))}_main_{i_main_loop}" # Renamed
275
+
276
+ if "director_note" in scene_content_item and scene_content_item['director_note']: st.info(f"🎬 Director Note S{scene_num_val}: {scene_content_item['director_note']}")
277
+ st.subheader(f"SCENE {scene_num_val}: {scene_title_val.upper()}"); col_treatment, col_visual = st.columns([0.45, 0.55]) # Renamed
278
+
279
+ with col_treatment: # Treatment and Controls Column
280
+ with st.expander("πŸ“ Scene Treatment & Controls", expanded=True):
281
+ # ... (Display textual scene details - beat, setting, chars, etc. - as before) ...
282
+ st.markdown(f"**Beat:** {scene_content_item.get('emotional_beat', 'N/A')}"); st.markdown(f"**Setting:** {scene_content_item.get('setting_description', 'N/A')}"); st.markdown(f"**Chars:** {', '.join(scene_content_item.get('characters_involved', ['N/A']))}"); st.markdown(f"**Focus Moment:** _{scene_content_item.get('character_focus_moment', 'N/A')}_"); st.markdown(f"**Plot Beat:** {scene_content_item.get('key_plot_beat', 'N/A')}"); st.markdown(f"**Dialogue Hook:** `\"{scene_content_item.get('suggested_dialogue_hook', '...')}\"`"); st.markdown("---"); st.markdown(f"**Dir. Visual Style:** _{scene_content_item.get('PROACTIVE_visual_style_감독', 'N/A')}_"); st.markdown(f"**Dir. Camera:** _{scene_content_item.get('PROACTIVE_camera_work_감독', 'N/A')}_"); st.markdown(f"**Dir. Sound:** _{scene_content_item.get('PROACTIVE_sound_design_감독', 'N/A')}_"); st.markdown("---")
283
+
284
+ st.markdown("##### Shot, Pacing & Asset Controls")
285
+ # User Shot Type (Camera Angle)
286
+ current_ui_shot_type = st.session_state.story_treatment_scenes[i_main_loop].get('user_shot_type', DEFAULT_SHOT_TYPE) # Renamed
287
+ try: shot_type_idx_val = SHOT_TYPES_OPTIONS.index(current_ui_shot_type) # Renamed
288
+ except ValueError: shot_type_idx_val = SHOT_TYPES_OPTIONS.index(DEFAULT_SHOT_TYPE)
289
+ new_ui_shot_type = st.selectbox("Dominant Shot Type:", options=SHOT_TYPES_OPTIONS, index=shot_type_idx_val, key=f"shot_type_widget_{key_base_for_scene}") # Renamed
290
+ if new_ui_shot_type != current_ui_shot_type: st.session_state.story_treatment_scenes[i_main_loop]['user_shot_type'] = new_ui_shot_type
291
+
292
+ # User Scene Duration
293
+ current_ui_duration = st.session_state.story_treatment_scenes[i_main_loop].get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS) # Renamed
294
+ new_ui_duration = st.number_input("Scene Duration (seconds):", min_value=1, max_value=300, value=current_ui_duration, step=1, key=f"duration_widget_{key_base_for_scene}") # Renamed
295
+ if new_ui_duration != current_ui_duration: st.session_state.story_treatment_scenes[i_main_loop]['user_scene_duration_secs'] = new_ui_duration
296
+
297
+ # User Asset Type Selection
298
+ current_ui_asset_type = st.session_state.story_treatment_scenes[i_main_loop].get('user_selected_asset_type', "Auto (Director's Choice)") # Renamed
299
+ try: asset_type_idx_val = ASSET_TYPE_OPTIONS.index(current_ui_asset_type) # Renamed
300
+ except ValueError: asset_type_idx_val = 0
301
+ new_ui_asset_type = st.selectbox("Asset Type Override:", ASSET_TYPE_OPTIONS, index=asset_type_idx_val, key=f"asset_type_sel_{key_base_for_scene}", help="Choose 'Image' or 'Video Clip'. 'Auto' uses Gemini's suggestion.") # Renamed
302
+ if new_ui_asset_type != current_ui_asset_type: st.session_state.story_treatment_scenes[i_main_loop]['user_selected_asset_type'] = new_ui_asset_type
303
+ st.markdown("---")
304
+
305
+ # Display generated prompt for the asset
306
+ prompt_for_current_asset = st.session_state.scene_generation_prompts[i_main_loop] if i_main_loop < len(st.session_state.scene_generation_prompts) else None # Renamed
307
+ if prompt_for_current_asset:
308
+ with st.popover("πŸ‘οΈ View Asset Generation Prompt"):
309
+ st.markdown(f"**Prompt used for current asset:**"); st.code(prompt_for_current_asset, language='text')
310
+
311
+ pexels_query_val = scene_content_item.get('pexels_search_query_감독', None) # Renamed
312
+ if pexels_query_val: st.caption(f"Pexels Fallback Query: `{pexels_query_val}`")
313
+
+    with col_visual:  # Visuals Column
+        asset_info_for_scene = st.session_state.generated_scene_assets_info[i_main_loop] if i_main_loop < len(st.session_state.generated_scene_assets_info) else None
+        if asset_info_for_scene and not asset_info_for_scene.get('error') and asset_info_for_scene.get('path') and os.path.exists(asset_info_for_scene['path']):
+            path_to_asset_file = asset_info_for_scene['path']
+            type_of_asset_file = asset_info_for_scene.get('type', 'image')
+            if type_of_asset_file == 'image': st.image(path_to_asset_file, caption=f"Scene {scene_num_val} ({type_of_asset_file}): {scene_title_val}")
+            elif type_of_asset_file == 'video':
                 try:
+                    with open(path_to_asset_file, 'rb') as vf_read: video_bytes_data = vf_read.read()
+                    st.video(video_bytes_data, format="video/mp4", start_time=0); st.caption(f"Scene {scene_num_val} ({type_of_asset_file}): {scene_title_val}")
+                except Exception as e_vid_display: st.error(f"Error displaying video {path_to_asset_file}: {e_vid_display}"); logger.error(f"Error displaying video: {e_vid_display}", exc_info=True)
+            else: st.warning(f"Unknown asset type '{type_of_asset_file}' for Scene {scene_num_val}.")
+        else:
+            if st.session_state.story_treatment_scenes:
+                error_message_display = asset_info_for_scene.get('error_message', 'Visual pending or failed.') if asset_info_for_scene else 'Visual pending or failed.'
+                st.caption(error_message_display)
330
+
+    # --- Popovers for Editing Scene Treatment & Visual Prompt ---
+    with st.popover(f"✏️ Edit S{scene_num_val} Treatment"):
+        feedback_for_treatment = st.text_area("Changes to treatment:", key=f"treat_fb_{key_base_for_scene}", height=150)
+        if st.button(f"πŸ”„ Update S{scene_num_val} Treatment", key=f"regen_treat_btn_{key_base_for_scene}"):
+            if feedback_for_treatment:
+                with st.status(f"Updating S{scene_num_val} Treatment & Asset...", expanded=True) as status_treatment_regen:
+                    user_shot_type_pref = st.session_state.story_treatment_scenes[i_main_loop]['user_shot_type']
+                    user_duration_pref = st.session_state.story_treatment_scenes[i_main_loop]['user_scene_duration_secs']
+                    user_asset_type_pref = st.session_state.story_treatment_scenes[i_main_loop]['user_selected_asset_type']
+
+                    regen_prompt_for_gemini = create_scene_regeneration_prompt(scene_content_item, feedback_for_treatment, st.session_state.story_treatment_scenes)
+                    try:
+                        updated_scene_data_gemini = st.session_state.gemini_handler.regenerate_scene_script_details(regen_prompt_for_gemini)
+                        final_updated_scene_data = {**updated_scene_data_gemini}
+                        final_updated_scene_data['user_shot_type'] = user_shot_type_pref
+                        final_updated_scene_data['user_scene_duration_secs'] = user_duration_pref
+                        final_updated_scene_data['user_selected_asset_type'] = user_asset_type_pref
+                        st.session_state.story_treatment_scenes[i_main_loop] = final_updated_scene_data
+                        status_treatment_regen.update(label="Treatment updated! Regenerating asset...", state="running")
+
+                        version_num_asset = 1
+                        if asset_info_for_scene and asset_info_for_scene.get('path') and os.path.exists(asset_info_for_scene['path']):
+                            try: base_fn_asset, _ = os.path.splitext(os.path.basename(asset_info_for_scene['path'])); version_num_asset = int(base_fn_asset.split('_v')[-1]) + 1 if '_v' in base_fn_asset else 2
+                            except Exception: version_num_asset = 2
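+                        # The one-liner above derives the next version from the current filename
+                        # (e.g. "scene1_v2.png" -> 3). The same parse is repeated in the visual-prompt
+                        # popover below; a shared helper could replace both, e.g. this sketch
+                        # ('next_asset_version' is not an existing function in this app):
+                        #   def next_asset_version(asset_path: str) -> int:
+                        #       stem, _ = os.path.splitext(os.path.basename(asset_path))
+                        #       return int(stem.split('_v')[-1]) + 1 if '_v' in stem else 2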
355
+
+                        if generate_asset_for_scene_wrapper(i_main_loop, final_updated_scene_data, version_num=version_num_asset, user_selected_asset_type_override=user_asset_type_pref):
+                            status_treatment_regen.update(label="Treatment & Asset Updated! πŸŽ‰", state="complete", expanded=False)
+                        else: status_treatment_regen.update(label="Treatment updated, asset failed.", state="complete", expanded=False)
+                        st.rerun()
+                    except Exception as e_treat_regen_main:
+                        status_treatment_regen.update(label=f"Error: {e_treat_regen_main}", state="error"); logger.error(f"Scene treatment regen error: {e_treat_regen_main}", exc_info=True)
+            else: st.warning("Please provide feedback for the treatment first.")
362
+
+    with st.popover(f"🎨 Edit S{scene_num_val} Visual Prompt/Asset"):
+        current_gen_prompt_display = st.session_state.scene_generation_prompts[i_main_loop] if i_main_loop < len(st.session_state.scene_generation_prompts) else "No prompt generated yet."
+        st.caption("Current Asset Generation Prompt:"); st.code(current_gen_prompt_display, language='text')
+        feedback_for_visual = st.text_area("Describe changes for the visual asset:", key=f"visual_fb_{key_base_for_scene}", height=150)
+        if st.button(f"πŸ”„ Update S{scene_num_val} Asset", key=f"regen_visual_btn_{key_base_for_scene}"):
+            if feedback_for_visual:
+                with st.status(f"Refining prompt & regenerating asset for S{scene_num_val}...", expanded=True) as status_visual_regen:
+                    user_asset_type_choice_visual = st.session_state.story_treatment_scenes[i_main_loop]['user_selected_asset_type']
+                    is_video_asset_type = (user_asset_type_choice_visual == "Video Clip") or \
+                                          (user_asset_type_choice_visual == "Auto (Director's Choice)" and scene_content_item.get('suggested_asset_type_감독') == 'video_clip')
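+                    # The asset is treated as video when the user explicitly chose "Video Clip", or
+                    # chose "Auto" and Gemini's treatment suggested a video clip for this scene.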
373
+
+                    newly_constructed_asset_prompt = ""
+                    if not is_video_asset_type:  # Refining an IMAGE prompt
+                        gemini_refinement_prompt = create_visual_regeneration_prompt(current_gen_prompt_display, feedback_for_visual, scene_content_item, st.session_state.character_definitions, st.session_state.global_style_additions)
+                        try:
+                            newly_constructed_asset_prompt = st.session_state.gemini_handler.refine_image_prompt_from_feedback(gemini_refinement_prompt)
+                            st.session_state.scene_generation_prompts[i_main_loop] = newly_constructed_asset_prompt
+                            status_visual_regen.update(label="Image prompt refined by Gemini! Regenerating asset...", state="running")
+                        except Exception as e_gemini_prompt_refine:
+                            status_visual_regen.update(label=f"Error refining prompt: {e_gemini_prompt_refine}", state="error"); logger.error(f"Visual prompt refinement error: {e_gemini_prompt_refine}", exc_info=True); continue  # Skip asset generation for this scene
+                    else:
+                        # For VIDEO, the motion prompt is simply reconstructed from the current scene
+                        # data; the feedback is not passed to construct_text_to_video_prompt_for_gen4.
+                        # Ideally the feedback would first update the scene's motion description and
+                        # the prompt would then be rebuilt from that.
+                        logger.info(f"Reconstructing video motion prompt for S{scene_num_val}; user feedback was: {feedback_for_visual}")
+                        newly_constructed_asset_prompt = construct_text_to_video_prompt_for_gen4(scene_content_item, st.session_state.global_style_additions)
+                        st.session_state.scene_generation_prompts[i_main_loop] = newly_constructed_asset_prompt
+                        status_visual_regen.update(label="Video motion prompt reconstructed! Regenerating asset...", state="running")
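+                        # Sketch of that ideal flow (hypothetical -- assumes a Gemini helper and a
+                        # motion-description field that this app does not necessarily have):
+                        #   scene_content_item['motion_description'] = st.session_state.gemini_handler \
+                        #       .refine_motion_description(current_gen_prompt_display, feedback_for_visual)
+                        #   newly_constructed_asset_prompt = construct_text_to_video_prompt_for_gen4(
+                        #       scene_content_item, st.session_state.global_style_additions)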
389
+
+                    if not newly_constructed_asset_prompt:
+                        status_visual_regen.update(label="Prompt construction failed.", state="error"); continue
+
+                    version_num_visual_asset = 1
+                    if asset_info_for_scene and asset_info_for_scene.get('path') and os.path.exists(asset_info_for_scene['path']):
+                        try: base_fn_viz_asset, _ = os.path.splitext(os.path.basename(asset_info_for_scene['path'])); version_num_visual_asset = int(base_fn_viz_asset.split('_v')[-1]) + 1 if '_v' in base_fn_viz_asset else 2
+                        except Exception: version_num_visual_asset = 2
+
+                    if generate_asset_for_scene_wrapper(i_main_loop, st.session_state.story_treatment_scenes[i_main_loop], version_num=version_num_visual_asset, user_selected_asset_type_override=user_asset_type_choice_visual):
+                        status_visual_regen.update(label="Asset Updated! πŸŽ‰", state="complete", expanded=False)
+                    else: status_visual_regen.update(label="Prompt updated, asset regeneration failed.", state="complete", expanded=False)
+                    st.rerun()
+            else: st.warning("Please provide feedback for the visual asset first.")
+    st.markdown("---")
403
+
+# Video Assembly Button
+if st.session_state.story_treatment_scenes and any(asset_info_item_loop and not asset_info_item_loop.get('error') and asset_info_item_loop.get('path') for asset_info_item_loop in st.session_state.generated_scene_assets_info if asset_info_item_loop is not None):
+    if st.button("🎬 Assemble Narrated Cinematic Animatic", key="assemble_ultra_video_btn_v5_main", type="primary", use_container_width=True):
+        with st.status("Assembling Ultra Animatic...", expanded=True) as status_video_assembly:
+            assets_for_final_video = []
+            for i_vid_assembly, scene_data_for_vid in enumerate(st.session_state.story_treatment_scenes):
+                asset_info_current_scene = st.session_state.generated_scene_assets_info[i_vid_assembly] if i_vid_assembly < len(st.session_state.generated_scene_assets_info) else None
+                if asset_info_current_scene and not asset_info_current_scene.get('error') and asset_info_current_scene.get('path') and os.path.exists(asset_info_current_scene['path']):
+                    assets_for_final_video.append({
+                        'path': asset_info_current_scene['path'],
+                        'type': asset_info_current_scene.get('type', 'image'),
+                        'scene_num': scene_data_for_vid.get('scene_number', i_vid_assembly + 1),
+                        'key_action': scene_data_for_vid.get('key_plot_beat', ''),
+                        'duration': scene_data_for_vid.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)
+                    })
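+                    # Each entry carries what assemble_animatic_from_assets appears to consume:
+                    # the asset file and its kind (still image vs. video clip), the scene number
+                    # and key action for overlays, and the user-chosen duration in seconds.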
419
+                    status_video_assembly.write(f"Adding S{scene_data_for_vid.get('scene_number', i_vid_assembly + 1)} ({asset_info_current_scene.get('type')}).")
+                else: logger.warning(f"Skipping S{scene_data_for_vid.get('scene_number', i_vid_assembly + 1)} for video: no valid asset.")
421
+
+            if assets_for_final_video:
+                status_video_assembly.write("Calling video engine...")
+                st.session_state.video_path = st.session_state.visual_engine.assemble_animatic_from_assets(
+                    asset_data_list=assets_for_final_video,
+                    overall_narration_path=st.session_state.overall_narration_audio_path,
+                    output_filename="cinegen_ultra_animatic.mp4", fps=24
+                )
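+                # The engine is expected to return the output file path on success and None on
+                # failure; the existence check below guards both cases.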
429
+                if st.session_state.video_path and os.path.exists(st.session_state.video_path):
+                    status_video_assembly.update(label="Ultra animatic assembled! πŸŽ‰", state="complete", expanded=False); st.balloons()
+                else: status_video_assembly.update(label="Video assembly failed. Check logs.", state="error", expanded=False); logger.error("Video assembly returned None or the output file does not exist.")
+            else: status_video_assembly.update(label="No valid assets for video assembly.", state="error", expanded=False); logger.warning("No valid assets found for video assembly.")
+elif st.session_state.story_treatment_scenes: st.info("Generate visual assets before assembling the animatic.")
+
435
+if st.session_state.video_path and os.path.exists(st.session_state.video_path):
+    st.header("🎬 Generated Cinematic Animatic")
     try:
+        with open(st.session_state.video_path, 'rb') as vf_obj_read: video_bytes_content = vf_obj_read.read()
+        st.video(video_bytes_content, format="video/mp4")
+        st.download_button(label="Download Ultra Animatic", data=video_bytes_content, file_name=os.path.basename(st.session_state.video_path), mime="video/mp4", use_container_width=True, key="download_ultra_video_btn_v5_main_dl")
+    except Exception as e_vid_final_display:
+        st.error(f"Error displaying final video: {e_vid_final_display}"); logger.error(f"Error displaying final video: {e_vid_final_display}", exc_info=True)
+
+# --- Footer ---
+st.sidebar.markdown("---"); st.sidebar.caption("CineGen AI Ultra+ | Visionary Cinematic Pre-Production")