mgbam committed on
Commit 610a011 · verified · 1 Parent(s): 3084a6c

Update core/visual_engine.py

Files changed (1)
  1. core/visual_engine.py +321 -336
core/visual_engine.py CHANGED
@@ -1,7 +1,17 @@
  # core/visual_engine.py
  from PIL import Image, ImageDraw, ImageFont, ImageOps
- import base64
- import mimetypes
  import numpy as np
  import os
  import openai
@@ -11,311 +21,274 @@ import time
  import random
  import logging

- # --- MoviePy Imports ---
- from moviepy.editor import (ImageClip, VideoFileClip, concatenate_videoclips, TextClip,
-                             CompositeVideoClip, AudioFileClip)
- import moviepy.video.fx.all as vfx
-
- # --- MONKEY PATCH for Pillow/MoviePy compatibility ---
- try:
-     if hasattr(Image, 'Resampling') and hasattr(Image.Resampling, 'LANCZOS'): # Pillow 9+
-         if not hasattr(Image, 'ANTIALIAS'): Image.ANTIALIAS = Image.Resampling.LANCZOS
-     elif hasattr(Image, 'LANCZOS'): # Pillow 8
-         if not hasattr(Image, 'ANTIALIAS'): Image.ANTIALIAS = Image.LANCZOS
-     elif not hasattr(Image, 'ANTIALIAS'):
-         print("WARNING: Pillow version lacks common Resampling attributes or ANTIALIAS. MoviePy effects might fail or look different.")
- except Exception as e_monkey_patch:
-     print(f"WARNING: An unexpected error occurred during Pillow ANTIALIAS monkey-patch: {e_monkey_patch}")
-
  logger = logging.getLogger(__name__)
- # logger.setLevel(logging.DEBUG) # Uncomment for very verbose debugging

- # --- External Service Client Imports ---
- ELEVENLABS_CLIENT_IMPORTED = False
- ElevenLabsAPIClient = None
- Voice = None
- VoiceSettings = None
  try:
      from elevenlabs.client import ElevenLabs as ImportedElevenLabsClient
      from elevenlabs import Voice as ImportedVoice, VoiceSettings as ImportedVoiceSettings
-     ElevenLabsAPIClient = ImportedElevenLabsClient
-     Voice = ImportedVoice
-     VoiceSettings = ImportedVoiceSettings
-     ELEVENLABS_CLIENT_IMPORTED = True
-     logger.info("ElevenLabs client components imported successfully.")
- except ImportError:
-     logger.warning("ElevenLabs SDK not found (pip install elevenlabs). Audio generation will be disabled.")
- except Exception as e_eleven_import:
-     logger.warning(f"Error importing ElevenLabs client components: {e_eleven_import}. Audio generation disabled.")

- RUNWAYML_SDK_IMPORTED = False
- RunwayMLAPIClient = None
  try:
-     from runwayml import RunwayML as ImportedRunwayMLClient
-     RunwayMLAPIClient = ImportedRunwayMLClient
-     RUNWAYML_SDK_IMPORTED = True
-     logger.info("RunwayML SDK imported successfully.")
- except ImportError:
-     logger.warning("RunwayML SDK not found (pip install runwayml). RunwayML video generation will be disabled.")
- except Exception as e_runway_sdk_import:
-     logger.warning(f"Error importing RunwayML SDK: {e_runway_sdk_import}. RunwayML features disabled.")


  class VisualEngine:
-     DEFAULT_FONT_SIZE_PIL = 10
-     PREFERRED_FONT_SIZE_PIL = 20
-     VIDEO_OVERLAY_FONT_SIZE = 30
-     VIDEO_OVERLAY_FONT_COLOR = 'white'
-     DEFAULT_MOVIEPY_FONT = 'DejaVu-Sans-Bold'
-     PREFERRED_MOVIEPY_FONT = 'Liberation-Sans-Bold'
-
      def __init__(self, output_dir="temp_cinegen_media", default_elevenlabs_voice_id="Rachel"):
          self.output_dir = output_dir
          os.makedirs(self.output_dir, exist_ok=True)
-         self.font_filename_pil = "DejaVuSans-Bold.ttf"
-         font_paths_to_try = [ self.font_filename_pil, f"/usr/share/fonts/truetype/dejavu/{self.font_filename_pil}", f"/usr/share/fonts/truetype/liberation/LiberationSans-Bold.ttf", f"/System/Library/Fonts/Supplemental/Arial.ttf", f"C:/Windows/Fonts/arial.ttf", f"/usr/local/share/fonts/truetype/mycustomfonts/arial.ttf"]
-         self.font_path_pil_resolved = next((p for p in font_paths_to_try if os.path.exists(p)), None)
-         self.font_pil = ImageFont.load_default(); self.current_font_size_pil = self.DEFAULT_FONT_SIZE_PIL
-         if self.font_path_pil_resolved:
-             try: self.font_pil = ImageFont.truetype(self.font_path_pil_resolved, self.PREFERRED_FONT_SIZE_PIL); self.current_font_size_pil = self.PREFERRED_FONT_SIZE_PIL; logger.info(f"Pillow font: {self.font_path_pil_resolved} sz {self.current_font_size_pil}."); self.video_overlay_font = 'DejaVu-Sans-Bold' if "dejavu" in self.font_path_pil_resolved.lower() else ('Liberation-Sans-Bold' if "liberation" in self.font_path_pil_resolved.lower() else self.DEFAULT_MOVIEPY_FONT)
-             except IOError as e_font_load: logger.error(f"Pillow font IOError '{self.font_path_pil_resolved}': {e_font_load}. Default.")
-         else: logger.warning("Custom Pillow font not found. Default.")
          self.openai_api_key = None; self.USE_AI_IMAGE_GENERATION = False; self.dalle_model = "dall-e-3"; self.image_size_dalle3 = "1792x1024"
          self.video_frame_size = (1280, 720)
          self.elevenlabs_api_key = None; self.USE_ELEVENLABS = False; self.elevenlabs_client = None; self.elevenlabs_voice_id = default_elevenlabs_voice_id
          if VoiceSettings and ELEVENLABS_CLIENT_IMPORTED: self.elevenlabs_voice_settings = VoiceSettings(stability=0.60, similarity_boost=0.80, style=0.15, use_speaker_boost=True)
          else: self.elevenlabs_voice_settings = None
          self.pexels_api_key = None; self.USE_PEXELS = False
-         self.runway_api_key = None; self.USE_RUNWAYML = False; self.runway_ml_client_instance = None
-         if RUNWAYML_SDK_IMPORTED and RunwayMLAPIClient and os.getenv("RUNWAYML_API_SECRET"):
-             try: self.runway_ml_client_instance = RunwayMLAPIClient(); self.USE_RUNWAYML = True; logger.info("RunwayML Client init from env var at startup.")
-             except Exception as e_runway_init_startup: logger.error(f"Initial RunwayML client init failed: {e_runway_init_startup}"); self.USE_RUNWAYML = False
          logger.info("VisualEngine initialized.")

-     def set_openai_api_key(self, api_key): self.openai_api_key = api_key; self.USE_AI_IMAGE_GENERATION = bool(api_key); logger.info(f"DALL-E status: {'Ready' if self.USE_AI_IMAGE_GENERATION else 'Disabled'}")
-     def set_elevenlabs_api_key(self, api_key, voice_id_from_secret=None):
-         self.elevenlabs_api_key = api_key
          if voice_id_from_secret: self.elevenlabs_voice_id = voice_id_from_secret
          if api_key and ELEVENLABS_CLIENT_IMPORTED and ElevenLabsAPIClient:
-             try: self.elevenlabs_client = ElevenLabsAPIClient(api_key=api_key); self.USE_ELEVENLABS = bool(self.elevenlabs_client); logger.info(f"11L Client: {'Ready' if self.USE_ELEVENLABS else 'Failed'} (Voice: {self.elevenlabs_voice_id})")
-             except Exception as e: logger.error(f"11L client init error: {e}. Disabled.", exc_info=True); self.USE_ELEVENLABS=False; self.elevenlabs_client=None
-         else: self.USE_ELEVENLABS = False; logger.info(f"11L Disabled (key/SDK).")
-     def set_pexels_api_key(self, api_key): self.pexels_api_key = api_key; self.USE_PEXELS = bool(api_key); logger.info(f"Pexels status: {'Ready' if self.USE_PEXELS else 'Disabled'}")
-     def set_runway_api_key(self, api_key):
-         self.runway_api_key = api_key
-         if api_key:
-             if RUNWAYML_SDK_IMPORTED and RunwayMLAPIClient:
-                 if not self.runway_ml_client_instance:
-                     try:
-                         original_env_secret = os.getenv("RUNWAYML_API_SECRET")
-                         if not original_env_secret: os.environ["RUNWAYML_API_SECRET"] = api_key; logger.info("Temp set RUNWAYML_API_SECRET for SDK.")
-                         self.runway_ml_client_instance = RunwayMLAPIClient(); self.USE_RUNWAYML = True; logger.info("RunwayML Client init via set_runway_api_key.")
-                         if not original_env_secret: del os.environ["RUNWAYML_API_SECRET"]; logger.info("Cleared temp RUNWAYML_API_SECRET.")
-                     except Exception as e: logger.error(f"RunwayML Client init in set_runway_api_key fail: {e}", exc_info=True); self.USE_RUNWAYML=False; self.runway_ml_client_instance=None
-                 else: self.USE_RUNWAYML = True; logger.info("RunwayML Client already init.")
-             else: logger.warning("RunwayML SDK not imported. Service disabled."); self.USE_RUNWAYML = False
-         else: self.USE_RUNWAYML = False; self.runway_ml_client_instance = None; logger.info("RunwayML Disabled (no API key).")

-     def _image_to_data_uri(self, image_path):
-         # (Implementation from before)
-         try:
-             mime_type,_=mimetypes.guess_type(image_path)
-             if not mime_type:ext=os.path.splitext(image_path)[1].lower();mime_map={".png":"image/png",".jpg":"image/jpeg",".jpeg":"image/jpeg"};mime_type=mime_map.get(ext,"application/octet-stream")
-             if mime_type=="application/octet-stream":logger.warning(f"Unknown MIME for {image_path}, using {mime_type}.")
-             with open(image_path,"rb")as image_file:encoded_string=base64.b64encode(image_file.read()).decode('utf-8')
-             data_uri=f"data:{mime_type};base64,{encoded_string}";logger.debug(f"Data URI for {os.path.basename(image_path)} (start): {data_uri[:100]}...");return data_uri
-         except FileNotFoundError:logger.error(f"Img not found {image_path} for data URI.");return None
-         except Exception as e:logger.error(f"Error converting {image_path} to data URI:{e}",exc_info=True);return None

-     def _map_resolution_to_runway_ratio(self, width, height):
-         # (Implementation from before)
-         ratio_str=f"{width}:{height}";supported_ratios_gen4=["1280:720","720:1280","1104:832","832:1104","960:960","1584:672"]
-         if ratio_str in supported_ratios_gen4:return ratio_str
-         logger.warning(f"Res {ratio_str} not in Gen-4 list. Default 1280:720.");return "1280:720"
-
-     def _get_text_dimensions(self, text_content, font_object):
-         # (Implementation from before)
-         dch=getattr(font_object,'size',self.current_font_size_pil)
-         if not text_content:return 0,dch
-         try:
-             if hasattr(font_object,'getbbox'):bb=font_object.getbbox(text_content);w=bb[2]-bb[0];h=bb[3]-bb[1];return w,h if h>0 else dch
-             elif hasattr(font_object,'getsize'):w,h=font_object.getsize(text_content);return w,h if h>0 else dch
-             else:return int(len(text_content)*dch*0.6),int(dch*1.2)
-         except Exception as e:logger.warning(f"Error in _get_text_dimensions:{e}");return int(len(text_content)*self.current_font_size_pil*0.6),int(self.current_font_size_pil*1.2)

-     def _create_placeholder_image_content(self,text_description,filename,size=None):
-         # <<< CORRECTED VERSION OF THIS METHOD >>>
-         if size is None: size = self.video_frame_size
-         img = Image.new('RGB', size, color=(20, 20, 40)); d = ImageDraw.Draw(img); padding = 25
-         max_w = size[0] - (2 * padding); lines = []
-         if not text_description: text_description = "(Placeholder Image)"
-         words = text_description.split(); current_line_text = ""
-         for word_idx, word in enumerate(words):
-             prospective_addition = word + (" " if word_idx < len(words) - 1 else "")
-             test_line_text = current_line_text + prospective_addition
-             current_w, _ = self._get_text_dimensions(test_line_text, self.font_pil)
-             if current_w == 0 and test_line_text.strip(): current_w = len(test_line_text) * (self.current_font_size_pil * 0.6)
-             if current_w <= max_w: current_line_text = test_line_text
-             else:
-                 if current_line_text.strip(): lines.append(current_line_text.strip())
-                 current_line_text = prospective_addition
-         if current_line_text.strip(): lines.append(current_line_text.strip())
-
-         if not lines and text_description:
-             avg_char_w, _ = self._get_text_dimensions("W", self.font_pil); avg_char_w = avg_char_w or (self.current_font_size_pil * 0.6)
-             chars_per_line = int(max_w / avg_char_w) if avg_char_w > 0 else 20
-             lines.append(text_description[:chars_per_line] + ("..." if len(text_description) > chars_per_line else ""))
-         elif not lines: lines.append("(Placeholder Error)")
-
-         _, single_line_h = self._get_text_dimensions("Ay", self.font_pil); single_line_h = single_line_h if single_line_h > 0 else self.current_font_size_pil + 2
-         max_lines = min(len(lines), (size[1] - (2 * padding)) // (single_line_h + 2)) if single_line_h > 0 else 1
-         max_lines = max(1, max_lines)
-
-         y_pos = padding + (size[1] - (2 * padding) - max_lines * (single_line_h + 2)) / 2.0
-         for i in range(max_lines):
-             line_text = lines[i]; line_w, _ = self._get_text_dimensions(line_text, self.font_pil)
-             if line_w == 0 and line_text.strip(): line_w = len(line_text) * (self.current_font_size_pil * 0.6)
-             x_pos = (size[0] - line_w) / 2.0
-             try: d.text((x_pos, y_pos), line_text, font=self.font_pil, fill=(200, 200, 180))
-             except Exception as e_draw: logger.error(f"Pillow d.text error: {e_draw} for '{line_text}'")
-             y_pos += single_line_h + 2
-             if i == 6 and max_lines > 7:
-                 try: d.text((x_pos, y_pos), "...", font=self.font_pil, fill=(200, 200, 180))
-                 except Exception as e_elip: logger.error(f"Pillow d.text ellipsis error: {e_elip}")
-                 break
-         filepath = os.path.join(self.output_dir, filename)
-         try: img.save(filepath); return filepath
-         except Exception as e_save: logger.error(f"Saving placeholder image '{filepath}' error: {e_save}", exc_info=True); return None

-     def _search_pexels_image(self, query, output_filename_base):
-         # <<< CORRECTED VERSION OF THIS METHOD >>>
-         if not self.USE_PEXELS or not self.pexels_api_key: return None
-         headers = {"Authorization": self.pexels_api_key}
-         params = {"query": query, "per_page": 1, "orientation": "landscape", "size": "large2x"}
-         base_name_for_pexels, _ = os.path.splitext(output_filename_base)
-         pexels_filename = base_name_for_pexels + f"_pexels_{random.randint(1000,9999)}.jpg"
-         filepath = os.path.join(self.output_dir, pexels_filename)
          try:
-             logger.info(f"Pexels: Searching for '{query}'")
-             effective_query = " ".join(query.split()[:5])
-             params["query"] = effective_query
-             response = requests.get("https://api.pexels.com/v1/search", headers=headers, params=params, timeout=20)
-             response.raise_for_status()
-             data = response.json()
-             if data.get("photos") and len(data["photos"]) > 0:
-                 photo_details = data["photos"][0]
-                 photo_url = photo_details.get("src", {}).get("large2x")
-                 if not photo_url: logger.warning(f"Pexels: 'large2x' URL missing for '{effective_query}'. Details: {photo_details}"); return None
-                 image_response = requests.get(photo_url, timeout=60); image_response.raise_for_status()
-                 img_data_pil = Image.open(io.BytesIO(image_response.content))
-                 if img_data_pil.mode != 'RGB': img_data_pil = img_data_pil.convert('RGB')
-                 img_data_pil.save(filepath); logger.info(f"Pexels: Image saved to {filepath}"); return filepath
-             else: logger.info(f"Pexels: No photos for '{effective_query}'."); return None
-         except requests.exceptions.RequestException as e_req: logger.error(f"Pexels: RequestException for '{query}': {e_req}", exc_info=False); return None
-         except Exception as e: logger.error(f"Pexels: General error for '{query}': {e}", exc_info=True); return None

-     def _generate_video_clip_with_runwayml(self, text_prompt_for_motion, input_image_path, scene_identifier_filename_base, target_duration_seconds=5):
-         # (Implementation from previous response, with Runway SDK calls)
-         if not self.USE_RUNWAYML or not self.runway_ml_client_instance: logger.warning("RunwayML not enabled/client not init. Skip video."); return None
-         if not input_image_path or not os.path.exists(input_image_path): logger.error(f"Runway Gen-4 needs input image. Path invalid: {input_image_path}"); return None
-         image_data_uri = self._image_to_data_uri(input_image_path)
-         if not image_data_uri: return None
-         runway_duration = 10 if target_duration_seconds >= 8 else 5
-         runway_ratio_str = self._map_resolution_to_runway_ratio(self.video_frame_size[0], self.video_frame_size[1])
-         base_name_runway, _ = os.path.splitext(scene_identifier_filename_base); output_video_filename = base_name_runway + f"_runway_gen4_d{runway_duration}s.mp4" # Corrected base name usage
-         output_video_filepath = os.path.join(self.output_dir, output_video_filename)
-         logger.info(f"Runway Gen-4 task: motion='{text_prompt_for_motion[:100]}...', img='{os.path.basename(input_image_path)}', dur={runway_duration}s, ratio='{runway_ratio_str}'")
          try:
-             task_submission = self.runway_ml_client_instance.image_to_video.create(model='gen4_turbo', prompt_image=image_data_uri, prompt_text=text_prompt_for_motion, duration=runway_duration, ratio=runway_ratio_str)
-             task_id = task_submission.id; logger.info(f"Runway Gen-4 task ID: {task_id}. Polling...")
-             poll_interval=10; max_polls=36; start_poll_time = time.time()
-             while time.time() - start_poll_time < max_polls * poll_interval:
-                 time.sleep(poll_interval); task_details = self.runway_ml_client_instance.tasks.retrieve(id=task_id)
-                 logger.info(f"Runway task {task_id} status: {task_details.status}")
-                 if task_details.status == 'SUCCEEDED':
-                     output_url = getattr(getattr(task_details,'output',None),'url',None) or (getattr(task_details,'artifacts',None) and task_details.artifacts[0].url if task_details.artifacts and hasattr(task_details.artifacts[0],'url') else None) or (getattr(task_details,'artifacts',None) and task_details.artifacts[0].download_url if task_details.artifacts and hasattr(task_details.artifacts[0],'download_url') else None)
-                     if not output_url: logger.error(f"Runway task {task_id} SUCCEEDED, but no output URL. Details: {vars(task_details) if hasattr(task_details,'__dict__') else task_details}"); return None
-                     logger.info(f"Runway task {task_id} SUCCEEDED. Downloading: {output_url}")
-                     video_response = requests.get(output_url, stream=True, timeout=300); video_response.raise_for_status()
-                     with open(output_video_filepath,'wb') as f:
-                         for chunk in video_response.iter_content(chunk_size=8192): f.write(chunk)
-                     logger.info(f"Runway Gen-4 video saved: {output_video_filepath}"); return output_video_filepath
-                 elif task_details.status in ['FAILED','ABORTED','ERROR']:
-                     em = getattr(task_details,'error_message',None) or getattr(getattr(task_details,'output',None),'error',"Unknown Runway error.")
-                     logger.error(f"Runway task {task_id} status: {task_details.status}. Error: {em}"); return None
-             logger.warning(f"Runway task {task_id} timed out."); return None
-         except AttributeError as ae: logger.error(f"RunwayML SDK AttrError: {ae}. SDK/methods changed?", exc_info=True); return None
-         except Exception as e: logger.error(f"Runway Gen-4 API error: {e}", exc_info=True); return None
-
-     def _create_placeholder_video_content(self, td, fn, dur=4, sz=None):
-         # (Keep as before)
-         if sz is None: sz = self.video_frame_size
-         fp = os.path.join(self.output_dir, fn); tc = None
-         try: tc = TextClip(td, fontsize=50, color='white', font=self.video_overlay_font, bg_color='black', size=sz, method='caption').set_duration(dur); tc.write_videofile(fp, fps=24, codec='libx264', preset='ultrafast', logger=None, threads=2); logger.info(f"Generic placeholder video: {fp}"); return fp
          except Exception as e: logger.error(f"Generic placeholder video error {fp}: {e}", exc_info=True); return None
          finally:
              if tc and hasattr(tc, 'close'): tc.close()

-     def generate_scene_asset(self, image_generation_prompt_text, motion_prompt_text_for_video,
                               scene_data, scene_identifier_filename_base,
                               generate_as_video_clip=False, runway_target_duration=5):
-         # <<< THIS IS THE CORRECTED METHOD with fixed DALL-E loop >>>
          base_name, _ = os.path.splitext(scene_identifier_filename_base)
-         asset_info = {'path': None, 'type': 'none', 'error': True,
-                       'prompt_used': image_generation_prompt_text,
-                       'error_message': 'Asset generation init failed'}
          input_image_for_runway_path = None
-         base_image_filename = base_name + ("_base_for_video.png" if generate_as_video_clip else ".png")
-         base_image_filepath = os.path.join(self.output_dir, base_image_filename)
-
          if self.USE_AI_IMAGE_GENERATION and self.openai_api_key:
-             max_retries = 2; attempt_count_dalle = 0
-             for attempt_num_dalle in range(max_retries):
-                 attempt_count_dalle = attempt_num_dalle + 1
-                 try: # DALL-E attempt try block
-                     logger.info(f"Attempt {attempt_count_dalle} DALL-E (base img): {image_generation_prompt_text[:70]}...")
-                     client_oai = openai.OpenAI(api_key=self.openai_api_key, timeout=90.0)
-                     response_oai = client_oai.images.generate(model=self.dalle_model,prompt=image_generation_prompt_text,n=1,size=self.image_size_dalle3,quality="hd",response_format="url",style="vivid")
-                     img_url_oai = response_oai.data[0].url
-                     revised_prompt_oai = getattr(response_oai.data[0],'revised_prompt',None)
-                     if revised_prompt_oai: logger.info(f"DALL-E revised: {revised_prompt_oai[:70]}...")
-                     img_response_get = requests.get(img_url_oai,timeout=120); img_response_get.raise_for_status()
-                     pil_img_oai = Image.open(io.BytesIO(img_response_get.content))
-                     if pil_img_oai.mode!='RGB': pil_img_oai=pil_img_oai.convert('RGB')
-                     pil_img_oai.save(base_image_filepath); logger.info(f"DALL-E base img saved: {base_image_filepath}")
-                     input_image_for_runway_path=base_image_filepath
-                     asset_info={'path':base_image_filepath,'type':'image','error':False,'prompt_used':image_generation_prompt_text,'revised_prompt':revised_prompt_oai}
-                     break # Success, exit loop
-                 except openai.RateLimitError as e_rl: logger.warning(f"OpenAI RateLimit Att {attempt_count_dalle}:{e_rl}.Retry...");time.sleep(5*attempt_count_dalle);asset_info['error_message']=str(e_rl)
-                 except openai.APIError as e_api_oai: logger.error(f"OpenAI APIError Att {attempt_count_dalle}:{e_api_oai}");asset_info['error_message']=str(e_api_oai);break
-                 except requests.exceptions.RequestException as e_req_oai: logger.error(f"Requests Err DALL-E Att {attempt_count_dalle}:{e_req_oai}");asset_info['error_message']=str(e_req_oai);break
-                 except Exception as e_gen_oai: logger.error(f"General DALL-E Err Att {attempt_count_dalle}:{e_gen_oai}",exc_info=True);asset_info['error_message']=str(e_gen_oai);break
-             if asset_info['error']: logger.warning(f"DALL-E failed after {attempt_count_dalle} attempts for base img.")

-         if asset_info['error'] and self.USE_PEXELS:
-             logger.info("Trying Pexels for base img.");pqt=scene_data.get('pexels_search_query_감독',f"{scene_data.get('emotional_beat','')} {scene_data.get('setting_description','')}");pp=self._search_pexels_image(pqt,base_image_filename)
-             if pp:input_image_for_runway_path=pp;asset_info={'path':pp,'type':'image','error':False,'prompt_used':f"Pexels:{pqt}"}
-             else:current_em=asset_info.get('error_message',"");asset_info['error_message']=(current_em+" Pexels failed for base.").strip()
-
-         if asset_info['error']:
-             logger.warning("Base img (DALL-E/Pexels) failed. Using placeholder.");ppt=asset_info.get('prompt_used',image_generation_prompt_text);php=self._create_placeholder_image_content(f"[Base Placeholder]{ppt[:70]}...",base_image_filename)
-             if php:input_image_for_runway_path=php;asset_info={'path':php,'type':'image','error':False,'prompt_used':ppt}
-             else:current_em=asset_info.get('error_message',"");asset_info['error_message']=(current_em+" Base placeholder failed.").strip()

          if generate_as_video_clip:
-             if not input_image_for_runway_path:logger.error("RunwayML video: base img failed.");asset_info['error']=True;asset_info['error_message']=(asset_info.get('error_message',"")+" Base img miss, Runway abort.").strip();asset_info['type']='none';return asset_info
-             if self.USE_RUNWAYML:
-                 video_path=self._generate_video_clip_with_runwayml(motion_prompt_text_for_video,input_image_for_runway_path,base_name,runway_target_duration)
-                 if video_path and os.path.exists(video_path):asset_info={'path':video_path,'type':'video','error':False,'prompt_used':motion_prompt_text_for_video,'base_image_path':input_image_for_runway_path}
-                 else:logger.warning(f"RunwayML video failed for {base_name}. Fallback to base img.");asset_info['error']=True;asset_info['error_message']=(asset_info.get('error_message',"Base img ok.")+" RunwayML video fail; use base img.").strip();asset_info['path']=input_image_for_runway_path;asset_info['type']='image';asset_info['prompt_used']=image_generation_prompt_text
-             else:logger.warning("RunwayML selected but disabled. Use base img.");asset_info['error']=True;asset_info['error_message']=(asset_info.get('error_message',"Base img ok.")+" RunwayML disabled; use base img.").strip();asset_info['path']=input_image_for_runway_path;asset_info['type']='image';asset_info['prompt_used']=image_generation_prompt_text
-         return asset_info

      def generate_narration_audio(self, text_to_narrate, output_filename="narration_overall.mp3"):
-         # (Keep as before)
          if not self.USE_ELEVENLABS or not self.elevenlabs_client or not text_to_narrate: logger.info("11L skip."); return None
          afp=os.path.join(self.output_dir,output_filename)
          try:
              logger.info(f"11L audio (Voice:{self.elevenlabs_voice_id}): {text_to_narrate[:70]}..."); asm=None
-             if hasattr(self.elevenlabs_client,'text_to_speech') and hasattr(self.elevenlabs_client.text_to_speech,'stream'):asm=self.elevenlabs_client.text_to_speech.stream;logger.info("Using 11L .text_to_speech.stream()")
-             elif hasattr(self.elevenlabs_client,'generate_stream'):asm=self.elevenlabs_client.generate_stream;logger.info("Using 11L .generate_stream()")
-             elif hasattr(self.elevenlabs_client,'generate'):
-                 logger.info("Using 11L .generate()");vp=Voice(voice_id=str(self.elevenlabs_voice_id),settings=self.elevenlabs_voice_settings) if Voice and self.elevenlabs_voice_settings else str(self.elevenlabs_voice_id);ab=self.elevenlabs_client.generate(text=text_to_narrate,voice=vp,model="eleven_multilingual_v2")
                  with open(afp,"wb")as f:f.write(ab)
                  logger.info(f"11L audio (non-stream): {afp}");return afp
              else:logger.error("No 11L audio method.");return None
              if asm:vps={"voice_id":str(self.elevenlabs_voice_id)}
@@ -325,102 +298,114 @@ class VisualEngine:
              else:vps["voice_settings"]=self.elevenlabs_voice_settings
              adi=asm(text=text_to_narrate,model_id="eleven_multilingual_v2",**vps)
              with open(afp,"wb")as f:
-                 for c_chunk in adi: # Renamed c to c_chunk
-                     if c_chunk:f.write(c_chunk)
              logger.info(f"11L audio (stream): {afp}");return afp
-         except Exception as e_11l:logger.error(f"11L audio error: {e_11l}",exc_info=True);return None # Renamed e to e_11l

      def assemble_animatic_from_assets(self, asset_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24):
-         # (Keep as in the version with robust image processing, C-contiguous array, debug saves, and pix_fmt)
          if not asset_data_list: logger.warning("No assets for animatic."); return None
-         processed_clips = []; narration_clip_mvpy = None; final_composite_video_clip = None
          logger.info(f"Assembling from {len(asset_data_list)} assets. Frame: {self.video_frame_size}.")
-         for i, asset_info_dict in enumerate(asset_data_list):
-             asset_p, asset_t, scene_d = asset_info_dict.get('path'), asset_info_dict.get('type'), asset_info_dict.get('duration', 4.5)
-             scene_n, key_act = asset_info_dict.get('scene_num', i + 1), asset_info_dict.get('key_action', '')
-             logger.info(f"S{scene_n}: Path='{asset_p}', Type='{asset_t}', Dur='{scene_d}'s")
-             if not (asset_p and os.path.exists(asset_p)): logger.warning(f"S{scene_n}: Not found '{asset_p}'. Skip."); continue
-             if scene_d <= 0: logger.warning(f"S{scene_n}: Invalid duration ({scene_d}s). Skip."); continue
-             current_scene_mvpy_clip = None
              try:
-                 if asset_t == 'image':
-                     # ... (Robust image processing logic from previous full version) ...
-                     pil_img_opened = Image.open(asset_p); logger.debug(f"S{scene_n}: Loaded img. Mode:{pil_img_opened.mode}, Size:{pil_img_opened.size}")
-                     img_rgba_converted = pil_img_opened.convert('RGBA') if pil_img_opened.mode != 'RGBA' else pil_img_opened.copy()
-                     thumb_img = img_rgba_converted.copy(); res_filter = Image.Resampling.LANCZOS if hasattr(Image,'Resampling') else Image.BILINEAR; thumb_img.thumbnail(self.video_frame_size,res_filter)
-                     canvas_for_rgba = Image.new('RGBA',self.video_frame_size,(0,0,0,0)); x_offset,y_offset=(self.video_frame_size[0]-thumb_img.width)//2,(self.video_frame_size[1]-thumb_img.height)//2
-                     canvas_for_rgba.paste(thumb_img,(x_offset,y_offset),thumb_img)
-                     final_rgb_for_pil = Image.new("RGB",self.video_frame_size,(0,0,0)); final_rgb_for_pil.paste(canvas_for_rgba,mask=canvas_for_rgba.split()[3])
-                     debug_path_pre_numpy = os.path.join(self.output_dir,f"debug_PRE_NUMPY_S{scene_n}.png"); final_rgb_for_pil.save(debug_path_pre_numpy); logger.info(f"DEBUG: Saved PRE_NUMPY_S{scene_n} to {debug_path_pre_numpy}")
-                     numpy_frame = np.array(final_rgb_for_pil,dtype=np.uint8)
-                     if not numpy_frame.flags['C_CONTIGUOUS']: numpy_frame=np.ascontiguousarray(numpy_frame,dtype=np.uint8)
-                     logger.debug(f"S{scene_n}: NumPy for MoviePy. Shape:{numpy_frame.shape}, DType:{numpy_frame.dtype}, C-Contig:{numpy_frame.flags['C_CONTIGUOUS']}")
-                     if numpy_frame.size==0 or numpy_frame.ndim!=3 or numpy_frame.shape[2]!=3: logger.error(f"S{scene_n}: Invalid NumPy. Skip."); continue
-                     image_clip_base = ImageClip(numpy_frame,transparent=False).set_duration(scene_d)
-                     moviepy_debug_frame_save_path=os.path.join(self.output_dir,f"debug_MOVIEPY_FRAME_S{scene_n}.png"); image_clip_base.save_frame(moviepy_debug_frame_save_path,t=0.1); logger.info(f"DEBUG: Saved MOVIEPY_FRAME_S{scene_n} to {moviepy_debug_frame_save_path}")
-                     image_clip_with_fx = image_clip_base
-                     try: end_scale_kb=random.uniform(1.03,1.08); image_clip_with_fx=image_clip_base.fx(vfx.resize,lambda time_t:1+(end_scale_kb-1)*(time_t/scene_d) if scene_d>0 else 1).set_position('center')
-                     except Exception as e_kb: logger.error(f"S{scene_n} Ken Burns error: {e_kb}",exc_info=False)
-                     current_scene_mvpy_clip = image_clip_with_fx
-                 elif asset_t == 'video':
-                     # ... (Video processing logic from previous full version) ...
-                     source_video_file_clip=None
                      try:
-                         source_video_file_clip=VideoFileClip(asset_p,target_resolution=(self.video_frame_size[1],self.video_frame_size[0])if self.video_frame_size else None, audio=False)
-                         temp_video_clip_obj=source_video_file_clip
-                         if source_video_file_clip.duration!=scene_d:
-                             if source_video_file_clip.duration>scene_d:temp_video_clip_obj=source_video_file_clip.subclip(0,scene_d)
                              else:
-                                 if scene_d/source_video_file_clip.duration > 1.5 and source_video_file_clip.duration>0.1:temp_video_clip_obj=source_video_file_clip.loop(duration=scene_d)
-                                 else:temp_video_clip_obj=source_video_file_clip.set_duration(source_video_file_clip.duration);logger.info(f"S{scene_n} Video clip ({source_video_file_clip.duration:.2f}s) shorter than target ({scene_d:.2f}s).")
-                         current_scene_mvpy_clip=temp_video_clip_obj.set_duration(scene_d)
                          if current_scene_mvpy_clip.size!=list(self.video_frame_size):current_scene_mvpy_clip=current_scene_mvpy_clip.resize(self.video_frame_size)
-                     except Exception as e_vidload:logger.error(f"S{scene_n} Video load error '{asset_p}':{e_vidload}",exc_info=True);continue
                      finally:
-                         if source_video_file_clip and source_video_file_clip is not current_scene_mvpy_clip and hasattr(source_video_file_clip,'close'):source_video_file_clip.close()
-                 else: logger.warning(f"S{scene_n} Unknown asset type '{asset_t}'. Skip."); continue

-                 if current_scene_mvpy_clip and key_act: # Text Overlay
                      try:
-                         text_overlay_dur=min(current_scene_mvpy_clip.duration-0.5,current_scene_mvpy_clip.duration*0.8) if current_scene_mvpy_clip.duration>0.5 else current_scene_mvpy_clip.duration
-                         text_overlay_s_time=0.25
-                         if text_overlay_dur > 0:
-                             text_clip_obj=TextClip(f"Scene {scene_n}\n{key_act}",fontsize=self.VIDEO_OVERLAY_FONT_SIZE,color=self.VIDEO_OVERLAY_FONT_COLOR,font=self.video_overlay_font,bg_color='rgba(10,10,20,0.7)',method='caption',align='West',size=(self.video_frame_size[0]*0.9,None),kerning=-1,stroke_color='black',stroke_width=1.5).set_duration(text_overlay_dur).set_start(text_overlay_s_time).set_position(('center',0.92),relative=True)
-                             current_scene_mvpy_clip=CompositeVideoClip([current_scene_mvpy_clip,text_clip_obj],size=self.video_frame_size,use_bgclip=True)
-                         else: logger.warning(f"S{scene_n}: Text overlay duration zero. Skip text.")
-                     except Exception as e_txtclip:logger.error(f"S{scene_n} TextClip error:{e_txtclip}. No text.",exc_info=True)
-                 if current_scene_mvpy_clip:processed_clips.append(current_scene_mvpy_clip);logger.info(f"S{scene_n} Processed. Dur:{current_scene_mvpy_clip.duration:.2f}s.")
-             except Exception as e_asset_loop:logger.error(f"MAJOR Error S{scene_n} ({asset_p}):{e_asset_loop}",exc_info=True)
              finally:
                  if current_scene_mvpy_clip and hasattr(current_scene_mvpy_clip,'close'):
-                     try: current_scene_mvpy_clip.close()
-                     except: pass

          if not processed_clips:logger.warning("No clips processed. Abort.");return None
-         transition_val=0.75
          try:
              logger.info(f"Concatenating {len(processed_clips)} clips.")
-             if len(processed_clips)>1:final_composite_video_clip=concatenate_videoclips(processed_clips,padding=-transition_val if transition_val>0 else 0,method="compose")
-             elif processed_clips:final_composite_video_clip=processed_clips[0]
-             if not final_composite_video_clip:logger.error("Concatenation failed.");return None
-             logger.info(f"Concatenated dur:{final_composite_video_clip.duration:.2f}s")
-             if transition_val>0 and final_composite_video_clip.duration>0:
-                 if final_composite_video_clip.duration>transition_val*2:final_composite_video_clip=final_composite_video_clip.fx(vfx.fadein,transition_val).fx(vfx.fadeout,transition_val)
-                 else:final_composite_video_clip=final_composite_video_clip.fx(vfx.fadein,min(transition_val,final_composite_video_clip.duration/2.0))
-             if overall_narration_path and os.path.exists(overall_narration_path) and final_composite_video_clip.duration>0:
-                 try:narration_clip_mvpy=AudioFileClip(overall_narration_path);final_composite_video_clip=final_composite_video_clip.set_audio(narration_clip_mvpy);logger.info("Narration added.")
-                 except Exception as e_narr:logger.error(f"Narration add error:{e_narr}",exc_info=True)
-             elif final_composite_video_clip.duration<=0:logger.warning("Video no duration. No audio.")
-             if final_composite_video_clip and final_composite_video_clip.duration>0:
-                 output_vid_path=os.path.join(self.output_dir,output_filename);logger.info(f"Writing video:{output_vid_path} (Dur:{final_composite_video_clip.duration:.2f}s)")
-                 final_composite_video_clip.write_videofile(output_vid_path,fps=fps,codec='libx264',preset='medium',audio_codec='aac',temp_audiofile=os.path.join(self.output_dir,f'temp-audio-{os.urandom(4).hex()}.m4a'),remove_temp=True,threads=os.cpu_count()or 2,logger='bar',bitrate="5000k",ffmpeg_params=["-pix_fmt", "yuv420p"])
-                 logger.info(f"Video created:{output_vid_path}");return output_vid_path
              else:logger.error("Final clip invalid. No write.");return None
-         except Exception as e_vid_write:logger.error(f"Video write error:{e_vid_write}",exc_info=True);return None
          finally:
              logger.debug("Closing all MoviePy clips in `assemble_animatic_from_assets` finally block.")
-             all_clips_to_close_list = processed_clips + ([narration_clip_mvpy] if narration_clip_mvpy else []) + ([final_composite_video_clip] if final_composite_video_clip else [])
-             for clip_to_close_item in all_clips_to_close_list:
-                 if clip_to_close_item and hasattr(clip_to_close_item, 'close'):
-                     try: clip_to_close_item.close()
-                     except Exception as e_final_close: logger.warning(f"Ignoring error while closing a clip: {type(clip_to_close_item).__name__} - {e_final_close}")

  # core/visual_engine.py
  from PIL import Image, ImageDraw, ImageFont, ImageOps
+ # --- MONKEY PATCH ---
+ try:
+     if hasattr(Image, 'Resampling') and hasattr(Image.Resampling, 'LANCZOS'):
+         if not hasattr(Image, 'ANTIALIAS'): Image.ANTIALIAS = Image.Resampling.LANCZOS
+     elif hasattr(Image, 'LANCZOS'):
+         if not hasattr(Image, 'ANTIALIAS'): Image.ANTIALIAS = Image.LANCZOS
+     elif not hasattr(Image, 'ANTIALIAS'): print("WARNING: Pillow ANTIALIAS/Resampling issue.")
+ except Exception as e_mp: print(f"WARNING: ANTIALIAS patch error: {e_mp}")
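+ # Why this patch exists: older MoviePy releases still reference Image.ANTIALIAS,
+ # which newer Pillow versions removed in favour of Image.Resampling.LANCZOS.
+ # Aliasing it before the moviepy import below keeps resize effects working
+ # across Pillow versions.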
+
+ from moviepy.editor import (ImageClip, VideoFileClip, concatenate_videoclips, TextClip,
+                             CompositeVideoClip, AudioFileClip)
+ import moviepy.video.fx.all as vfx
  import numpy as np
  import os
  import openai
  import random
  import logging

  logger = logging.getLogger(__name__)
+ logger.setLevel(logging.INFO)

+ # --- SERVICE CLIENT IMPORTS (Keep as before) ---
+ ELEVENLABS_CLIENT_IMPORTED = False; ElevenLabsAPIClient = None; Voice = None; VoiceSettings = None
  try:
      from elevenlabs.client import ElevenLabs as ImportedElevenLabsClient
      from elevenlabs import Voice as ImportedVoice, VoiceSettings as ImportedVoiceSettings
+     ElevenLabsAPIClient = ImportedElevenLabsClient; Voice = ImportedVoice; VoiceSettings = ImportedVoiceSettings
+     ELEVENLABS_CLIENT_IMPORTED = True; logger.info("ElevenLabs client components imported.")
+ except Exception as e_eleven: logger.warning(f"ElevenLabs client import failed: {e_eleven}. Audio disabled.")

+ RUNWAYML_SDK_IMPORTED = False; RunwayMLClient = None # Placeholder
  try:
+     # from runwayml import RunwayClient # Hypothetical actual import
+     # RunwayMLClient = RunwayClient
+     # RUNWAYML_SDK_IMPORTED = True
+     logger.info("RunwayML SDK import is a placeholder.")
+ except ImportError: logger.warning("RunwayML SDK (placeholder) not found. RunwayML disabled.")
+ except Exception as e_runway_sdk: logger.warning(f"Error importing RunwayML SDK (placeholder): {e_runway_sdk}. RunwayML disabled.")
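+ # (For reference, the revision this diff removes imported the real SDK with
+ #  `from runwayml import RunwayML`; this placeholder block stands in for that
+ #  import until the SDK integration is reinstated.)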
 
 


  class VisualEngine:
      def __init__(self, output_dir="temp_cinegen_media", default_elevenlabs_voice_id="Rachel"):
          self.output_dir = output_dir
          os.makedirs(self.output_dir, exist_ok=True)
+         self.font_filename = "DejaVuSans-Bold.ttf"
+         font_paths_to_try = [ self.font_filename, "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf", "/usr/share/fonts/truetype/liberation/LiberationSans-Bold.ttf", "/System/Library/Fonts/Supplemental/Arial.ttf", "C:/Windows/Fonts/arial.ttf", "/usr/local/share/fonts/truetype/mycustomfonts/arial.ttf"]
+         self.font_path_pil = next((p for p in font_paths_to_try if os.path.exists(p)), None)
+         self.font_size_pil = 20; self.video_overlay_font_size = 30; self.video_overlay_font_color = 'white'
+         self.video_overlay_font = 'DejaVu-Sans-Bold'
+         try:
+             self.font = ImageFont.truetype(self.font_path_pil, self.font_size_pil) if self.font_path_pil else ImageFont.load_default()
+             if self.font_path_pil: logger.info(f"Pillow font: {self.font_path_pil}.")
+             else: logger.warning("Default Pillow font."); self.font_size_pil = 10
+         except IOError as e_font: logger.error(f"Pillow font IOError: {e_font}. Default."); self.font = ImageFont.load_default(); self.font_size_pil = 10
          self.openai_api_key = None; self.USE_AI_IMAGE_GENERATION = False; self.dalle_model = "dall-e-3"; self.image_size_dalle3 = "1792x1024"
          self.video_frame_size = (1280, 720)
          self.elevenlabs_api_key = None; self.USE_ELEVENLABS = False; self.elevenlabs_client = None; self.elevenlabs_voice_id = default_elevenlabs_voice_id
          if VoiceSettings and ELEVENLABS_CLIENT_IMPORTED: self.elevenlabs_voice_settings = VoiceSettings(stability=0.60, similarity_boost=0.80, style=0.15, use_speaker_boost=True)
          else: self.elevenlabs_voice_settings = None
          self.pexels_api_key = None; self.USE_PEXELS = False
+         self.runway_api_key = None; self.USE_RUNWAYML = False; self.runway_client = None # Placeholder client
          logger.info("VisualEngine initialized.")

+     # --- API Key Setters (Keep as before) ---
+     def set_openai_api_key(self,k): self.openai_api_key=k; self.USE_AI_IMAGE_GENERATION=bool(k); logger.info(f"DALL-E ({self.dalle_model}) {'Ready.' if k else 'Disabled.'}")
+     def set_elevenlabs_api_key(self,api_key, voice_id_from_secret=None):
+         self.elevenlabs_api_key=api_key
          if voice_id_from_secret: self.elevenlabs_voice_id = voice_id_from_secret
          if api_key and ELEVENLABS_CLIENT_IMPORTED and ElevenLabsAPIClient:
+             try: self.elevenlabs_client = ElevenLabsAPIClient(api_key=api_key); self.USE_ELEVENLABS=bool(self.elevenlabs_client); logger.info(f"ElevenLabs Client {'Ready' if self.USE_ELEVENLABS else 'Failed Init'} (Voice ID: {self.elevenlabs_voice_id}).")
+             except Exception as e: logger.error(f"ElevenLabs client init error: {e}. Disabled.", exc_info=True); self.USE_ELEVENLABS=False
+         else: self.USE_ELEVENLABS=False; logger.info("ElevenLabs Disabled (no key or SDK).")
+     def set_pexels_api_key(self,k): self.pexels_api_key=k; self.USE_PEXELS=bool(k); logger.info(f"Pexels Search {'Ready.' if k else 'Disabled.'}")
+     def set_runway_api_key(self, k): # For RunwayML
+         self.runway_api_key = k
+         if k: # For Gen-4, we might not need an SDK client if using direct HTTP, or an SDK client might be initialized here
+             # if RUNWAYML_SDK_IMPORTED and RunwayMLClient:
+             #     try:
+             #         # self.runway_client = RunwayMLClient(api_key=k) # Actual SDK client init
+             #         self.USE_RUNWAYML = True; logger.info("RunwayML Client (Placeholder SDK) Ready.")
+             #     except Exception as e: logger.error(f"RunwayML client init error: {e}", exc_info=True); self.USE_RUNWAYML = False
+             # else: # No SDK, or direct HTTP calls are planned
+             self.USE_RUNWAYML = True; logger.info("RunwayML API Key set. (SDK integration is placeholder).")
+         else: self.USE_RUNWAYML = False; logger.info("RunwayML Disabled (no API key).")

+     # --- Helper Methods _get_text_dimensions, _create_placeholder_image_content, _search_pexels_image (Keep as before) ---
+     def _get_text_dimensions(self, tc, fo):
+         di = fo.size if hasattr(fo, 'size') else self.font_size_pil
+         if not tc: return 0, di
+         if hasattr(fo, 'getbbox'):
+             b = fo.getbbox(tc); return b[2]-b[0], (b[3]-b[1]) if (b[3]-b[1]) > 0 else di
+         if hasattr(fo, 'getsize'):
+             s = fo.getsize(tc); return s[0], s[1] if s[1] > 0 else di
+         return int(len(tc)*di*0.6), int(di*1.2)
+     def _create_placeholder_image_content(self,td,fn,sz=None):
+         if sz is None: sz = self.video_frame_size
+         img=Image.new('RGB',sz,color=(20,20,40));d=ImageDraw.Draw(img);pd=25;mw=sz[0]-(2*pd);ls=[]
+         if not td: td="(Placeholder Image)"
+         ws=td.split();cl=""
+         for w in ws:
+             tl=cl+w+" ";raw_w,_=self._get_text_dimensions(tl,self.font)
+             line_w=raw_w if raw_w>0 else len(tl)*(self.font_size_pil*0.6)
+             if line_w<=mw: cl=tl
+             else:
+                 if cl.strip(): ls.append(cl.strip())
+                 cl=w+" "
+         if cl.strip(): ls.append(cl.strip())
+         if not ls and td: ls.append(td[:int(mw//(self._get_text_dimensions("A",self.font)[0] or 10))]+"...")
+         elif not ls: ls.append("(Placeholder Error)")
+         _,slh=self._get_text_dimensions("Ay",self.font);slh=slh if slh>0 else self.font_size_pil+2
+         mld=min(len(ls),(sz[1]-(2*pd))//(slh+2)) if slh>0 else 1
+         if mld<=0: mld=1
+         yt=pd+(sz[1]-(2*pd)-mld*(slh+2))/2.0
+         for i in range(mld):
+             lc=ls[i];lw,_=self._get_text_dimensions(lc,self.font);xt=(sz[0]-lw)/2.0
+             d.text((xt,yt),lc,font=self.font,fill=(200,200,180));yt+=slh+2
+             if i==6 and mld>7:
+                 d.text((xt,yt),"...",font=self.font,fill=(200,200,180));break
+         fp=os.path.join(self.output_dir,fn)
+         try: img.save(fp); return fp
+         except Exception as e: logger.error(f"Save placeholder img {fp}: {e}",exc_info=True); return None
+     def _search_pexels_image(self, q, ofnb):
+         if not self.USE_PEXELS or not self.pexels_api_key: return None
+         h={"Authorization":self.pexels_api_key};p={"query":q,"per_page":1,"orientation":"landscape","size":"large2x"}
+         pfn=ofnb.replace(".png",f"_pexels_{random.randint(1000,9999)}.jpg").replace(".mp4",f"_pexels_{random.randint(1000,9999)}.jpg")
+         fp=os.path.join(self.output_dir,pfn)
+         try:
+             logger.info(f"Pexels search: '{q}'");eq=" ".join(q.split()[:5]);p["query"]=eq
+             r=requests.get("https://api.pexels.com/v1/search",headers=h,params=p,timeout=20)
+             r.raise_for_status();d=r.json()
+             if d.get("photos") and len(d["photos"])>0:
+                 pu=d["photos"][0]["src"]["large2x"]
+                 ir=requests.get(pu,timeout=60);ir.raise_for_status()
+                 img=Image.open(io.BytesIO(ir.content))  # named img (not id) to avoid shadowing the builtin
+                 if img.mode!='RGB': img=img.convert('RGB')
+                 img.save(fp);logger.info(f"Pexels saved: {fp}");return fp
+             else: logger.info(f"No Pexels for: '{eq}'");return None
+         except Exception as e: logger.error(f"Pexels error ('{q}'): {e}",exc_info=True);return None

+     # --- RunwayML Video Generation (Gen-4 Aligned Placeholder) ---
+     def _generate_video_clip_with_runwayml(self, text_prompt_for_motion, input_image_path, scene_identifier_filename_base, target_duration_seconds=5):
+         """
+         Placeholder for Runway Gen-4. Requires an input image and a text prompt for motion.
+         target_duration_seconds should ideally be 5 or 10 for Gen-4.
+         """
+         if not self.USE_RUNWAYML or not self.runway_api_key:
+             logger.warning("RunwayML not enabled/API key missing. Cannot generate video clip.")
+             return None
+         if not input_image_path or not os.path.exists(input_image_path):
+             logger.error(f"Runway Gen-4 requires an input image. Path not provided or invalid: {input_image_path}")
+             return None
+
+         # Gen-4 produces 5s or 10s clips. We aim for the closest, or let the user choose via app.py
+         # if more control is needed. target_duration_seconds from Gemini/user is a suggestion; the
+         # actual API call would specify duration if supported, or the model has fixed outputs.
+         runway_duration_param = 10 if target_duration_seconds > 7 else 5 # Map to 5s or 10s
+
+         base_vid_name, _ = os.path.splitext(scene_identifier_filename_base) # base may arrive without ".png", so splitext is safer than str.replace
+         output_video_filename = base_vid_name + f"_runway_gen4_d{runway_duration_param}s.mp4"
+         output_video_filepath = os.path.join(self.output_dir, output_video_filename)
+
+         logger.info(f"Attempting Runway Gen-4 (Placeholder) with image: {os.path.basename(input_image_path)}, motion prompt: '{text_prompt_for_motion[:100]}...', target duration: {runway_duration_param}s")
+
+         # --- ACTUAL RUNWAY GEN-4 API/SDK CALL WOULD GO HERE ---
+         # This would involve:
+         # 1. Uploading input_image_path (if the API requires it, or providing a URL).
+         # 2. Submitting the job with text_prompt_for_motion and desired parameters (duration, seed, etc.).
+         # 3. Polling for completion.
+         # 4. Downloading the resulting video to output_video_filepath.
+         # Example (very hypothetical SDK structure):
+         # try:
+         #     if not self.runway_client: self.runway_client = RunwayMLClient(api_key=self.runway_api_key)
+         #     runway_task = self.runway_client.gen4.generate(
+         #         image_path=input_image_path,
+         #         text_prompt=text_prompt_for_motion,
+         #         duration_seconds=runway_duration_param, # Or let model default
+         #         # ... other Gen-4 parameters like seed, motion_score, upscale, etc.
+         #     )
+         #     runway_task.wait_for_completion() # Blocks until done
+         #     if runway_task.status == 'succeeded':
+         #         runway_task.download_video(output_video_filepath)
+         #         logger.info(f"Runway Gen-4 video saved to: {output_video_filepath}")
+         #         return output_video_filepath
+         #     else:
+         #         logger.error(f"Runway Gen-4 task failed. Status: {runway_task.status}, Error: {runway_task.error_message}")
+         #         return None
+         # except Exception as e_runway:
+         #     logger.error(f"Error during actual Runway Gen-4 call: {e_runway}", exc_info=True)
+         #     return None
+         # --- END ACTUAL RUNWAY GEN-4 API/SDK CALL ---
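+         # A concrete reference point: the version of this file being replaced drove the real SDK
+         # with client.image_to_video.create(model='gen4_turbo', prompt_image=<data URI>,
+         # prompt_text=..., duration=..., ratio=...), polled client.tasks.retrieve(id=...) every 10s
+         # until the status reached 'SUCCEEDED' or 'FAILED'/'ABORTED', and downloaded the task's
+         # output URL with requests. A reinstated implementation could follow that same shape.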

+         logger.warning("Using PLACEHOLDER video generation for Runway Gen-4.")
+         # Create a dummy video using the input image as a static frame for the placeholder
          try:
+             img_clip = ImageClip(input_image_path).set_duration(runway_duration_param)
+             # Add a text overlay to indicate it's a placeholder
+             txt = f"Runway Gen-4 Placeholder\nInput: {os.path.basename(input_image_path)}\nMotion: {text_prompt_for_motion[:50]}..."
+             txt_clip = TextClip(txt, fontsize=24, color='white', font=self.video_overlay_font,
+                                 bg_color='rgba(0,0,0,0.5)', size=(self.video_frame_size[0]*0.8, None),
+                                 method='caption').set_duration(runway_duration_param).set_position('center')
+             final_placeholder_clip = CompositeVideoClip([img_clip, txt_clip], size=img_clip.size)
+             final_placeholder_clip.write_videofile(output_video_filepath, fps=24, codec='libx264', preset='ultrafast', logger=None, threads=2)
+             logger.info(f"Runway Gen-4 placeholder video saved: {output_video_filepath}")
+             if hasattr(img_clip, 'close'): img_clip.close()
+             if hasattr(txt_clip, 'close'): txt_clip.close()
+             if hasattr(final_placeholder_clip, 'close'): final_placeholder_clip.close()
+             return output_video_filepath
+         except Exception as e_placeholder:
+             logger.error(f"Failed to create Runway Gen-4 placeholder video: {e_placeholder}", exc_info=True)
+             return None

+     def _create_placeholder_video_content(self, text_description, filename, duration=4, size=None): # Generic placeholder if input_image not available
+         # ... (Keep as before, used if Runway is selected but input image gen fails) ...
+         if size is None: size = self.video_frame_size
+         fp = os.path.join(self.output_dir, filename); tc = None
          try:
+             tc = TextClip(text_description, fontsize=50, color='white', font=self.video_overlay_font, bg_color='black', size=size, method='caption').set_duration(duration)
+             tc.write_videofile(fp, fps=24, codec='libx264', preset='ultrafast', logger=None, threads=2)
+             logger.info(f"Generic placeholder video: {fp}"); return fp
          except Exception as e: logger.error(f"Generic placeholder video error {fp}: {e}", exc_info=True); return None
          finally:
              if tc and hasattr(tc, 'close'): tc.close()
201
+ # --- generate_scene_asset (Updated for Gen-4 Workflow) ---
202
+ def generate_scene_asset(self, image_generation_prompt_text, # For DALL-E / Pexels
203
+ motion_prompt_text_for_video, # For Runway Gen-4 (motion only)
204
  scene_data, scene_identifier_filename_base,
205
  generate_as_video_clip=False, runway_target_duration=5):
 
206
  base_name, _ = os.path.splitext(scene_identifier_filename_base)
207
+ asset_info = {'path': None, 'type': 'none', 'error': True, 'prompt_used': image_generation_prompt_text, 'error_message': 'Generation not attempted'}
208
+
209
+ # STEP 1: Generate the input image (DALL-E/Pexels/Placeholder) regardless of final asset type if video is chosen.
210
+ # This image will serve as the base for Runway Gen-4 if generate_as_video_clip is True.
211
  input_image_for_runway_path = None
212
+ image_filename_with_ext = base_name + "_base_image.png" # Differentiate base image filename
213
+ image_filepath = os.path.join(self.output_dir, image_filename_with_ext)
214
+ temp_image_asset_info = {'error': True, 'prompt_used': image_generation_prompt_text, 'error_message': 'Base image generation not attempted'}
215
+
216
  if self.USE_AI_IMAGE_GENERATION and self.openai_api_key:
217
+ max_r, att_n = 2, 0
218
+ for att_n in range(max_r):
219
+ try:
220
+ logger.info(f"Attempt {att_n+1} DALL-E (for base image): {image_generation_prompt_text[:100]}...")
221
+ cl = openai.OpenAI(api_key=self.openai_api_key, timeout=90.0)
222
+ r = cl.images.generate(model=self.dalle_model, prompt=image_generation_prompt_text, n=1, size=self.image_size_dalle3, quality="hd", response_format="url", style="vivid")
223
+ iu = r.data[0].url; rp = getattr(r.data[0], 'revised_prompt', None)
224
+ if rp: logger.info(f"DALL-E revised: {rp[:100]}...")
225
+ ir = requests.get(iu, timeout=120); ir.raise_for_status()
226
+ id_img = Image.open(io.BytesIO(ir.content)) # Renamed to avoid conflict
227
+ if id_img.mode != 'RGB': id_img = id_img.convert('RGB')
228
+ id_img.save(image_filepath); logger.info(f"DALL-E base image saved: {image_filepath}");
229
+ input_image_for_runway_path = image_filepath
230
+ temp_image_asset_info = {'path': image_filepath, 'type': 'image', 'error': False, 'prompt_used': image_generation_prompt_text, 'revised_prompt': rp}
231
+ break # Success
232
+ except openai.RateLimitError as e: logger.warning(f"OpenAI Rate Limit {att_n+1}: {e}. Retry..."); time.sleep(5*(att_n+1)); temp_image_asset_info['error_message']=str(e)
233
+ except Exception as e: logger.error(f"DALL-E error: {e}", exc_info=True); temp_image_asset_info['error_message']=str(e); break
234
+ if temp_image_asset_info['error']: logger.warning(f"DALL-E failed after {att_n+1} attempts for base image.")
 
 
 
 
235
 
236
+ if temp_image_asset_info['error'] and self.USE_PEXELS : # Try Pexels if DALL-E failed
237
+ pqt = scene_data.get('pexels_search_query_감독', f"{scene_data.get('emotional_beat','')} {scene_data.get('setting_description','')}")
238
+ pp = self._search_pexels_image(pqt, image_filename_with_ext) # Pass base image filename
239
+ if pp: input_image_for_runway_path = pp; temp_image_asset_info = {'path': pp, 'type': 'image', 'error': False, 'prompt_used': f"Pexels: {pqt}"}
240
+ else: current_em = temp_image_asset_info.get('error_message',""); temp_image_asset_info['error_message']=(current_em + " Pexels failed.").strip()
 
 
 
 
241
 
242
+ if temp_image_asset_info['error']: # Fallback to placeholder for base image
243
+ logger.warning("Base image generation (DALL-E/Pexels) failed. Using placeholder for base image.")
244
+ ppt = temp_image_asset_info.get('prompt_used', image_generation_prompt_text)
245
+ php = self._create_placeholder_image_content(f"[Base Img Placeholder] {ppt[:100]}...", image_filename_with_ext)
246
+ if php: input_image_for_runway_path = php; temp_image_asset_info = {'path': php, 'type': 'image', 'error': False, 'prompt_used': ppt}
247
+ else: current_em=temp_image_asset_info.get('error_message',"");temp_image_asset_info['error_message']=(current_em + " Base placeholder failed.").strip()
248
+
249
+ # STEP 2: If video clip is requested and base image was successfully created, generate video with RunwayML
250
  if generate_as_video_clip:
251
+ if self.USE_RUNWAYML and input_image_for_runway_path:
252
+ logger.info(f"Proceeding to Runway Gen-4 video clip generation for {base_name} using base image: {input_image_for_runway_path}")
253
+ video_path = self._generate_video_clip_with_runwayml(
254
+ text_prompt_for_motion=motion_prompt_text_for_video, # Use the motion-specific prompt
255
+ input_image_path=input_image_for_runway_path,
256
+ scene_identifier_filename_base=base_name, # Will append _runway_gen4.mp4
257
+ target_duration_seconds=runway_target_duration
258
+ )
+         if video_path and os.path.exists(video_path):
+             asset_info = {'path': video_path, 'type': 'video', 'error': False, 'prompt_used': motion_prompt_text_for_video, 'base_image_path': input_image_for_runway_path}
+             return asset_info  # Video generated successfully
+         else:
+             logger.warning(f"RunwayML video clip generation failed for {base_name}. Using the base image as fallback.")
+             asset_info = temp_image_asset_info  # Fall back to the base image
+             asset_info['error'] = True  # The video step failed, even though the base image may be fine
+             asset_info['error_message'] = "RunwayML video generation step failed; using base image."
+             asset_info['type'] = 'image'  # Explicitly mark the fallback asset as an image
+             return asset_info
+     elif not self.USE_RUNWAYML:
+         logger.warning("RunwayML selected but not enabled/configured. Using base image.")
+         asset_info = temp_image_asset_info
+         asset_info['error_message'] = "RunwayML disabled; using base image."
+         asset_info['type'] = 'image'
+         return asset_info
+     else:  # No input_image_for_runway_path available
+         logger.error("Cannot generate RunwayML video: base image generation failed entirely.")
+         asset_info = temp_image_asset_info  # Already carries error=True
+         asset_info['error_message'] = (asset_info.get('error_message', "") + " Base image failed, so Runway video not attempted.").strip()
+         asset_info['type'] = 'image'  # The failed attempt produced, at most, an image
+         return asset_info
+ else:  # An image was requested directly
+     asset_info = temp_image_asset_info  # Return the base image generation result
+     return asset_info
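+ # Shape of the asset_info dict returned by this method, as constructed above:
+ #   {'path': str, 'type': 'image' | 'video', 'error': bool, 'prompt_used': str,
+ #    'error_message': str (failure paths only), 'base_image_path': str (video assets only)}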
+ # --- generate_narration_audio (Keep as before) ---
  def generate_narration_audio(self, text_to_narrate, output_filename="narration_overall.mp3"):
      if not self.USE_ELEVENLABS or not self.elevenlabs_client or not text_to_narrate:
          logger.info("11L skip.")
          return None
      afp = os.path.join(self.output_dir, output_filename)
      try:
          logger.info(f"11L audio (Voice:{self.elevenlabs_voice_id}): {text_to_narrate[:70]}...")
          asm = None
+         # Pick whichever audio API this ElevenLabs SDK version exposes.
+         if hasattr(self.elevenlabs_client, 'text_to_speech') and hasattr(self.elevenlabs_client.text_to_speech, 'stream'):
+             asm = self.elevenlabs_client.text_to_speech.stream
+             logger.info("11L .text_to_speech.stream()")
+         elif hasattr(self.elevenlabs_client, 'generate_stream'):
+             asm = self.elevenlabs_client.generate_stream
+             logger.info("11L .generate_stream()")
+         elif hasattr(self.elevenlabs_client, 'generate'):
+             logger.info("11L .generate()")
+             vp = Voice(voice_id=str(self.elevenlabs_voice_id), settings=self.elevenlabs_voice_settings) if Voice and self.elevenlabs_voice_settings else str(self.elevenlabs_voice_id)
+             ab = self.elevenlabs_client.generate(text=text_to_narrate, voice=vp, model="eleven_multilingual_v2")
              with open(afp, "wb") as f:
                  f.write(ab)
              logger.info(f"11L audio (non-stream): {afp}")
              return afp
          else:
              logger.error("No 11L audio method.")
              return None
          if asm:
              vps = {"voice_id": str(self.elevenlabs_voice_id)}
+             if self.elevenlabs_voice_settings:
+                 # Newer SDKs use pydantic models for settings; pass a plain dict when possible.
+                 if hasattr(self.elevenlabs_voice_settings, 'model_dump'):
+                     vps["voice_settings"] = self.elevenlabs_voice_settings.model_dump()
                  else:
                      vps["voice_settings"] = self.elevenlabs_voice_settings
              adi = asm(text=text_to_narrate, model_id="eleven_multilingual_v2", **vps)
              with open(afp, "wb") as f:
+                 for c in adi:
+                     if c:
+                         f.write(c)
              logger.info(f"11L audio (stream): {afp}")
              return afp
+     except Exception as e:
+         logger.error(f"11L audio error: {e}", exc_info=True)
+         return None
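+ # Hypothetical usage sketch (caller names assumed, not part of this file):
+ #     narration_path = engine.generate_narration_audio("A storm gathers over the city.")
+ #     engine.assemble_animatic_from_assets(assets, overall_narration_path=narration_path)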
+ # --- assemble_animatic_from_assets (robust version from previous step: C-contiguous arrays and debug saves) ---
  def assemble_animatic_from_assets(self, asset_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24):
      if not asset_data_list:
          logger.warning("No assets for animatic.")
          return None
+     processed_clips = []; narration_clip = None; final_clip = None
      logger.info(f"Assembling from {len(asset_data_list)} assets. Frame: {self.video_frame_size}.")
+
+     for i, asset_info in enumerate(asset_data_list):
+         asset_path, asset_type, scene_dur = asset_info.get('path'), asset_info.get('type'), asset_info.get('duration', 4.5)
+         scene_num, key_action = asset_info.get('scene_num', i + 1), asset_info.get('key_action', '')
+         logger.info(f"S{scene_num}: Path='{asset_path}', Type='{asset_type}', Dur='{scene_dur}'s")
+
+         if not (asset_path and os.path.exists(asset_path)):
+             logger.warning(f"S{scene_num}: Not found '{asset_path}'. Skip.")
+             continue
+         if scene_dur <= 0:
+             logger.warning(f"S{scene_num}: Invalid duration ({scene_dur}s). Skip.")
+             continue
+
+         current_scene_mvpy_clip = None
          try:
+             if asset_type == 'image':
+                 pil_img = Image.open(asset_path)
+                 logger.debug(f"S{scene_num}: Loaded img. Mode:{pil_img.mode}, Size:{pil_img.size}")
+                 img_rgba = pil_img.convert('RGBA') if pil_img.mode != 'RGBA' else pil_img.copy()
+                 thumb = img_rgba.copy()
+                 rf = Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.BILINEAR  # Guard on Image itself, since Image.Resampling may not exist on older Pillow
+                 thumb.thumbnail(self.video_frame_size, rf)
+                 cv_rgba = Image.new('RGBA', self.video_frame_size, (0, 0, 0, 0))
+                 xo, yo = (self.video_frame_size[0] - thumb.width) // 2, (self.video_frame_size[1] - thumb.height) // 2
+                 cv_rgba.paste(thumb, (xo, yo), thumb)
+                 final_rgb_pil = Image.new("RGB", self.video_frame_size, (0, 0, 0))
+                 final_rgb_pil.paste(cv_rgba, mask=cv_rgba.split()[3])
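+                 # The thumbnail is centered on a transparent canvas and flattened onto black:
+                 # letterboxing/pillarboxing so every frame comes out exactly video_frame_size.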
+
+                 # CRITICAL DEBUG: save the exact image that is fed into the NumPy array
+                 dbg_path = os.path.join(self.output_dir, f"debug_PRE_NUMPY_S{scene_num}.png")
+                 final_rgb_pil.save(dbg_path)
+                 logger.info(f"DEBUG: Saved PRE_NUMPY_S{scene_num} to {dbg_path}")
+
+                 frame_np = np.array(final_rgb_pil, dtype=np.uint8)
+                 if not frame_np.flags['C_CONTIGUOUS']:
+                     frame_np = np.ascontiguousarray(frame_np, dtype=np.uint8)
+                 logger.debug(f"S{scene_num}: NumPy for MoviePy. Shape:{frame_np.shape}, DType:{frame_np.dtype}, C-Contig:{frame_np.flags['C_CONTIGUOUS']}")
+                 if frame_np.size == 0 or frame_np.ndim != 3 or frame_np.shape[2] != 3:
+                     logger.error(f"S{scene_num}: Invalid NumPy. Skip.")
+                     continue
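+                 # FFmpeg consumes raw frame buffers via MoviePy; a non-C-contiguous array can show up
+                 # as skewed or garbled frames, hence the ascontiguousarray guard above.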
+
+                 clip_base = ImageClip(frame_np, transparent=False).set_duration(scene_dur)
+                 # CRITICAL DEBUG: save a frame rendered back out of the MoviePy clip
+                 mvpy_dbg_path = os.path.join(self.output_dir, f"debug_MOVIEPY_FRAME_S{scene_num}.png")
+                 clip_base.save_frame(mvpy_dbg_path, t=0.1)
+                 logger.info(f"DEBUG: Saved MOVIEPY_FRAME_S{scene_num} to {mvpy_dbg_path}")
+
+                 clip_fx = clip_base
+                 try:
+                     es = random.uniform(1.03, 1.08)
+                     clip_fx = clip_base.fx(vfx.resize, lambda t: 1 + (es - 1) * (t / scene_dur) if scene_dur > 0 else 1).set_position('center')
+                 except Exception as e:
+                     logger.error(f"S{scene_num} Ken Burns error: {e}", exc_info=False)
+                 current_scene_mvpy_clip = clip_fx
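+                 # Ken Burns effect above: scale(t) = 1 + (es - 1) * t / scene_dur, a linear push-in from 1.0 to es over the scene.
+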
+             elif asset_type == 'video':
+                 src_clip = None
+                 try:
+                     src_clip = VideoFileClip(asset_path, target_resolution=(self.video_frame_size[1], self.video_frame_size[0]) if self.video_frame_size else None)
+                     tmp_clip = src_clip
+                     if src_clip.duration != scene_dur:
+                         if src_clip.duration > scene_dur:
+                             tmp_clip = src_clip.subclip(0, scene_dur)
+                         else:
+                             if scene_dur / src_clip.duration > 1.5 and src_clip.duration > 0.1:
+                                 tmp_clip = src_clip.loop(duration=scene_dur)
+                             else:
+                                 tmp_clip = src_clip.set_duration(src_clip.duration)
+                                 logger.info(f"S{scene_num} Video clip ({src_clip.duration:.2f}s) shorter than target ({scene_dur:.2f}s).")
+                     current_scene_mvpy_clip = tmp_clip.set_duration(scene_dur)
+                     if current_scene_mvpy_clip.size != list(self.video_frame_size):
+                         current_scene_mvpy_clip = current_scene_mvpy_clip.resize(self.video_frame_size)
+                 except Exception as e:
+                     logger.error(f"S{scene_num} Video load error '{asset_path}':{e}", exc_info=True)
+                     continue
+                 finally:
+                     if src_clip and src_clip is not current_scene_mvpy_clip and hasattr(src_clip, 'close'):
+                         src_clip.close()
+             else:
+                 logger.warning(f"S{scene_num} Unknown asset type '{asset_type}'. Skip.")
+                 continue
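+             # Looping heuristic above: a short source is looped only when it must cover more than
+             # 1.5x its native length; otherwise it keeps its own duration before set_duration is applied.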
+
+             if current_scene_mvpy_clip and key_action:
+                 try:
+                     to_dur = min(current_scene_mvpy_clip.duration - 0.5, current_scene_mvpy_clip.duration * 0.8) if current_scene_mvpy_clip.duration > 0.5 else current_scene_mvpy_clip.duration
+                     to_start = 0.25  # Fixed lead-in; an alternative is centering with (duration - to_dur) / 2.0
+                     txt_c = TextClip(f"Scene {scene_num}\n{key_action}", fontsize=self.video_overlay_font_size, color=self.video_overlay_font_color, font=self.video_overlay_font, bg_color='rgba(10,10,20,0.7)', method='caption', align='West', size=(self.video_frame_size[0] * 0.9, None), kerning=-1, stroke_color='black', stroke_width=1.5).set_duration(to_dur).set_start(to_start).set_position(('center', 0.92), relative=True)
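+                     # ('center', 0.92) with relative=True centers the caption horizontally, 92% of the way down the frame.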
+                     current_scene_mvpy_clip = CompositeVideoClip([current_scene_mvpy_clip, txt_c], size=self.video_frame_size, use_bgclip=True)
+                 except Exception as e:
+                     logger.error(f"S{scene_num} TextClip error:{e}. No text.", exc_info=True)
+             if current_scene_mvpy_clip:
+                 processed_clips.append(current_scene_mvpy_clip)
+                 logger.info(f"S{scene_num} Processed. Dur:{current_scene_mvpy_clip.duration:.2f}s.")
+         except Exception as e:
+             logger.error(f"MAJOR Error S{scene_num} ({asset_path}):{e}", exc_info=True)
+         finally:
+             # Close only clips that were NOT appended; appended clips are still needed for
+             # concatenation and are closed in the outer finally block at the end of this method.
+             if current_scene_mvpy_clip and current_scene_mvpy_clip not in processed_clips and hasattr(current_scene_mvpy_clip, 'close'):
+                 try:
+                     current_scene_mvpy_clip.close()
+                 except Exception:
+                     pass  # Ignore errors during cleanup
 
      if not processed_clips:
          logger.warning("No clips processed. Abort.")
          return None
+     td = 0.75  # Transition duration (seconds)
      try:
          logger.info(f"Concatenating {len(processed_clips)} clips.")
+         if len(processed_clips) > 1:
+             final_clip = concatenate_videoclips(processed_clips, padding=-td if td > 0 else 0, method="compose")
+         elif processed_clips:
+             final_clip = processed_clips[0]
+         if not final_clip:
+             logger.error("Concatenation failed.")
+             return None
+         logger.info(f"Concatenated dur:{final_clip.duration:.2f}s")
+         if td > 0 and final_clip.duration > 0:
+             if final_clip.duration > td * 2:
+                 final_clip = final_clip.fx(vfx.fadein, td).fx(vfx.fadeout, td)
+             else:
+                 final_clip = final_clip.fx(vfx.fadein, min(td, final_clip.duration / 2.0))
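+         # Negative padding with method="compose" above overlaps consecutive clips by td seconds,
+         # so scene changes read as brief transitions rather than hard cuts.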
+         if overall_narration_path and os.path.exists(overall_narration_path) and final_clip.duration > 0:
+             try:
+                 narration_clip = AudioFileClip(overall_narration_path)
+                 final_clip = final_clip.set_audio(narration_clip)
+                 logger.info("Narration added.")
+             except Exception as e:
+                 logger.error(f"Narration add error:{e}", exc_info=True)
+         elif final_clip.duration <= 0:
+             logger.warning("Video no duration. No audio.")
+         if final_clip and final_clip.duration > 0:
+             op = os.path.join(self.output_dir, output_filename)
+             logger.info(f"Writing video:{op} (Dur:{final_clip.duration:.2f}s)")
+             final_clip.write_videofile(op, fps=fps, codec='libx264', preset='medium', audio_codec='aac',
+                                        temp_audiofile=os.path.join(self.output_dir, f'temp-audio-{os.urandom(4).hex()}.m4a'),
+                                        remove_temp=True, threads=os.cpu_count() or 2, logger='bar', bitrate="5000k",
+                                        ffmpeg_params=["-pix_fmt", "yuv420p"])  # yuv420p keeps the output playable in most players and browsers
+             logger.info(f"Video created:{op}")
+             return op
          else:
              logger.error("Final clip invalid. No write.")
              return None
+     except Exception as e:
+         logger.error(f"Video write error:{e}", exc_info=True)
+         return None
      finally:
          logger.debug("Closing all MoviePy clips in `assemble_animatic_from_assets` finally block.")
+         # Close clips individually so one failure does not stop the rest of the cleanup
+         for clip_obj in processed_clips:
+             if clip_obj and hasattr(clip_obj, 'close'):
+                 try:
+                     clip_obj.close()
+                 except Exception as e_close:
+                     logger.warning(f"Ignoring error closing a processed clip: {e_close}")
+         if narration_clip and hasattr(narration_clip, 'close'):
+             try:
+                 narration_clip.close()
+             except Exception as e_close_audio:
+                 logger.warning(f"Ignoring error closing narration clip: {e_close_audio}")
+         if final_clip and hasattr(final_clip, 'close'):
+             try:
+                 final_clip.close()
+             except Exception as e_close_final:
+                 logger.warning(f"Ignoring error closing final composite clip: {e_close_final}")
+ except Exception as e_close_final: logger.warning(f"Ignoring error closing final composite clip: {e_close_final}")