mgbam committed
Commit cd8e0e1 · verified · 1 Parent(s): b6860f8

Update core/visual_engine.py

Files changed (1):
  1. core/visual_engine.py +207 -539
core/visual_engine.py CHANGED
@@ -2,16 +2,13 @@
  from PIL import Image, ImageDraw, ImageFont, ImageOps
  # --- MONKEY PATCH FOR Image.ANTIALIAS ---
  try:
-     if hasattr(Image, 'Resampling') and hasattr(Image.Resampling, 'LANCZOS'): # Pillow 9+
-         if not hasattr(Image, 'ANTIALIAS'):
-             Image.ANTIALIAS = Image.Resampling.LANCZOS
-     elif hasattr(Image, 'LANCZOS'): # Pillow 8
-         if not hasattr(Image, 'ANTIALIAS'):
-             Image.ANTIALIAS = Image.LANCZOS
+     if hasattr(Image, 'Resampling') and hasattr(Image.Resampling, 'LANCZOS'): # Pillow 9+
+         if not hasattr(Image, 'ANTIALIAS'): Image.ANTIALIAS = Image.Resampling.LANCZOS
+     elif hasattr(Image, 'LANCZOS'): # Pillow 8
+         if not hasattr(Image, 'ANTIALIAS'): Image.ANTIALIAS = Image.LANCZOS
      elif not hasattr(Image, 'ANTIALIAS'):
-         print("WARNING: Pillow version lacks common Resampling attributes or ANTIALIAS. Video effects might fail.")
- except Exception as e_mp:
-     print(f"WARNING: ANTIALIAS monkey-patch error: {e_mp}")
+         print("WARNING: Pillow version lacks common Resampling attributes or ANTIALIAS. Video effects might fail.")
+ except Exception as e_mp: print(f"WARNING: ANTIALIAS monkey-patch error: {e_mp}")
  # --- END MONKEY PATCH ---

  from moviepy.editor import (ImageClip, VideoFileClip, concatenate_videoclips, TextClip,
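For context on the hunk above: Pillow 10 removed the long-deprecated `Image.ANTIALIAS` constant, while MoviePy 1.x still references it inside its resize effect, so the module patches the old name back in at import time. A minimal standalone sketch of the same shim (assuming Pillow 9.1+, where the `Image.Resampling` enum exists):

```python
from PIL import Image

# Re-create the alias that Pillow 10 removed, so libraries that still
# reference Image.ANTIALIAS (e.g. MoviePy 1.x's vfx.resize) keep working.
if not hasattr(Image, "ANTIALIAS"):
    Image.ANTIALIAS = Image.Resampling.LANCZOS

img = Image.new("RGB", (640, 360))
print(img.resize((320, 180), Image.ANTIALIAS).size)  # -> (320, 180)
```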
@@ -30,30 +27,20 @@ logger = logging.getLogger(__name__)
  logger.setLevel(logging.INFO)

  # --- ElevenLabs Client Import ---
- ELEVENLABS_CLIENT_IMPORTED = False
- ElevenLabsAPIClient = None
- Voice = None
- VoiceSettings = None
+ ELEVENLABS_CLIENT_IMPORTED = False; ElevenLabsAPIClient = None; Voice = None; VoiceSettings = None
  try:
      from elevenlabs.client import ElevenLabs as ImportedElevenLabsClient
      from elevenlabs import Voice as ImportedVoice, VoiceSettings as ImportedVoiceSettings
-     ElevenLabsAPIClient = ImportedElevenLabsClient
-     Voice = ImportedVoice
-     VoiceSettings = ImportedVoiceSettings
-     ELEVENLABS_CLIENT_IMPORTED = True
-     logger.info("ElevenLabs client components imported.")
- except Exception as e_eleven:
-     logger.warning(f"ElevenLabs client import failed: {e_eleven}. Audio disabled.")
+     ElevenLabsAPIClient = ImportedElevenLabsClient; Voice = ImportedVoice; VoiceSettings = ImportedVoiceSettings
+     ELEVENLABS_CLIENT_IMPORTED = True; logger.info("ElevenLabs client components imported.")
+ except Exception as e_eleven: logger.warning(f"ElevenLabs client import failed: {e_eleven}. Audio disabled.")

  # --- RunwayML Client Import (Placeholder) ---
- RUNWAYML_SDK_IMPORTED = False
- RunwayMLClient = None
+ RUNWAYML_SDK_IMPORTED = False; RunwayMLClient = None
  try:
      logger.info("RunwayML SDK import is a placeholder.")
- except ImportError:
-     logger.warning("RunwayML SDK (placeholder) not found. RunwayML disabled.")
- except Exception as e_runway_sdk:
-     logger.warning(f"Error importing RunwayML SDK (placeholder): {e_runway_sdk}. RunwayML disabled.")
+ except ImportError: logger.warning("RunwayML SDK (placeholder) not found. RunwayML disabled.")
+ except Exception as e_runway_sdk: logger.warning(f"Error importing RunwayML SDK (placeholder): {e_runway_sdk}. RunwayML disabled.")


  class VisualEngine:
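Both import blocks above follow the same guarded-import pattern: sentinel globals are bound first, the import is attempted, and a failure only downgrades a feature flag instead of crashing the app. A generic sketch of the pattern (`some_sdk` and `SomeClient` are hypothetical names, not part of this repo):

```python
import logging

logger = logging.getLogger(__name__)

SOME_SDK_IMPORTED = False  # feature flag consulted later by the engine
SomeClient = None          # placeholder so the name always exists

try:
    from some_sdk import Client as SomeClient  # hypothetical optional dependency
    SOME_SDK_IMPORTED = True
except ImportError as e:
    logger.warning(f"some_sdk unavailable: {e}. Dependent features disabled.")
```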
@@ -65,8 +52,7 @@ class VisualEngine:
              self.font_filename,
              f"/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf",
              f"/usr/share/fonts/truetype/liberation/LiberationSans-Bold.ttf",
-             f"/System/Library/Fonts/Supplemental/Arial.ttf",
-             f"C:/Windows/Fonts/arial.ttf",
+             f"/System/Library/Fonts/Supplemental/Arial.ttf", f"C:/Windows/Fonts/arial.ttf",
              f"/usr/local/share/fonts/truetype/mycustomfonts/arial.ttf"
          ]
          self.font_path_pil = next((p for p in font_paths_to_try if os.path.exists(p)), None)
@@ -76,177 +62,93 @@ class VisualEngine:
          self.video_overlay_font = 'DejaVu-Sans-Bold'

          try:
-             if self.font_path_pil:
-                 self.font = ImageFont.truetype(self.font_path_pil, self.font_size_pil)
-                 logger.info(f"Pillow font loaded: {self.font_path_pil}.")
-             else:
-                 self.font = ImageFont.load_default()
-                 logger.warning("Using default Pillow font.")
-                 self.font_size_pil = 10
-         except IOError as e_font:
-             logger.error(f"Pillow font loading IOError: {e_font}. Using default.")
-             self.font = ImageFont.load_default()
-             self.font_size_pil = 10
+             self.font = ImageFont.truetype(self.font_path_pil, self.font_size_pil) if self.font_path_pil else ImageFont.load_default()
+             if self.font_path_pil: logger.info(f"Pillow font loaded: {self.font_path_pil}.")
+             else: logger.warning("Using default Pillow font."); self.font_size_pil = 10
+         except IOError as e_font: logger.error(f"Pillow font loading IOError: {e_font}. Using default."); self.font = ImageFont.load_default(); self.font_size_pil = 10

-         self.openai_api_key = None
-         self.USE_AI_IMAGE_GENERATION = False
-         self.dalle_model = "dall-e-3"
-         self.image_size_dalle3 = "1792x1024"
+         self.openai_api_key = None; self.USE_AI_IMAGE_GENERATION = False
+         self.dalle_model = "dall-e-3"; self.image_size_dalle3 = "1792x1024"
          self.video_frame_size = (1280, 720)
-         self.elevenlabs_api_key = None
-         self.USE_ELEVENLABS = False
-         self.elevenlabs_client = None
+         self.elevenlabs_api_key = None; self.USE_ELEVENLABS = False; self.elevenlabs_client = None
          self.elevenlabs_voice_id = default_elevenlabs_voice_id
-         if VoiceSettings and ELEVENLABS_CLIENT_IMPORTED:
-             self.elevenlabs_voice_settings = VoiceSettings(
-                 stability=0.60,
-                 similarity_boost=0.80,
-                 style=0.15,
-                 use_speaker_boost=True
-             )
-         else:
-             self.elevenlabs_voice_settings = None
-         self.pexels_api_key = None
-         self.USE_PEXELS = False
-         self.runway_api_key = None
-         self.USE_RUNWAYML = False
-         self.runway_client = None
+         if VoiceSettings and ELEVENLABS_CLIENT_IMPORTED: self.elevenlabs_voice_settings = VoiceSettings(stability=0.60, similarity_boost=0.80, style=0.15, use_speaker_boost=True)
+         else: self.elevenlabs_voice_settings = None
+         self.pexels_api_key = None; self.USE_PEXELS = False
+         self.runway_api_key = None; self.USE_RUNWAYML = False; self.runway_client = None
          logger.info("VisualEngine initialized.")

-     def set_openai_api_key(self, k):
-         self.openai_api_key = k
-         self.USE_AI_IMAGE_GENERATION = bool(k)
-         logger.info(f"DALL-E ({self.dalle_model}) {'Ready.' if k else 'Disabled.'}")
-
-     def set_elevenlabs_api_key(self, api_key, voice_id_from_secret=None):
-         self.elevenlabs_api_key = api_key
-         if voice_id_from_secret:
-             self.elevenlabs_voice_id = voice_id_from_secret
+     def set_openai_api_key(self, k): self.openai_api_key = k; self.USE_AI_IMAGE_GENERATION = bool(k); logger.info(f"DALL-E ({self.dalle_model}) {'Ready.' if k else 'Disabled.'}")
+     def set_elevenlabs_api_key(self, api_key, voice_id_from_secret=None):
+         self.elevenlabs_api_key = api_key
+         if voice_id_from_secret: self.elevenlabs_voice_id = voice_id_from_secret
          if api_key and ELEVENLABS_CLIENT_IMPORTED and ElevenLabsAPIClient:
-             try:
-                 self.elevenlabs_client = ElevenLabsAPIClient(api_key=api_key)
-                 self.USE_ELEVENLABS = bool(self.elevenlabs_client)
-                 logger.info(f"ElevenLabs Client {'Ready' if self.USE_ELEVENLABS else 'Failed Init'} (Voice ID: {self.elevenlabs_voice_id}).")
-             except Exception as e:
-                 logger.error(f"ElevenLabs client init error: {e}. Disabled.", exc_info=True)
-                 self.USE_ELEVENLABS = False
-         else:
-             self.USE_ELEVENLABS = False
-             logger.info("ElevenLabs Disabled (no key or SDK).")
-
-     def set_pexels_api_key(self, k):
-         self.pexels_api_key = k
-         self.USE_PEXELS = bool(k)
-         logger.info(f"Pexels Search {'Ready.' if k else 'Disabled.'}")
-
+             try: self.elevenlabs_client = ElevenLabsAPIClient(api_key=api_key); self.USE_ELEVENLABS = bool(self.elevenlabs_client); logger.info(f"ElevenLabs Client {'Ready' if self.USE_ELEVENLABS else 'Failed Init'} (Voice ID: {self.elevenlabs_voice_id}).")
+             except Exception as e: logger.error(f"ElevenLabs client init error: {e}. Disabled.", exc_info=True); self.USE_ELEVENLABS = False
+         else: self.USE_ELEVENLABS = False; logger.info("ElevenLabs Disabled (no key or SDK).")
+     def set_pexels_api_key(self, k): self.pexels_api_key = k; self.USE_PEXELS = bool(k); logger.info(f"Pexels Search {'Ready.' if k else 'Disabled.'}")
      def set_runway_api_key(self, k):
          self.runway_api_key = k
-         if k and RUNWAYML_SDK_IMPORTED and RunwayMLClient:
-             try:
-                 self.USE_RUNWAYML = True
-                 logger.info(f"RunwayML Client (Placeholder SDK) {'Ready.' if self.USE_RUNWAYML else 'Failed Init.'}")
-             except Exception as e:
-                 logger.error(f"RunwayML client (Placeholder SDK) init error: {e}. Disabled.", exc_info=True)
-                 self.USE_RUNWAYML = False
-         elif k:
-             self.USE_RUNWAYML = True
-             logger.info("RunwayML API Key set (direct API or placeholder).")
-         else:
-             self.USE_RUNWAYML = False
-             logger.info("RunwayML Disabled (no API key).")
+         if k and RUNWAYML_SDK_IMPORTED and RunwayMLClient: # This SDK part is still hypothetical
+             try: self.USE_RUNWAYML = True; logger.info(f"RunwayML Client (Placeholder SDK) {'Ready.' if self.USE_RUNWAYML else 'Failed Init.'}")
+             except Exception as e: logger.error(f"RunwayML client (Placeholder SDK) init error: {e}. Disabled.", exc_info=True); self.USE_RUNWAYML = False
+         elif k: self.USE_RUNWAYML = True; logger.info("RunwayML API Key set (direct API or placeholder).")
+         else: self.USE_RUNWAYML = False; logger.info("RunwayML Disabled (no API key).")

      def _get_text_dimensions(self, text_content, font_obj):
          default_line_height = getattr(font_obj, 'size', self.font_size_pil)
-         if not text_content:
-             return 0, default_line_height
+         if not text_content: return 0, default_line_height
          try:
              if hasattr(font_obj, 'getbbox'):
-                 bbox = font_obj.getbbox(text_content)
-                 width = bbox[2] - bbox[0]
-                 height = bbox[3] - bbox[1]
+                 bbox = font_obj.getbbox(text_content); width = bbox[2] - bbox[0]; height = bbox[3] - bbox[1]
                  return width, height if height > 0 else default_line_height
              elif hasattr(font_obj, 'getsize'):
                  width, height = font_obj.getsize(text_content)
                  return width, height if height > 0 else default_line_height
-             else:
-                 return int(len(text_content) * default_line_height * 0.6), int(default_line_height * 1.2)
-         except Exception as e:
-             logger.warning(f"Error in _get_text_dimensions for '{text_content[:20]}...': {e}")
-             return int(len(text_content) * self.font_size_pil * 0.6), int(self.font_size_pil * 1.2)
+             else: return int(len(text_content) * default_line_height * 0.6), int(default_line_height * 1.2)
+         except Exception as e: logger.warning(f"Error in _get_text_dimensions for '{text_content[:20]}...': {e}"); return int(len(text_content) * self.font_size_pil * 0.6), int(self.font_size_pil * 1.2)

      def _create_placeholder_image_content(self, text_description, filename, size=None):
-         if size is None:
-             size = self.video_frame_size
-         img = Image.new('RGB', size, color=(20, 20, 40))
-         draw = ImageDraw.Draw(img)
-         padding = 25
-         max_text_width = size[0] - (2 * padding)
-         lines = []
-         if not text_description:
-             text_description = "(Placeholder: No text description provided)"
-         words = text_description.split()
-         current_line = ""
+         if size is None: size = self.video_frame_size
+         img = Image.new('RGB', size, color=(20, 20, 40)); draw = ImageDraw.Draw(img)
+         padding = 25; max_text_width = size[0] - (2 * padding); lines = []
+         if not text_description: text_description = "(Placeholder: No text description provided)"
+         words = text_description.split(); current_line = ""
          for word in words:
-             test_line = current_line + word + " "
-             line_width_test, _ = self._get_text_dimensions(test_line.strip(), self.font)
-             if line_width_test <= max_text_width:
-                 current_line = test_line
+             test_line = current_line + word + " "; line_width_test, _ = self._get_text_dimensions(test_line.strip(), self.font)
+             if line_width_test <= max_text_width: current_line = test_line
              else:
-                 if current_line.strip():
-                     lines.append(current_line.strip())
+                 if current_line.strip(): lines.append(current_line.strip())
                  word_width, _ = self._get_text_dimensions(word, self.font)
                  if word_width > max_text_width:
                      avg_char_w = self._get_text_dimensions("A", self.font)[0] or 10
                      chars_that_fit = int(max_text_width / avg_char_w) if avg_char_w > 0 else 10
-                     if len(word) > chars_that_fit:
-                         lines.append(word[:chars_that_fit-3] + "...")
-                     else:
-                         lines.append(word)
+                     lines.append(word[:chars_that_fit-3] + "..." if len(word) > chars_that_fit else word)
                      current_line = ""
-                 else:
-                     current_line = word + " "
-         if current_line.strip():
-             lines.append(current_line.strip())
+                 else: current_line = word + " "
+         if current_line.strip(): lines.append(current_line.strip())
          if not lines and text_description:
-             avg_char_w = self._get_text_dimensions("A", self.font)[0] or 10
-             chars_that_fit = int(max_text_width / avg_char_w) if avg_char_w > 0 else 10
-             if len(text_description) > chars_that_fit:
-                 lines.append(text_description[:chars_that_fit-3] + "...")
-             else:
-                 lines.append(text_description)
-         elif not lines:
-             lines.append("(Placeholder Text Error)")
-         _, single_line_height = self._get_text_dimensions("Ay", self.font)
-         single_line_height = single_line_height if single_line_height > 0 else (self.font_size_pil + 2)
-         line_spacing = 2
-         max_lines_to_display = min(len(lines), (size[1] - (2 * padding)) // (single_line_height + line_spacing)) if single_line_height > 0 else 1
-         if max_lines_to_display <= 0:
-             max_lines_to_display = 1
-         total_text_block_height = max_lines_to_display * single_line_height + (max_lines_to_display - 1) * line_spacing
-         y_text_start = padding + (size[1] - (2 * padding) - total_text_block_height) / 2.0
-         current_y = y_text_start
+             avg_char_w = self._get_text_dimensions("A", self.font)[0] or 10; chars_that_fit = int(max_text_width / avg_char_w) if avg_char_w > 0 else 10
+             lines.append(text_description[:chars_that_fit-3] + "..." if len(text_description) > chars_that_fit else text_description)
+         elif not lines: lines.append("(Placeholder Text Error)")
+         _, single_line_height = self._get_text_dimensions("Ay", self.font); single_line_height = single_line_height if single_line_height > 0 else (self.font_size_pil + 2)
+         line_spacing = 2; max_lines_to_display = min(len(lines), (size[1]-(2*padding))//(single_line_height+line_spacing)) if single_line_height > 0 else 1
+         if max_lines_to_display <= 0: max_lines_to_display = 1
+         total_text_block_height = max_lines_to_display * single_line_height + (max_lines_to_display-1)*line_spacing
+         y_text_start = padding + (size[1]-(2*padding)-total_text_block_height)/2.0; current_y = y_text_start
          for i in range(max_lines_to_display):
-             line_content = lines[i]
-             line_width_actual, _ = self._get_text_dimensions(line_content, self.font)
-             x_text = max(padding, (size[0] - line_width_actual) / 2.0)
-             draw.text((x_text, current_y), line_content, font=self.font, fill=(200, 200, 180))
-             current_y += single_line_height + line_spacing
-             if i == 6 and max_lines_to_display > 7 and len(lines) > max_lines_to_display:
-                 ellipsis_width, _ = self._get_text_dimensions("...", self.font)
-                 x_ellipsis = max(padding, (size[0] - ellipsis_width) / 2.0)
-                 draw.text((x_ellipsis, current_y), "...", font=self.font, fill=(200, 200, 180))
-                 break
+             line_content = lines[i]; line_width_actual, _ = self._get_text_dimensions(line_content, self.font)
+             x_text = max(padding, (size[0]-line_width_actual)/2.0)
+             draw.text((x_text, current_y), line_content, font=self.font, fill=(200,200,180)); current_y += single_line_height + line_spacing
+             if i == 6 and max_lines_to_display > 7 and len(lines) > max_lines_to_display:
+                 ellipsis_width, _ = self._get_text_dimensions("...", self.font); x_ellipsis = max(padding, (size[0]-ellipsis_width)/2.0)
+                 draw.text((x_ellipsis, current_y), "...", font=self.font, fill=(200,200,180)); break
          filepath = os.path.join(self.output_dir, filename)
-         try:
-             img.save(filepath)
-             return filepath
-         except Exception as e:
-             logger.error(f"Error saving placeholder image {filepath}: {e}", exc_info=True)
-             return None
+         try: img.save(filepath); return filepath
+         except Exception as e: logger.error(f"Error saving placeholder image {filepath}: {e}", exc_info=True); return None

      def _search_pexels_image(self, query, output_filename_base):
-         if not self.USE_PEXELS or not self.pexels_api_key:
-             return None
+         if not self.USE_PEXELS or not self.pexels_api_key: return None
          headers = {"Authorization": self.pexels_api_key}
          params = {"query": query, "per_page": 1, "orientation": "landscape", "size": "large2x"}
          base_name, _ = os.path.splitext(output_filename_base)
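A side note on `_get_text_dimensions` in the hunk above: it prefers `getbbox` and falls back to `getsize` because Pillow 10 removed `FreeTypeFont.getsize`, while `getbbox` has been available since Pillow 8. A standalone sketch of that measurement fallback:

```python
from PIL import ImageFont

def text_size(text, font):
    # Prefer getbbox (Pillow 8+); getsize was removed in Pillow 10.
    if hasattr(font, "getbbox"):
        left, top, right, bottom = font.getbbox(text)
        return right - left, bottom - top
    return font.getsize(text)

print(text_size("Hello, world", ImageFont.load_default()))
```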
@@ -275,79 +177,53 @@ class VisualEngine:
              else:
                  logger.info(f"No photos found on Pexels for query: '{effective_query}'")
                  return None
-         except requests.exceptions.RequestException as e_req:
-             logger.error(f"Pexels request error for query '{query}': {e_req}", exc_info=True)
-         except json.JSONDecodeError as e_json:
-             logger.error(f"Pexels JSON decode error for query '{query}': {e_json}", exc_info=True)
-         except Exception as e:
-             logger.error(f"General Pexels error for query '{query}': {e}", exc_info=True)
+         except requests.exceptions.RequestException as e_req: logger.error(f"Pexels request error for query '{query}': {e_req}", exc_info=True)
+         except json.JSONDecodeError as e_json: logger.error(f"Pexels JSON decode error for query '{query}': {e_json}", exc_info=True)
+         except Exception as e: logger.error(f"General Pexels error for query '{query}': {e}", exc_info=True)
          return None

      def _generate_video_clip_with_runwayml(self, pt, iip, sifnb, tds=5):
-         if not self.USE_RUNWAYML or not self.runway_api_key:
-             logger.warning("RunwayML disabled.")
-             return None
-         if not iip or not os.path.exists(iip):
-             logger.error(f"Runway Gen-4 needs input image. Path invalid: {iip}")
-             return None
+         if not self.USE_RUNWAYML or not self.runway_api_key: logger.warning("RunwayML disabled."); return None
+         if not iip or not os.path.exists(iip): logger.error(f"Runway Gen-4 needs input image. Path invalid: {iip}"); return None
          runway_dur = 10 if tds > 7 else 5
-         ovfn = sifnb.replace(".png", f"_runway_gen4_d{runway_dur}s.mp4")
+         ovfn = sifnb.replace(".png", f"_runway_gen4_d{runway_dur}s.mp4") # sifnb should be base name
          ovfp = os.path.join(self.output_dir, ovfn)
          logger.info(f"Runway Gen-4 (Placeholder) img: {os.path.basename(iip)}, motion: '{pt[:100]}...', dur: {runway_dur}s")
          logger.warning("Using PLACEHOLDER video for Runway Gen-4.")
-         img_clip = None
-         txt_c = None
-         final_ph_clip = None
+         img_clip = None; txt_c = None; final_ph_clip = None
          try:
              img_clip = ImageClip(iip).set_duration(runway_dur)
              txt = f"Runway Gen-4 Placeholder\nInput: {os.path.basename(iip)}\nMotion: {pt[:50]}..."
-             txt_c = TextClip(
-                 txt,
-                 fontsize=24,
-                 color='white',
-                 font=self.video_overlay_font,
-                 bg_color='rgba(0,0,0,0.5)',
-                 size=(self.video_frame_size[0] * 0.8, None),
-                 method='caption'
-             ).set_duration(runway_dur).set_position('center')
+             txt_c = TextClip(txt, fontsize=24, color='white', font=self.video_overlay_font, bg_color='rgba(0,0,0,0.5)', size=(self.video_frame_size[0]*0.8, None), method='caption').set_duration(runway_dur).set_position('center')
              final_ph_clip = CompositeVideoClip([img_clip, txt_c], size=img_clip.size)
-             final_ph_clip.write_videofile(ovfp, fps=24, codec='libx264', preset='ultrafast', logger=None, threads=2)
-             logger.info(f"Runway Gen-4 placeholder video: {ovfp}")
-             return ovfp
-         except Exception as e:
-             logger.error(f"Runway Gen-4 placeholder error: {e}", exc_info=True)
-             return None
+             final_ph_clip.write_videofile(ovfp, fps=24, codec='libx264', preset='ultrafast', logger=None, threads=2)
+             logger.info(f"Runway Gen-4 placeholder video: {ovfp}"); return ovfp
+         except Exception as e: logger.error(f"Runway Gen-4 placeholder error: {e}", exc_info=True); return None
          finally:
-             if img_clip and hasattr(img_clip, 'close'):
-                 img_clip.close()
-             if txt_c and hasattr(txt_c, 'close'):
-                 txt_c.close()
-             if final_ph_clip and hasattr(final_ph_clip, 'close'):
-                 final_ph_clip.close()
+             if img_clip and hasattr(img_clip, 'close'): img_clip.close()
+             if txt_c and hasattr(txt_c, 'close'): txt_c.close()
+             if final_ph_clip and hasattr(final_ph_clip, 'close'): final_ph_clip.close()

-     def _create_placeholder_video_content(self, text_description, filename, duration=4, size=None):
+     def _create_placeholder_video_content(self, text_description, filename, duration=4, size=None): # Generic placeholder
          if size is None:
              size = self.video_frame_size
          filepath = os.path.join(self.output_dir, filename)
-         txt_clip = None
+         txt_clip = None # Initialize for finally block
          try:
-             txt_clip = TextClip(
-                 text_description,
-                 fontsize=50,
-                 color='white',
-                 font=self.video_overlay_font,
-                 bg_color='black',
-                 size=size,
-                 method='caption'
-             ).set_duration(duration)
-             txt_clip.write_videofile(
-                 filepath,
-                 fps=24,
-                 codec='libx264',
-                 preset='ultrafast',
-                 logger=None,
-                 threads=2
-             )
+             txt_clip = TextClip(text_description,
+                                 fontsize=50,
+                                 color='white',
+                                 font=self.video_overlay_font,
+                                 bg_color='black',
+                                 size=size,
+                                 method='caption').set_duration(duration)
+
+             txt_clip.write_videofile(filepath,
+                                      fps=24,
+                                      codec='libx264',
+                                      preset='ultrafast',
+                                      logger=None,
+                                      threads=2)
              logger.info(f"Generic placeholder video created successfully: {filepath}")
              return filepath
          except Exception as e:
@@ -364,20 +240,10 @@ class VisualEngine:
                                scene_data, scene_identifier_filename_base,
                                generate_as_video_clip=False, runway_target_duration=5):
          base_name = scene_identifier_filename_base
-         asset_info = {
-             'path': None,
-             'type': 'none',
-             'error': True,
-             'prompt_used': image_generation_prompt_text,
-             'error_message': 'Generation not attempted'
-         }
+         asset_info = {'path': None, 'type': 'none', 'error': True, 'prompt_used': image_generation_prompt_text, 'error_message': 'Generation not attempted'}
          input_image_for_runway_path = None
          image_filename_for_base = base_name + "_base_image.png"
-         temp_image_asset_info = {
-             'error': True,
-             'prompt_used': image_generation_prompt_text,
-             'error_message': 'Base image generation not attempted'
-         }
+         temp_image_asset_info = {'error': True, 'prompt_used': image_generation_prompt_text, 'error_message': 'Base image generation not attempted'}

          if self.USE_AI_IMAGE_GENERATION and self.openai_api_key:
              max_r, att_n = 2, 0
@@ -386,349 +252,151 @@ class VisualEngine:
                  img_fp_dalle = os.path.join(self.output_dir, image_filename_for_base)
                  logger.info(f"Attempt {att_n+1} DALL-E (base img): {image_generation_prompt_text[:100]}...")
                  cl = openai.OpenAI(api_key=self.openai_api_key, timeout=90.0)
-                 r = cl.images.generate(
-                     model=self.dalle_model,
-                     prompt=image_generation_prompt_text,
-                     n=1,
-                     size=self.image_size_dalle3,
-                     quality="hd",
-                     response_format="url",
-                     style="vivid"
-                 )
-                 iu = r.data[0].url
-                 rp = getattr(r.data[0], 'revised_prompt', None)
-                 if rp:
-                     logger.info(f"DALL-E revised: {rp[:100]}...")
-                 ir = requests.get(iu, timeout=120)
-                 ir.raise_for_status()
-                 id_img = Image.open(io.BytesIO(ir.content))
-                 if id_img.mode != 'RGB':
-                     id_img = id_img.convert('RGB')
-                 id_img.save(img_fp_dalle)
-                 logger.info(f"DALL-E base image: {img_fp_dalle}")
+                 r = cl.images.generate(model=self.dalle_model, prompt=image_generation_prompt_text, n=1, size=self.image_size_dalle3, quality="hd", response_format="url", style="vivid")
+                 iu = r.data[0].url; rp = getattr(r.data[0], 'revised_prompt', None)
+                 if rp: logger.info(f"DALL-E revised: {rp[:100]}...")
+                 ir = requests.get(iu, timeout=120); ir.raise_for_status()
+                 id_img = Image.open(io.BytesIO(ir.content))
+                 if id_img.mode != 'RGB': id_img = id_img.convert('RGB')
+                 id_img.save(img_fp_dalle); logger.info(f"DALL-E base image: {img_fp_dalle}")
                  input_image_for_runway_path = img_fp_dalle
-                 temp_image_asset_info = {
-                     'path': img_fp_dalle,
-                     'type': 'image',
-                     'error': False,
-                     'prompt_used': image_generation_prompt_text,
-                     'revised_prompt': rp
-                 }
+                 temp_image_asset_info = {'path': img_fp_dalle, 'type': 'image', 'error': False, 'prompt_used': image_generation_prompt_text, 'revised_prompt': rp}
                  break
-             except openai.RateLimitError as e:
-                 logger.warning(f"OpenAI Rate Limit {att_n+1}: {e}. Retry...")
-                 time.sleep(5 * (att_n + 1))
-                 temp_image_asset_info['error_message'] = str(e)
-             except Exception as e:
-                 logger.error(f"DALL-E error: {e}", exc_info=True)
-                 temp_image_asset_info['error_message'] = str(e)
-                 break
-         if temp_image_asset_info['error']:
-             logger.warning(f"DALL-E failed after {att_n+1} attempts for base image.")
-
+             except openai.RateLimitError as e: logger.warning(f"OpenAI Rate Limit {att_n+1}: {e}. Retry..."); time.sleep(5*(att_n+1)); temp_image_asset_info['error_message'] = str(e)
+             except Exception as e: logger.error(f"DALL-E error: {e}", exc_info=True); temp_image_asset_info['error_message'] = str(e); break
+         if temp_image_asset_info['error']: logger.warning(f"DALL-E failed after {att_n+1} attempts for base image.")
+
          if temp_image_asset_info['error'] and self.USE_PEXELS:
-             pqt = scene_data.get('pexels_search_query_감독',
-                 f"{scene_data.get('emotional_beat','')} {scene_data.get('setting_description','')}")
+             pqt = scene_data.get('pexels_search_query_감독', f"{scene_data.get('emotional_beat','')} {scene_data.get('setting_description','')}")
              pp = self._search_pexels_image(pqt, image_filename_for_base)
-             if pp:
-                 input_image_for_runway_path = pp
-                 temp_image_asset_info = {
-                     'path': pp,
-                     'type': 'image',
-                     'error': False,
-                     'prompt_used': f"Pexels: {pqt}"
-                 }
-             else:
-                 current_em = temp_image_asset_info.get('error_message', "")
-                 temp_image_asset_info['error_message'] = (current_em + " Pexels failed.").strip()
+             if pp: input_image_for_runway_path = pp; temp_image_asset_info = {'path': pp, 'type': 'image', 'error': False, 'prompt_used': f"Pexels: {pqt}"}
+             else: current_em = temp_image_asset_info.get('error_message', ""); temp_image_asset_info['error_message'] = (current_em + " Pexels failed.").strip()

          if temp_image_asset_info['error']:
              logger.warning("Base image (DALL-E/Pexels) failed. Placeholder base image.")
              ppt = temp_image_asset_info.get('prompt_used', image_generation_prompt_text)
-             php = self._create_placeholder_image_content(f"[Base Img Placeholder] {ppt[:100]}...", image_filename_for_base)
-             if php:
-                 input_image_for_runway_path = php
-                 temp_image_asset_info = {
-                     'path': php,
-                     'type': 'image',
-                     'error': False,
-                     'prompt_used': ppt
-                 }
-             else:
-                 current_em = temp_image_asset_info.get('error_message', "")
-                 temp_image_asset_info['error_message'] = (current_em + " Base placeholder failed.").strip()
-
+             php = self._create_placeholder_image_content(f"[Base Img Placeholder] {ppt[:100]}...", image_filename_for_base) # Use image_filename_for_base
+             if php: input_image_for_runway_path = php; temp_image_asset_info = {'path': php, 'type': 'image', 'error': False, 'prompt_used': ppt}
+             else: current_em = temp_image_asset_info.get('error_message', ""); temp_image_asset_info['error_message'] = (current_em + " Base placeholder failed.").strip()
+
          if generate_as_video_clip:
              if self.USE_RUNWAYML and input_image_for_runway_path:
-                 video_path = self._generate_video_clip_with_runwayml(
-                     motion_prompt_text_for_video,
-                     input_image_for_runway_path,
-                     base_name,
-                     runway_target_duration
-                 )
+                 video_path = self._generate_video_clip_with_runwayml(motion_prompt_text_for_video, input_image_for_runway_path, base_name, runway_target_duration)
                  if video_path and os.path.exists(video_path):
-                     return {
-                         'path': video_path,
-                         'type': 'video',
-                         'error': False,
-                         'prompt_used': motion_prompt_text_for_video,
-                         'base_image_path': input_image_for_runway_path
-                     }
-                 else:
-                     asset_info = temp_image_asset_info
-                     asset_info['error'] = True
-                     asset_info['error_message'] = "RunwayML video gen failed; using base image."
-                     asset_info['type'] = 'image'
-                     return asset_info
-             elif not self.USE_RUNWAYML:
-                 asset_info = temp_image_asset_info
-                 asset_info['error_message'] = "RunwayML disabled; using base image."
-                 asset_info['type'] = 'image'
-                 return asset_info
-             else:
-                 asset_info = temp_image_asset_info
-                 asset_info['error_message'] = (asset_info.get('error_message', "") + " Base image failed, Runway video not attempted.").strip()
-                 asset_info['type'] = 'image'
-                 return asset_info
-         else:
-             return temp_image_asset_info
+                     return {'path': video_path, 'type': 'video', 'error': False, 'prompt_used': motion_prompt_text_for_video, 'base_image_path': input_image_for_runway_path}
+                 else: asset_info = temp_image_asset_info; asset_info['error'] = True; asset_info['error_message'] = "RunwayML video gen failed; using base image."; asset_info['type'] = 'image'; return asset_info
+             elif not self.USE_RUNWAYML: asset_info = temp_image_asset_info; asset_info['error_message'] = "RunwayML disabled; using base image."; asset_info['type'] = 'image'; return asset_info
+             else: asset_info = temp_image_asset_info; asset_info['error_message'] = (asset_info.get('error_message', "") + " Base image failed, Runway video not attempted.").strip(); asset_info['type'] = 'image'; return asset_info
+         else: return temp_image_asset_info

      def generate_narration_audio(self, ttn, ofn="narration_overall.mp3"):
-         if not self.USE_ELEVENLABS or not self.elevenlabs_client or not ttn:
-             logger.info("11L skip.")
-             return None
-         afp = os.path.join(self.output_dir, ofn)
-         try:
-             logger.info(f"11L audio (Voice:{self.elevenlabs_voice_id}): {ttn[:70]}...")
-             asm = None
-             if hasattr(self.elevenlabs_client, 'text_to_speech') and hasattr(self.elevenlabs_client.text_to_speech, 'stream'):
-                 asm = self.elevenlabs_client.text_to_speech.stream
-                 logger.info("Using 11L .text_to_speech.stream()")
-             elif hasattr(self.elevenlabs_client, 'generate_stream'):
-                 asm = self.elevenlabs_client.generate_stream
-                 logger.info("Using 11L .generate_stream()")
-             elif hasattr(self.elevenlabs_client, 'generate'):
-                 logger.info("Using 11L .generate()")
-                 vp = Voice(voice_id=str(self.elevenlabs_voice_id),
-                     settings=self.elevenlabs_voice_settings) if Voice and self.elevenlabs_voice_settings else str(self.elevenlabs_voice_id)
-                 ab = self.elevenlabs_client.generate(text=ttn, voice=vp, model="eleven_multilingual_v2")
-                 with open(afp, "wb") as f:
-                     f.write(ab)
-                 logger.info(f"11L audio (non-stream): {afp}")
-                 return afp
-             else:
-                 logger.error("No 11L audio method.")
-                 return None
-
-             if asm:
-                 vps = {"voice_id": str(self.elevenlabs_voice_id)}
-                 if self.elevenlabs_voice_settings:
-                     if hasattr(self.elevenlabs_voice_settings, 'model_dump'):
-                         vps["voice_settings"] = self.elevenlabs_voice_settings.model_dump()
-                     elif hasattr(self.elevenlabs_voice_settings, 'dict'):
-                         vps["voice_settings"] = self.elevenlabs_voice_settings.dict()
-                     else:
-                         vps["voice_settings"] = self.elevenlabs_voice_settings
-                 adi = asm(text=ttn, model_id="eleven_multilingual_v2", **vps)
-                 with open(afp, "wb") as f:
-                     for c in adi:
-                         if c:
-                             f.write(c)
-                 logger.info(f"11L audio (stream): {afp}")
-                 return afp
-         except Exception as e:
-             logger.error(f"11L audio error: {e}", exc_info=True)
-             return None
+         if not self.USE_ELEVENLABS or not self.elevenlabs_client or not ttn: logger.info("11L skip."); return None
+         afp = os.path.join(self.output_dir, ofn)
+         try:
+             logger.info(f"11L audio (Voice:{self.elevenlabs_voice_id}): {ttn[:70]}..."); asm = None
+             if hasattr(self.elevenlabs_client, 'text_to_speech') and hasattr(self.elevenlabs_client.text_to_speech, 'stream'): asm = self.elevenlabs_client.text_to_speech.stream; logger.info("Using 11L .text_to_speech.stream()")
+             elif hasattr(self.elevenlabs_client, 'generate_stream'): asm = self.elevenlabs_client.generate_stream; logger.info("Using 11L .generate_stream()")
+             elif hasattr(self.elevenlabs_client, 'generate'):
+                 logger.info("Using 11L .generate()")
+                 vp = Voice(voice_id=str(self.elevenlabs_voice_id), settings=self.elevenlabs_voice_settings) if Voice and self.elevenlabs_voice_settings else str(self.elevenlabs_voice_id)
+                 ab = self.elevenlabs_client.generate(text=ttn, voice=vp, model="eleven_multilingual_v2")
+                 with open(afp, "wb") as f: f.write(ab)
+                 logger.info(f"11L audio (non-stream): {afp}"); return afp
+             else: logger.error("No 11L audio method."); return None
+             if asm:
+                 vps = {"voice_id": str(self.elevenlabs_voice_id)}
+                 if self.elevenlabs_voice_settings:
+                     if hasattr(self.elevenlabs_voice_settings, 'model_dump'): vps["voice_settings"] = self.elevenlabs_voice_settings.model_dump()
+                     elif hasattr(self.elevenlabs_voice_settings, 'dict'): vps["voice_settings"] = self.elevenlabs_voice_settings.dict()
+                     else: vps["voice_settings"] = self.elevenlabs_voice_settings
+                 adi = asm(text=ttn, model_id="eleven_multilingual_v2", **vps)
+                 with open(afp, "wb") as f:
+                     for c in adi:
+                         if c: f.write(c)
+                 logger.info(f"11L audio (stream): {afp}"); return afp
+         except Exception as e: logger.error(f"11L audio error: {e}", exc_info=True); return None

      def assemble_animatic_from_assets(self, asset_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24):
-         if not asset_data_list:
-             logger.warning("No assets for animatic.")
-             return None
-         processed_clips = []
-         narration_clip = None
-         final_clip = None
+         if not asset_data_list: logger.warning("No assets for animatic."); return None
+         processed_clips = []; narration_clip = None; final_clip = None
          logger.info(f"Assembling from {len(asset_data_list)} assets. Frame: {self.video_frame_size}.")

          for i, asset_info in enumerate(asset_data_list):
-             asset_path = asset_info.get('path')
-             asset_type = asset_info.get('type')
-             scene_dur = asset_info.get('duration', 4.5)
-             scene_num = asset_info.get('scene_num', i + 1)
-             key_action = asset_info.get('key_action', '')
+             asset_path, asset_type, scene_dur = asset_info.get('path'), asset_info.get('type'), asset_info.get('duration', 4.5)
+             scene_num, key_action = asset_info.get('scene_num', i + 1), asset_info.get('key_action', '')
              logger.info(f"S{scene_num}: Path='{asset_path}', Type='{asset_type}', Dur='{scene_dur}'s")

-             if not (asset_path and os.path.exists(asset_path)):
-                 logger.warning(f"S{scene_num}: Not found '{asset_path}'. Skip.")
-                 continue
-             if scene_dur <= 0:
-                 logger.warning(f"S{scene_num}: Invalid duration ({scene_dur}s). Skip.")
-                 continue
+             if not (asset_path and os.path.exists(asset_path)): logger.warning(f"S{scene_num}: Not found '{asset_path}'. Skip."); continue
+             if scene_dur <= 0: logger.warning(f"S{scene_num}: Invalid duration ({scene_dur}s). Skip."); continue

              current_scene_mvpy_clip = None
              try:
                  if asset_type == 'image':
-                     pil_img = Image.open(asset_path)
-                     logger.debug(f"S{scene_num}: Loaded img. Mode:{pil_img.mode}, Size:{pil_img.size}")
+                     pil_img = Image.open(asset_path); logger.debug(f"S{scene_num}: Loaded img. Mode:{pil_img.mode}, Size:{pil_img.size}")
                      img_rgba = pil_img.convert('RGBA') if pil_img.mode != 'RGBA' else pil_img.copy()
-                     thumb = img_rgba.copy()
-                     rf = Image.Resampling.LANCZOS if hasattr(Image.Resampling, 'LANCZOS') else Image.BILINEAR
-                     thumb.thumbnail(self.video_frame_size, rf)
-                     cv_rgba = Image.new('RGBA', self.video_frame_size, (0, 0, 0, 0))
-                     xo = (self.video_frame_size[0] - thumb.width) // 2
-                     yo = (self.video_frame_size[1] - thumb.height) // 2
-                     cv_rgba.paste(thumb, (xo, yo), thumb)
-                     final_rgb_pil = Image.new("RGB", self.video_frame_size, (0, 0, 0))
-                     final_rgb_pil.paste(cv_rgba, mask=cv_rgba.split()[3])
-                     dbg_path = os.path.join(self.output_dir, f"debug_PRE_NUMPY_S{scene_num}.png")
-                     final_rgb_pil.save(dbg_path)
-                     logger.info(f"DEBUG: Saved PRE_NUMPY_S{scene_num} to {dbg_path}")
-                     frame_np = np.array(final_rgb_pil, dtype=np.uint8)
-                     if not frame_np.flags['C_CONTIGUOUS']:
-                         frame_np = np.ascontiguousarray(frame_np, dtype=np.uint8)
+                     thumb = img_rgba.copy(); rf = Image.Resampling.LANCZOS if hasattr(Image.Resampling, 'LANCZOS') else Image.BILINEAR; thumb.thumbnail(self.video_frame_size, rf)
+                     cv_rgba = Image.new('RGBA', self.video_frame_size, (0,0,0,0)); xo, yo = (self.video_frame_size[0]-thumb.width)//2, (self.video_frame_size[1]-thumb.height)//2
+                     cv_rgba.paste(thumb, (xo, yo), thumb)
+                     final_rgb_pil = Image.new("RGB", self.video_frame_size, (0,0,0)); final_rgb_pil.paste(cv_rgba, mask=cv_rgba.split()[3])
+                     dbg_path = os.path.join(self.output_dir, f"debug_PRE_NUMPY_S{scene_num}.png"); final_rgb_pil.save(dbg_path); logger.info(f"DEBUG: Saved PRE_NUMPY_S{scene_num} to {dbg_path}")
+                     frame_np = np.array(final_rgb_pil, dtype=np.uint8)
+                     if not frame_np.flags['C_CONTIGUOUS']: frame_np = np.ascontiguousarray(frame_np, dtype=np.uint8)
                      logger.debug(f"S{scene_num}: NumPy for MoviePy. Shape:{frame_np.shape}, DType:{frame_np.dtype}, C-Contig:{frame_np.flags['C_CONTIGUOUS']}")
-                     if frame_np.size == 0 or frame_np.ndim != 3 or frame_np.shape[2] != 3:
-                         logger.error(f"S{scene_num}: Invalid NumPy. Skip.")
-                         continue
-                     clip_base = ImageClip(frame_np, transparent=False).set_duration(scene_dur)
-                     mvpy_dbg_path = os.path.join(self.output_dir, f"debug_MOVIEPY_FRAME_S{scene_num}.png")
-                     clip_base.save_frame(mvpy_dbg_path, t=0.1)
-                     logger.info(f"DEBUG: Saved MOVIEPY_FRAME_S{scene_num} to {mvpy_dbg_path}")
+                     if frame_np.size == 0 or frame_np.ndim != 3 or frame_np.shape[2] != 3: logger.error(f"S{scene_num}: Invalid NumPy. Skip."); continue
+                     clip_base = ImageClip(frame_np, transparent=False).set_duration(scene_dur)
+                     mvpy_dbg_path = os.path.join(self.output_dir, f"debug_MOVIEPY_FRAME_S{scene_num}.png"); clip_base.save_frame(mvpy_dbg_path, t=0.1); logger.info(f"DEBUG: Saved MOVIEPY_FRAME_S{scene_num} to {mvpy_dbg_path}")
                      clip_fx = clip_base
-                     try:
-                         es = random.uniform(1.03, 1.08)
-                         clip_fx = clip_base.fx(
-                             vfx.resize,
-                             lambda t: 1 + (es - 1) * (t / scene_dur) if scene_dur > 0 else 1
-                         ).set_position('center')
-                     except Exception as e:
-                         logger.error(f"S{scene_num} Ken Burns error: {e}", exc_info=False)
+                     try: es = random.uniform(1.03, 1.08); clip_fx = clip_base.fx(vfx.resize, lambda t: 1 + (es-1)*(t/scene_dur) if scene_dur > 0 else 1).set_position('center')
+                     except Exception as e: logger.error(f"S{scene_num} Ken Burns error: {e}", exc_info=False)
                      current_scene_mvpy_clip = clip_fx
                  elif asset_type == 'video':
-                     src_clip = None
+                     src_clip = None
                      try:
-                         src_clip = VideoFileClip(
-                             asset_path,
-                             target_resolution=(self.video_frame_size[1], self.video_frame_size[0]) if self.video_frame_size else None,
-                             audio=False
-                         )
-                         tmp_clip = src_clip
-                         if src_clip.duration != scene_dur:
-                             if src_clip.duration > scene_dur:
-                                 tmp_clip = src_clip.subclip(0, scene_dur)
+                         src_clip = VideoFileClip(asset_path, target_resolution=(self.video_frame_size[1], self.video_frame_size[0]) if self.video_frame_size else None, audio=False)
+                         tmp_clip = src_clip
+                         if src_clip.duration != scene_dur:
+                             if src_clip.duration > scene_dur: tmp_clip = src_clip.subclip(0, scene_dur)
                              else:
-                                 if scene_dur / src_clip.duration > 1.5 and src_clip.duration > 0.1:
-                                     tmp_clip = src_clip.loop(duration=scene_dur)
-                                 else:
-                                     tmp_clip = src_clip.set_duration(src_clip.duration)
-                                     logger.info(f"S{scene_num} Video clip ({src_clip.duration:.2f}s) shorter than target ({scene_dur:.2f}s).")
-                         current_scene_mvpy_clip = tmp_clip.set_duration(scene_dur)
-                         if current_scene_mvpy_clip.size != list(self.video_frame_size):
-                             current_scene_mvpy_clip = current_scene_mvpy_clip.resize(self.video_frame_size)
-                     except Exception as e:
-                         logger.error(f"S{scene_num} Video load error '{asset_path}':{e}", exc_info=True)
-                         continue
+                                 if scene_dur/src_clip.duration > 1.5 and src_clip.duration > 0.1: tmp_clip = src_clip.loop(duration=scene_dur)
+                                 else: tmp_clip = src_clip.set_duration(src_clip.duration); logger.info(f"S{scene_num} Video clip ({src_clip.duration:.2f}s) shorter than target ({scene_dur:.2f}s).")
+                         current_scene_mvpy_clip = tmp_clip.set_duration(scene_dur)
+                         if current_scene_mvpy_clip.size != list(self.video_frame_size): current_scene_mvpy_clip = current_scene_mvpy_clip.resize(self.video_frame_size)
+                     except Exception as e: logger.error(f"S{scene_num} Video load error '{asset_path}':{e}", exc_info=True); continue
                      finally:
-                         if src_clip and src_clip is not current_scene_mvpy_clip and hasattr(src_clip, 'close'):
-                             src_clip.close()
-                 else:
-                     logger.warning(f"S{scene_num} Unknown asset type '{asset_type}'. Skip.")
-                     continue
-
+                         if src_clip and src_clip is not current_scene_mvpy_clip and hasattr(src_clip, 'close'): src_clip.close()
+                 else: logger.warning(f"S{scene_num} Unknown asset type '{asset_type}'. Skip."); continue
                  if current_scene_mvpy_clip and key_action:
                      try:
-                         to_dur = min(current_scene_mvpy_clip.duration - 0.5, current_scene_mvpy_clip.duration * 0.8) if current_scene_mvpy_clip.duration > 0.5 else current_scene_mvpy_clip.duration
-                         to_start = 0.25
-                         txt_c = TextClip(
-                             f"Scene {scene_num}\n{key_action}",
-                             fontsize=self.video_overlay_font_size,
-                             color=self.video_overlay_font_color,
-                             font=self.video_overlay_font,
-                             bg_color='rgba(10,10,20,0.7)',
-                             method='caption',
-                             align='West',
-                             size=(self.video_frame_size[0] * 0.9, None),
-                             kerning=-1,
-                             stroke_color='black',
-                             stroke_width=1.5
-                         ).set_duration(to_dur).set_start(to_start).set_position(('center', 0.92), relative=True)
-                         current_scene_mvpy_clip = CompositeVideoClip(
-                             [current_scene_mvpy_clip, txt_c],
-                             size=self.video_frame_size,
-                             use_bgclip=True
-                         )
-                     except Exception as e:
-                         logger.error(f"S{scene_num} TextClip error:{e}. No text.", exc_info=True)
-
-                 if current_scene_mvpy_clip:
-                     processed_clips.append(current_scene_mvpy_clip)
-                     logger.info(f"S{scene_num} Processed. Dur:{current_scene_mvpy_clip.duration:.2f}s.")
-             except Exception as e:
-                 logger.error(f"MAJOR Error S{scene_num} ({asset_path}):{e}", exc_info=True)
+                         to_dur = min(current_scene_mvpy_clip.duration - 0.5, current_scene_mvpy_clip.duration * 0.8) if current_scene_mvpy_clip.duration > 0.5 else current_scene_mvpy_clip.duration
+                         to_start = 0.25
+                         txt_c = TextClip(f"Scene {scene_num}\n{key_action}", fontsize=self.video_overlay_font_size, color=self.video_overlay_font_color, font=self.video_overlay_font, bg_color='rgba(10,10,20,0.7)', method='caption', align='West', size=(self.video_frame_size[0]*0.9, None), kerning=-1, stroke_color='black', stroke_width=1.5).set_duration(to_dur).set_start(to_start).set_position(('center', 0.92), relative=True)
+                         current_scene_mvpy_clip = CompositeVideoClip([current_scene_mvpy_clip, txt_c], size=self.video_frame_size, use_bgclip=True)
+                     except Exception as e: logger.error(f"S{scene_num} TextClip error:{e}. No text.", exc_info=True)
+                 if current_scene_mvpy_clip: processed_clips.append(current_scene_mvpy_clip); logger.info(f"S{scene_num} Processed. Dur:{current_scene_mvpy_clip.duration:.2f}s.")
+             except Exception as e: logger.error(f"MAJOR Error S{scene_num} ({asset_path}):{e}", exc_info=True)
              finally:
-                 if current_scene_mvpy_clip and hasattr(current_scene_mvpy_clip, 'close'):
-                     try:
-                         current_scene_mvpy_clip.close()
-                     except:
-                         pass
+                 if current_scene_mvpy_clip and hasattr(current_scene_mvpy_clip, 'close'):
+                     try: current_scene_mvpy_clip.close()
+                     except: pass

-         if not processed_clips:
-             logger.warning("No clips processed. Abort.")
-             return None
-         td = 0.75
+         if not processed_clips: logger.warning("No clips processed. Abort."); return None
+         td = 0.75
          try:
-             logger.info(f"Concatenating {len(processed_clips)} clips.")
-             if len(processed_clips) > 1:
-                 final_clip = concatenate_videoclips(processed_clips, padding=-td if td > 0 else 0, method="compose")
-             elif processed_clips:
-                 final_clip = processed_clips[0]
-             if not final_clip:
-                 logger.error("Concatenation failed.")
-                 return None
+             logger.info(f"Concatenating {len(processed_clips)} clips.")
+             if len(processed_clips) > 1: final_clip = concatenate_videoclips(processed_clips, padding=-td if td > 0 else 0, method="compose")
+             elif processed_clips: final_clip = processed_clips[0]
+             if not final_clip: logger.error("Concatenation failed."); return None
              logger.info(f"Concatenated dur:{final_clip.duration:.2f}s")
-             if td > 0 and final_clip.duration > 0:
-                 if final_clip.duration > td * 2:
-                     final_clip = final_clip.fx(vfx.fadein, td).fx(vfx.fadeout, td)
-                 else:
-                     final_clip = final_clip.fx(vfx.fadein, min(td, final_clip.duration / 2.0))
-             if overall_narration_path and os.path.exists(overall_narration_path) and final_clip.duration > 0:
-                 try:
-                     narration_clip = AudioFileClip(overall_narration_path)
-                     final_clip = final_clip.set_audio(narration_clip)
-                     logger.info("Narration added.")
-                 except Exception as e:
-                     logger.error(f"Narration add error:{e}", exc_info=True)
-             elif final_clip.duration <= 0:
-                 logger.warning("Video no duration. No audio.")
-             if final_clip and final_clip.duration > 0:
-                 op = os.path.join(self.output_dir, output_filename)
-                 logger.info(f"Writing video:{op} (Dur:{final_clip.duration:.2f}s)")
-                 final_clip.write_videofile(
-                     op,
-                     fps=fps,
-                     codec='libx264',
-                     preset='medium',
-                     audio_codec='aac',
-                     temp_audiofile=os.path.join(self.output_dir, f'temp-audio-{os.urandom(4).hex()}.m4a'),
-                     remove_temp=True,
-                     threads=os.cpu_count() or 2,
-                     logger='bar',
-                     bitrate="5000k",
-                     ffmpeg_params=["-pix_fmt", "yuv420p"]
-                 )
-                 logger.info(f"Video created:{op}")
-                 return op
-             else:
-                 logger.error("Final clip invalid. No write.")
-                 return None
-         except Exception as e:
-             logger.error(f"Video write error:{e}", exc_info=True)
-             return None
+             if td > 0 and final_clip.duration > 0:
+                 if final_clip.duration > td * 2: final_clip = final_clip.fx(vfx.fadein, td).fx(vfx.fadeout, td)
+                 else: final_clip = final_clip.fx(vfx.fadein, min(td, final_clip.duration / 2.0))
+             if overall_narration_path and os.path.exists(overall_narration_path) and final_clip.duration > 0:
+                 try: narration_clip = AudioFileClip(overall_narration_path); final_clip = final_clip.set_audio(narration_clip); logger.info("Narration added.")
+                 except Exception as e: logger.error(f"Narration add error:{e}", exc_info=True)
+             elif final_clip.duration <= 0: logger.warning("Video no duration. No audio.")
+             if final_clip and final_clip.duration > 0:
+                 op = os.path.join(self.output_dir, output_filename); logger.info(f"Writing video:{op} (Dur:{final_clip.duration:.2f}s)")
+                 final_clip.write_videofile(op, fps=fps, codec='libx264', preset='medium', audio_codec='aac', temp_audiofile=os.path.join(self.output_dir, f'temp-audio-{os.urandom(4).hex()}.m4a'), remove_temp=True, threads=os.cpu_count() or 2, logger='bar', bitrate="5000k", ffmpeg_params=["-pix_fmt", "yuv420p"])
+                 logger.info(f"Video created:{op}"); return op
+             else: logger.error("Final clip invalid. No write."); return None
+         except Exception as e: logger.error(f"Video write error:{e}", exc_info=True); return None
          finally:
              logger.debug("Closing all MoviePy clips in `assemble_animatic_from_assets` finally block.")
              clips_to_close = processed_clips + ([narration_clip] if narration_clip else []) + ([final_clip] if final_clip else [])
              for clip_obj in clips_to_close:
                  if clip_obj and hasattr(clip_obj, 'close'):
-                     try:
-                         clip_obj.close()
-                     except Exception as e_close:
-                         logger.warning(f"Ignoring error while closing a clip: {e_close}")
+                     try: clip_obj.close()
+                     except Exception as e_close: logger.warning(f"Ignoring error while closing a clip: {e_close}")
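For orientation, a hedged end-to-end usage sketch of the class after this commit. The constructor arguments are assumptions inferred from attributes visible in the diff (`self.output_dir` and the `default_elevenlabs_voice_id` parameter), not a confirmed signature; the asset-dict keys match those read by `assemble_animatic_from_assets`:

```python
from core.visual_engine import VisualEngine

# Constructor args below are assumed from the diff, not confirmed by it.
engine = VisualEngine(output_dir="temp_assets",
                      default_elevenlabs_voice_id="Rachel")
engine.set_openai_api_key("sk-...")   # enables DALL-E base images
engine.set_elevenlabs_api_key("...")  # enables narration audio
engine.set_pexels_api_key("...")      # enables stock-photo fallback
engine.set_runway_api_key("...")      # enables (placeholder) video clips

# These dict keys are the ones assemble_animatic_from_assets actually reads.
assets = [{'path': 'temp_assets/scene1_base_image.png', 'type': 'image',
           'duration': 4.5, 'scene_num': 1, 'key_action': 'Hero enters the lab'}]
video_path = engine.assemble_animatic_from_assets(
    assets, overall_narration_path=None, output_filename="final_video.mp4")
print(video_path)
```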