# core/visual_engine.py
from PIL import Image, ImageDraw, ImageFont, ImageOps
import base64
import mimetypes
import numpy as np
import os
import openai # OpenAI v1.x.x+
import requests
import io
import time
import random
import logging
from moviepy.editor import (ImageClip, VideoFileClip, concatenate_videoclips, TextClip,
CompositeVideoClip, AudioFileClip)
import moviepy.video.fx.all as vfx
try: # MONKEY PATCH for Pillow/MoviePy compatibility
if hasattr(Image, 'Resampling') and hasattr(Image.Resampling, 'LANCZOS'): # Pillow 9+
if not hasattr(Image, 'ANTIALIAS'): Image.ANTIALIAS = Image.Resampling.LANCZOS
elif hasattr(Image, 'LANCZOS'): # Pillow 8
if not hasattr(Image, 'ANTIALIAS'): Image.ANTIALIAS = Image.LANCZOS
elif not hasattr(Image, 'ANTIALIAS'):
print("WARNING: Pillow version lacks common Resampling attributes or ANTIALIAS. MoviePy effects might fail or look different.")
except Exception as e_monkey_patch:
print(f"WARNING: An unexpected error occurred during Pillow ANTIALIAS monkey-patch: {e_monkey_patch}")
logger = logging.getLogger(__name__)
# logger.setLevel(logging.DEBUG) # Uncomment for verbose debugging during development
ELEVENLABS_CLIENT_IMPORTED = False; ElevenLabsAPIClient = None; Voice = None; VoiceSettings = None
try:
from elevenlabs.client import ElevenLabs as ImportedElevenLabsClient
from elevenlabs import Voice as ImportedVoice, VoiceSettings as ImportedVoiceSettings
ElevenLabsAPIClient = ImportedElevenLabsClient; Voice = ImportedVoice; VoiceSettings = ImportedVoiceSettings
ELEVENLABS_CLIENT_IMPORTED = True; logger.info("ElevenLabs client components (SDK v1.x.x pattern) imported successfully.")
except ImportError: logger.warning("ElevenLabs SDK not found (expected 'pip install elevenlabs>=1.0.0'). Audio generation will be disabled.")
except Exception as e_eleven_import_general: logger.warning(f"General error importing ElevenLabs client components: {e_eleven_import_general}. Audio generation disabled.")
RUNWAYML_SDK_IMPORTED = False; RunwayMLAPIClientClass = None
try:
from runwayml import RunwayML as ImportedRunwayMLAPIClientClass
RunwayMLAPIClientClass = ImportedRunwayMLAPIClientClass; RUNWAYML_SDK_IMPORTED = True
logger.info("RunwayML SDK (runwayml) imported successfully.")
except ImportError: logger.warning("RunwayML SDK not found (pip install runwayml). RunwayML video generation will be disabled.")
except Exception as e_runway_sdk_import_general: logger.warning(f"General error importing RunwayML SDK: {e_runway_sdk_import_general}. RunwayML features disabled.")
class VisualEngine:
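    """Generates and assembles the visual and audio assets for an animatic.

    Wraps optional external services: DALL-E for base images, Pexels as a
    stock-photo fallback, RunwayML Gen-4 for image-to-video, and ElevenLabs
    for narration. Each service is enabled only when its API key is supplied
    via the corresponding set_*_api_key method, and everything degrades to
    local placeholders when a service is unavailable. Final assembly into an
    MP4 is done with MoviePy.
    """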
DEFAULT_FONT_SIZE_PIL = 10; PREFERRED_FONT_SIZE_PIL = 20
VIDEO_OVERLAY_FONT_SIZE = 30; VIDEO_OVERLAY_FONT_COLOR = 'white'
DEFAULT_MOVIEPY_FONT = 'DejaVu-Sans-Bold'; PREFERRED_MOVIEPY_FONT = 'Liberation-Sans-Bold'
def __init__(self, output_dir="temp_cinegen_media", default_elevenlabs_voice_id="Rachel"):
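        """Ensure the output directory exists, resolve usable Pillow/MoviePy fonts, and default all external services to disabled."""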
self.output_dir = output_dir
try: # Attempt to create the output directory
os.makedirs(self.output_dir, exist_ok=True)
logger.info(f"VisualEngine output directory set/ensured: {os.path.abspath(self.output_dir)}")
except Exception as e_mkdir:
logger.error(f"CRITICAL: Failed to create output directory '{self.output_dir}': {e_mkdir}", exc_info=True)
# Depending on how critical this is, you might raise an exception or set a failure flag
# For now, we'll log and continue, but writes will fail.
self.font_filename_pil_preference = "DejaVuSans-Bold.ttf"
        font_paths = [
            self.font_filename_pil_preference,
            f"/usr/share/fonts/truetype/dejavu/{self.font_filename_pil_preference}",
            "/usr/share/fonts/truetype/liberation/LiberationSans-Bold.ttf",
            "/System/Library/Fonts/Supplemental/Arial.ttf",
            "C:/Windows/Fonts/arial.ttf",
            "/usr/local/share/fonts/truetype/mycustomfonts/arial.ttf",
        ]
self.resolved_font_path_pil = next((p for p in font_paths if os.path.exists(p)), None)
self.active_font_pil = ImageFont.load_default() # Fallback default
self.active_font_size_pil = self.DEFAULT_FONT_SIZE_PIL
self.active_moviepy_font_name = self.DEFAULT_MOVIEPY_FONT
if self.resolved_font_path_pil:
try:
self.active_font_pil = ImageFont.truetype(self.resolved_font_path_pil, self.PREFERRED_FONT_SIZE_PIL)
self.active_font_size_pil = self.PREFERRED_FONT_SIZE_PIL
logger.info(f"Pillow font loaded: {self.resolved_font_path_pil} at size {self.active_font_size_pil}.")
if "dejavu" in self.resolved_font_path_pil.lower(): self.active_moviepy_font_name = 'DejaVu-Sans-Bold'
elif "liberation" in self.resolved_font_path_pil.lower(): self.active_moviepy_font_name = 'Liberation-Sans-Bold'
except IOError as e_font: logger.error(f"Pillow font IOError '{self.resolved_font_path_pil}': {e_font}. Default.")
else: logger.warning("Preferred Pillow font not found. Default.")
self.openai_api_key = None; self.USE_AI_IMAGE_GENERATION = False; self.dalle_model = "dall-e-3"; self.image_size_dalle3 = "1792x1024"
self.video_frame_size = (1280, 720)
self.elevenlabs_api_key = None; self.USE_ELEVENLABS = False; self.elevenlabs_client_instance = None
self.elevenlabs_voice_id = default_elevenlabs_voice_id # Use the passed default
logger.info(f"VisualEngine __init__: ElevenLabs Voice ID initially set to: {self.elevenlabs_voice_id}")
if VoiceSettings and ELEVENLABS_CLIENT_IMPORTED: self.elevenlabs_voice_settings_obj = VoiceSettings(stability=0.60, similarity_boost=0.80, style=0.15, use_speaker_boost=True)
else: self.elevenlabs_voice_settings_obj = None
self.pexels_api_key = None; self.USE_PEXELS = False
self.runway_api_key = None; self.USE_RUNWAYML = False; self.runway_ml_sdk_client_instance = None
if RUNWAYML_SDK_IMPORTED and RunwayMLAPIClientClass and os.getenv("RUNWAYML_API_SECRET"):
try: self.runway_ml_sdk_client_instance = RunwayMLAPIClientClass(); self.USE_RUNWAYML = True; logger.info("RunwayML Client init from env var at startup.")
except Exception as e_rwy_init: logger.error(f"Initial RunwayML client init failed: {e_rwy_init}"); self.USE_RUNWAYML = False
logger.info("VisualEngine __init__ sequence complete.")
def set_openai_api_key(self, api_key_value): self.openai_api_key = api_key_value; self.USE_AI_IMAGE_GENERATION = bool(api_key_value); logger.info(f"DALL-E status: {'Ready' if self.USE_AI_IMAGE_GENERATION else 'Disabled'}")
def set_elevenlabs_api_key(self, api_key_value, voice_id_from_secret=None): # Accepts voice_id_from_secret
self.elevenlabs_api_key = api_key_value
if voice_id_from_secret: self.elevenlabs_voice_id = voice_id_from_secret; logger.info(f"11L Voice ID updated via set_elevenlabs_api_key to: {self.elevenlabs_voice_id}")
if api_key_value and ELEVENLABS_CLIENT_IMPORTED and ElevenLabsAPIClient:
try: self.elevenlabs_client_instance = ElevenLabsAPIClient(api_key=api_key_value); self.USE_ELEVENLABS = bool(self.elevenlabs_client_instance); logger.info(f"11L Client: {'Ready' if self.USE_ELEVENLABS else 'Failed'} (Using Voice: {self.elevenlabs_voice_id})")
except Exception as e_11l_setkey_init: logger.error(f"11L client init error: {e_11l_setkey_init}. Disabled.", exc_info=True); self.USE_ELEVENLABS=False; self.elevenlabs_client_instance=None
else: self.USE_ELEVENLABS = False; logger.info(f"11L Disabled (API key not provided or SDK issue).")
def set_pexels_api_key(self, api_key_value): self.pexels_api_key = api_key_value; self.USE_PEXELS = bool(api_key_value); logger.info(f"Pexels status: {'Ready' if self.USE_PEXELS else 'Disabled'}")
def set_runway_api_key(self, api_key_value):
self.runway_api_key = api_key_value
if api_key_value:
if RUNWAYML_SDK_IMPORTED and RunwayMLAPIClientClass:
if not self.runway_ml_sdk_client_instance: # If not already initialized
try:
original_env_secret = os.getenv("RUNWAYML_API_SECRET")
if not original_env_secret: os.environ["RUNWAYML_API_SECRET"] = api_key_value; logger.info("Temporarily set RUNWAYML_API_SECRET from provided key for SDK client init.")
self.runway_ml_sdk_client_instance = RunwayMLAPIClientClass(); self.USE_RUNWAYML = True; logger.info("RunwayML Client initialized successfully via set_runway_api_key.")
if not original_env_secret: del os.environ["RUNWAYML_API_SECRET"]; logger.info("Cleared temporary RUNWAYML_API_SECRET environment variable.")
except Exception as e_runway_setkey_init: logger.error(f"RunwayML Client initialization in set_runway_api_key failed: {e_runway_setkey_init}", exc_info=True); self.USE_RUNWAYML=False;self.runway_ml_sdk_client_instance=None
else: self.USE_RUNWAYML = True; logger.info("RunwayML Client was already initialized (likely from environment variable). API key stored.")
else: logger.warning("RunwayML SDK not imported. API key stored, but current integration relies on SDK. Service effectively disabled."); self.USE_RUNWAYML = False
else: self.USE_RUNWAYML = False; self.runway_ml_sdk_client_instance = None; logger.info("RunwayML Service Disabled (no API key provided).")
    # --- Helper Methods ---
def _image_to_data_uri(self, image_path):
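        """Encode the image at `image_path` as a base64 `data:` URI (used as the Runway prompt image); returns None on failure."""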
try:
            mime_type, _ = mimetypes.guess_type(image_path)
            if not mime_type:
                ext = os.path.splitext(image_path)[1].lower()
                mime_map = {".png": "image/png", ".jpg": "image/jpeg", ".jpeg": "image/jpeg", ".webp": "image/webp"}
                mime_type = mime_map.get(ext, "application/octet-stream")
                if mime_type == "application/octet-stream": logger.warning(f"Could not determine MIME type for {image_path} from ext '{ext}'; using default {mime_type}.")
with open(image_path, "rb") as image_file_handle: image_binary_data = image_file_handle.read()
encoded_base64_string = base64.b64encode(image_binary_data).decode('utf-8')
data_uri_string = f"data:{mime_type};base64,{encoded_base64_string}"; logger.debug(f"Data URI for {os.path.basename(image_path)} (MIME:{mime_type}): {data_uri_string[:100]}..."); return data_uri_string
except FileNotFoundError: logger.error(f"Img not found {image_path} for data URI."); return None
except Exception as e: logger.error(f"Error converting {image_path} to data URI:{e}", exc_info=True); return None
def _map_resolution_to_runway_ratio(self, width, height):
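        """Return a Runway Gen-4 supported "W:H" ratio string for the given resolution, defaulting to "1280:720"."""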
ratio_str=f"{width}:{height}";supported_ratios_gen4=["1280:720","720:1280","1104:832","832:1104","960:960","1584:672"];
if ratio_str in supported_ratios_gen4:return ratio_str
logger.warning(f"Res {ratio_str} not in Gen-4 list. Default 1280:720 for Runway.");return "1280:720"
def _get_text_dimensions(self, text_content, font_object_pil):
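        """Best-effort (width, height) pixel measurement of `text_content` for the given Pillow font, across Pillow API versions."""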
dch=getattr(font_object_pil,'size',self.active_font_size_pil);
if not text_content:return 0,dch
try:
if hasattr(font_object_pil,'getbbox'):bb=font_object_pil.getbbox(text_content);w=bb[2]-bb[0];h=bb[3]-bb[1];return w,h if h>0 else dch
elif hasattr(font_object_pil,'getsize'):w,h=font_object_pil.getsize(text_content);return w,h if h>0 else dch
else:return int(len(text_content)*dch*0.6),int(dch*1.2)
except Exception as e_getdim:logger.warning(f"Error in _get_text_dimensions:{e_getdim}");return int(len(text_content)*self.active_font_size_pil*0.6),int(self.active_font_size_pil*1.2)
def _create_placeholder_image_content(self,text_description,filename,size=None):
        """Render `text_description` onto a dark placeholder frame, word-wrapping it to fit, and save it as an image."""
        if size is None: size = self.video_frame_size
        img = Image.new('RGB', size, color=(20, 20, 40)); d_draw = ImageDraw.Draw(img); padding = 25
        max_w_text = size[0] - (2 * padding); lines_out = []
        if not text_description: text_description = "(Placeholder Image)"
        words_in_desc = text_description.split(); current_line_buf = ""
        for word_idx_loop, word_val in enumerate(words_in_desc):
prospective_add_str = word_val + (" " if word_idx_loop < len(words_in_desc) - 1 else "")
test_line_str = current_line_buf + prospective_add_str
current_w_val, _ = self._get_text_dimensions(test_line_str, self.active_font_pil)
if current_w_val == 0 and test_line_str.strip(): current_w_val = len(test_line_str) * (self.active_font_size_pil * 0.6)
if current_w_val <= max_w_text: current_line_buf = test_line_str
else:
if current_line_buf.strip(): lines_out.append(current_line_buf.strip())
current_line_buf = prospective_add_str
if current_line_buf.strip(): lines_out.append(current_line_buf.strip())
if not lines_out and text_description:
avg_char_w_val, _ = self._get_text_dimensions("W", self.active_font_pil); avg_char_w_val = avg_char_w_val or (self.active_font_size_pil * 0.6)
chars_p_line = int(max_w_text / avg_char_w_val) if avg_char_w_val > 0 else 20
lines_out.append(text_description[:chars_p_line] + ("..." if len(text_description) > chars_p_line else ""))
elif not lines_out: lines_out.append("(Placeholder Error)")
_, single_line_h_val = self._get_text_dimensions("Ay", self.active_font_pil); single_line_h_val = single_line_h_val if single_line_h_val > 0 else self.active_font_size_pil + 2
max_lines_disp = min(len(lines_out), (size[1] - (2 * padding)) // (single_line_h_val + 2)) if single_line_h_val > 0 else 1; max_lines_disp = max(1, max_lines_disp)
y_pos_text = padding + (size[1] - (2 * padding) - max_lines_disp * (single_line_h_val + 2)) / 2.0
        for i_ln in range(max_lines_disp):
            line_content_str = lines_out[i_ln]; line_w_px, _ = self._get_text_dimensions(line_content_str, self.active_font_pil)
if line_w_px == 0 and line_content_str.strip(): line_w_px = len(line_content_str) * (self.active_font_size_pil * 0.6)
x_pos_text = (size[0] - line_w_px) / 2.0
try: d_draw.text((x_pos_text, y_pos_text), line_content_str, font=self.active_font_pil, fill=(200, 200, 180))
except Exception as e_draw_ph: logger.error(f"Pillow d.text error: {e_draw_ph} for '{line_content_str}'")
y_pos_text += single_line_h_val + 2
            if i_ln == 6 and max_lines_disp > 7:  # Stop after 7 lines; mark truncation with an ellipsis.
                try: d_draw.text((x_pos_text, y_pos_text), "...", font=self.active_font_pil, fill=(200, 200, 180))
                except Exception as e_elps_ph: logger.error(f"Pillow d.text ellipsis error: {e_elps_ph}")
                break
        filepath_ph_img = os.path.join(self.output_dir, filename)
try: img.save(filepath_ph_img); return filepath_ph_img
except Exception as e_save_ph_img: logger.error(f"Saving placeholder image '{filepath_ph_img}' error: {e_save_ph_img}", exc_info=True); return None
    def _search_pexels_image(self, query_str_px, output_fn_base_px):
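        """Search Pexels for `query_str_px`, download the first landscape 'large2x' hit, and return its local path (or None)."""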
if not self.USE_PEXELS or not self.pexels_api_key: return None
http_headers_px = {"Authorization": self.pexels_api_key}
http_params_px = {"query": query_str_px, "per_page": 1, "orientation": "landscape", "size": "large2x"}
        base_name_for_pexels_img, _ = os.path.splitext(output_fn_base_px)
        pexels_filename_output = base_name_for_pexels_img + f"_pexels_{random.randint(1000,9999)}.jpg"
        filepath_for_pexels_img = os.path.join(self.output_dir, pexels_filename_output)
try:
logger.info(f"Pexels: Searching for '{query_str_px}'")
effective_query_for_pexels = " ".join(query_str_px.split()[:5]) # Renamed
http_params_px["query"] = effective_query_for_pexels
response_from_pexels = requests.get("https://api.pexels.com/v1/search", headers=http_headers_px, params=http_params_px, timeout=20) # Renamed
response_from_pexels.raise_for_status()
            data_from_pexels = response_from_pexels.json()
            if data_from_pexels.get("photos") and len(data_from_pexels["photos"]) > 0:
                photo_details_item_px = data_from_pexels["photos"][0]
                photo_url_item_px = photo_details_item_px.get("src", {}).get("large2x")
                if not photo_url_item_px: logger.warning(f"Pexels: 'large2x' URL missing for '{effective_query_for_pexels}'. Details: {photo_details_item_px}"); return None
                image_response_get_px = requests.get(photo_url_item_px, timeout=60); image_response_get_px.raise_for_status()
                img_pil_data_from_pexels = Image.open(io.BytesIO(image_response_get_px.content))
if img_pil_data_from_pexels.mode != 'RGB': img_pil_data_from_pexels = img_pil_data_from_pexels.convert('RGB')
img_pil_data_from_pexels.save(filepath_for_pexels_img); logger.info(f"Pexels: Image saved to {filepath_for_pexels_img}"); return filepath_for_pexels_img
else: logger.info(f"Pexels: No photos for '{effective_query_for_pexels}'."); return None
        except requests.exceptions.RequestException as e_req_px_loop: logger.error(f"Pexels: RequestException for '{query_str_px}': {e_req_px_loop}", exc_info=False); return None
        except Exception as e_px_gen_loop: logger.error(f"Pexels: General error for '{query_str_px}': {e_px_gen_loop}", exc_info=True); return None
    def _generate_video_clip_with_runwayml(self, motion_prompt_rwy, input_img_path_rwy, scene_id_base_fn_rwy, duration_s_rwy=5):
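        """Submit a Gen-4 image-to-video task to RunwayML, poll until completion (up to ~6 minutes), then download and return the MP4 path."""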
if not self.USE_RUNWAYML or not self.runway_ml_sdk_client_instance: logger.warning("RunwayML skip: Not enabled/client not init."); return None
if not input_img_path_rwy or not os.path.exists(input_img_path_rwy): logger.error(f"Runway Gen-4 needs input img. Invalid: {input_img_path_rwy}"); return None
        img_data_uri_rwy = self._image_to_data_uri(input_img_path_rwy)
        if not img_data_uri_rwy: return None
        rwy_actual_dur = 10 if duration_s_rwy >= 8 else 5; rwy_actual_ratio = self._map_resolution_to_runway_ratio(self.video_frame_size[0], self.video_frame_size[1])
        rwy_fn_base, _ = os.path.splitext(scene_id_base_fn_rwy); rwy_output_fn = rwy_fn_base + f"_runway_gen4_d{rwy_actual_dur}s.mp4"; rwy_output_fp = os.path.join(self.output_dir, rwy_output_fn)
logger.info(f"Runway Gen-4 task: motion='{motion_prompt_rwy[:70]}...', img='{os.path.basename(input_img_path_rwy)}', dur={rwy_actual_dur}s, ratio='{rwy_actual_ratio}'")
try:
            rwy_submitted_task = self.runway_ml_sdk_client_instance.image_to_video.create(model='gen4_turbo', prompt_image=img_data_uri_rwy, prompt_text=motion_prompt_rwy, duration=rwy_actual_dur, ratio=rwy_actual_ratio)
            rwy_task_id_val = rwy_submitted_task.id; logger.info(f"Runway task ID: {rwy_task_id_val}. Polling...")
            poll_interval_val = 10; max_poll_attempts = 36; poll_start_timestamp = time.time()
            while time.time() - poll_start_timestamp < max_poll_attempts * poll_interval_val:
                time.sleep(poll_interval_val); rwy_task_details_obj = self.runway_ml_sdk_client_instance.tasks.retrieve(id=rwy_task_id_val)
logger.info(f"Runway task {rwy_task_id_val} status: {rwy_task_details_obj.status}")
if rwy_task_details_obj.status=='SUCCEEDED':
                    # Try the known response shapes in order: output.url, artifacts[0].url, artifacts[0].download_url.
                    rwy_video_output_url = (getattr(getattr(rwy_task_details_obj, 'output', None), 'url', None)
                        or (getattr(rwy_task_details_obj, 'artifacts', None) and rwy_task_details_obj.artifacts and getattr(rwy_task_details_obj.artifacts[0], 'url', None))
                        or (getattr(rwy_task_details_obj, 'artifacts', None) and rwy_task_details_obj.artifacts and getattr(rwy_task_details_obj.artifacts[0], 'download_url', None)))
if not rwy_video_output_url:logger.error(f"Runway task {rwy_task_id_val} SUCCEEDED, no output URL. Details:{vars(rwy_task_details_obj)if hasattr(rwy_task_details_obj,'__dict__')else rwy_task_details_obj}");return None
logger.info(f"Runway task {rwy_task_id_val} SUCCEEDED. Downloading: {rwy_video_output_url}")
                    runway_video_response = requests.get(rwy_video_output_url, stream=True, timeout=300); runway_video_response.raise_for_status()
                    with open(rwy_output_fp, 'wb') as f_out_vid:
                        for data_chunk_vid in runway_video_response.iter_content(chunk_size=8192): f_out_vid.write(data_chunk_vid)
logger.info(f"Runway Gen-4 video saved: {rwy_output_fp}");return rwy_output_fp
elif rwy_task_details_obj.status in['FAILED','ABORTED','ERROR']:
                    runway_error_detail = getattr(rwy_task_details_obj, 'error_message', None) or getattr(getattr(rwy_task_details_obj, 'output', None), 'error', "Unknown Runway error.")
logger.error(f"Runway task {rwy_task_id_val} status:{rwy_task_details_obj.status}. Error:{runway_error_detail}");return None
logger.warning(f"Runway task {rwy_task_id_val} timed out.");return None
        except AttributeError as e_rwy_sdk_attr: logger.error(f"RunwayML SDK AttrError:{e_rwy_sdk_attr}. SDK methods changed?", exc_info=True); return None
        except Exception as e_rwy_general: logger.error(f"Runway Gen-4 API error:{e_rwy_general}", exc_info=True); return None
    def _create_placeholder_video_content(self, text_description_ph_vid, filename_ph_vid, duration_ph_vid=4, size_ph_vid=None):
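        """Render `text_description_ph_vid` as a static full-frame TextClip and write it out as a short placeholder video."""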
if size_ph_vid is None: size_ph_vid = self.video_frame_size
        filepath_ph_vid_out = os.path.join(self.output_dir, filename_ph_vid)
        text_clip_object_ph = None
try:
            text_clip_object_ph = TextClip(text_description_ph_vid, fontsize=50, color='white', font=self.active_moviepy_font_name,
                                           bg_color='black', size=size_ph_vid, method='caption').set_duration(duration_ph_vid)
text_clip_object_ph.write_videofile(filepath_ph_vid_out, fps=24, codec='libx264', preset='ultrafast', logger=None, threads=2)
logger.info(f"Generic placeholder video created: {filepath_ph_vid_out}")
return filepath_ph_vid_out
except Exception as e_placeholder_video_creation:
logger.error(f"Failed to create generic placeholder video '{filepath_ph_vid_out}': {e_placeholder_video_creation}", exc_info=True)
return None
finally:
if text_clip_object_ph and hasattr(text_clip_object_ph, 'close'):
try: text_clip_object_ph.close()
except Exception as e_close_placeholder_clip: logger.warning(f"Ignoring error closing placeholder TextClip: {e_close_placeholder_clip}")
    def generate_scene_asset(self, image_generation_prompt_text, motion_prompt_text_for_video,
                             scene_data_dictionary, scene_identifier_filename_base,
                             generate_as_video_clip_bool=False, runway_target_duration_val=5):
        """Generate one scene asset: a base image via DALL-E, falling back to Pexels and then to a
        text placeholder; optionally animate the base image into a video clip with RunwayML."""
base_name_current_asset, _ = os.path.splitext(scene_identifier_filename_base)
asset_info_return_obj = {'path': None, 'type': 'none', 'error': True, 'prompt_used': image_generation_prompt_text, 'error_message': 'Asset generation init failed'}
path_to_input_image_for_runway = None
filename_for_base_image_output = base_name_current_asset + ("_base_for_video.png" if generate_as_video_clip_bool else ".png")
filepath_for_base_image_output = os.path.join(self.output_dir, filename_for_base_image_output)
# Base Image Generation (DALL-E -> Pexels -> Placeholder)
if self.USE_AI_IMAGE_GENERATION and self.openai_api_key:
max_retries_dalle, current_attempt_dalle = 2,0;
            for idx_dalle_attempt in range(max_retries_dalle):
current_attempt_dalle = idx_dalle_attempt + 1
try:
logger.info(f"Att {current_attempt_dalle} DALL-E (base img): {image_generation_prompt_text[:70]}...");
dalle_client = openai.OpenAI(api_key=self.openai_api_key,timeout=90.0);
dalle_response = dalle_client.images.generate(model=self.dalle_model,prompt=image_generation_prompt_text,n=1,size=self.image_size_dalle3,quality="hd",response_format="url",style="vivid");
dalle_image_url = dalle_response.data[0].url;
dalle_revised_prompt = getattr(dalle_response.data[0],'revised_prompt',None);
if dalle_revised_prompt: logger.info(f"DALL-E revised: {dalle_revised_prompt[:70]}...")
dalle_image_get_response = requests.get(dalle_image_url,timeout=120); dalle_image_get_response.raise_for_status();
dalle_pil_image = Image.open(io.BytesIO(dalle_image_get_response.content));
if dalle_pil_image.mode!='RGB': dalle_pil_image=dalle_pil_image.convert('RGB')
dalle_pil_image.save(filepath_for_base_image_output); logger.info(f"DALL-E base img saved: {filepath_for_base_image_output}");
path_to_input_image_for_runway=filepath_for_base_image_output;
asset_info_return_obj={'path':filepath_for_base_image_output,'type':'image','error':False,'prompt_used':image_generation_prompt_text,'revised_prompt':dalle_revised_prompt};
break
except openai.RateLimitError as e_dalle_rl: logger.warning(f"OpenAI RateLimit Att {current_attempt_dalle}:{e_dalle_rl}.Retry...");time.sleep(5*current_attempt_dalle);asset_info_return_obj['error_message']=str(e_dalle_rl)
except openai.APIError as e_dalle_api: logger.error(f"OpenAI APIError Att {current_attempt_dalle}:{e_dalle_api}");asset_info_return_obj['error_message']=str(e_dalle_api);break
except requests.exceptions.RequestException as e_dalle_req: logger.error(f"Requests Err DALL-E Att {current_attempt_dalle}:{e_dalle_req}");asset_info_return_obj['error_message']=str(e_dalle_req);break
except Exception as e_dalle_gen: logger.error(f"General DALL-E Err Att {current_attempt_dalle}:{e_dalle_gen}",exc_info=True);asset_info_return_obj['error_message']=str(e_dalle_gen);break
if asset_info_return_obj['error']: logger.warning(f"DALL-E failed after {current_attempt_dalle} attempts for base img.")
if asset_info_return_obj['error'] and self.USE_PEXELS:
logger.info("Trying Pexels for base img.");
            pexels_query_text_val = scene_data_dictionary.get('pexels_search_query', f"{scene_data_dictionary.get('emotional_beat','')} {scene_data_dictionary.get('setting_description','')}")
pexels_path_result = self._search_pexels_image(pexels_query_text_val, filename_for_base_image_output);
if pexels_path_result:path_to_input_image_for_runway=pexels_path_result;asset_info_return_obj={'path':pexels_path_result,'type':'image','error':False,'prompt_used':f"Pexels:{pexels_query_text_val}"}
else:current_error_msg_pexels=asset_info_return_obj.get('error_message',"");asset_info_return_obj['error_message']=(current_error_msg_pexels+" Pexels failed for base.").strip()
if asset_info_return_obj['error']:
logger.warning("Base img (DALL-E/Pexels) failed. Using placeholder.");
placeholder_prompt_text_val =asset_info_return_obj.get('prompt_used',image_generation_prompt_text);
placeholder_path_result=self._create_placeholder_image_content(f"[Base Placeholder]{placeholder_prompt_text_val[:70]}...",filename_for_base_image_output);
if placeholder_path_result:path_to_input_image_for_runway=placeholder_path_result;asset_info_return_obj={'path':placeholder_path_result,'type':'image','error':False,'prompt_used':placeholder_prompt_text_val}
else:current_error_msg_ph=asset_info_return_obj.get('error_message',"");asset_info_return_obj['error_message']=(current_error_msg_ph+" Base placeholder failed.").strip()
if generate_as_video_clip_bool: # Attempt RunwayML if requested
if not path_to_input_image_for_runway:logger.error("RunwayML video: base img failed completely.");asset_info_return_obj['error']=True;asset_info_return_obj['error_message']=(asset_info_return_obj.get('error_message',"")+" Base img entirely miss, Runway abort.").strip();asset_info_return_obj['type']='none';return asset_info_return_obj
if self.USE_RUNWAYML:
                runway_generated_video_path = self._generate_video_clip_with_runwayml(motion_prompt_text_for_video, path_to_input_image_for_runway, base_name_current_asset, runway_target_duration_val)
                if runway_generated_video_path and os.path.exists(runway_generated_video_path): asset_info_return_obj = {'path': runway_generated_video_path, 'type': 'video', 'error': False, 'prompt_used': motion_prompt_text_for_video, 'base_image_path': path_to_input_image_for_runway}
                else: logger.warning(f"RunwayML video failed for {base_name_current_asset}. Fallback to base img."); asset_info_return_obj['error'] = True; asset_info_return_obj['error_message'] = (asset_info_return_obj.get('error_message', "Base img ok.") + " RunwayML video fail; use base img.").strip(); asset_info_return_obj['path'] = path_to_input_image_for_runway; asset_info_return_obj['type'] = 'image'; asset_info_return_obj['prompt_used'] = image_generation_prompt_text
else:logger.warning("RunwayML selected but disabled. Use base img.");asset_info_return_obj['error']=True;asset_info_return_obj['error_message']=(asset_info_return_obj.get('error_message',"Base img ok.")+" RunwayML disabled; use base img.").strip();asset_info_return_obj['path']=path_to_input_image_for_runway;asset_info_return_obj['type']='image';asset_info_return_obj['prompt_used']=image_generation_prompt_text
return asset_info_return_obj
def generate_narration_audio(self, narration_text, output_fn="narration_overall.mp3"):
        """Synthesize `narration_text` with ElevenLabs, preferring a streaming endpoint when the installed SDK exposes one."""
if not self.USE_ELEVENLABS or not self.elevenlabs_client_instance or not narration_text: logger.info("11L conditions not met. Skip audio."); return None
narration_fp = os.path.join(self.output_dir, output_fn)
try:
logger.info(f"11L audio (Voice:{self.elevenlabs_voice_id}): \"{narration_text[:70]}...\"")
stream_method = None
if hasattr(self.elevenlabs_client_instance,'text_to_speech') and hasattr(self.elevenlabs_client_instance.text_to_speech,'stream'): stream_method=self.elevenlabs_client_instance.text_to_speech.stream; logger.info("Using 11L .text_to_speech.stream()")
elif hasattr(self.elevenlabs_client_instance,'generate_stream'): stream_method=self.elevenlabs_client_instance.generate_stream; logger.info("Using 11L .generate_stream()")
elif hasattr(self.elevenlabs_client_instance,'generate'):
logger.info("Using 11L .generate() (non-streaming).")
voice_p = Voice(voice_id=str(self.elevenlabs_voice_id),settings=self.elevenlabs_voice_settings_obj) if Voice and self.elevenlabs_voice_settings_obj else str(self.elevenlabs_voice_id)
audio_b = self.elevenlabs_client_instance.generate(text=narration_text,voice=voice_p,model="eleven_multilingual_v2")
with open(narration_fp,"wb") as f_audio: f_audio.write(audio_b); logger.info(f"11L audio (non-stream): {narration_fp}"); return narration_fp
else: logger.error("No recognized 11L audio method."); return None
if stream_method:
voice_stream_params={"voice_id":str(self.elevenlabs_voice_id)}
if self.elevenlabs_voice_settings_obj:
if hasattr(self.elevenlabs_voice_settings_obj,'model_dump'): voice_stream_params["voice_settings"]=self.elevenlabs_voice_settings_obj.model_dump()
elif hasattr(self.elevenlabs_voice_settings_obj,'dict'): voice_stream_params["voice_settings"]=self.elevenlabs_voice_settings_obj.dict()
else: voice_stream_params["voice_settings"]=self.elevenlabs_voice_settings_obj
audio_iter = stream_method(text=narration_text,model_id="eleven_multilingual_v2",**voice_stream_params)
with open(narration_fp,"wb") as f_audio_stream:
for chunk_item in audio_iter:
if chunk_item: f_audio_stream.write(chunk_item)
logger.info(f"11L audio (stream): {narration_fp}"); return narration_fp
except AttributeError as e_11l_attr: logger.error(f"11L SDK AttrError: {e_11l_attr}. SDK/methods changed?", exc_info=True); return None
except Exception as e_11l_gen: logger.error(f"11L audio gen error: {e_11l_gen}", exc_info=True); return None
def assemble_animatic_from_assets(self, asset_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24):
        """Assemble processed scene assets (images and video clips) into a single animatic MP4.

        Images are letterboxed onto the target frame and given a gentle Ken Burns zoom; videos
        are trimmed or looped to their scene duration. Each scene optionally receives a
        key-action text overlay, the clips are concatenated with a short crossfade, the overall
        narration track is attached if provided, and the result is written with yuv420p for
        broad player compatibility. Intermediate debug frames are saved to output_dir to help
        diagnose blank-video issues.
        """
if not asset_data_list: logger.warning("No assets for animatic."); return None
processed_moviepy_clips_list = []; narration_audio_clip_mvpy = None; final_video_output_clip = None
logger.info(f"Assembling from {len(asset_data_list)} assets. Target Frame: {self.video_frame_size}.")
for i_asset, asset_info_item_loop in enumerate(asset_data_list):
path_of_asset, type_of_asset, duration_for_scene = asset_info_item_loop.get('path'), asset_info_item_loop.get('type'), asset_info_item_loop.get('duration', 4.5)
num_of_scene, action_in_key = asset_info_item_loop.get('scene_num', i_asset + 1), asset_info_item_loop.get('key_action', '')
logger.info(f"S{num_of_scene}: Path='{path_of_asset}', Type='{type_of_asset}', Dur='{duration_for_scene}'s")
if not (path_of_asset and os.path.exists(path_of_asset)): logger.warning(f"S{num_of_scene}: Not found '{path_of_asset}'. Skip."); continue
if duration_for_scene <= 0: logger.warning(f"S{num_of_scene}: Invalid duration ({duration_for_scene}s). Skip."); continue
active_scene_clip = None
try:
if type_of_asset == 'image':
opened_pil_img = Image.open(path_of_asset); logger.debug(f"S{num_of_scene}: Loaded img. Mode:{opened_pil_img.mode}, Size:{opened_pil_img.size}")
debug_original_path = os.path.join(self.output_dir,f"debug_0_ORIGINAL_S{num_of_scene}.png"); opened_pil_img.save(debug_original_path)
converted_img_rgba = opened_pil_img.convert('RGBA') if opened_pil_img.mode != 'RGBA' else opened_pil_img.copy().convert('RGBA')
debug_rgba_path = os.path.join(self.output_dir,f"debug_1_AS_RGBA_S{num_of_scene}.png"); converted_img_rgba.save(debug_rgba_path)
thumbnailed_img = converted_img_rgba.copy(); resample_f = Image.Resampling.LANCZOS if hasattr(Image.Resampling,'LANCZOS') else Image.BILINEAR; thumbnailed_img.thumbnail(self.video_frame_size,resample_f)
debug_thumb_path = os.path.join(self.output_dir,f"debug_2_THUMBNAIL_RGBA_S{num_of_scene}.png"); thumbnailed_img.save(debug_thumb_path)
rgba_canvas = Image.new('RGBA',self.video_frame_size,(0,0,0,0)); pos_x,pos_y=(self.video_frame_size[0]-thumbnailed_img.width)//2,(self.video_frame_size[1]-thumbnailed_img.height)//2
rgba_canvas.paste(thumbnailed_img,(pos_x,pos_y),thumbnailed_img)
debug_composite_rgba_path = os.path.join(self.output_dir,f"debug_3_COMPOSITED_RGBA_S{num_of_scene}.png"); rgba_canvas.save(debug_composite_rgba_path)
final_rgb_img_pil = Image.new("RGB",self.video_frame_size,(0,0,0));
if rgba_canvas.mode == 'RGBA': final_rgb_img_pil.paste(rgba_canvas,mask=rgba_canvas.split()[3])
else: final_rgb_img_pil.paste(rgba_canvas)
debug_path_img_pre_numpy = os.path.join(self.output_dir,f"debug_4_PRE_NUMPY_RGB_S{num_of_scene}.png"); final_rgb_img_pil.save(debug_path_img_pre_numpy); logger.info(f"CRITICAL DEBUG: Saved PRE_NUMPY_RGB_S{num_of_scene} to {debug_path_img_pre_numpy}")
# Option A: Use the saved, processed image file for ImageClip
logger.info(f"S{num_of_scene}: Attempting ImageClip FROM FILE: {debug_path_img_pre_numpy}")
base_image_clip = ImageClip(debug_path_img_pre_numpy, transparent=False).set_duration(duration_for_scene)
# Option B: Use NumPy array (uncomment to test if file method fails, or vice-versa)
# numpy_frame_arr = np.array(final_rgb_img_pil,dtype=np.uint8);
# if not numpy_frame_arr.flags['C_CONTIGUOUS']: numpy_frame_arr=np.ascontiguousarray(numpy_frame_arr,dtype=np.uint8)
# logger.debug(f"S{num_of_scene}: NumPy for MoviePy. Shape:{numpy_frame_arr.shape}, DType:{numpy_frame_arr.dtype}, C-Contig:{numpy_frame_arr.flags['C_CONTIGUOUS']}")
# if numpy_frame_arr.size==0 or numpy_frame_arr.ndim!=3 or numpy_frame_arr.shape[2]!=3: logger.error(f"S{num_of_scene}: Invalid NumPy array for MoviePy. Skip."); continue
# base_image_clip = ImageClip(numpy_frame_arr,transparent=False, ismask=False).set_duration(duration_for_scene)
logger.debug(f"S{num_of_scene}: Base ImageClip created. Duration: {base_image_clip.duration}")
debug_path_moviepy_frame=os.path.join(self.output_dir,f"debug_7_MOVIEPY_FRAME_S{num_of_scene}.png");
try: base_image_clip.save_frame(debug_path_moviepy_frame,t=min(0.1, base_image_clip.duration/2 if base_image_clip.duration > 0 else 0.1)); logger.info(f"CRITICAL DEBUG: Saved frame FROM MOVIEPY ImageClip S{num_of_scene} to {debug_path_moviepy_frame}")
except Exception as e_save_frame: logger.error(f"DEBUG: Error saving frame FROM MOVIEPY ImageClip S{num_of_scene}: {e_save_frame}", exc_info=True)
fx_image_clip = base_image_clip
                    try:
                        scale_end_kb = random.uniform(1.03, 1.08)
                        if duration_for_scene > 0: fx_image_clip = base_image_clip.fx(vfx.resize, lambda t_val: 1 + (scale_end_kb - 1) * (t_val / duration_for_scene)).set_position('center'); logger.debug(f"S{num_of_scene}: Ken Burns applied.")
                    except Exception as e_kb_fx: logger.error(f"S{num_of_scene} Ken Burns error: {e_kb_fx}", exc_info=False)
active_scene_clip = fx_image_clip
elif type_of_asset == 'video':
source_video_clip_obj=None
try:
logger.debug(f"S{num_of_scene}: Loading VIDEO asset: {path_of_asset}")
source_video_clip_obj=VideoFileClip(path_of_asset,target_resolution=(self.video_frame_size[1],self.video_frame_size[0])if self.video_frame_size else None, audio=False)
temp_video_clip_obj_loop=source_video_clip_obj
if source_video_clip_obj.duration!=duration_for_scene:
if source_video_clip_obj.duration>duration_for_scene:temp_video_clip_obj_loop=source_video_clip_obj.subclip(0,duration_for_scene)
else:
if duration_for_scene/source_video_clip_obj.duration > 1.5 and source_video_clip_obj.duration>0.1:temp_video_clip_obj_loop=source_video_clip_obj.loop(duration=duration_for_scene)
else:temp_video_clip_obj_loop=source_video_clip_obj.set_duration(source_video_clip_obj.duration);logger.info(f"S{num_of_scene} Video clip ({source_video_clip_obj.duration:.2f}s) shorter than target ({duration_for_scene:.2f}s).")
active_scene_clip=temp_video_clip_obj_loop.set_duration(duration_for_scene)
if active_scene_clip.size!=list(self.video_frame_size):active_scene_clip=active_scene_clip.resize(self.video_frame_size)
logger.debug(f"S{num_of_scene}: Video asset processed. Final duration for scene: {active_scene_clip.duration:.2f}s")
except Exception as e_vid_load_loop:logger.error(f"S{num_of_scene} Video load error '{path_of_asset}':{e_vid_load_loop}",exc_info=True);continue
finally:
if source_video_clip_obj and source_video_clip_obj is not active_scene_clip and hasattr(source_video_clip_obj,'close'):
try: source_video_clip_obj.close()
except Exception as e_close_src_vid: logger.warning(f"S{num_of_scene}: Error closing source VideoFileClip: {e_close_src_vid}")
else: logger.warning(f"S{num_of_scene} Unknown asset type '{type_of_asset}'. Skipping."); continue
if active_scene_clip and action_in_key: # Text Overlay
try:
dur_text_overlay_val=min(active_scene_clip.duration-0.5,active_scene_clip.duration*0.8)if active_scene_clip.duration>0.5 else (active_scene_clip.duration if active_scene_clip.duration > 0 else 0)
start_text_overlay_val=0.25 if active_scene_clip.duration > 0.5 else 0
if dur_text_overlay_val > 0:
text_clip_for_overlay_obj=TextClip(f"Scene {num_of_scene}\n{action_in_key}",fontsize=self.VIDEO_OVERLAY_FONT_SIZE,color=self.VIDEO_OVERLAY_FONT_COLOR,font=self.active_moviepy_font_name,bg_color='rgba(10,10,20,0.7)',method='caption',align='West',size=(self.video_frame_size[0]*0.9,None),kerning=-1,stroke_color='black',stroke_width=1.5).set_duration(dur_text_overlay_val).set_start(start_text_overlay_val).set_position(('center',0.92),relative=True)
active_scene_clip=CompositeVideoClip([active_scene_clip,text_clip_for_overlay_obj],size=self.video_frame_size,use_bgclip=True)
logger.debug(f"S{num_of_scene}: Text overlay composited.")
else: logger.warning(f"S{num_of_scene}: Text overlay duration zero or negative ({dur_text_overlay_val}). Skipping text overlay.")
except Exception as e_txt_comp_loop:logger.error(f"S{num_of_scene} TextClip compositing error:{e_txt_comp_loop}. Proceeding without text for this scene.",exc_info=True)
if active_scene_clip: processed_moviepy_clips_list.append(active_scene_clip); logger.info(f"S{num_of_scene}: Asset successfully processed. Clip duration: {active_scene_clip.duration:.2f}s. Added to final list.")
except Exception as e_asset_loop_main_exc: logger.error(f"MAJOR UNHANDLED ERROR processing asset for S{num_of_scene} (Path: {path_of_asset}): {e_asset_loop_main_exc}", exc_info=True)
finally:
if active_scene_clip and hasattr(active_scene_clip,'close'):
try: active_scene_clip.close()
except Exception as e_close_active_err: logger.warning(f"S{num_of_scene}: Error closing active_scene_clip in error handler: {e_close_active_err}")
if not processed_moviepy_clips_list: logger.warning("No MoviePy clips were successfully processed. Aborting animatic assembly before concatenation."); return None
transition_duration_val=0.75
try:
logger.info(f"Concatenating {len(processed_moviepy_clips_list)} processed clips for final animatic.");
if len(processed_moviepy_clips_list)>1: final_video_output_clip=concatenate_videoclips(processed_moviepy_clips_list, padding=-transition_duration_val if transition_duration_val > 0 else 0, method="compose")
elif processed_moviepy_clips_list: final_video_output_clip=processed_moviepy_clips_list[0]
if not final_video_output_clip: logger.error("Concatenation resulted in a None clip. Aborting."); return None
logger.info(f"Concatenated animatic base duration:{final_video_output_clip.duration:.2f}s")
if transition_duration_val > 0 and final_video_output_clip.duration > 0:
if final_video_output_clip.duration > transition_duration_val * 2: final_video_output_clip=final_video_output_clip.fx(vfx.fadein,transition_duration_val).fx(vfx.fadeout,transition_duration_val)
else: final_video_output_clip=final_video_output_clip.fx(vfx.fadein,min(transition_duration_val,final_video_output_clip.duration/2.0))
logger.debug("Applied fade in/out effects to final composite clip.")
if overall_narration_path and os.path.exists(overall_narration_path) and final_video_output_clip.duration > 0:
try: narration_audio_clip_mvpy=AudioFileClip(overall_narration_path); logger.info(f"Adding overall narration. Video duration: {final_video_output_clip.duration:.2f}s, Narration duration: {narration_audio_clip_mvpy.duration:.2f}s"); final_video_output_clip=final_video_output_clip.set_audio(narration_audio_clip_mvpy); logger.info("Overall narration successfully added to animatic.")
except Exception as e_narr_add_final:logger.error(f"Error adding overall narration to animatic:{e_narr_add_final}",exc_info=True)
elif final_video_output_clip.duration <= 0: logger.warning("Animatic has zero or negative duration before adding audio. Audio will not be added.")
if final_video_output_clip and final_video_output_clip.duration > 0:
final_output_path_str=os.path.join(self.output_dir,output_filename); logger.info(f"Writing final animatic video to: {final_output_path_str} (Target Duration: {final_video_output_clip.duration:.2f}s)")
num_threads = os.cpu_count(); num_threads = num_threads if isinstance(num_threads, int) and num_threads >= 1 else 2
final_video_output_clip.write_videofile(final_output_path_str, fps=fps, codec='libx264', preset='medium', audio_codec='aac', temp_audiofile=os.path.join(self.output_dir,f'temp-audio-{os.urandom(4).hex()}.m4a'), remove_temp=True, threads=num_threads, logger='bar', bitrate="5000k", ffmpeg_params=["-pix_fmt", "yuv420p"])
logger.info(f"Animatic video created successfully: {final_output_path_str}"); return final_output_path_str
else: logger.error("Final animatic clip is invalid or has zero duration. Cannot write video file."); return None
except Exception as e_vid_write_final_op: logger.error(f"Error during final animatic video file writing or composition stage: {e_vid_write_final_op}", exc_info=True); return None
finally:
logger.debug("Closing all MoviePy clips in `assemble_animatic_from_assets` main finally block.")
all_clips_for_closure = processed_moviepy_clips_list[:]
if narration_audio_clip_mvpy and hasattr(narration_audio_clip_mvpy, 'close'): all_clips_for_closure.append(narration_audio_clip_mvpy) # Check hasattr before append
if final_video_output_clip and hasattr(final_video_output_clip, 'close'): all_clips_for_closure.append(final_video_output_clip)
for clip_to_close_item_final in all_clips_for_closure:
if clip_to_close_item_final and hasattr(clip_to_close_item_final, 'close'): # Double check before closing
try: clip_to_close_item_final.close()
except Exception as e_final_clip_close_op: logger.warning(f"Ignoring error while closing a MoviePy clip ({type(clip_to_close_item_final).__name__}): {e_final_clip_close_op}") |
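# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the engine). The
# environment-variable names below are assumptions for this example, except
# RUNWAYML_API_SECRET, which the SDK init above already reads; adapt them to
# however the host application stores its secrets.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    engine = VisualEngine(output_dir="temp_cinegen_media")
    engine.set_openai_api_key(os.getenv("OPENAI_API_KEY"))          # hypothetical var name
    engine.set_elevenlabs_api_key(os.getenv("ELEVENLABS_API_KEY"),  # hypothetical var names
                                  voice_id_from_secret=os.getenv("ELEVENLABS_VOICE_ID"))
    engine.set_pexels_api_key(os.getenv("PEXELS_API_KEY"))          # hypothetical var name
    engine.set_runway_api_key(os.getenv("RUNWAYML_API_SECRET"))
    # Generate a single still-image asset for one scene, then assemble a one-scene animatic.
    scene = {'emotional_beat': 'quiet dread', 'setting_description': 'an abandoned lighthouse at dusk'}
    asset = engine.generate_scene_asset(
        image_generation_prompt_text="An abandoned lighthouse at dusk, cinematic wide shot",
        motion_prompt_text_for_video="slow push-in toward the lighthouse door",
        scene_data_dictionary=scene,
        scene_identifier_filename_base="scene_1.png",
        generate_as_video_clip_bool=False)
    if asset and not asset.get('error'):
        narration_path = engine.generate_narration_audio("The lighthouse had been dark for thirty years.")
        engine.assemble_animatic_from_assets(
            [{'path': asset['path'], 'type': asset['type'], 'duration': 4.5,
              'scene_num': 1, 'key_action': 'Establishing shot'}],
            overall_narration_path=narration_path,
            output_filename="demo_animatic.mp4")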