Update core/visual_engine.py

core/visual_engine.py (CHANGED: +133, -175)
```diff
@@ -4,7 +4,7 @@ import base64
 import mimetypes
 import numpy as np
 import os
-import openai
+import openai
 import requests
 import io
 import time
```
```diff
@@ -15,17 +15,16 @@ from moviepy.editor import (ImageClip, VideoFileClip, concatenate_videoclips, TextClip,
                             CompositeVideoClip, AudioFileClip)
 import moviepy.video.fx.all as vfx
 
-try: # MONKEY PATCH
-    if hasattr(Image, 'Resampling') and hasattr(Image.Resampling, 'LANCZOS'):
+try: # MONKEY PATCH
+    if hasattr(Image, 'Resampling') and hasattr(Image.Resampling, 'LANCZOS'):
         if not hasattr(Image, 'ANTIALIAS'): Image.ANTIALIAS = Image.Resampling.LANCZOS
-    elif hasattr(Image, 'LANCZOS'):
+    elif hasattr(Image, 'LANCZOS'):
         if not hasattr(Image, 'ANTIALIAS'): Image.ANTIALIAS = Image.LANCZOS
-    elif not hasattr(Image, 'ANTIALIAS'):
-
-except Exception as e_mp: print(f"WARNING: ANTIALIAS monkey-patch error: {e_mp}")
+    elif not hasattr(Image, 'ANTIALIAS'): print("WARNING: Pillow ANTIALIAS/Resampling issue.")
+except Exception as e_mp: print(f"WARNING: ANTIALIAS patch error: {e_mp}")
 
 logger = logging.getLogger(__name__)
-# logger.setLevel(logging.DEBUG) # Uncomment for
+# logger.setLevel(logging.DEBUG) # Uncomment for verbose debugging
 
 ELEVENLABS_CLIENT_IMPORTED = False; ElevenLabsAPIClient = None; Voice = None; VoiceSettings = None
 try:
```
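Note on the hunk above: Pillow 10 removed the long-deprecated `Image.ANTIALIAS` constant, while MoviePy 1.x's `vfx.resize` (used later in this file) still references it, so the patch restores the attribute before MoviePy touches it. A minimal standalone sketch of the same shim, assuming only that Pillow is installed:

```python
# Compatibility shim: Pillow >= 10 removed Image.ANTIALIAS, but older
# libraries (e.g. MoviePy 1.x's vfx.resize) still reference it.
from PIL import Image

if not hasattr(Image, "ANTIALIAS"):
    if hasattr(Image, "Resampling"):  # Pillow >= 9.1 resampling enum
        Image.ANTIALIAS = Image.Resampling.LANCZOS
    elif hasattr(Image, "LANCZOS"):   # older flat constant
        Image.ANTIALIAS = Image.LANCZOS

# Legacy call sites keep working after the shim:
thumb = Image.new("RGB", (200, 100)).resize((100, 50), Image.ANTIALIAS)
```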
```diff
@@ -42,126 +41,96 @@ try:
     logger.info("RunwayML SDK imported.")
 except Exception as e_rwy_imp: logger.warning(f"RunwayML SDK import failed: {e_rwy_imp}. RunwayML disabled.")
 
-
 class VisualEngine:
     DEFAULT_FONT_SIZE_PIL = 10; PREFERRED_FONT_SIZE_PIL = 20
     VIDEO_OVERLAY_FONT_SIZE = 30; VIDEO_OVERLAY_FONT_COLOR = 'white'
     DEFAULT_MOVIEPY_FONT = 'DejaVu-Sans-Bold'; PREFERRED_MOVIEPY_FONT = 'Liberation-Sans-Bold'
 
-    # <<< CRITICAL __init__ METHOD - ENSURE IT MATCHES THIS >>>
     def __init__(self, output_dir="temp_cinegen_media", default_elevenlabs_voice_id="Rachel"):
         self.output_dir = output_dir
         try:
             os.makedirs(self.output_dir, exist_ok=True)
             logger.info(f"VisualEngine output directory set/ensured: {os.path.abspath(self.output_dir)}")
-            # Test writability immediately
             test_file_path = os.path.join(self.output_dir, ".ve_write_test.txt")
-            with open(test_file_path, "w") as f_test:
-
-
-            logger.info(f"Write test to output directory '{self.output_dir}' successful.")
-        except Exception as e_mkdir_init: # More specific exception catching
-            logger.critical(f"CRITICAL FAILURE: Could not create or write to output directory '{os.path.abspath(self.output_dir)}': {e_mkdir_init}", exc_info=True)
-            raise OSError(f"VisualEngine failed to initialize output directory '{self.output_dir}'. Check permissions and path.") from e_mkdir_init
-
+            with open(test_file_path, "w") as f_test: f_test.write("VisualEngine write test OK")
+            os.remove(test_file_path); logger.info(f"Write test to '{self.output_dir}' OK.")
+        except Exception as e_mkdir: logger.critical(f"CRITICAL: Failed to create/write to output dir '{os.path.abspath(self.output_dir)}': {e_mkdir}", exc_info=True); raise OSError(f"VisualEngine failed to init output dir: {self.output_dir}") from e_mkdir
         self.font_filename_pil_preference = "DejaVuSans-Bold.ttf"
-
-        self.resolved_font_path_pil = next((p for p in
-
-        self.active_font_pil = ImageFont.load_default()
-        self.active_font_size_pil = self.DEFAULT_FONT_SIZE_PIL
-        self.active_moviepy_font_name = self.DEFAULT_MOVIEPY_FONT
-
+        font_paths = [self.font_filename_pil_preference, f"/usr/share/fonts/truetype/dejavu/{self.font_filename_pil_preference}", "/usr/share/fonts/truetype/liberation/LiberationSans-Bold.ttf", "/System/Library/Fonts/Supplemental/Arial.ttf", "C:/Windows/Fonts/arial.ttf", "/usr/local/share/fonts/truetype/mycustomfonts/arial.ttf"]
+        self.resolved_font_path_pil = next((p for p in font_paths if os.path.exists(p)), None)
+        self.active_font_pil = ImageFont.load_default(); self.active_font_size_pil = self.DEFAULT_FONT_SIZE_PIL; self.active_moviepy_font_name = self.DEFAULT_MOVIEPY_FONT
         if self.resolved_font_path_pil:
-            try:
-
-
-                logger.info(f"Pillow font loaded: {self.resolved_font_path_pil} at size {self.active_font_size_pil}.")
-                self.active_moviepy_font_name = 'DejaVu-Sans-Bold' if "dejavu" in self.resolved_font_path_pil.lower() else ('Liberation-Sans-Bold' if "liberation" in self.resolved_font_path_pil.lower() else self.DEFAULT_MOVIEPY_FONT)
-            except IOError as e_font_load: logger.error(f"Pillow font IOError for '{self.resolved_font_path_pil}': {e_font_load}. Using default.")
-        else: logger.warning("Preferred Pillow font not found in predefined paths. Using default.")
-
+            try: self.active_font_pil = ImageFont.truetype(self.resolved_font_path_pil, self.PREFERRED_FONT_SIZE_PIL); self.active_font_size_pil = self.PREFERRED_FONT_SIZE_PIL; logger.info(f"Pillow font: {self.resolved_font_path_pil} sz {self.active_font_size_pil}."); self.active_moviepy_font_name = 'DejaVu-Sans-Bold' if "dejavu" in self.resolved_font_path_pil.lower() else ('Liberation-Sans-Bold' if "liberation" in self.resolved_font_path_pil.lower() else self.DEFAULT_MOVIEPY_FONT)
+            except IOError as e_font: logger.error(f"Pillow font IOError '{self.resolved_font_path_pil}': {e_font}. Default.")
+        else: logger.warning("Preferred Pillow font not found. Default.")
         self.openai_api_key = None; self.USE_AI_IMAGE_GENERATION = False; self.dalle_model = "dall-e-3"; self.image_size_dalle3 = "1792x1024"
         self.video_frame_size = (1280, 720)
-
-
-        self.elevenlabs_voice_id = default_elevenlabs_voice_id
-        logger.info(f"VisualEngine __init__: ElevenLabs Voice ID initially set to: {self.elevenlabs_voice_id}")
-
+        self.elevenlabs_api_key = None; self.USE_ELEVENLABS = False; self.elevenlabs_client_instance = None; self.elevenlabs_voice_id = default_elevenlabs_voice_id
+        logger.info(f"VisualEngine __init__: 11L Voice ID initially: {self.elevenlabs_voice_id}")
         if VoiceSettings and ELEVENLABS_CLIENT_IMPORTED: self.elevenlabs_voice_settings_obj = VoiceSettings(stability=0.60, similarity_boost=0.80, style=0.15, use_speaker_boost=True)
         else: self.elevenlabs_voice_settings_obj = None
-
         self.pexels_api_key = None; self.USE_PEXELS = False
         self.runway_api_key = None; self.USE_RUNWAYML = False; self.runway_ml_sdk_client_instance = None
-
         if RUNWAYML_SDK_IMPORTED and RunwayMLAPIClientClass and os.getenv("RUNWAYML_API_SECRET"):
-            try: self.runway_ml_sdk_client_instance = RunwayMLAPIClientClass(); self.USE_RUNWAYML = True; logger.info("RunwayML Client
-            except Exception as
-
-        logger.info("VisualEngine __init__ sequence fully completed.")
+            try: self.runway_ml_sdk_client_instance = RunwayMLAPIClientClass(); self.USE_RUNWAYML = True; logger.info("RunwayML Client init from env var at startup.")
+            except Exception as e_rwy_init: logger.error(f"Initial RunwayML client init failed: {e_rwy_init}"); self.USE_RUNWAYML = False
+        logger.info("VisualEngine __init__ sequence complete.")
 
-
-
-    def set_elevenlabs_api_key(self,
-        self.elevenlabs_api_key
-        if
-
-
-
-
-
-
-
-    def set_runway_api_key(self, api_key_value):
-        self.runway_api_key = api_key_value
-        if api_key_value:
+    # --- API Key Setters (Keep as previously corrected) ---
+    def set_openai_api_key(self, k): self.openai_api_key=k; self.USE_AI_IMAGE_GENERATION=bool(k); logger.info(f"DALL-E: {'Ready' if self.USE_AI_IMAGE_GENERATION else 'Disabled'}")
+    def set_elevenlabs_api_key(self, k, vid=None):
+        self.elevenlabs_api_key=k
+        if vid: self.elevenlabs_voice_id = vid; logger.info(f"11L Voice ID updated to: {vid}")
+        if k and ELEVENLABS_CLIENT_IMPORTED and ElevenLabsAPIClient:
+            try: self.elevenlabs_client_instance = ElevenLabsAPIClient(api_key=k); self.USE_ELEVENLABS=True; logger.info(f"11L Client: Ready (Voice:{self.elevenlabs_voice_id})")
+            except Exception as e: logger.error(f"11L client init err: {e}. Disabled.", exc_info=True); self.USE_ELEVENLABS=False; self.elevenlabs_client_instance=None
+        else: self.USE_ELEVENLABS = False; logger.info(f"11L Disabled (key/SDK).")
+    def set_pexels_api_key(self, k): self.pexels_api_key=k; self.USE_PEXELS=bool(k); logger.info(f"Pexels: {'Ready' if self.USE_PEXELS else 'Disabled'}")
+    def set_runway_api_key(self, k):
+        self.runway_api_key = k
+        if k:
             if RUNWAYML_SDK_IMPORTED and RunwayMLAPIClientClass:
-                if not self.runway_ml_sdk_client_instance:
+                if not self.runway_ml_sdk_client_instance:
                     try:
-
-                        if not
-                        self.runway_ml_sdk_client_instance
-                        if not
-                    except Exception as
-            else: self.USE_RUNWAYML
-        else: logger.warning("RunwayML SDK not imported.
-        else: self.USE_RUNWAYML
+                        orig_secret = os.getenv("RUNWAYML_API_SECRET")
+                        if not orig_secret: os.environ["RUNWAYML_API_SECRET"]=k; logger.info("Temp set RUNWAYML_API_SECRET for SDK.")
+                        self.runway_ml_sdk_client_instance=RunwayMLAPIClientClass(); self.USE_RUNWAYML=True; logger.info("RunwayML Client init via set_key.")
+                        if not orig_secret: del os.environ["RUNWAYML_API_SECRET"]; logger.info("Cleared temp RUNWAYML_API_SECRET.")
+                    except Exception as e: logger.error(f"RunwayML Client init in set_key fail: {e}", exc_info=True); self.USE_RUNWAYML=False; self.runway_ml_sdk_client_instance=None
+                else: self.USE_RUNWAYML=True; logger.info("RunwayML Client already init.")
+            else: logger.warning("RunwayML SDK not imported. Disabled."); self.USE_RUNWAYML=False
+        else: self.USE_RUNWAYML=False; self.runway_ml_sdk_client_instance=None; logger.info("RunwayML Disabled (no key).")
 
-    # --- Helper Methods (_image_to_data_uri, _map_resolution_to_runway_ratio, etc.) ---
-    # (These should be the corrected versions from previous iterations)
-    def _image_to_data_uri(self, image_path_in):
+    # --- Helper Methods (_image_to_data_uri, _map_resolution_to_runway_ratio, _get_text_dimensions, etc.) ---
+    # (These should be the corrected versions from previous iterations - ensure try/except blocks are complete)
+    def _image_to_data_uri(self, image_path_in):
         try:
-            mime_type_val, _ = mimetypes.guess_type(image_path_in)
-            if not mime_type_val:
-
-
-
-
-            with open(image_path_in, "rb") as img_file_handle: img_binary_data = img_file_handle.read() # Renamed
-            encoded_b64_str = base64.b64encode(img_binary_data).decode('utf-8') # Renamed
-            final_data_uri = f"data:{mime_type_val};base64,{encoded_b64_str}"; logger.debug(f"Data URI for {os.path.basename(image_path_in)} (MIME:{mime_type_val}): {final_data_uri[:100]}..."); return final_data_uri # Renamed
+            mime_type_val, _ = mimetypes.guess_type(image_path_in)
+            if not mime_type_val: ext = os.path.splitext(image_path_in)[1].lower(); mime_map = {".png": "image/png", ".jpg": "image/jpeg", ".jpeg": "image/jpeg", ".webp": "image/webp"}; mime_type_val = mime_map.get(ext, "application/octet-stream")
+            if mime_type_val == "application/octet-stream": logger.warning(f"Unknown MIME for {image_path_in}, using {mime_type_val}.")
+            with open(image_path_in, "rb") as img_file_handle: img_binary_data = img_file_handle.read()
+            encoded_b64_str = base64.b64encode(img_binary_data).decode('utf-8')
+            final_data_uri = f"data:{mime_type_val};base64,{encoded_b64_str}"; logger.debug(f"Data URI for {os.path.basename(image_path_in)} (MIME:{mime_type_val}): {final_data_uri[:100]}..."); return final_data_uri
         except FileNotFoundError: logger.error(f"Img not found {image_path_in} for data URI."); return None
-        except Exception as e_to_data_uri: logger.error(f"Error converting {image_path_in} to data URI:{e_to_data_uri}", exc_info=True); return None
+        except Exception as e_to_data_uri: logger.error(f"Error converting {image_path_in} to data URI:{e_to_data_uri}", exc_info=True); return None
 
-    def _map_resolution_to_runway_ratio(self, width_in, height_in):
-        ratio_string = f"{width_in}:{height_in}"; supported_ratios = ["1280:720","720:1280","1104:832","832:1104","960:960","1584:672"]
+    def _map_resolution_to_runway_ratio(self, width_in, height_in):
+        ratio_string = f"{width_in}:{height_in}"; supported_ratios = ["1280:720","720:1280","1104:832","832:1104","960:960","1584:672"]
         if ratio_string in supported_ratios: return ratio_string
-        logger.warning(f"Res {ratio_string} not in Gen-4 list. Default 1280:720 for Runway.")
+        logger.warning(f"Res {ratio_string} not in Gen-4 list. Default 1280:720 for Runway."); return "1280:720"
 
-    def _get_text_dimensions(self, text_str, font_pil_obj):
+    def _get_text_dimensions(self, text_str, font_pil_obj):
         def_h = getattr(font_pil_obj, 'size', self.active_font_size_pil)
         if not text_str: return 0, def_h
         try:
-            if hasattr(font_pil_obj,'getbbox'): box = font_pil_obj.getbbox(text_str); w_val=box[2]-box[0]; h_val=box[3]-box[1]; return w_val, h_val if h_val > 0 else def_h
-            elif hasattr(font_pil_obj,'getsize'): w_val,h_val=font_pil_obj.getsize(text_str); return w_val, h_val if h_val > 0 else def_h
+            if hasattr(font_pil_obj,'getbbox'): box = font_pil_obj.getbbox(text_str); w_val=box[2]-box[0]; h_val=box[3]-box[1]; return w_val, h_val if h_val > 0 else def_h
+            elif hasattr(font_pil_obj,'getsize'): w_val,h_val=font_pil_obj.getsize(text_str); return w_val, h_val if h_val > 0 else def_h
             else: return int(len(text_str)*def_h*0.6), int(def_h*1.2)
-        except Exception as e_get_dim: logger.warning(f"Error in _get_text_dimensions: {e_get_dim}"); return int(len(text_str)*self.active_font_size_pil*0.6),int(self.active_font_size_pil*1.2)
+        except Exception as e_get_dim: logger.warning(f"Error in _get_text_dimensions: {e_get_dim}"); return int(len(text_str)*self.active_font_size_pil*0.6),int(self.active_font_size_pil*1.2)
 
-    def _create_placeholder_image_content(self,text_desc_val, filename_val, size_val=None):
-        # (Corrected version from previous responses)
+    def _create_placeholder_image_content(self,text_desc_val, filename_val, size_val=None):
         if size_val is None: size_val = self.video_frame_size
-        placeholder_img = Image.new('RGB', size_val, color=(20, 20, 40)); placeholder_draw = ImageDraw.Draw(placeholder_img); ph_padding = 25
+        placeholder_img = Image.new('RGB', size_val, color=(20, 20, 40)); placeholder_draw = ImageDraw.Draw(placeholder_img); ph_padding = 25
         ph_max_w = size_val[0] - (2 * ph_padding); ph_lines = []
         if not text_desc_val: text_desc_val = "(Placeholder Image)"
         ph_words = text_desc_val.split(); ph_current_line = ""
```
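Note on `set_runway_api_key` above: the RunwayML SDK client apparently reads its key from the `RUNWAYML_API_SECRET` environment variable rather than a constructor argument, so the setter temporarily plants the key in `os.environ`, builds the client, then removes it. A minimal sketch of that hand-off, with the hypothetical helper name `init_client_with_temp_env` and `client_cls` standing in for the SDK's client class:

```python
import os

def init_client_with_temp_env(client_cls, api_key):
    """Build an SDK client that only reads RUNWAYML_API_SECRET from the
    environment, without permanently mutating os.environ (sketch)."""
    had_secret = os.getenv("RUNWAYML_API_SECRET") is not None
    if not had_secret:
        os.environ["RUNWAYML_API_SECRET"] = api_key  # temporary hand-off
    try:
        return client_cls()  # client picks the key up from the env
    finally:
        if not had_secret:
            del os.environ["RUNWAYML_API_SECRET"]  # restore prior state
```

Unlike the diff, this sketch restores the environment in a `finally` block, so the key is cleared even when client construction raises.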
```diff
@@ -197,44 +166,33 @@ class VisualEngine:
         try: placeholder_img.save(ph_filepath); return ph_filepath
         except Exception as e_ph_save: logger.error(f"Saving placeholder image '{ph_filepath}' error: {e_ph_save}", exc_info=True); return None
 
-    def _search_pexels_image(self,
+    def _search_pexels_image(self, query_str_px, output_fn_base_px):
         if not self.USE_PEXELS or not self.pexels_api_key: return None
-
-
-
-
-
+        http_headers_px = {"Authorization": self.pexels_api_key}
+        http_params_px = {"query": query_str_px, "per_page": 1, "orientation": "landscape", "size": "large2x"}
+        base_name_for_pexels_img, _ = os.path.splitext(output_fn_base_px)
+        pexels_filename_output = base_name_for_pexels_img + f"_pexels_{random.randint(1000,9999)}.jpg"
+        filepath_for_pexels_img = os.path.join(self.output_dir, pexels_filename_output)
         try:
-            logger.info(f"Pexels: Searching for '{
-
-
-
-
-
-            if
-
-
-            if not
-
-
-            if
-
-            else: logger.info(f"Pexels: No photos for '{
-        except requests.exceptions.RequestException as
-        except Exception as
-
-    # ... (Rest of methods: _generate_video_clip_with_runwayml, generate_scene_asset, generate_narration_audio, assemble_animatic_from_assets)
-    # Ensure these are taken from the last fully corrected versions provided, paying close attention to their specific fixes.
-    # For example, generate_narration_audio had its own try-except fix.
-    # assemble_animatic_from_assets had extensive debugging for image corruption.
-
-    # For brevity, I will paste the corrected generate_narration_audio and the
-    # structure for generate_scene_asset and assemble_animatic_from_assets.
-    # You MUST ensure the internal logic of generate_scene_asset and assemble_animatic_from_assets
-    # matches the last "expert" versions that included detailed debugging for image/video issues.
+            logger.info(f"Pexels: Searching for '{query_str_px}'")
+            effective_query_for_pexels = " ".join(query_str_px.split()[:5])
+            http_params_px["query"] = effective_query_for_pexels
+            response_from_pexels = requests.get("https://api.pexels.com/v1/search", headers=http_headers_px, params=http_params_px, timeout=20)
+            response_from_pexels.raise_for_status()
+            data_from_pexels = response_from_pexels.json()
+            if data_from_pexels.get("photos") and len(data_from_pexels["photos"]) > 0:
+                photo_details_item_px = data_from_pexels["photos"][0]
+                photo_url_item_px = photo_details_item_px.get("src", {}).get("large2x")
+                if not photo_url_item_px: logger.warning(f"Pexels: 'large2x' URL missing for '{effective_query_for_pexels}'. Details: {photo_details_item_px}"); return None
+                image_response_get_px = requests.get(photo_url_item_px, timeout=60); image_response_get_px.raise_for_status()
+                img_pil_data_from_pexels = Image.open(io.BytesIO(image_response_get_px.content))
+                if img_pil_data_from_pexels.mode != 'RGB': img_pil_data_from_pexels = img_pil_data_from_pexels.convert('RGB')
+                img_pil_data_from_pexels.save(filepath_for_pexels_img); logger.info(f"Pexels: Image saved to {filepath_for_pexels_img}"); return filepath_for_pexels_img
+            else: logger.info(f"Pexels: No photos for '{effective_query_for_pexels}'."); return None
+        except requests.exceptions.RequestException as e_req_px_loop: logger.error(f"Pexels: RequestException for '{query_str_px}': {e_req_px_loop}", exc_info=False); return None
+        except Exception as e_px_gen_loop: logger.error(f"Pexels: General error for '{query_str_px}': {e_px_gen_loop}", exc_info=True); return None
 
     def _generate_video_clip_with_runwayml(self, motion_prompt_rwy, input_img_path_rwy, scene_id_base_fn_rwy, duration_s_rwy=5):
-        # (Keep robust RunwayML logic from before, with proper SDK client instance: self.runway_ml_sdk_client_instance)
         if not self.USE_RUNWAYML or not self.runway_ml_sdk_client_instance: logger.warning("RunwayML skip: Not enabled/client not init."); return None
         if not input_img_path_rwy or not os.path.exists(input_img_path_rwy): logger.error(f"Runway Gen-4 needs input img. Invalid: {input_img_path_rwy}"); return None
         img_data_uri_rwy = self._image_to_data_uri(input_img_path_rwy)
```
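Note on `_search_pexels_image` above: it is one `GET https://api.pexels.com/v1/search` call followed by a download of the first hit's `src.large2x` URL. A trimmed standalone sketch of the same flow (the function name is hypothetical, the API key and query are placeholders, and the request parameters simply mirror the diff):

```python
import io
import requests
from PIL import Image

def fetch_first_pexels_photo(api_key, query, out_path="pexels.jpg"):
    resp = requests.get(
        "https://api.pexels.com/v1/search",
        headers={"Authorization": api_key},
        params={"query": " ".join(query.split()[:5]),  # cap query length, as the diff does
                "per_page": 1, "orientation": "landscape", "size": "large2x"},
        timeout=20,
    )
    resp.raise_for_status()
    photos = resp.json().get("photos", [])
    if not photos:
        return None  # no match for this query
    url = photos[0].get("src", {}).get("large2x")
    if not url:
        return None  # unexpected payload shape
    img_resp = requests.get(url, timeout=60)
    img_resp.raise_for_status()
    Image.open(io.BytesIO(img_resp.content)).convert("RGB").save(out_path)
    return out_path
```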
```diff
@@ -283,9 +241,8 @@
         except Exception as e_close_placeholder_clip: logger.warning(f"Ignoring error closing placeholder TextClip: {e_close_placeholder_clip}")
 
     def generate_scene_asset(self, image_generation_prompt_text, motion_prompt_text_for_video,
-
-                             generate_as_video_clip_flag=False,
-        # (Corrected DALL-E loop from previous response)
+                             scene_data_dictionary, scene_identifier_fn_base,
+                             generate_as_video_clip_flag=False, runway_target_duration_val=5):
         base_name_current_asset, _ = os.path.splitext(scene_identifier_fn_base)
         asset_info_return_obj = {'path': None, 'type': 'none', 'error': True, 'prompt_used': image_generation_prompt_text, 'error_message': 'Asset generation init failed'}
         path_to_input_image_for_runway = None
```
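Note on `generate_scene_asset` above: instead of a bare path it returns a status dictionary whose keys are initialised up front ('path', 'type', 'error', 'prompt_used', 'error_message'), so callers can distinguish video, image, and failure without exceptions. A sketch of the implied consumption pattern (hypothetical caller, not part of the diff):

```python
def handle_scene_asset(asset_info):
    """Consume the dict contract set up in generate_scene_asset (sketch)."""
    if asset_info.get("error"):
        print(f"Asset failed: {asset_info.get('error_message', 'unknown')}")
        return None
    kind, path = asset_info.get("type"), asset_info.get("path")
    if kind == "video":
        print(f"Runway clip ready at {path}")
    elif kind == "image":
        print(f"Still image (base or fallback) at {path}")
    return path
```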
```diff
@@ -323,41 +280,33 @@
         else:logger.warning("RunwayML selected but disabled. Use base img.");asset_info_return_obj['error']=True;asset_info_return_obj['error_message']=(asset_info_return_obj.get('error_message',"Base img ok.")+" RunwayML disabled; use base img.").strip();asset_info_return_obj['path']=path_to_input_image_for_runway;asset_info_return_obj['type']='image';asset_info_return_obj['prompt_used']=image_generation_prompt_text
         return asset_info_return_obj
 
-    def generate_narration_audio(self,
-
-
-
-
-
-
-        logger.info(
-
-
-
-
-
-
-
-
-            if Voice and self.elevenlabs_voice_settings_obj: voice_param_11l = Voice(voice_id=str(self.elevenlabs_voice_id), settings=self.elevenlabs_voice_settings_obj)
-            audio_bytes_data = self.elevenlabs_client_instance.generate(text=text_to_narrate, voice=voice_param_11l, model="eleven_multilingual_v2")
-            with open(audio_filepath_narration, "wb") as audio_file_out: audio_file_out.write(audio_bytes_data)
-            logger.info(f"ElevenLabs audio (non-streamed) saved successfully to: {audio_filepath_narration}"); return audio_filepath_narration
-        else: logger.error("No recognized audio generation method found on the ElevenLabs client instance."); return None
-
-        if audio_stream_method_11l:
-            params_for_voice_stream = {"voice_id": str(self.elevenlabs_voice_id)}
+    def generate_narration_audio(self, narration_text, output_fn="narration_overall.mp3"):
+        if not self.USE_ELEVENLABS or not self.elevenlabs_client_instance or not narration_text: logger.info("11L conditions not met. Skip audio."); return None
+        narration_fp = os.path.join(self.output_dir, output_fn)
+        try:
+            logger.info(f"11L audio (Voice:{self.elevenlabs_voice_id}): \"{narration_text[:70]}...\"")
+            stream_method = None
+            if hasattr(self.elevenlabs_client_instance,'text_to_speech') and hasattr(self.elevenlabs_client_instance.text_to_speech,'stream'): stream_method=self.elevenlabs_client_instance.text_to_speech.stream; logger.info("Using 11L .text_to_speech.stream()")
+            elif hasattr(self.elevenlabs_client_instance,'generate_stream'): stream_method=self.elevenlabs_client_instance.generate_stream; logger.info("Using 11L .generate_stream()")
+            elif hasattr(self.elevenlabs_client_instance,'generate'):
+                logger.info("Using 11L .generate() (non-streaming).")
+                voice_p = Voice(voice_id=str(self.elevenlabs_voice_id),settings=self.elevenlabs_voice_settings_obj) if Voice and self.elevenlabs_voice_settings_obj else str(self.elevenlabs_voice_id)
+                audio_b = self.elevenlabs_client_instance.generate(text=narration_text,voice=voice_p,model="eleven_multilingual_v2")
+                with open(narration_fp,"wb") as f_audio: f_audio.write(audio_b); logger.info(f"11L audio (non-stream): {narration_fp}"); return narration_fp
+            else: logger.error("No recognized 11L audio method."); return None
+            if stream_method:
+                voice_stream_params={"voice_id":str(self.elevenlabs_voice_id)}
                 if self.elevenlabs_voice_settings_obj:
-                    if hasattr(self.elevenlabs_voice_settings_obj,
-                    elif hasattr(self.elevenlabs_voice_settings_obj,
-                    else:
-
-            with open(
-                for
-                    if
-            logger.info(f"
-        except AttributeError as
-        except Exception as
+                    if hasattr(self.elevenlabs_voice_settings_obj,'model_dump'): voice_stream_params["voice_settings"]=self.elevenlabs_voice_settings_obj.model_dump()
+                    elif hasattr(self.elevenlabs_voice_settings_obj,'dict'): voice_stream_params["voice_settings"]=self.elevenlabs_voice_settings_obj.dict()
+                    else: voice_stream_params["voice_settings"]=self.elevenlabs_voice_settings_obj
+                audio_iter = stream_method(text=narration_text,model_id="eleven_multilingual_v2",**voice_stream_params)
+                with open(narration_fp,"wb") as f_audio_stream:
+                    for chunk_item in audio_iter:
+                        if chunk_item: f_audio_stream.write(chunk_item)
+                logger.info(f"11L audio (stream): {narration_fp}"); return narration_fp
+        except AttributeError as e_11l_attr: logger.error(f"11L SDK AttrError: {e_11l_attr}. SDK/methods changed?", exc_info=True); return None
+        except Exception as e_11l_gen: logger.error(f"11L audio gen error: {e_11l_gen}", exc_info=True); return None
 
     def assemble_animatic_from_assets(self, asset_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24):
         if not asset_data_list: logger.warning("No assets for animatic."); return None
```
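Note on `generate_narration_audio` above: because the ElevenLabs SDK has renamed its entry points across releases, the method probes for `text_to_speech.stream`, then `generate_stream`, then non-streaming `generate`. A condensed sketch of that capability probe (hypothetical helper; the attribute names are exactly the ones the diff checks, but whether a given SDK release has them is version-dependent):

```python
def pick_elevenlabs_method(client):
    """Return (callable, is_streaming) for the first audio entry point
    the installed ElevenLabs client exposes, in the diff's probe order."""
    tts = getattr(client, "text_to_speech", None)
    if tts is not None and hasattr(tts, "stream"):
        return tts.stream, True          # newer SDK layout
    if hasattr(client, "generate_stream"):
        return client.generate_stream, True
    if hasattr(client, "generate"):
        return client.generate, False    # non-streaming fallback
    raise AttributeError("No recognized ElevenLabs audio method")
```

Probing with `hasattr` rather than pinning one SDK version is the design choice here: the same engine code can run against whichever client the deployment happens to have installed.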
```diff
@@ -394,8 +343,7 @@
             logger.debug(f"S{num_of_scene} (6-ImageClip): Base ImageClip. Duration: {base_image_clip_mvpy.duration}")
 
             debug_path_moviepy_frame = os.path.join(self.output_dir,f"debug_7_MOVIEPY_FRAME_S{num_of_scene}.png")
-            # <<<
-            try:
+            try: # <<< CORRECTED: This try now has an except >>>
                 save_frame_time = min(0.1, base_image_clip_mvpy.duration / 2 if base_image_clip_mvpy.duration > 0 else 0.1)
                 base_image_clip_mvpy.save_frame(debug_path_moviepy_frame, t=save_frame_time)
                 logger.info(f"CRITICAL DEBUG: Saved frame FROM MOVIEPY ImageClip S{num_of_scene} to {debug_path_moviepy_frame}")
```
```diff
@@ -403,12 +351,13 @@
                 logger.error(f"DEBUG: Error saving frame FROM MOVIEPY ImageClip S{num_of_scene}: {e_save_mvpy_frame}", exc_info=True)
 
             fx_image_clip_mvpy = base_image_clip_mvpy
-            try:
+            try: # Ken Burns try block
                 scale_end_kb_val = random.uniform(1.03, 1.08)
                 if duration_for_scene > 0: fx_image_clip_mvpy = base_image_clip_mvpy.fx(vfx.resize, lambda t_val: 1 + (scale_end_kb_val - 1) * (t_val / duration_for_scene)).set_position('center'); logger.debug(f"S{num_of_scene} (8-KenBurns): Ken Burns applied.")
                 else: logger.warning(f"S{num_of_scene}: Duration zero, skipping Ken Burns.")
-            except Exception as e_kb_fx_loop:
-
+            except Exception as e_kb_fx_loop: # Except for Ken Burns
+                logger.error(f"S{num_of_scene} Ken Burns error: {e_kb_fx_loop}", exc_info=False)
+            active_scene_clip = fx_image_clip_mvpy # Assign result (either original or with FX)
         elif type_of_asset == 'video':
             source_video_clip_obj=None
             try:
```
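Note on the Ken Burns hunk above: the effect is just `vfx.resize` driven by a function of time, scaling linearly from 1.0 at t=0 to a random 1.03 to 1.08 at the end of the clip. A minimal sketch, assuming MoviePy 1.x (where `clip.fx(vfx.resize, func)` accepts a time-dependent scale factor, exactly as the diff uses it):

```python
import random
from moviepy.editor import ImageClip
import moviepy.video.fx.all as vfx

def ken_burns_clip(image_path, duration=5.0):
    """Still image -> clip with a slow linear zoom (sketch)."""
    clip = ImageClip(image_path).set_duration(duration)
    scale_end = random.uniform(1.03, 1.08)  # subtle zoom target
    # Scale interpolates 1.0 -> scale_end linearly over the clip.
    zoom = lambda t: 1 + (scale_end - 1) * (t / duration)
    return clip.fx(vfx.resize, zoom).set_position("center")
```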
```diff
@@ -429,7 +378,8 @@
                     try: source_video_clip_obj.close()
                     except Exception as e_close_src_vid: logger.warning(f"S{num_of_scene}: Error closing source VideoFileClip: {e_close_src_vid}")
             else: logger.warning(f"S{num_of_scene} Unknown asset type '{type_of_asset}'. Skipping."); continue
-
+
+            if active_scene_clip and action_in_key: # Text Overlay
                 try:
                     dur_text_overlay_val=min(active_scene_clip.duration-0.5,active_scene_clip.duration*0.8)if active_scene_clip.duration>0.5 else (active_scene_clip.duration if active_scene_clip.duration > 0 else 0)
                     start_text_overlay_val=0.25 if active_scene_clip.duration > 0.5 else 0
```
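Note on the overlay timing above: for a clip longer than 0.5 s the text starts at 0.25 s and lasts `min(duration - 0.5, duration * 0.8)`, which guarantees the overlay both starts after and ends before the clip. Worked example for a 4 s clip:

```python
duration = 4.0
start = 0.25 if duration > 0.5 else 0
overlay = min(duration - 0.5, duration * 0.8) if duration > 0.5 else (duration if duration > 0 else 0)
print(start, overlay)  # 0.25 3.2 -> overlay ends at 3.45 s, 0.55 s before the clip does
```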
```diff
@@ -439,11 +389,12 @@
                         logger.debug(f"S{num_of_scene}: Text overlay composited.")
                     else: logger.warning(f"S{num_of_scene}: Text overlay duration zero or negative ({dur_text_overlay_val}). Skipping text overlay.")
                 except Exception as e_txt_comp_loop:logger.error(f"S{num_of_scene} TextClip compositing error:{e_txt_comp_loop}. Proceeding without text for this scene.",exc_info=True)
+
             if active_scene_clip: processed_moviepy_clips_list.append(active_scene_clip); logger.info(f"S{num_of_scene}: Asset successfully processed. Clip duration: {active_scene_clip.duration:.2f}s. Added to final list.")
         except Exception as e_asset_loop_main_exc: logger.error(f"MAJOR UNHANDLED ERROR processing asset for S{num_of_scene} (Path: {path_of_asset}): {e_asset_loop_main_exc}", exc_info=True)
-        finally:
-            if active_scene_clip and hasattr(active_scene_clip,'close'):
-                try: active_scene_clip.close()
+        finally: # Ensure individual clip created in this iteration is closed if it's not added or an error occurs
+            if active_scene_clip and active_scene_clip not in processed_moviepy_clips_list and hasattr(active_scene_clip,'close'):
+                try: active_scene_clip.close(); logger.debug(f"S{num_of_scene}: Closed active_scene_clip in asset loop finally block.")
                 except Exception as e_close_active_err: logger.warning(f"S{num_of_scene}: Error closing active_scene_clip in error handler: {e_close_active_err}")
 
     if not processed_moviepy_clips_list: logger.warning("No MoviePy clips were successfully processed. Aborting animatic assembly before concatenation."); return None
```
```diff
@@ -471,9 +422,16 @@
         except Exception as e_vid_write_final_op: logger.error(f"Error during final animatic video file writing or composition stage: {e_vid_write_final_op}", exc_info=True); return None
         finally:
             logger.debug("Closing all MoviePy clips in `assemble_animatic_from_assets` main finally block.")
-            all_clips_for_closure = processed_moviepy_clips_list[:]
+            all_clips_for_closure = processed_moviepy_clips_list[:] # Start with a copy of the clips that were successfully processed and added
             if narration_audio_clip_mvpy and hasattr(narration_audio_clip_mvpy, 'close'): all_clips_for_closure.append(narration_audio_clip_mvpy)
-
+            # final_video_output_clip itself is composed of other clips; closing it might close its sources if not already closed.
+            # If concatenate_videoclips or effects create new intermediate clips that aren't in processed_moviepy_clips_list,
+            # those might need more careful handling. However, MoviePy usually manages this.
+            # We will explicitly close the final composite clip if it exists.
+            if final_video_output_clip and hasattr(final_video_output_clip, 'close'):
+                if final_video_output_clip not in all_clips_for_closure: # Avoid double-adding if it was the only clip
+                    all_clips_for_closure.append(final_video_output_clip)
+
             for clip_to_close_item_final in all_clips_for_closure:
                 if clip_to_close_item_final and hasattr(clip_to_close_item_final, 'close'):
                     try: clip_to_close_item_final.close()
```
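Note on the closing block above: `VideoFileClip` and `AudioFileClip` hold open file handles and ffmpeg reader subprocesses, so every distinct clip should be closed exactly once, with close errors swallowed. A compact sketch of the same dedup-and-close pattern (hypothetical helper, not part of the diff):

```python
def close_clips(*clips):
    """Close each distinct MoviePy clip once, ignoring close errors."""
    seen = []
    for clip in clips:
        if clip is None or any(clip is c for c in seen):
            continue  # identity check avoids relying on __eq__/__hash__
        seen.append(clip)
        if hasattr(clip, "close"):
            try:
                clip.close()
            except Exception as exc:
                print(f"Ignoring error while closing clip: {exc}")
```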