Update core/visual_engine.py

core/visual_engine.py  CHANGED  (+157 -198)
@@ -1,13 +1,15 @@
 # core/visual_engine.py
 from PIL import Image, ImageDraw, ImageFont, ImageOps
-# --- MONKEY PATCH ---
+# --- MONKEY PATCH FOR Image.ANTIALIAS ---
 try:
-    if hasattr(Image, 'Resampling') and hasattr(Image.Resampling, 'LANCZOS'):
+    if hasattr(Image, 'Resampling') and hasattr(Image.Resampling, 'LANCZOS'): # Pillow 9+
         if not hasattr(Image, 'ANTIALIAS'): Image.ANTIALIAS = Image.Resampling.LANCZOS
-    elif hasattr(Image, 'LANCZOS'):
+    elif hasattr(Image, 'LANCZOS'): # Pillow 8
         if not hasattr(Image, 'ANTIALIAS'): Image.ANTIALIAS = Image.LANCZOS
     elif not hasattr(Image, 'ANTIALIAS'):
+        print("WARNING: Pillow version lacks common Resampling attributes or ANTIALIAS. Video effects might fail.")
+except Exception as e_mp: print(f"WARNING: ANTIALIAS monkey-patch error: {e_mp}")
+# --- END MONKEY PATCH ---

 from moviepy.editor import (ImageClip, VideoFileClip, concatenate_videoclips, TextClip,
                             CompositeVideoClip, AudioFileClip)
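Note: Pillow 10 removed the long-deprecated Image.ANTIALIAS constant, but MoviePy 1.x's resize effect still references it at call time, so this shim keeps clip.resize(...) working on new Pillow releases. A minimal standalone sanity check of the same idea (hypothetical script, assuming Pillow 9.1+):

# Recreate the shim, then exercise the legacy constant the way MoviePy does.
from PIL import Image

if hasattr(Image, "Resampling") and not hasattr(Image, "ANTIALIAS"):
    Image.ANTIALIAS = Image.Resampling.LANCZOS  # same patch as the module above

img = Image.new("RGB", (1280, 720), (20, 20, 40))
thumb = img.resize((640, 360), Image.ANTIALIAS)  # old-style constant still resolves
print(thumb.size)  # (640, 360)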
@@ -24,7 +26,7 @@ import logging
 logger = logging.getLogger(__name__)
 logger.setLevel(logging.INFO)

-# ---
+# --- ElevenLabs Client Import ---
 ELEVENLABS_CLIENT_IMPORTED = False; ElevenLabsAPIClient = None; Voice = None; VoiceSettings = None
 try:
     from elevenlabs.client import ElevenLabs as ImportedElevenLabsClient
@@ -33,11 +35,9 @@ try:
     ELEVENLABS_CLIENT_IMPORTED = True; logger.info("ElevenLabs client components imported.")
 except Exception as e_eleven: logger.warning(f"ElevenLabs client import failed: {e_eleven}. Audio disabled.")

+# --- RunwayML Client Import (Placeholder) ---
+RUNWAYML_SDK_IMPORTED = False; RunwayMLClient = None
 try:
-    # from runwayml import RunwayClient # Hypothetical actual import
-    # RunwayMLClient = RunwayClient
-    # RUNWAYML_SDK_IMPORTED = True
     logger.info("RunwayML SDK import is a placeholder.")
 except ImportError: logger.warning("RunwayML SDK (placeholder) not found. RunwayML disabled.")
 except Exception as e_runway_sdk: logger.warning(f"Error importing RunwayML SDK (placeholder): {e_runway_sdk}. RunwayML disabled.")
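Both import guards follow the same optional-dependency pattern this module already uses for ElevenLabs: attempt the import, record a module-level flag, and degrade to a disabled feature instead of crashing at import time. The pattern in isolation (names here are stand-ins, not a real dependency):

# Generic optional-import guard; `some_sdk` is hypothetical.
SOME_SDK_AVAILABLE = False
SomeClient = None
try:
    from some_sdk import Client as SomeClient
    SOME_SDK_AVAILABLE = True
except ImportError:
    pass  # feature stays disabled; callers must check SOME_SDK_AVAILABLE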
@@ -48,25 +48,36 @@ class VisualEngine:
         self.output_dir = output_dir
         os.makedirs(self.output_dir, exist_ok=True)
         self.font_filename = "DejaVuSans-Bold.ttf"
         font_paths_to_try = [
+            self.font_filename,
+            "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf",
+            "/usr/share/fonts/truetype/liberation/LiberationSans-Bold.ttf",
+            "/System/Library/Fonts/Supplemental/Arial.ttf", "C:/Windows/Fonts/arial.ttf",
+            "/usr/local/share/fonts/truetype/mycustomfonts/arial.ttf"
+        ]
         self.font_path_pil = next((p for p in font_paths_to_try if os.path.exists(p)), None)
         self.font_size_pil = 20
+        self.video_overlay_font_size = 30
+        self.video_overlay_font_color = 'white'
         self.video_overlay_font = 'DejaVu-Sans-Bold'

         try:
             self.font = ImageFont.truetype(self.font_path_pil, self.font_size_pil) if self.font_path_pil else ImageFont.load_default()
-            if self.font_path_pil: logger.info(f"Pillow font: {self.font_path_pil}.")
+            if self.font_path_pil: logger.info(f"Pillow font loaded: {self.font_path_pil}.")
+            else: logger.warning("Using default Pillow font."); self.font_size_pil = 10
+        except IOError as e_font: logger.error(f"Pillow font loading IOError: {e_font}. Using default."); self.font = ImageFont.load_default(); self.font_size_pil = 10

+        self.openai_api_key = None; self.USE_AI_IMAGE_GENERATION = False
+        self.dalle_model = "dall-e-3"; self.image_size_dalle3 = "1792x1024"
         self.video_frame_size = (1280, 720)
         self.elevenlabs_api_key = None; self.USE_ELEVENLABS = False; self.elevenlabs_client = None
+        self.elevenlabs_voice_id = default_elevenlabs_voice_id
         if VoiceSettings and ELEVENLABS_CLIENT_IMPORTED: self.elevenlabs_voice_settings = VoiceSettings(stability=0.60, similarity_boost=0.80, style=0.15, use_speaker_boost=True)
         else: self.elevenlabs_voice_settings = None
         self.pexels_api_key = None; self.USE_PEXELS = False
         self.runway_api_key = None; self.USE_RUNWAYML = False; self.runway_client = None
         logger.info("VisualEngine initialized.")

-    # --- API Key Setters (Keep as before) ---
     def set_openai_api_key(self,k): self.openai_api_key=k; self.USE_AI_IMAGE_GENERATION=bool(k); logger.info(f"DALL-E ({self.dalle_model}) {'Ready.' if k else 'Disabled.'}")
     def set_elevenlabs_api_key(self,api_key, voice_id_from_secret=None):
         self.elevenlabs_api_key=api_key
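The widened font_paths_to_try list makes the Pillow font resolution order explicit: bundled file first, then common Linux, macOS, and Windows locations. A quick way to see which candidate wins on a given machine (hypothetical standalone check, mirroring the same next(...) idiom):

import os
from PIL import ImageFont

candidates = [
    "DejaVuSans-Bold.ttf",
    "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf",
    "/System/Library/Fonts/Supplemental/Arial.ttf",
]
chosen = next((p for p in candidates if os.path.exists(p)), None)
font = ImageFont.truetype(chosen, 20) if chosen else ImageFont.load_default()
print("resolved font:", chosen or "Pillow built-in default")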
@@ -76,15 +87,11 @@ class VisualEngine:
         except Exception as e: logger.error(f"ElevenLabs client init error: {e}. Disabled.", exc_info=True); self.USE_ELEVENLABS=False
         else: self.USE_ELEVENLABS=False; logger.info("ElevenLabs Disabled (no key or SDK).")
     def set_pexels_api_key(self,k): self.pexels_api_key=k; self.USE_PEXELS=bool(k); logger.info(f"Pexels Search {'Ready.' if k else 'Disabled.'}")
     def set_runway_api_key(self, k):
         self.runway_api_key = k
-        # self.USE_RUNWAYML = True; logger.info("RunwayML Client (Placeholder SDK) Ready.")
-        # except Exception as e: logger.error(f"RunwayML client init error: {e}", exc_info=True); self.USE_RUNWAYML = False
-        # else: # No SDK, or direct HTTP calls are planned
-        self.USE_RUNWAYML = True; logger.info("RunwayML API Key set. (SDK integration is placeholder).")
+        if k and RUNWAYML_SDK_IMPORTED and RunwayMLClient:
+            try: self.USE_RUNWAYML = True; logger.info(f"RunwayML Client (Placeholder SDK) {'Ready.' if self.USE_RUNWAYML else 'Failed Init.'}")
+            except Exception as e: logger.error(f"RunwayML client (Placeholder SDK) init error: {e}. Disabled.", exc_info=True); self.USE_RUNWAYML = False
+        elif k: self.USE_RUNWAYML = True; logger.info("RunwayML API Key set (direct API or placeholder).")
         else: self.USE_RUNWAYML = False; logger.info("RunwayML Disabled (no API key).")
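For reference, the setters are designed to be called once at startup with whatever secrets the host app holds. A hypothetical wiring (constructor argument and env-var names assumed, not part of this diff):

import os

engine = VisualEngine(output_dir="temp_assets")
engine.set_openai_api_key(os.getenv("OPENAI_API_KEY", ""))
engine.set_elevenlabs_api_key(os.getenv("ELEVENLABS_API_KEY", ""))
engine.set_pexels_api_key(os.getenv("PEXELS_API_KEY", ""))
engine.set_runway_api_key(os.getenv("RUNWAY_API_KEY", ""))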
@@ -91,18 +98,52 @@ class VisualEngine:
+    def _get_text_dimensions(self, text_content, font_obj):
+        default_line_height = getattr(font_obj, 'size', self.font_size_pil)
+        if not text_content: return 0, default_line_height
+        try:
+            if hasattr(font_obj, 'getbbox'): # Pillow 8.0+
+                bbox = font_obj.getbbox(text_content); width = bbox[2] - bbox[0]; height = bbox[3] - bbox[1]
+                return width, height if height > 0 else default_line_height
+            elif hasattr(font_obj, 'getsize'): # older Pillow
+                width, height = font_obj.getsize(text_content)
+                return width, height if height > 0 else default_line_height
+            else: return int(len(text_content) * default_line_height * 0.6), int(default_line_height * 1.2)
+        except Exception as e: logger.warning(f"Error in _get_text_dimensions for '{text_content[:20]}...': {e}"); return int(len(text_content) * self.font_size_pil * 0.6), int(self.font_size_pil * 1.2)

+    def _create_placeholder_image_content(self, text_description, filename, size=None):
+        if size is None: size = self.video_frame_size
+        img = Image.new('RGB', size, color=(20, 20, 40)); draw = ImageDraw.Draw(img)
+        padding = 25; max_text_width = size[0] - (2 * padding); lines = []
+        if not text_description: text_description = "(Placeholder: No text description provided)"
+        words = text_description.split(); current_line = ""
+        for word in words:
+            test_line = current_line + word + " "; line_width_test, _ = self._get_text_dimensions(test_line.strip(), self.font)
+            if line_width_test <= max_text_width: current_line = test_line
+            else:
+                if current_line.strip(): lines.append(current_line.strip())
+                word_width, _ = self._get_text_dimensions(word, self.font)
+                if word_width > max_text_width: # single word wider than the canvas: hard-truncate it
+                    avg_char_w = self._get_text_dimensions("A", self.font)[0] or 10
+                    chars_that_fit = int(max_text_width / avg_char_w)
+                    lines.append(word[:chars_that_fit-3] + "..." if len(word) > chars_that_fit else word)
+                    current_line = ""
+                else: current_line = word + " "
+        if current_line.strip(): lines.append(current_line.strip())
+        if not lines and text_description:
+            avg_char_w = self._get_text_dimensions("A", self.font)[0] or 10; chars_that_fit = int(max_text_width / avg_char_w)
+            lines.append(text_description[:chars_that_fit-3] + "..." if len(text_description) > chars_that_fit else text_description)
+        elif not lines: lines.append("(Placeholder Text Error)")
+        _, single_line_height = self._get_text_dimensions("Ay", self.font); single_line_height = single_line_height if single_line_height > 0 else (self.font_size_pil + 2)
+        line_spacing = 2; max_lines_to_display = min(len(lines), (size[1]-(2*padding))//(single_line_height+line_spacing)) if single_line_height > 0 else 1
+        if max_lines_to_display <= 0: max_lines_to_display = 1
+        total_text_block_height = max_lines_to_display * single_line_height + (max_lines_to_display-1)*line_spacing
+        y_text_start = padding + (size[1]-(2*padding)-total_text_block_height)/2.0; current_y = y_text_start
+        for i in range(max_lines_to_display):
+            line_content = lines[i]; line_width_actual, _ = self._get_text_dimensions(line_content, self.font)
+            x_text = max(padding, (size[0]-line_width_actual)/2.0)
+            draw.text((x_text, current_y), line_content, font=self.font, fill=(200,200,180)); current_y += single_line_height + line_spacing
+            if i==6 and max_lines_to_display > 7 and len(lines) > max_lines_to_display:
+                ellipsis_width, _ = self._get_text_dimensions("...",self.font); x_ellipsis = max(padding, (size[0]-ellipsis_width)/2.0)
+                draw.text((x_ellipsis, current_y), "...", font=self.font, fill=(200,200,180)); break
+        filepath = os.path.join(self.output_dir, filename)
+        try: img.save(filepath); return filepath
+        except Exception as e: logger.error(f"Error saving placeholder image {filepath}: {e}", exc_info=True); return None
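_get_text_dimensions branches because Pillow changed its text-measurement API: getbbox (Pillow 8.0+) returns a 4-tuple bounding box, while the deprecated getsize returned (width, height) directly. The difference in isolation:

from PIL import ImageFont

font = ImageFont.load_default()
if hasattr(font, "getbbox"):
    x0, y0, x1, y1 = font.getbbox("Hello")
    print("w,h =", x1 - x0, y1 - y0)
else:  # very old Pillow
    print("w,h =", *font.getsize("Hello"))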
@@ -109,10 +150,10 @@ class VisualEngine:
     def _search_pexels_image(self, q, ofnb):
         if not self.USE_PEXELS or not self.pexels_api_key: return None
         h={"Authorization":self.pexels_api_key}; p={"query":q,"per_page":1,"orientation":"landscape","size":"large2x"} # moved out of the guard so they are always defined
         pfn=ofnb.replace(".png",f"_pexels_{random.randint(1000,9999)}.jpg").replace(".mp4",f"_pexels_{random.randint(1000,9999)}.jpg"); fp=os.path.join(self.output_dir,pfn)
         try:
             logger.info(f"Pexels search: '{q}'"); eq=" ".join(q.split()[:5]); p["query"]=eq
             r=requests.get("https://api.pexels.com/v1/search",headers=h,params=p,timeout=20)
             r.raise_for_status(); d=r.json()
-            if d.get("photos") and len(d["photos"])>0:pu=d["photos"][0]["src"]["large2x"];ir=requests.get(pu,timeout=60);ir.raise_for_status();
+            if d.get("photos") and len(d["photos"])>0:
+                pu=d["photos"][0]["src"]["large2x"]; ir=requests.get(pu,timeout=60); ir.raise_for_status()
+                id_img=Image.open(io.BytesIO(ir.content)) # renamed from `id` to avoid shadowing the builtin
+                if id_img.mode!='RGB': id_img=id_img.convert('RGB')
+                id_img.save(fp); logger.info(f"Pexels saved: {fp}"); return fp # save/return for RGB images too, not only converted ones
             else: logger.info(f"No Pexels for: '{eq}'")
         except Exception as e: logger.error(f"Pexels error ('{q}'): {e}",exc_info=True); return None
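The Pexels call is a plain REST GET; the only required pieces are the Authorization header and a query parameter. A trimmed standalone sketch of the same request (API key assumed to be in the environment):

import os, requests

resp = requests.get(
    "https://api.pexels.com/v1/search",
    headers={"Authorization": os.environ["PEXELS_API_KEY"]},
    params={"query": "rainy neon alley", "per_page": 1, "orientation": "landscape", "size": "large2x"},
    timeout=20,
)
resp.raise_for_status()
photos = resp.json().get("photos", [])
print(photos[0]["src"]["large2x"] if photos else "no results")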
@@ -119,71 +160,23 @@ class VisualEngine:
-        logger.error(f"Runway Gen-4 requires an input image. Path not provided or invalid: {input_image_path}")
-        return None
-        # Gen-4 produces 5s or 10s. We can aim for the closest or let user choose via app.py if more control is needed.
-        # For simplicity, let's assume target_duration_seconds from Gemini/user is a suggestion.
-        # Actual API call would specify duration if supported, or model has fixed outputs.
-        runway_duration_param = 10 if target_duration_seconds > 7 else 5 # Example logic to map to 5s or 10s
-        output_video_filename = scene_identifier_filename_base.replace(".png", f"_runway_gen4_d{runway_duration_param}s.mp4")
-        output_video_filepath = os.path.join(self.output_dir, output_video_filename)
-        logger.info(f"Attempting Runway Gen-4 (Placeholder) with image: {os.path.basename(input_image_path)}, motion prompt: '{text_prompt_for_motion[:100]}...', target duration: {runway_duration_param}s")
-        # --- ACTUAL RUNWAY GEN-4 API/SDK CALL WOULD GO HERE ---
-        # This would involve:
-        # 1. Uploading input_image_path (if API requires it, or providing a URL).
-        # 2. Submitting the job with text_prompt_for_motion and desired parameters (duration, seed, etc.).
-        # 3. Polling for completion.
-        # 4. Downloading the resulting video to output_video_filepath.
-        # Example (very hypothetical SDK structure):
-        # try:
-        #     if not self.runway_client: self.runway_client = RunwayMLClient(api_key=self.runway_api_key)
-        #     runway_task = self.runway_client.gen4.generate(
-        #         image_path=input_image_path,
-        #         text_prompt=text_prompt_for_motion,
-        #         duration_seconds=runway_duration_param, # Or let model default
-        #         # ... other Gen-4 parameters like seed, motion_score, upscale, etc.
-        #     )
-        #     runway_task.wait_for_completion() # Blocks until done
-        #     if runway_task.status == 'succeeded':
-        #         runway_task.download_video(output_video_filepath)
-        #         logger.info(f"Runway Gen-4 video saved to: {output_video_filepath}")
-        #         return output_video_filepath
-        #     else:
-        #         logger.error(f"Runway Gen-4 task failed. Status: {runway_task.status}, Error: {runway_task.error_message}")
-        #         return None
-        # except Exception as e_runway:
-        #     logger.error(f"Error during actual Runway Gen-4 call: {e_runway}", exc_info=True)
-        #     return None
-        # --- END ACTUAL RUNWAY GEN-4 API/SDK CALL ---
-        logger.warning("Using PLACEHOLDER video generation for Runway Gen-4.")
-        # Create a dummy video using the input image as a static frame for the placeholder
-        if hasattr(final_placeholder_clip, 'close'): final_placeholder_clip.close()
-        return output_video_filepath
-        except Exception as e_placeholder:
-            logger.error(f"Failed to create Runway Gen-4 placeholder video: {e_placeholder}", exc_info=True)
-            return None
+    def _generate_video_clip_with_runwayml(self, pt, sifnb, tds=5, iip=None): # pt=motion prompt, sifnb=scene filename base, tds=target duration (s), iip=input image path
+        if not self.USE_RUNWAYML or not self.runway_api_key: logger.warning("RunwayML disabled."); return None
+        if not iip or not os.path.exists(iip): logger.error(f"Runway Gen-4 needs input image. Path invalid: {iip}"); return None
+        runway_dur = 10 if tds > 7 else 5 # Gen-4 outputs map to 5s or 10s
+        base, _ = os.path.splitext(sifnb) # sifnb may arrive with or without an extension
+        ovfn = base + f"_runway_gen4_d{runway_dur}s.mp4"
+        ovfp = os.path.join(self.output_dir, ovfn)
+        logger.info(f"Runway Gen-4 (Placeholder) img: {os.path.basename(iip)}, motion: '{pt[:100]}...', dur: {runway_dur}s")
+        # --- ACTUAL RUNWAYML API CALL (NEEDS IMPLEMENTATION) ---
+        logger.warning("Using PLACEHOLDER video for Runway Gen-4.")
+        img_clip=None; txt_c=None; final_ph_clip=None # initialize for the finally block
+        try:
+            img_clip = ImageClip(iip).set_duration(runway_dur)
+            txt = f"Runway Gen-4 Placeholder\nInput: {os.path.basename(iip)}\nMotion: {pt[:50]}..."
+            txt_c = TextClip(txt, fontsize=24, color='white', font=self.video_overlay_font, bg_color='rgba(0,0,0,0.5)', size=(self.video_frame_size[0]*0.8,None), method='caption').set_duration(runway_dur).set_position('center')
+            final_ph_clip = CompositeVideoClip([img_clip, txt_c], size=img_clip.size)
+            final_ph_clip.write_videofile(ovfp, fps=24, codec='libx264', preset='ultrafast', logger=None, threads=2)
+            logger.info(f"Runway Gen-4 placeholder video: {ovfp}"); return ovfp
+        except Exception as e: logger.error(f"Runway Gen-4 placeholder error: {e}", exc_info=True); return None
+        finally:
+            if img_clip and hasattr(img_clip,'close'): img_clip.close()
+            if txt_c and hasattr(txt_c,'close'): txt_c.close()
+            if final_ph_clip and hasattr(final_ph_clip,'close'): final_ph_clip.close()
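The pre-initialized None locals plus the finally block guarantee every MoviePy clip gets close()d even when write_videofile fails. When a clip lives in a single scope, contextlib.closing is a more compact equivalent (a sketch of the alternative, not how the module is written; assumes frame.png exists):

from contextlib import closing
from moviepy.editor import ImageClip

with closing(ImageClip("frame.png").set_duration(5)) as clip:
    clip.write_videofile("out.mp4", fps=24, codec="libx264", preset="ultrafast", logger=None)
# clip.close() runs automatically on exit, releasing ffmpeg readers/handles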
@@ -190,11 +183,8 @@ class VisualEngine:
-        tc.write_videofile(fp, fps=24, codec='libx264', preset='ultrafast', logger=None, threads=2)
-        logger.info(f"Generic placeholder video: {fp}"); return fp
-        except Exception as e: logger.error(f"Generic placeholder video error {fp}: {e}", exc_info=True); return None
+    def _create_placeholder_video_content(self, td, fn, dur=4, sz=None): # generic text-on-black placeholder clip
+        if sz is None: sz = self.video_frame_size
+        fp = os.path.join(self.output_dir, fn); tc = None # kept outside the `if` guard so they always run
+        try:
+            tc = TextClip(td, fontsize=50, color='white', font=self.video_overlay_font, bg_color='black', size=sz, method='caption').set_duration(dur)
+            tc.write_videofile(fp, fps=24, codec='libx264', preset='ultrafast', logger=None, threads=2)
+            logger.info(f"Generic placeholder video: {fp}"); return fp
+        except Exception as e: logger.error(f"Generic placeholder error {fp}: {e}", exc_info=True); return None
         finally:
             if tc and hasattr(tc,'close'): tc.close()

@@ -201,84 +191,63 @@ class VisualEngine:
-    def generate_scene_asset(self, image_generation_prompt_text, # For DALL-E / Pexels
-                             motion_prompt_text_for_video, # For Runway Gen-4 (motion only)
+    def generate_scene_asset(self, image_generation_prompt_text, motion_prompt_text_for_video,
                              scene_data, scene_identifier_filename_base,
                              generate_as_video_clip=False, runway_target_duration=5):
         base_name, _ = os.path.splitext(scene_identifier_filename_base)
         asset_info = {'path': None, 'type': 'none', 'error': True, 'prompt_used': image_generation_prompt_text, 'error_message': 'Generation not attempted'}
-        # STEP 1: Generate the input image (DALL-E/Pexels/Placeholder) regardless of final asset type if video is chosen.
-        # This image will serve as the base for Runway Gen-4 if generate_as_video_clip is True.
         input_image_for_runway_path = None
-        image_filepath = os.path.join(self.output_dir, image_filename_with_ext)
+        image_filename_base = base_name + "_base_image" # for base image files
         temp_image_asset_info = {'error': True, 'prompt_used': image_generation_prompt_text, 'error_message': 'Base image generation not attempted'}

+        # Step 1: Generate base image (DALL-E -> Pexels -> Placeholder)
         if self.USE_AI_IMAGE_GENERATION and self.openai_api_key:
             max_r, att_n = 2, 0
             for att_n in range(max_r):
                 try:
+                    img_fp_dalle = os.path.join(self.output_dir, image_filename_base + ".png")
+                    logger.info(f"Attempt {att_n+1} DALL-E (base img): {image_generation_prompt_text[:100]}...")
                     cl = openai.OpenAI(api_key=self.openai_api_key, timeout=90.0)
                     r = cl.images.generate(model=self.dalle_model, prompt=image_generation_prompt_text, n=1, size=self.image_size_dalle3, quality="hd", response_format="url", style="vivid")
                     iu = r.data[0].url; rp = getattr(r.data[0], 'revised_prompt', None)
                     if rp: logger.info(f"DALL-E revised: {rp[:100]}...")
                     ir = requests.get(iu, timeout=120); ir.raise_for_status()
                     id_img = Image.open(io.BytesIO(ir.content)) # renamed from `id` to avoid shadowing the builtin
                     if id_img.mode != 'RGB': id_img = id_img.convert('RGB')
+                    id_img.save(img_fp_dalle); logger.info(f"DALL-E base image: {img_fp_dalle}")
+                    input_image_for_runway_path = img_fp_dalle
+                    temp_image_asset_info = {'path': img_fp_dalle, 'type': 'image', 'error': False, 'prompt_used': image_generation_prompt_text, 'revised_prompt': rp}
+                    break
                 except openai.RateLimitError as e: logger.warning(f"OpenAI Rate Limit {att_n+1}: {e}. Retry..."); time.sleep(5*(att_n+1)); temp_image_asset_info['error_message']=str(e)
                 except Exception as e: logger.error(f"DALL-E error: {e}", exc_info=True); temp_image_asset_info['error_message']=str(e); break
             if temp_image_asset_info['error']: logger.warning(f"DALL-E failed after {att_n+1} attempts for base image.")

         if temp_image_asset_info['error'] and self.USE_PEXELS:
             pqt = scene_data.get('pexels_search_query_감독', f"{scene_data.get('emotional_beat','')} {scene_data.get('setting_description','')}")
+            pp = self._search_pexels_image(pqt, image_filename_base + ".png") # .png in the base name keeps the renaming logic consistent
             if pp: input_image_for_runway_path = pp; temp_image_asset_info = {'path': pp, 'type': 'image', 'error': False, 'prompt_used': f"Pexels: {pqt}"}
             else: current_em = temp_image_asset_info.get('error_message',""); temp_image_asset_info['error_message']=(current_em + " Pexels failed.").strip()

         if temp_image_asset_info['error']:
+            logger.warning("Base image (DALL-E/Pexels) failed. Using placeholder base image.")
             ppt = temp_image_asset_info.get('prompt_used', image_generation_prompt_text)
+            php = self._create_placeholder_image_content(f"[Base Img Placeholder] {ppt[:100]}...", image_filename_base + ".png")
             if php: input_image_for_runway_path = php; temp_image_asset_info = {'path': php, 'type': 'image', 'error': False, 'prompt_used': ppt}
             else: current_em=temp_image_asset_info.get('error_message',""); temp_image_asset_info['error_message']=(current_em + " Base placeholder failed.").strip()

+        # Step 2: Generate video with RunwayML if requested and the base image exists
         if generate_as_video_clip:
             if self.USE_RUNWAYML and input_image_for_runway_path:
-                video_path = self._generate_video_clip_with_runwayml(
-                    text_prompt_for_motion=motion_prompt_text_for_video, # Use the motion-specific prompt
-                    input_image_path=input_image_for_runway_path,
-                    scene_identifier_filename_base=base_name, # Will append _runway_gen4.mp4
-                    target_duration_seconds=runway_target_duration
-                )
+                video_path = self._generate_video_clip_with_runwayml(pt=motion_prompt_text_for_video, iip=input_image_for_runway_path, sifnb=base_name, tds=runway_target_duration) # keyword args: the new signature's positional order is (pt, sifnb, tds, iip)
                 if video_path and os.path.exists(video_path):
+                    return {'path': video_path, 'type': 'video', 'error': False, 'prompt_used': motion_prompt_text_for_video, 'base_image_path': input_image_for_runway_path}
-                logger.warning(f"RunwayML video clip generation failed for {base_name}. Using the base image as fallback.")
-                asset_info = temp_image_asset_info # Fallback to the base image
-                asset_info['error'] = True # Indicate video step failed, though base image might be okay
-                asset_info['error_message'] = "RunwayML video generation step failed; using base image."
-                asset_info['type'] = 'image' # Explicitly set to image as it's the fallback
+                else: # Runway failed; return the base image, flagging the failed video step
+                    asset_info = temp_image_asset_info; asset_info['error'] = True; asset_info['error_message'] = "RunwayML video gen failed; using base image."; asset_info['type'] = 'image'
                 return asset_info
-            elif not self.USE_RUNWAYML:
-                asset_info = temp_image_asset_info
-                asset_info['error_message'] = "RunwayML disabled; using base image."
-                asset_info['type'] = 'image'
+            elif not self.USE_RUNWAYML: # video requested, but RunwayML disabled
+                asset_info = temp_image_asset_info; asset_info['error_message'] = "RunwayML disabled; using base image."; asset_info['type'] = 'image'
                 return asset_info
-                asset_info = temp_image_asset_info # This will have error=True
-                asset_info['error_message'] = (asset_info.get('error_message',"") + " Base image failed, so Runway video not attempted.").strip()
-                asset_info['type'] = 'image' # Even though it failed, its type was image
+            else: # video requested, but the base image failed
+                asset_info = temp_image_asset_info; asset_info['error_message'] = (asset_info.get('error_message',"") + " Base image failed, so Runway video not attempted.").strip(); asset_info['type'] = 'image'
                 return asset_info
         else: # Image was requested directly
-            return asset_info
+            return temp_image_asset_info # result of the base image generation
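End to end, the image-then-maybe-video flow above is called per scene. A hypothetical invocation (scene_data keys here are illustrative; only the keys read by this diff, like pexels_search_query_감독, emotional_beat, and setting_description, are confirmed):

asset = engine.generate_scene_asset(
    image_generation_prompt_text="A rain-soaked neon alley, cinematic wide shot",
    motion_prompt_text_for_video="slow push-in, flickering neon reflections",
    scene_data={"emotional_beat": "tension", "setting_description": "neon alley"},
    scene_identifier_filename_base="scene_01.png",
    generate_as_video_clip=True,
    runway_target_duration=8,  # maps to the 10s Gen-4 bucket
)
if asset["error"]:
    print("Fell back:", asset.get("error_message"))
print(asset["type"], asset["path"])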
@@ -285,10 +254,9 @@ class VisualEngine:
-            elif hasattr(self.elevenlabs_client,'generate'):logger.info("11L .generate()");vp=Voice(voice_id=str(self.elevenlabs_voice_id),settings=self.elevenlabs_voice_settings)if Voice and self.elevenlabs_voice_settings else str(self.elevenlabs_voice_id);ab=self.elevenlabs_client.generate(text=text_to_narrate,voice=vp,model="eleven_multilingual_v2");
+    def generate_narration_audio(self, ttn, ofn="narration_overall.mp3"): # ttn = text to narrate
+        if not self.USE_ELEVENLABS or not self.elevenlabs_client or not ttn: logger.info("11L skip."); return None
+        afp=os.path.join(self.output_dir,ofn) # moved out of the guard so it actually executes
+        try:
+            logger.info(f"11L audio (Voice:{self.elevenlabs_voice_id}): {ttn[:70]}..."); asm=None
+            if hasattr(self.elevenlabs_client,'text_to_speech') and hasattr(self.elevenlabs_client.text_to_speech,'stream'): asm=self.elevenlabs_client.text_to_speech.stream; logger.info("Using 11L .text_to_speech.stream()")
+            elif hasattr(self.elevenlabs_client,'generate_stream'): asm=self.elevenlabs_client.generate_stream; logger.info("Using 11L .generate_stream()")
+            elif hasattr(self.elevenlabs_client,'generate'):
+                logger.info("Using 11L .generate()")
+                vp = Voice(voice_id=str(self.elevenlabs_voice_id), settings=self.elevenlabs_voice_settings) if Voice and self.elevenlabs_voice_settings else str(self.elevenlabs_voice_id)
+                ab = self.elevenlabs_client.generate(text=ttn, voice=vp, model="eleven_multilingual_v2")
                 with open(afp,"wb") as f: f.write(ab); logger.info(f"11L audio (non-stream): {afp}"); return afp
             else: logger.error("No 11L audio method."); return None
             if asm: vps={"voice_id":str(self.elevenlabs_voice_id)}
@@ -296,14 +264,13 @@ class VisualEngine:
                 if hasattr(self.elevenlabs_voice_settings,'model_dump'): vps["voice_settings"]=self.elevenlabs_voice_settings.model_dump()
                 elif hasattr(self.elevenlabs_voice_settings,'dict'): vps["voice_settings"]=self.elevenlabs_voice_settings.dict()
                 else: vps["voice_settings"]=self.elevenlabs_voice_settings
+            adi=asm(text=ttn, model_id="eleven_multilingual_v2", **vps)
             with open(afp,"wb") as f:
                 for c in adi:
                     if c: f.write(c)
             logger.info(f"11L audio (stream): {afp}"); return afp
         except Exception as e: logger.error(f"11L audio error: {e}",exc_info=True); return None

-    # --- assemble_animatic_from_assets (Keep robust version from previous step, ensuring C-contiguous array and debug saves) ---
     def assemble_animatic_from_assets(self, asset_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24):
         if not asset_data_list: logger.warning("No assets for animatic."); return None
         processed_clips = []; narration_clip = None; final_clip = None
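The three-way branch on model_dump/dict exists because ElevenLabs' VoiceSettings is a pydantic model, and pydantic v2 renamed .dict() to .model_dump(); the final branch passes the object through untouched. The same duck-typing trick in isolation:

def to_plain_dict(settings):
    # pydantic v2 -> model_dump(), pydantic v1 -> dict(), anything else -> as-is
    if hasattr(settings, "model_dump"): return settings.model_dump()
    if hasattr(settings, "dict"): return settings.dict()
    return settings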
@@ -327,7 +294,6 @@ class VisualEngine:
                     cv_rgba.paste(thumb,(xo,yo),thumb)
                     final_rgb_pil = Image.new("RGB",self.video_frame_size,(0,0,0)); final_rgb_pil.paste(cv_rgba,mask=cv_rgba.split()[3])

-                    # CRITICAL DEBUG: Save image fed to NumPy array
                     dbg_path = os.path.join(self.output_dir,f"debug_PRE_NUMPY_S{scene_num}.png"); final_rgb_pil.save(dbg_path); logger.info(f"DEBUG: Saved PRE_NUMPY_S{scene_num} to {dbg_path}")

                     frame_np = np.array(final_rgb_pil,dtype=np.uint8)
@@ -336,7 +302,6 @@ class VisualEngine:
                     if frame_np.size==0 or frame_np.ndim!=3 or frame_np.shape[2]!=3: logger.error(f"S{scene_num}: Invalid NumPy. Skip."); continue

                     clip_base = ImageClip(frame_np,transparent=False).set_duration(scene_dur)
-                    # CRITICAL DEBUG: Save frame from MoviePy clip
                     mvpy_dbg_path=os.path.join(self.output_dir,f"debug_MOVIEPY_FRAME_S{scene_num}.png"); clip_base.save_frame(mvpy_dbg_path,t=0.1); logger.info(f"DEBUG: Saved MOVIEPY_FRAME_S{scene_num} to {mvpy_dbg_path}")

                     clip_fx = clip_base
@@ -347,7 +312,7 @@ class VisualEngine:
                 elif asset_type == 'video':
                     src_clip=None
                     try:
-                        src_clip=VideoFileClip(asset_path,target_resolution=(self.video_frame_size[1],self.video_frame_size[0])if self.video_frame_size else None)
+                        src_clip=VideoFileClip(asset_path,target_resolution=(self.video_frame_size[1],self.video_frame_size[0])if self.video_frame_size else None, audio=False) # audio=False: the animatic's narration track is added separately
                         tmp_clip=src_clip
                         if src_clip.duration!=scene_dur:
                             if src_clip.duration>scene_dur: tmp_clip=src_clip.subclip(0,scene_dur)
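Two details worth noting in that one-line change: in MoviePy 1.x, VideoFileClip's target_resolution is (height, width), which is why the frame-size tuple is flipped, and audio=False skips building an audio reader for clips whose sound would be discarded anyway. A minimal illustration (assumes scene.mp4 exists locally):

from moviepy.editor import VideoFileClip

clip = VideoFileClip("scene.mp4", target_resolution=(720, 1280), audio=False)
print(clip.size)  # roughly [1280, 720]
clip.close()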
@@ -364,7 +329,7 @@ class VisualEngine:
                 if current_scene_mvpy_clip and key_action:
                     try:
                         to_dur=min(current_scene_mvpy_clip.duration-0.5,current_scene_mvpy_clip.duration*0.8) if current_scene_mvpy_clip.duration>0.5 else current_scene_mvpy_clip.duration
                         to_start=0.25
                         txt_c=TextClip(f"Scene {scene_num}\n{key_action}",fontsize=self.video_overlay_font_size,color=self.video_overlay_font_color,font=self.video_overlay_font,bg_color='rgba(10,10,20,0.7)',method='caption',align='West',size=(self.video_frame_size[0]*0.9,None),kerning=-1,stroke_color='black',stroke_width=1.5).set_duration(to_dur).set_start(to_start).set_position(('center',0.92),relative=True)
                         current_scene_mvpy_clip=CompositeVideoClip([current_scene_mvpy_clip,txt_c],size=self.video_frame_size,use_bgclip=True)
                     except Exception as e: logger.error(f"S{scene_num} TextClip error:{e}. No text.",exc_info=True)
@@ -372,8 +337,8 @@ class VisualEngine:
             except Exception as e: logger.error(f"MAJOR Error S{scene_num} ({asset_path}):{e}",exc_info=True)
             finally:
                 if current_scene_mvpy_clip and hasattr(current_scene_mvpy_clip,'close'):
                     try: current_scene_mvpy_clip.close()
                     except Exception: pass # per-scene close is best-effort

         if not processed_clips: logger.warning("No clips processed. Abort."); return None
         td=0.75
@@ -392,20 +357,14 @@ class VisualEngine:
             elif final_clip.duration<=0: logger.warning("Video no duration. No audio.")
             if final_clip and final_clip.duration>0:
                 op=os.path.join(self.output_dir,output_filename); logger.info(f"Writing video:{op} (Dur:{final_clip.duration:.2f}s)")
                 final_clip.write_videofile(op,fps=fps,codec='libx264',preset='medium',audio_codec='aac',temp_audiofile=os.path.join(self.output_dir,f'temp-audio-{os.urandom(4).hex()}.m4a'),remove_temp=True,threads=os.cpu_count() or 2,logger='bar',bitrate="5000k",ffmpeg_params=["-pix_fmt", "yuv420p"])
                 logger.info(f"Video created:{op}"); return op
             else: logger.error("Final clip invalid. No write."); return None
         except Exception as e: logger.error(f"Video write error:{e}",exc_info=True); return None
         finally:
             logger.debug("Closing all MoviePy clips in `assemble_animatic_from_assets` finally block.")
+            clips_to_close = processed_clips + ([narration_clip] if narration_clip else []) + ([final_clip] if final_clip else [])
+            for clip_obj in clips_to_close:
                 if clip_obj and hasattr(clip_obj, 'close'):
                     try: clip_obj.close()
+                    except Exception as e_close: logger.warning(f"Ignoring error while closing a clip: {e_close}")
-            if narration_clip and hasattr(narration_clip, 'close'):
-                try: narration_clip.close()
-                except Exception as e_close_audio: logger.warning(f"Ignoring error closing narration clip: {e_close_audio}")
-            if final_clip and hasattr(final_clip, 'close'): # final_composite_clip_obj was renamed to final_clip
-                try: final_clip.close()
-                except Exception as e_close_final: logger.warning(f"Ignoring error closing final composite clip: {e_close_final}")
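Putting the whole change together, the engine is meant to be driven roughly like this. Everything below is a hypothetical driver: the exact per-asset metadata keys the assembler reads (scene number, duration, key action) are inferred from variable names in this diff, not confirmed by it.

assets = []
for i, scene in enumerate(scene_plan, start=1):  # scene_plan: assumed upstream planner output
    a = engine.generate_scene_asset(
        image_generation_prompt_text=scene["image_prompt"],
        motion_prompt_text_for_video=scene["motion_prompt"],
        scene_data=scene,
        scene_identifier_filename_base=f"scene_{i:02d}.png",
        generate_as_video_clip=True,
    )
    a.update({"scene_number": i, "duration": scene.get("duration", 5), "key_action": scene.get("key_action", "")})
    assets.append(a)

narration = engine.generate_narration_audio("Full narration text...")
final_path = engine.assemble_animatic_from_assets(assets, overall_narration_path=narration, output_filename="animatic.mp4", fps=24)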