Update core/visual_engine.py
core/visual_engine.py  +90 -87  CHANGED
@@ -10,13 +10,14 @@ import requests
 import io
 import time
 import random
-import subprocess # For
+import subprocess  # For dummy video fallback

 # --- ElevenLabs Import ---
+# These will be assigned if the import is successful
 ELEVENLABS_CLIENT_IMPORTED = False
 ElevenLabsAPIClient = None
 Voice = None
 VoiceSettings = None

 try:
     from elevenlabs.client import ElevenLabs as ImportedElevenLabsClient
@@ -26,11 +27,11 @@ try:
     Voice = ImportedVoice
     VoiceSettings = ImportedVoiceSettings
     ELEVENLABS_CLIENT_IMPORTED = True
-    print("Successfully imported ElevenLabs client components (SDK v1.x.x pattern).")
+    print("INFO: Successfully imported ElevenLabs client components (SDK v1.x.x pattern).")
 except ImportError as e_eleven:
-    print(f"WARNING: Could not import ElevenLabs client components: {e_eleven}. ElevenLabs audio
+    print(f"WARNING: Could not import ElevenLabs client components: {e_eleven}. ElevenLabs audio will be disabled.")
 except Exception as e_gen_eleven:
-    print(f"WARNING: General error importing ElevenLabs: {e_gen_eleven}. ElevenLabs audio
+    print(f"WARNING: General error importing ElevenLabs: {e_gen_eleven}. ElevenLabs audio will be disabled.")


 class VisualEngine:
@@ -43,66 +44,66 @@ class VisualEngine:
         self.font_size_pil = 20
         self.video_overlay_font_size = 30
         self.video_overlay_font_color = 'white'
-        self.video_overlay_font = 'Arial-Bold'
+        self.video_overlay_font = 'Arial-Bold'  # Relies on ImageMagick font discovery

         try:
             self.font = ImageFont.truetype(self.font_path_in_container, self.font_size_pil)
-
+            print(f"INFO: Placeholder font loaded: {self.font_path_in_container}.")
         except IOError:
-            print(f"
+            print(f"WARNING: Placeholder font '{self.font_path_in_container}' not found. Using default.")
             self.font = ImageFont.load_default()
             self.font_size_pil = 10

-
-        self.USE_AI_IMAGE_GENERATION = False
-        self.dalle_model = "dall-e-3"
-        self.
-        self.video_frame_size = (1280, 720)
+        # API Client States
+        self.openai_api_key = None; self.USE_AI_IMAGE_GENERATION = False
+        self.dalle_model = "dall-e-3"; self.image_size_dalle3 = "1792x1024"  # Landscape orientation
+        self.video_frame_size = (1280, 720)  # 16:9 standard HD

-
-        self.elevenlabs_api_key = None
-        self.USE_ELEVENLABS = False
+        self.elevenlabs_api_key = None; self.USE_ELEVENLABS = False
         self.elevenlabs_client = None
         self.elevenlabs_voice_id = "Rachel"  # Default
         if VoiceSettings:  # Check if VoiceSettings was successfully imported
             self.elevenlabs_voice_settings = VoiceSettings(
-                stability=0.
-                style=0.
+                stability=0.60, similarity_boost=0.80,  # Adjusted for potentially more character
+                style=0.15, use_speaker_boost=True
             )
         else:
             self.elevenlabs_voice_settings = None

-        self.pexels_api_key = None
-
+        self.pexels_api_key = None; self.USE_PEXELS = False
+        print("INFO: VisualEngine initialized.")

     def set_openai_api_key(self,k):
-        self.openai_api_key=k
-        self.
-        # print(f"DALL-E ({self.dalle_model}) {'Ready' if k else 'Disabled'}.")
+        self.openai_api_key=k; self.USE_AI_IMAGE_GENERATION=bool(k)
+        print(f"INFO: DALL-E ({self.dalle_model}) {'Ready.' if k else 'Disabled (no API key).'}")

     def set_elevenlabs_api_key(self,api_key):
         self.elevenlabs_api_key=api_key
         if api_key and ELEVENLABS_CLIENT_IMPORTED and ElevenLabsAPIClient:
             try:
                 self.elevenlabs_client = ElevenLabsAPIClient(api_key=api_key)
-                #
-                #
-
-
-
+                # Minimal test: Check if client object was created.
+                # Fetching voices makes an API call, can do it later if needed.
+                if self.elevenlabs_client:
+                    self.USE_ELEVENLABS=True
+                    print("INFO: ElevenLabs Client Ready.")
+                else:  # Should not happen if ElevenLabsAPIClient() doesn't raise error
+                    print("WARNING: ElevenLabs client is None after init. ElevenLabs Disabled.")
+                    self.USE_ELEVENLABS=False
             except Exception as e:
-                print(f"
+                print(f"ERROR: Initializing ElevenLabs client: {e}. ElevenLabs Disabled.")
                 self.USE_ELEVENLABS=False; self.elevenlabs_client = None
         else:
             self.USE_ELEVENLABS=False; self.elevenlabs_client = None
-
-            #
-
-
+            if not ELEVENLABS_CLIENT_IMPORTED or not ElevenLabsAPIClient:
+                # This message is already printed at import time if it fails
+                pass
+            else:
+                print("INFO: ElevenLabs API Key not provided or client class not imported. ElevenLabs Disabled.")

     def set_pexels_api_key(self,k):
         self.pexels_api_key=k; self.USE_PEXELS=bool(k)
-
+        print(f"INFO: Pexels Search {'Ready.' if k else 'Disabled (no API key).'}")

     def _get_text_dimensions(self,text_content,font_obj):
         if not text_content: return 0,self.font_size_pil
@@ -113,12 +114,13 @@ class VisualEngine:
             elif hasattr(font_obj,'getsize'):
                 w,h=font_obj.getsize(text_content)
                 return w, h if h > 0 else self.font_size_pil
             else:
                 return int(len(text_content)*self.font_size_pil*0.6),int(self.font_size_pil*1.2 if self.font_size_pil*1.2>0 else self.font_size_pil)
         except Exception:
             return int(len(text_content)*self.font_size_pil*0.6),int(self.font_size_pil*1.2)

-    def _create_placeholder_image_content(self,text_description,filename,size=
+    def _create_placeholder_image_content(self,text_description,filename,size=None):
+        if size is None: size = self.video_frame_size  # Default to video frame size
         img=Image.new('RGB',size,color=(20,20,40));d=ImageDraw.Draw(img);padding=25;max_w=size[0]-(2*padding);lines=[];
         if not text_description: text_description="(Placeholder: No prompt text)"
         words=text_description.split();current_line=""
@@ -134,40 +136,39 @@ class VisualEngine:
         _,single_line_h=self._get_text_dimensions("Ay",self.font)
         single_line_h = single_line_h if single_line_h > 0 else self.font_size_pil + 2

         max_lines_to_display=min(len(lines),(size[1]-(2*padding))//(single_line_h+2))

         y_text=padding + (size[1]-(2*padding) - max_lines_to_display*(single_line_h+2))/2.0

         for i in range(max_lines_to_display):
             line_content=lines[i];line_w,_=self._get_text_dimensions(line_content,self.font);x_text=(size[0]-line_w)/2.0
             d.text((x_text,y_text),line_content,font=self.font,fill=(200,200,180));y_text+=single_line_h+2
             if i==6 and max_lines_to_display > 7:
                 d.text((x_text,y_text),"...",font=self.font,fill=(200,200,180));break
         filepath=os.path.join(self.output_dir,filename)
         try:img.save(filepath);return filepath
-        except Exception as e:print(f"
+        except Exception as e:print(f"ERROR: Saving placeholder image {filepath}: {e}");return None

     def _search_pexels_image(self, query, output_filename_base):
         if not self.USE_PEXELS or not self.pexels_api_key: return None
         headers = {"Authorization": self.pexels_api_key}
         params = {"query": query, "per_page": 1, "orientation": "landscape", "size": "large"}
-        pexels_filename = output_filename_base.replace(".png", f"_pexels_{random.randint(
+        pexels_filename = output_filename_base.replace(".png", f"_pexels_{random.randint(1000,9999)}.jpg")
         filepath = os.path.join(self.output_dir, pexels_filename)
         try:
-            print(f"Searching Pexels for: '{query}'")
+            print(f"INFO: Searching Pexels for: '{query}'")
             effective_query = " ".join(query.split()[:5])
             params["query"] = effective_query
-
             response = requests.get("https://api.pexels.com/v1/search", headers=headers, params=params, timeout=20)
             response.raise_for_status(); data = response.json()
             if data.get("photos") and len(data["photos"]) > 0:
                 photo_url = data["photos"][0]["src"]["large2x"]
                 image_response = requests.get(photo_url, timeout=60); image_response.raise_for_status()
                 img_data = Image.open(io.BytesIO(image_response.content))
                 if img_data.mode != 'RGB': img_data = img_data.convert('RGB')
-                img_data.save(filepath); print(f"Pexels image saved: {filepath}"); return filepath
-            else: print(f"No photos found on Pexels for query: '{effective_query}'")
-        except Exception as e: print(f"Pexels search/download
+                img_data.save(filepath); print(f"INFO: Pexels image saved: {filepath}"); return filepath
+            else: print(f"INFO: No photos found on Pexels for query: '{effective_query}'")
+        except Exception as e: print(f"ERROR: Pexels search/download for query '{query}': {e}")
         return None

     def generate_image_visual(self, image_prompt_text, scene_data, scene_identifier_filename):
@@ -176,57 +177,58 @@ class VisualEngine:
             max_retries = 2
             for attempt in range(max_retries):
                 try:
-                    print(f"Attempt {attempt+1}: DALL-E ({self.dalle_model}) for: {image_prompt_text[:
+                    print(f"INFO: Attempt {attempt+1}: DALL-E ({self.dalle_model}) for: {image_prompt_text[:100]}...")
                     client = openai.OpenAI(api_key=self.openai_api_key, timeout=90.0)
                     response = client.images.generate(
                         model=self.dalle_model, prompt=image_prompt_text, n=1,
                         size=self.image_size_dalle3, quality="hd", response_format="url", style="vivid"
                     )
                     image_url = response.data[0].url
                     revised_prompt = getattr(response.data[0], 'revised_prompt', None)
-                    if revised_prompt: print(f"DALL-E 3 revised_prompt: {revised_prompt[:100]}...")
+                    if revised_prompt: print(f"INFO: DALL-E 3 revised_prompt: {revised_prompt[:100]}...")

                     image_response = requests.get(image_url, timeout=120)
                     image_response.raise_for_status()
                     img_data = Image.open(io.BytesIO(image_response.content))
                     if img_data.mode != 'RGB': img_data = img_data.convert('RGB')

-                    img_data.save(filepath); print(f"AI Image (DALL-E) saved: {filepath}"); return filepath
+                    img_data.save(filepath); print(f"INFO: AI Image (DALL-E) saved: {filepath}"); return filepath
                 except openai.RateLimitError as e:
-                    print(f"OpenAI Rate Limit: {e}. Retrying after {5*(attempt+1)}s...")
+                    print(f"WARNING: OpenAI Rate Limit: {e}. Retrying after {5*(attempt+1)}s...")
                     time.sleep(5 * (attempt + 1))
-                    if attempt == max_retries - 1: print("Max retries for RateLimitError."); break
+                    if attempt == max_retries - 1: print("ERROR: Max retries for RateLimitError."); break
                     else: continue
-                except openai.APIError as e: print(f"OpenAI API Error: {e}"); break
-                except requests.exceptions.RequestException as e: print(f"Requests Error (DALL-E download): {e}"); break
-                except Exception as e: print(f"Generic error (DALL-E gen): {e}"); break
+                except openai.APIError as e: print(f"ERROR: OpenAI API Error: {e}"); break
+                except requests.exceptions.RequestException as e: print(f"ERROR: Requests Error (DALL-E download): {e}"); break
+                except Exception as e: print(f"ERROR: Generic error (DALL-E gen): {e}"); break

-            print("DALL-E generation failed. Trying Pexels fallback...")
+            print("WARNING: DALL-E generation failed. Trying Pexels fallback...")
             pexels_query_text = scene_data.get('pexels_search_query_감독',
                                                f"{scene_data.get('emotional_beat','')} {scene_data.get('setting_description','')}")
             pexels_path = self._search_pexels_image(pexels_query_text, scene_identifier_filename)
             if pexels_path: return pexels_path

-            print("Pexels also failed/disabled. Using placeholder.")
+            print("WARNING: Pexels also failed/disabled. Using placeholder.")
             return self._create_placeholder_image_content(
                 f"[AI/Pexels Failed] Original Prompt: {image_prompt_text[:100]}...",
                 scene_identifier_filename
             )
         else:
+            # print(f"INFO: AI image generation not enabled/ready. Creating placeholder.")
             return self._create_placeholder_image_content(
                 image_prompt_text, scene_identifier_filename
             )

     def generate_narration_audio(self, text_to_narrate, output_filename="narration_overall.mp3"):
         if not self.USE_ELEVENLABS or not self.elevenlabs_client or not text_to_narrate:
-            # print("ElevenLabs not enabled, client not initialized, or no text. Skipping audio.")
+            # print("INFO: ElevenLabs not enabled, client not initialized, or no text. Skipping audio.")
             return None

         audio_filepath = os.path.join(self.output_dir, output_filename)
         try:
-            print(f"Generating ElevenLabs audio (Voice: {self.elevenlabs_voice_id}) for: {text_to_narrate[:70]}...")
+            print(f"INFO: Generating ElevenLabs audio (Voice: {self.elevenlabs_voice_id}) for: {text_to_narrate[:70]}...")

             voice_param = self.elevenlabs_voice_id
             if Voice and self.elevenlabs_voice_settings:  # Check if Voice & VoiceSettings were imported
                 voice_param = Voice(
                     voice_id=self.elevenlabs_voice_id,
@@ -236,29 +238,28 @@ class VisualEngine:
             audio_data_iterator = self.elevenlabs_client.generate(
                 text=text_to_narrate,
                 voice=voice_param,
                 model="eleven_multilingual_v2"
             )

             with open(audio_filepath, "wb") as f:
                 for chunk in audio_data_iterator:
                     if chunk: f.write(chunk)

-            print(f"ElevenLabs audio saved: {audio_filepath}")
+            print(f"INFO: ElevenLabs audio saved: {audio_filepath}")
             return audio_filepath
         except AttributeError as ae:
-            print(f"AttributeError with ElevenLabs client (method
+            print(f"ERROR: AttributeError with ElevenLabs client (method 'generate' might be different or client not fully init): {ae}")
         except Exception as e:
-            print(f"
+            print(f"ERROR: Generating ElevenLabs audio: {e}")
         return None

     def create_video_from_images(self, image_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24, duration_per_image=4.5):
-        if not image_data_list: print("No image data for video."); return None
-        # print(f"Creating video from {len(image_data_list)} image sets.") # Less verbose
+        if not image_data_list: print("WARNING: No image data for video."); return None
         processed_clips = []; narration_audio_clip = None; final_video_clip_obj = None

         for i, data in enumerate(image_data_list):
             img_path, scene_num, key_action = data.get('path'), data.get('scene_num', i+1), data.get('key_action', '')
-            if not (img_path and os.path.exists(img_path)): print(f"Img not found: {img_path}"); continue
+            if not (img_path and os.path.exists(img_path)): print(f"WARNING: Img not found: {img_path}"); continue
             try:
                 pil_img = Image.open(img_path);
                 if pil_img.mode != 'RGB': pil_img = pil_img.convert('RGB')
@@ -281,9 +282,9 @@ class VisualEngine:
                     final_scene_clip = CompositeVideoClip([img_clip, txt_clip], size=self.video_frame_size)
                 else: final_scene_clip = img_clip
                 processed_clips.append(final_scene_clip)
-            except Exception as e: print(f"
+            except Exception as e: print(f"ERROR: Creating video clip for {img_path}: {e}.")

-        if not processed_clips: print("No clips processed for video."); return None
+        if not processed_clips: print("WARNING: No clips processed for video."); return None
         transition = 0.8
         final_video_clip_obj = concatenate_videoclips(processed_clips, padding=-transition, method="compose")
         if final_video_clip_obj.duration > transition*2:
@@ -292,24 +293,26 @@ class VisualEngine:
         if overall_narration_path and os.path.exists(overall_narration_path):
             try:
                 narration_audio_clip = AudioFileClip(overall_narration_path)
-
-                #
-
+                # Ensure video duration matches audio if audio is shorter, or cap audio at video length.
+                # MoviePy's set_audio will truncate the longer of the two to match the shorter one IF the video has no audio track yet.
+                # If video already has audio, it replaces. If video is shorter than new audio, video extends with last frame.
+                # We want audio to dictate length if it's shorter than visual sequence.
+                if narration_audio_clip.duration < final_video_clip_obj.duration:
                     final_video_clip_obj = final_video_clip_obj.subclip(0, narration_audio_clip.duration)

                 final_video_clip_obj = final_video_clip_obj.set_audio(narration_audio_clip)
-                print("Overall narration added to video.")
-            except Exception as e: print(f"
+                print("INFO: Overall narration added to video.")
+            except Exception as e: print(f"ERROR: Adding overall narration: {e}.")

         output_path = os.path.join(self.output_dir, output_filename)
         try:
-            print(f"Writing final video to: {output_path}")
+            print(f"INFO: Writing final video to: {output_path}")
             final_video_clip_obj.write_videofile(output_path, fps=fps, codec='libx264', preset='medium',
                                                  audio_codec='aac',
                                                  temp_audiofile=os.path.join(self.output_dir, f'temp-audio-{os.urandom(4).hex()}.m4a'),
                                                  remove_temp=True, threads=os.cpu_count() or 2, logger='bar', bitrate="5000k")
-            print(f"Video successfully created: {output_path}"); return output_path
-        except Exception as e: print(f"
+            print(f"INFO: Video successfully created: {output_path}"); return output_path
+        except Exception as e: print(f"ERROR: Writing video file: {e}"); return None
         finally:
             for c_item in processed_clips:
                 if hasattr(c_item, 'close'): c_item.close()
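Usage sketch (not part of the diff above): a minimal driver showing how the methods touched in this change fit together. The VisualEngine constructor arguments, the core.visual_engine import path, and the environment-variable names below are assumptions for illustration; only the method names, parameters, and scene-dict keys come from the diff.

# Hypothetical driver for the VisualEngine workflow shown above; a sketch, not code from this commit.
# Assumptions: VisualEngine() needs no constructor arguments, the module is importable as
# core.visual_engine, and API keys are supplied via these environment variable names.
import os
from core.visual_engine import VisualEngine

engine = VisualEngine()
engine.set_openai_api_key(os.environ.get("OPENAI_API_KEY"))          # enables the DALL-E path
engine.set_elevenlabs_api_key(os.environ.get("ELEVENLABS_API_KEY"))  # enables narration audio
engine.set_pexels_api_key(os.environ.get("PEXELS_API_KEY"))          # enables the Pexels photo fallback

scene = {
    "emotional_beat": "quiet resolve",
    "setting_description": "rain-soaked neon alley at night",
    # optional: 'pexels_search_query_감독' overrides the fallback Pexels query built from the two keys above
}

# Fallback chain inside generate_image_visual: DALL-E, then Pexels, then a placeholder card.
img_path = engine.generate_image_visual(
    "Wide shot of a rain-soaked neon alley, a lone figure walking away",
    scene, "scene_01.png")

# Returns None when ElevenLabs is disabled; create_video_from_images accepts a None narration path.
narration_path = engine.generate_narration_audio("She kept walking long after the lights went out.")

video_path = engine.create_video_from_images(
    [{"path": img_path, "scene_num": 1, "key_action": "walking away"}],
    overall_narration_path=narration_path,
    output_filename="final_video.mp4")
print(video_path)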