Update core/visual_engine.py
core/visual_engine.py · CHANGED · +133 −156
@@ -1,5 +1,5 @@
# core/visual_engine.py
from PIL import Image, ImageDraw, ImageFont, ImageOps
from moviepy.editor import (ImageClip, concatenate_videoclips, TextClip,
                            CompositeVideoClip, AudioFileClip)
import moviepy.video.fx.all as vfx

@@ -10,10 +10,13 @@ import requests
import io
import time
import random
import subprocess
import logging

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# --- ElevenLabs Import ---
ELEVENLABS_CLIENT_IMPORTED = False
ElevenLabsAPIClient = None
Voice = None

@@ -22,16 +25,15 @@ VoiceSettings = None
try:
    from elevenlabs.client import ElevenLabs as ImportedElevenLabsClient
    from elevenlabs import Voice as ImportedVoice, VoiceSettings as ImportedVoiceSettings
    ElevenLabsAPIClient = ImportedElevenLabsClient
    Voice = ImportedVoice
    VoiceSettings = ImportedVoiceSettings
    ELEVENLABS_CLIENT_IMPORTED = True
    logger.info("Successfully imported ElevenLabs client components (SDK v1.x.x pattern).")
except ImportError as e_eleven:
    logger.warning(f"Could not import ElevenLabs client components: {e_eleven}. ElevenLabs audio will be disabled.")
except Exception as e_gen_eleven:
    logger.warning(f"General error importing ElevenLabs: {e_gen_eleven}. ElevenLabs audio will be disabled.")


class VisualEngine:

@@ -44,121 +46,93 @@
        self.font_size_pil = 20
        self.video_overlay_font_size = 30
        self.video_overlay_font_color = 'white'
        self.video_overlay_font = 'Liberation-Sans-Bold'  # More likely to be found by ImageMagick on Linux

        try:
            self.font = ImageFont.truetype(self.font_path_in_container, self.font_size_pil)
            logger.info(f"Placeholder font loaded: {self.font_path_in_container}.")
        except IOError:
            logger.warning(f"Placeholder font '{self.font_path_in_container}' not found. Using default.")
            self.font = ImageFont.load_default()
            self.font_size_pil = 10

        self.openai_api_key = None; self.USE_AI_IMAGE_GENERATION = False
        self.dalle_model = "dall-e-3"; self.image_size_dalle3 = "1792x1024"
        self.video_frame_size = (1280, 720)

        self.elevenlabs_api_key = None; self.USE_ELEVENLABS = False
        self.elevenlabs_client = None
        self.elevenlabs_voice_id = "Rachel"
        if VoiceSettings and ELEVENLABS_CLIENT_IMPORTED:
            self.elevenlabs_voice_settings = VoiceSettings(
                stability=0.60, similarity_boost=0.80,
                style=0.15, use_speaker_boost=True
            )
        else: self.elevenlabs_voice_settings = None
        self.pexels_api_key = None; self.USE_PEXELS = False
        logger.info("VisualEngine initialized.")

    def set_openai_api_key(self,k):
        self.openai_api_key=k; self.USE_AI_IMAGE_GENERATION=bool(k)
        logger.info(f"DALL-E ({self.dalle_model}) {'Ready.' if k else 'Disabled (no API key).'}")

    def set_elevenlabs_api_key(self,api_key):
        self.elevenlabs_api_key=api_key
        if api_key and ELEVENLABS_CLIENT_IMPORTED and ElevenLabsAPIClient:
            try:
                self.elevenlabs_client = ElevenLabsAPIClient(api_key=api_key)
                if self.elevenlabs_client: self.USE_ELEVENLABS=True; logger.info("ElevenLabs Client Ready.")
                else: self.USE_ELEVENLABS=False; logger.warning("ElevenLabs client is None post-init.")
            except Exception as e:
                logger.error(f"Error initializing ElevenLabs client: {e}. Disabled.", exc_info=True)
                self.USE_ELEVENLABS=False; self.elevenlabs_client = None
        else:
            self.USE_ELEVENLABS=False; self.elevenlabs_client = None
            if not (ELEVENLABS_CLIENT_IMPORTED and ElevenLabsAPIClient): pass  # Already logged at import
            else: logger.info("ElevenLabs API Key not provided. Disabled.")

    def set_pexels_api_key(self,k):
        self.pexels_api_key=k; self.USE_PEXELS=bool(k)
        logger.info(f"Pexels Search {'Ready.' if k else 'Disabled (no API key).'}")

    def _get_text_dimensions(self,text_content,font_obj): # No changes
        if not text_content: return 0,self.font_size_pil
        try:
            if hasattr(font_obj,'getbbox'):  # Pillow >= 8.0
                bbox=font_obj.getbbox(text_content); w=bbox[2]-bbox[0]; h=bbox[3]-bbox[1]
                return w, h if h > 0 else self.font_size_pil
            elif hasattr(font_obj,'getsize'):  # older Pillow
                w,h=font_obj.getsize(text_content)
                return w, h if h > 0 else self.font_size_pil
            else:  # crude estimate when neither API is available
                return int(len(text_content)*self.font_size_pil*0.6), int(self.font_size_pil*1.2 if self.font_size_pil*1.2>0 else self.font_size_pil)
        except Exception:
            return int(len(text_content)*self.font_size_pil*0.6), int(self.font_size_pil*1.2)

    def _create_placeholder_image_content(self,text_description,filename,size=None): # No changes
        if size is None: size = self.video_frame_size
        img=Image.new('RGB',size,color=(20,20,40)); d=ImageDraw.Draw(img); padding=25; max_w=size[0]-(2*padding); lines=[]
        if not text_description: text_description="(Placeholder: No prompt text)"
        words=text_description.split(); current_line=""
        for word in words:
            test_line=current_line+word+" "
            if self._get_text_dimensions(test_line,self.font)[0] <= max_w: current_line=test_line
            else:
                if current_line: lines.append(current_line.strip())
                current_line=word+" "
        if current_line: lines.append(current_line.strip())
        if not lines: lines.append("(Text error or too long for placeholder)")
        _,single_line_h=self._get_text_dimensions("Ay",self.font)
        single_line_h = single_line_h if single_line_h > 0 else self.font_size_pil + 2
        max_lines_to_display=min(len(lines),(size[1]-(2*padding))//(single_line_h+2))
        y_text=padding + (size[1]-(2*padding) - max_lines_to_display*(single_line_h+2))/2.0
        for i in range(max_lines_to_display):
            line_content=lines[i]; line_w,_=self._get_text_dimensions(line_content,self.font); x_text=(size[0]-line_w)/2.0
            d.text((x_text,y_text),line_content,font=self.font,fill=(200,200,180)); y_text+=single_line_h+2
            if i==6 and max_lines_to_display > 7: d.text((x_text,y_text),"...",font=self.font,fill=(200,200,180)); break
        filepath=os.path.join(self.output_dir,filename)
        try: img.save(filepath); return filepath
        except Exception as e: logger.error(f"Saving placeholder image {filepath}: {e}", exc_info=True); return None

    def _search_pexels_image(self, query, output_filename_base): # No changes
        if not self.USE_PEXELS or not self.pexels_api_key: return None
        headers = {"Authorization": self.pexels_api_key}
        params = {"query": query, "per_page": 1, "orientation": "landscape", "size": "large"}
        pexels_filename = output_filename_base.replace(".png", f"_pexels_{random.randint(1000,9999)}.jpg")
        filepath = os.path.join(self.output_dir, pexels_filename)
        try:
            logger.info(f"Searching Pexels for: '{query}'")
            effective_query = " ".join(query.split()[:5]); params["query"] = effective_query
            response = requests.get("https://api.pexels.com/v1/search", headers=headers, params=params, timeout=20)
            response.raise_for_status(); data = response.json()
            if data.get("photos") and len(data["photos"]) > 0:

@@ -166,154 +140,157 @@ class VisualEngine:
                image_response = requests.get(photo_url, timeout=60); image_response.raise_for_status()
                img_data = Image.open(io.BytesIO(image_response.content))
                if img_data.mode != 'RGB': img_data = img_data.convert('RGB')
                img_data.save(filepath); logger.info(f"Pexels image saved: {filepath}"); return filepath
            else: logger.info(f"No photos found on Pexels for query: '{effective_query}'")
        except Exception as e: logger.error(f"Pexels search/download for query '{query}': {e}", exc_info=True)
        return None

    def generate_image_visual(self, image_prompt_text, scene_data, scene_identifier_filename): # No changes
        filepath = os.path.join(self.output_dir, scene_identifier_filename)
        if self.USE_AI_IMAGE_GENERATION and self.openai_api_key:
            max_retries = 2
            for attempt in range(max_retries):
                try:
                    logger.info(f"Attempt {attempt+1}: DALL-E ({self.dalle_model}) for: {image_prompt_text[:100]}...")
                    client = openai.OpenAI(api_key=self.openai_api_key, timeout=90.0)
                    response = client.images.generate(model=self.dalle_model, prompt=image_prompt_text, n=1,
                                                      size=self.image_size_dalle3, quality="hd",
                                                      response_format="url", style="vivid")
                    image_url = response.data[0].url
                    revised_prompt = getattr(response.data[0], 'revised_prompt', None)
                    if revised_prompt: logger.info(f"DALL-E 3 revised_prompt: {revised_prompt[:100]}...")
                    image_response = requests.get(image_url, timeout=120); image_response.raise_for_status()
                    img_data = Image.open(io.BytesIO(image_response.content))
                    if img_data.mode != 'RGB': img_data = img_data.convert('RGB')
                    img_data.save(filepath); logger.info(f"AI Image (DALL-E) saved: {filepath}"); return filepath
                except openai.RateLimitError as e:
                    logger.warning(f"OpenAI Rate Limit: {e}. Retrying after {5*(attempt+1)}s..."); time.sleep(5 * (attempt + 1))
                    if attempt == max_retries - 1: logger.error("Max retries for RateLimitError."); break
                    else: continue
                except openai.APIError as e: logger.error(f"OpenAI API Error: {e}"); break
                except requests.exceptions.RequestException as e: logger.error(f"Requests Error (DALL-E download): {e}"); break
                except Exception as e: logger.error(f"Generic error (DALL-E gen): {e}", exc_info=True); break
            logger.warning("DALL-E generation failed. Trying Pexels fallback...")
            pexels_query_text = scene_data.get('pexels_search_query_감독',
                                               f"{scene_data.get('emotional_beat','')} {scene_data.get('setting_description','')}")
            pexels_path = self._search_pexels_image(pexels_query_text, scene_identifier_filename)
            if pexels_path: return pexels_path
            logger.warning("Pexels also failed/disabled. Using placeholder.")
            return self._create_placeholder_image_content(f"[AI/Pexels Failed] {image_prompt_text[:100]}...", scene_identifier_filename)
        else:
            return self._create_placeholder_image_content(image_prompt_text, scene_identifier_filename)

    def generate_narration_audio(self, text_to_narrate, output_filename="narration_overall.mp3"):
        if not self.USE_ELEVENLABS or not self.elevenlabs_client or not text_to_narrate:
            logger.info("ElevenLabs conditions not met. Skipping audio generation.")
            return None

        audio_filepath = os.path.join(self.output_dir, output_filename)
        try:
            logger.info(f"Generating ElevenLabs audio (Voice: {self.elevenlabs_voice_id}) for: {text_to_narrate[:70]}...")

            # Determine how to pass the voice (as an object or an ID string).
            voice_param = self.elevenlabs_voice_id
            if Voice and self.elevenlabs_voice_settings:  # Check if Voice & VoiceSettings were imported
                voice_param = Voice(
                    voice_id=self.elevenlabs_voice_id,  # must be a valid ID string or a known name
                    settings=self.elevenlabs_voice_settings
                )

            # Use the text_to_speech.stream() method on newer SDKs.
            if hasattr(self.elevenlabs_client, 'text_to_speech') and hasattr(self.elevenlabs_client.text_to_speech, 'stream'):
                logger.info("Using elevenlabs_client.text_to_speech.stream()")
                audio_data_iterator = self.elevenlabs_client.text_to_speech.stream(
                    text=text_to_narrate,
                    voice_id=self.elevenlabs_voice_id,  # stream() usually takes a voice_id string
                    model_id="eleven_multilingual_v2"   # or other models like "eleven_monolingual_v1"
                )
            # Fall back to .generate() if text_to_speech.stream isn't there
            # (might be an older v1 client or a different structure).
            elif hasattr(self.elevenlabs_client, 'generate'):
                logger.info("Using elevenlabs_client.generate() as fallback.")
                audio_data_iterator = self.elevenlabs_client.generate(
                    text=text_to_narrate,
                    voice=voice_param,  # may take the Voice object or just the ID string
                    model="eleven_multilingual_v2"
                )
            else:
                logger.error("No recognized audio generation method (text_to_speech.stream or generate) found on ElevenLabs client.")
                return None

            with open(audio_filepath, "wb") as f:
                for chunk in audio_data_iterator:
                    if chunk: f.write(chunk)

            logger.info(f"ElevenLabs audio saved: {audio_filepath}")
            return audio_filepath
        except AttributeError as ae:
            logger.error(f"AttributeError with ElevenLabs client: {ae}. Method or attribute likely missing for the installed SDK version.", exc_info=True)
        except Exception as e:
            logger.error(f"Error generating ElevenLabs audio: {e}", exc_info=True)
        return None

    def create_video_from_images(self, image_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24, duration_per_image=4.5):
        if not image_data_list: logger.warning("No image data for video."); return None
        processed_clips = []; narration_audio_clip = None; final_video_clip_obj = None

        logger.info(f"Preparing {len(image_data_list)} clips for video. Target frame size: {self.video_frame_size}")
        for i, data in enumerate(image_data_list):
            img_path, scene_num, key_action = data.get('path'), data.get('scene_num', i+1), data.get('key_action', '')
            if not (img_path and os.path.exists(img_path)): logger.warning(f"Image not found or invalid: {img_path}"); continue
            try:
                pil_img = Image.open(img_path)
                if pil_img.mode != 'RGB': pil_img = pil_img.convert('RGB')

                # Resize and letterbox/pillarbox onto the target frame.
                img_copy = pil_img.copy()
                # Image.Resampling.LANCZOS gives high-quality downscaling.
                img_copy.thumbnail(self.video_frame_size, Image.Resampling.LANCZOS)

                canvas = Image.new('RGB', self.video_frame_size, (random.randint(0,5), random.randint(0,5), random.randint(0,5)))
                xo, yo = (self.video_frame_size[0]-img_copy.width)//2, (self.video_frame_size[1]-img_copy.height)//2
                canvas.paste(img_copy, (xo,yo))
                frame_np = np.array(canvas)  # This numpy array is correctly sized.

                # ImageClip should not need to do much resizing now.
                img_clip = ImageClip(frame_np).set_duration(duration_per_image)

                # Slow zoom over the clip's duration.
                end_scale = random.uniform(1.03, 1.08)
                img_clip = img_clip.fx(vfx.resize, lambda t: 1 + (end_scale - 1) * (t / duration_per_image))
                img_clip = img_clip.set_position('center')

                if key_action:
                    txt_clip = TextClip(f"Scene {scene_num}\n{key_action}", fontsize=self.video_overlay_font_size,
                                        color=self.video_overlay_font_color, font=self.video_overlay_font,
                                        bg_color='rgba(10,10,20,0.8)', method='caption', align='West',
                                        size=(self.video_frame_size[0]*0.9, None), kerning=-1, stroke_color='black', stroke_width=1.5
                                        ).set_duration(duration_per_image - 1.0).set_start(0.5).set_position(('center', 0.92), relative=True)
                    final_scene_clip = CompositeVideoClip([img_clip, txt_clip], size=self.video_frame_size, use_bgclip=True, bg_color=(0,0,0))  # Ensure a background for the composite
                else: final_scene_clip = img_clip
                processed_clips.append(final_scene_clip)
            except Exception as e: logger.error(f"Creating video clip for {img_path}: {e}", exc_info=True)

        if not processed_clips: logger.warning("No clips processed for video."); return None

        transition = 0.75
        try:
            final_video_clip_obj = concatenate_videoclips(processed_clips, padding=-transition, method="compose")
            if final_video_clip_obj.duration > transition*2:
                final_video_clip_obj = final_video_clip_obj.fx(vfx.fadein, transition).fx(vfx.fadeout, transition)

            if overall_narration_path and os.path.exists(overall_narration_path):
                try:
                    narration_audio_clip = AudioFileClip(overall_narration_path)
                    # Let the narration dictate length when it is shorter than the visual sequence.
                    if narration_audio_clip.duration < final_video_clip_obj.duration:
                        final_video_clip_obj = final_video_clip_obj.subclip(0, narration_audio_clip.duration)
                    final_video_clip_obj = final_video_clip_obj.set_audio(narration_audio_clip)
                    logger.info("Overall narration added to video.")
                except Exception as e: logger.error(f"Adding overall narration: {e}", exc_info=True)

            output_path = os.path.join(self.output_dir, output_filename)
            logger.info(f"Writing final video to: {output_path}")
            final_video_clip_obj.write_videofile(output_path, fps=fps, codec='libx264', preset='medium',
                                                 audio_codec='aac',
                                                 temp_audiofile=os.path.join(self.output_dir, f'temp-audio-{os.urandom(4).hex()}.m4a'),
                                                 remove_temp=True, threads=os.cpu_count() or 2, logger='bar', bitrate="5000k")
            logger.info(f"Video successfully created: {output_path}"); return output_path
        except Exception as e: logger.error(f"Writing video file: {e}", exc_info=True); return None
        finally:  # Ensure clips are closed
            for c_item in processed_clips:
                if hasattr(c_item, 'close'): c_item.close()
            if narration_audio_clip and hasattr(narration_audio_clip, 'close'): narration_audio_clip.close()
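
The pivotal change in this commit is the narration path: rather than pinning a single ElevenLabs SDK version, generate_narration_audio feature-detects whichever entry point the installed client exposes. The same duck-typing pattern, reduced to a standalone sketch (the attribute checks mirror the method above; the helper itself is illustrative, not part of the commit):

# Illustrative sketch of the SDK feature-detection used by generate_narration_audio.
# Assumes an already-constructed ElevenLabs client; the default values are examples.
def pick_tts_audio_iterator(client, text, voice_id="Rachel", model="eleven_multilingual_v2"):
    # Newer v1.x clients expose a streaming endpoint under client.text_to_speech.
    if hasattr(client, "text_to_speech") and hasattr(client.text_to_speech, "stream"):
        return client.text_to_speech.stream(text=text, voice_id=voice_id, model_id=model)
    # Older clients expose a top-level generate() that accepts a voice name/ID or Voice object.
    if hasattr(client, "generate"):
        return client.generate(text=text, voice=voice_id, model=model)
    raise AttributeError("No recognized TTS method on this ElevenLabs client")

Both branches are expected to yield audio byte chunks, which is why generate_narration_audio can write either result through the same open(audio_filepath, "wb") loop.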
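
For orientation, here is a minimal end-to-end driver for the updated class. It is a sketch, not part of the commit: the constructor arguments fall outside these hunks (an output_dir attribute is assumed from its use above), the API keys are placeholders, and the scene-dict keys follow the data.get(...) calls in generate_image_visual and create_video_from_images.

# Hypothetical usage sketch; constructor signature and all key values are assumptions.
from core.visual_engine import VisualEngine

engine = VisualEngine()               # constructor args are not shown in this diff
engine.set_openai_api_key("sk-...")   # enables DALL-E; None/empty disables it
engine.set_elevenlabs_api_key("...")  # enables narration audio
engine.set_pexels_api_key("...")      # enables the stock-photo fallback

scene = {"scene_num": 1, "emotional_beat": "quiet dread",
         "setting_description": "fog-bound harbor at dawn",
         "key_action": "A lone figure walks the pier."}
img_path = engine.generate_image_visual(
    "A lone figure on a fog-bound pier at dawn, cinematic lighting", scene, "scene_1.png")

narration = engine.generate_narration_audio("The harbor was silent that morning.")
video_path = engine.create_video_from_images(
    [{"path": img_path, "scene_num": 1, "key_action": scene["key_action"]}],
    overall_narration_path=narration)

Each stage degrades gracefully: a failed DALL-E call falls through to Pexels and then to the placeholder renderer, a missing OpenAI key goes straight to the placeholder, and a None narration path simply produces a silent video.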