mgbam committed on
Commit 29c2122 · verified · 1 Parent(s): ada066c

Update core/visual_engine.py

Files changed (1)
  1. core/visual_engine.py +143 -176
core/visual_engine.py CHANGED
@@ -1,4 +1,5 @@
  # core/visual_engine.py
  from PIL import Image, ImageDraw, ImageFont
  from moviepy.editor import (ImageClip, concatenate_videoclips, TextClip,
                              CompositeVideoClip, AudioFileClip)
@@ -9,255 +10,221 @@ import openai
  import requests
  import io
  import time
  from elevenlabs import generate as elevenlabs_generate_audio, set_api_key as elevenlabs_set_api_key_func

  class VisualEngine:
      def __init__(self, output_dir="temp_cinegen_media"):
-         self.output_dir = output_dir
-         os.makedirs(self.output_dir, exist_ok=True)
-
-         self.font_filename = "arial.ttf"
-         self.font_path_in_container = f"/usr/local/share/fonts/truetype/mycustomfonts/{self.font_filename}"
-         self.font_size_pil = 20
-         self.video_overlay_font_size = 30 # Slightly smaller for more text with narration
-         self.video_overlay_font_color = 'white'
-         self.video_overlay_font = 'Arial-Bold'
-
-         try:
-             self.font = ImageFont.truetype(self.font_path_in_container, self.font_size_pil)
-             print(f"Font for placeholders: {self.font_path_in_container}.")
-         except IOError:
-             print(f"Warning: Placeholder font '{self.font_path_in_container}' not loaded. Using default.")
-             self.font = ImageFont.load_default(); self.font_size_pil = 10
-
          self.openai_api_key = None; self.USE_AI_IMAGE_GENERATION = False
-         self.dalle_model = "dall-e-3"; self.image_size_dalle3 = "1792x1024"
          self.video_frame_size = (1280, 720)
-
-         self.elevenlabs_api_key = None; self.USE_ELEVENLABS = False
-         self.elevenlabs_voice_id = "Rachel" # Default, can be made configurable
-
          self.pexels_api_key = None; self.USE_PEXELS = False

-     def set_openai_api_key(self, api_key):
-         if api_key: self.openai_api_key = api_key; self.USE_AI_IMAGE_GENERATION = True; print(f"DALL-E ({self.dalle_model}) Ready.")
-         else: self.USE_AI_IMAGE_GENERATION = False; print("DALL-E Disabled.")
-
-     def set_elevenlabs_api_key(self, api_key):
-         if api_key:
-             self.elevenlabs_api_key = api_key
-             try:
-                 elevenlabs_set_api_key_func(api_key) # Set for the elevenlabs library
-                 self.USE_ELEVENLABS = True
-                 print("ElevenLabs Ready.")
-             except Exception as e:
-                 print(f"Error setting ElevenLabs API key for library: {e}. ElevenLabs disabled.")
-                 self.USE_ELEVENLABS = False
-         else: self.USE_ELEVENLABS = False; print("ElevenLabs Disabled.")

-     def set_pexels_api_key(self, api_key):
-         if api_key: self.pexels_api_key = api_key; self.USE_PEXELS = True; print("Pexels Ready.")
-         else: self.USE_PEXELS = False; print("Pexels Disabled.")
-
-     def _get_text_dimensions(self, text_content, font_obj): # Remains same
-         if not text_content: return 0, self.font_size_pil
          try:
-             if hasattr(font_obj, 'getbbox'): bbox = font_obj.getbbox(text_content); w = bbox[2]-bbox[0]; h = bbox[3]-bbox[1]; return w, h if h>0 else self.font_size_pil
-             elif hasattr(font_obj, 'getsize'): w,h = font_obj.getsize(text_content); return w, h if h>0 else self.font_size_pil
-             else: return int(len(text_content)*self.font_size_pil*0.6), int(self.font_size_pil*1.2 if self.font_size_pil*1.2 > 0 else self.font_size_pil)
-         except: return int(len(text_content)*self.font_size_pil*0.6), int(self.font_size_pil*1.2)
-
-     def _create_placeholder_image_content(self, text_description, filename, size=(1280, 720)): # Remains same
-         img = Image.new('RGB', size, color=(20,20,40)); draw = ImageDraw.Draw(img); padding = 25; max_w = size[0]-(2*padding); lines = []
-         if not text_description: text_description = "(Placeholder)"
-         words = text_description.split(); current_line = ""
-         for word in words:
-             test_line = current_line + word + " ";
-             if self._get_text_dimensions(test_line, self.font)[0] <= max_w: current_line = test_line
-             else:
-                 if current_line: lines.append(current_line.strip())
-                 current_line = word + " "
-         if current_line: lines.append(current_line.strip())
-         if not lines: lines.append("(Text err)")
-         _, line_h = self._get_text_dimensions("Ay", self.font); line_h = line_h if line_h>0 else self.font_size_pil+2
-         max_lines = min(len(lines), (size[1]-2*padding)//(line_h+2))
-         y = padding + (size[1]-2*padding - max_lines*(line_h+2))/2.0
-         for i in range(max_lines):
-             line = lines[i]; line_w, _ = self._get_text_dimensions(line, self.font); x = (size[0]-line_w)/2.0
-             draw.text((x,y), line, font=self.font, fill=(200,200,180)); y += line_h+2
-             if i==6 and max_lines>7: draw.text((x,y), "...", font=self.font, fill=(200,200,180)); break
-         fp = os.path.join(self.output_dir, filename);
-         try: img.save(fp); return fp
-         except Exception as e: print(f"Err placeholder save: {e}"); return None

      def _search_pexels_image(self, query, output_filename):
          if not self.USE_PEXELS or not self.pexels_api_key: return None
          headers = {"Authorization": self.pexels_api_key}
-         params = {"query": query, "per_page": 1, "orientation": "landscape", "size": "large"}
-         # Ensure JPG for pexels typical format, but DALL-E images are PNG. Filename will be distinct.
-         pexels_filename = output_filename.replace(".png", "_pexels.jpg")
          filepath = os.path.join(self.output_dir, pexels_filename)
          try:
-             print(f"Searching Pexels for: '{query}' (max 3 words for relevance)")
-             # Limit query length for Pexels to improve relevance
-             query_parts = query.split()
-             effective_query = " ".join(query_parts[:5]) # Use first 5 words
-             params["query"] = effective_query
-
              response = requests.get("https://api.pexels.com/v1/search", headers=headers, params=params, timeout=15)
-             response.raise_for_status()
-             data = response.json()
              if data.get("photos"):
                  photo_url = data["photos"][0]["src"]["large2x"]
-                 image_response = requests.get(photo_url, timeout=45)
-                 image_response.raise_for_status()
                  img_data = Image.open(io.BytesIO(image_response.content))
                  if img_data.mode != 'RGB': img_data = img_data.convert('RGB')
-                 img_data.save(filepath)
-                 print(f"Pexels image saved: {filepath}")
-                 return filepath
-             else:
-                 print(f"No photos found on Pexels for query: '{effective_query}'")
-         except Exception as e: print(f"Pexels search/download error for '{query}': {e}")
          return None

-     def generate_image_visual(self, image_prompt_text, scene_data_for_fallbacks, scene_identifier_filename):
          filepath = os.path.join(self.output_dir, scene_identifier_filename)
          if self.USE_AI_IMAGE_GENERATION and self.openai_api_key:
              max_retries = 2
              for attempt in range(max_retries):
                  try:
                      print(f"Attempt {attempt+1}: DALL-E ({self.dalle_model}) for: {image_prompt_text[:120]}...")
-                     client = openai.OpenAI(api_key=self.openai_api_key, timeout=90.0) # Increased client timeout
-                     response = client.images.generate(
-                         model=self.dalle_model, prompt=image_prompt_text, n=1,
-                         size=self.image_size_dalle3, quality="hd", response_format="url", style="vivid"
-                     )
-                     image_url = response.data[0].url
-                     revised_prompt = getattr(response.data[0], 'revised_prompt', None)
                      if revised_prompt: print(f"DALL-E 3 revised_prompt: {revised_prompt[:100]}...")
-
-                     image_response = requests.get(image_url, timeout=120) # Increased download timeout
-                     image_response.raise_for_status()
                      img_data = Image.open(io.BytesIO(image_response.content))
                      if img_data.mode != 'RGB': img_data = img_data.convert('RGB')
                      img_data.save(filepath); print(f"AI Image (DALL-E) saved: {filepath}"); return filepath
-                 except openai.RateLimitError as e:
-                     print(f"OpenAI Rate Limit: {e}. Retrying after {5*(attempt+1)}s...")
-                     time.sleep(5*(attempt+1));
-                     if attempt == max_retries -1: print("Max retries for RateLimitError."); break # Break from loop
-                     else: continue # Go to next attempt
                  except openai.APIError as e: print(f"OpenAI API Error: {e}"); break
                  except requests.exceptions.RequestException as e: print(f"Requests Error (DALL-E download): {e}"); break
                  except Exception as e: print(f"Generic error (DALL-E gen): {e}"); break

              print("DALL-E generation failed. Trying Pexels fallback...")
-             pexels_query = f"{scene_data_for_fallbacks.get('emotional_beat','')} {scene_data_for_fallbacks.get('setting_description','')} {scene_data_for_fallbacks.get('genre','')} {scene_data_for_fallbacks.get('mood','')}"
-             pexels_path = self._search_pexels_image(pexels_query, scene_identifier_filename)
              if pexels_path: return pexels_path

              print("Pexels also failed/disabled. Using placeholder.")
-             return self._create_placeholder_image_content(
-                 f"[AI/Pexels Failed] Original Prompt: {image_prompt_text[:100]}...",
-                 scene_identifier_filename, size=self.video_frame_size
-             )
          else: # AI image generation not enabled
-             return self._create_placeholder_image_content(
-                 image_prompt_text, scene_identifier_filename, size=self.video_frame_size
-             )

-     def generate_narration_audio(self, text_to_narrate, output_filename="narration_overall.mp3"):
          if not self.USE_ELEVENLABS or not self.elevenlabs_api_key or not text_to_narrate:
-             print("ElevenLabs not enabled, API key missing, or no text provided. Skipping audio generation.")
-             return None
-
          audio_filepath = os.path.join(self.output_dir, output_filename)
          try:
              print(f"Generating ElevenLabs audio (Voice: {self.elevenlabs_voice_id}) for: {text_to_narrate[:70]}...")
-             # Ensure API key is set for the elevenlabs library context if it's not global
-             # elevenlabs_set_api_key_func(self.elevenlabs_api_key) # Usually set once globally is enough
-
-             audio_data = elevenlabs_generate_audio(
-                 text=text_to_narrate,
-                 voice=self.elevenlabs_voice_id,
-                 model="eleven_multilingual_v2" # Or other suitable model like "eleven_turbo_v2"
-             )
-             with open(audio_filepath, "wb") as f:
-                 f.write(audio_data)
-             print(f"ElevenLabs audio saved: {audio_filepath}")
-             return audio_filepath
-         except ImportError:
-             print("ElevenLabs library not installed. Cannot generate audio.")
-         except Exception as e:
-             print(f"Error generating ElevenLabs audio: {e}")
          return None

-     def create_video_from_images(self, image_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24, duration_per_image=4):
          if not image_data_list: return None
-         print(f"Creating video from {len(image_data_list)} image sets.")
          processed_clips = []
-         narration_audio_clip = None # Initialize
-         final_video_clip_obj = None # Initialize

          for i, data in enumerate(image_data_list):
              img_path, scene_num, key_action = data.get('path'), data.get('scene_num', i+1), data.get('key_action', '')
-             if not (img_path and os.path.exists(img_path)):
-                 print(f"Image not found: {img_path}. Skipping."); continue
              try:
-                 pil_img_orig = Image.open(img_path)
-                 if pil_img_orig.mode != 'RGB': pil_img_orig = pil_img_orig.convert('RGB')
-                 img_for_frame = pil_img_orig.copy()
-                 img_for_frame.thumbnail(self.video_frame_size, Image.Resampling.LANCZOS)
-                 canvas = Image.new('RGB', self.video_frame_size, (0,0,0))
-                 x_offset = (self.video_frame_size[0] - img_for_frame.width) // 2
-                 y_offset = (self.video_frame_size[1] - img_for_frame.height) // 2
-                 canvas.paste(img_for_frame, (x_offset, y_offset))
                  frame_np = np.array(canvas)
                  img_clip = ImageClip(frame_np).set_duration(duration_per_image)
-                 img_clip = img_clip.fx(vfx.resize, lambda t: 1 + 0.1 * (t / duration_per_image)).set_position('center')
                  if key_action:
-                     overlay_text = f"Scene {scene_num}\n{key_action}"
-                     txt_clip = TextClip(overlay_text, fontsize=self.video_overlay_font_size,
                                          color=self.video_overlay_font_color, font=self.video_overlay_font,
-                                         bg_color='rgba(0,0,0,0.7)', method='caption', align='West',
-                                         size=(self.video_frame_size[0]*0.85, None), kerning=-1, stroke_color='black', stroke_width=0.5
-                                         ).set_duration(duration_per_image - 1.0).set_start(0.5).set_position(('center', 0.88), relative=True)
                      final_scene_clip = CompositeVideoClip([img_clip, txt_clip], size=self.video_frame_size)
                  else: final_scene_clip = img_clip
                  processed_clips.append(final_scene_clip)
-             except Exception as e: print(f"Error processing clip for {img_path}: {e}. Skipping.")

-         if not processed_clips: print("No clips processed for video."); return None

-         video_wo_audio = concatenate_videoclips(processed_clips, padding=-0.75, method="compose")
-         if video_wo_audio.duration > 1.5:
-             video_wo_audio = video_wo_audio.fx(vfx.fadein, 0.75).fx(vfx.fadeout, 0.75)

-         final_video_clip_obj = video_wo_audio
          if overall_narration_path and os.path.exists(overall_narration_path):
              try:
                  narration_audio_clip = AudioFileClip(overall_narration_path)
                  final_video_clip_obj = final_video_clip_obj.set_audio(narration_audio_clip)
                  if narration_audio_clip.duration < final_video_clip_obj.duration:
                      final_video_clip_obj = final_video_clip_obj.subclip(0, narration_audio_clip.duration)
-                 elif narration_audio_clip.duration > final_video_clip_obj.duration:
-                     # If audio is longer, we might want to loop video or extend last frame - complex.
-                     # For now, video duration dictates. Audio will be cut.
-                     # Or, ensure narration script length matches expected video length.
-                     pass # Moviepy will cut audio to video duration by default with set_audio
-                 print("Overall narration added to video.")
-             except Exception as e:
-                 print(f"Error adding overall narration: {e}. Proceeding without main narration.")

          output_path = os.path.join(self.output_dir, output_filename)
          try:
-             final_video_clip_obj.write_videofile(output_path, fps=fps, codec='libx264', preset='medium', audio_codec='aac',
                                                   temp_audiofile=os.path.join(self.output_dir, f'temp-audio-{os.urandom(4).hex()}.m4a'),
-                                                  remove_temp=True, threads=os.cpu_count() or 2, logger='bar')
              print(f"Video created: {output_path}"); return output_path
-         except Exception as e: print(f"Error writing video file: {e}"); return None
-         finally: # Ensure clips are closed
-             for clip_item in processed_clips:
-                 if hasattr(clip_item, 'close'): clip_item.close()
-             if narration_audio_clip and hasattr(narration_audio_clip, 'close'): narration_audio_clip.close()
-             if final_video_clip_obj and hasattr(final_video_clip_obj, 'close'): final_video_clip_obj.close()

  # core/visual_engine.py
+ # ... (imports: PIL, MoviePy, numpy, os, openai, requests, io, time, elevenlabs - same) ...
  from PIL import Image, ImageDraw, ImageFont
  from moviepy.editor import (ImageClip, concatenate_videoclips, TextClip,
                              CompositeVideoClip, AudioFileClip)

  import requests
  import io
  import time
+ import random # For slight Ken Burns variations
  from elevenlabs import generate as elevenlabs_generate_audio, set_api_key as elevenlabs_set_api_key_func

  class VisualEngine:
      def __init__(self, output_dir="temp_cinegen_media"):
+         # ... (font setup, API key initializations, DALL-E settings - same) ...
+         self.output_dir = output_dir; os.makedirs(self.output_dir, exist_ok=True)
+         self.font_filename="arial.ttf"; self.font_path_in_container=f"/usr/local/share/fonts/truetype/mycustomfonts/{self.font_filename}"
+         self.font_size_pil=20; self.video_overlay_font_size=30; self.video_overlay_font_color='white'; self.video_overlay_font='Arial-Bold'
+         try: self.font = ImageFont.truetype(self.font_path_in_container, self.font_size_pil); print(f"Placeholder font: {self.font_path_in_container}.")
+         except IOError: print(f"Warn: Placeholder font '{self.font_path_in_container}' fail. Default."); self.font = ImageFont.load_default(); self.font_size_pil = 10
          self.openai_api_key = None; self.USE_AI_IMAGE_GENERATION = False
+         self.dalle_model = "dall-e-3"; self.image_size_dalle3 = "1792x1024" # Landscape
          self.video_frame_size = (1280, 720)
+         self.elevenlabs_api_key = None; self.USE_ELEVENLABS = False; self.elevenlabs_voice_id = "Rachel"
          self.pexels_api_key = None; self.USE_PEXELS = False

+     # ... (set_openai_api_key, set_elevenlabs_api_key, set_pexels_api_key - same) ...
+     def set_openai_api_key(self,k): # Pythonic shortened
+         self.openai_api_key=k; self.USE_AI_IMAGE_GENERATION=bool(k)
+         print(f"DALL-E ({self.dalle_model}) {'Ready' if k else 'Disabled'}.")
+     def set_elevenlabs_api_key(self,k):
+         self.elevenlabs_api_key=k
+         if k:
+             try: elevenlabs_set_api_key_func(k); self.USE_ELEVENLABS=True; print("ElevenLabs Ready.")
+             except Exception as e: print(f"ElevenLabs key set error: {e}. Disabled."); self.USE_ELEVENLABS=False
+         else: self.USE_ELEVENLABS=False
+     def set_pexels_api_key(self,k):
+         self.pexels_api_key=k; self.USE_PEXELS=bool(k)
+         print(f"Pexels {'Ready' if k else 'Disabled'}.")
44
+ # ... (_get_text_dimensions, _create_placeholder_image_content - same) ...
45
+ def _get_text_dimensions(self,t,f): # Shortened
46
+ if not t: return 0,self.font_size_pil
 
 
 
47
  try:
48
+ if hasattr(f,'getbbox'): bb=f.getbbox(t);w=bb[2]-bb[0];h=bb[3]-bb[1];return w,h if h>0 else self.font_size_pil
49
+ elif hasattr(f,'getsize'): w,h=f.getsize(t);return w,h if h>0 else self.font_size_pil
50
+ else: return int(len(t)*self.font_size_pil*.6),int(self.font_size_pil*1.2 if self.font_size_pil*1.2>0 else self.font_size_pil)
51
+ except: return int(len(t)*self.font_size_pil*.6),int(self.font_size_pil*1.2)
52
+ def _create_placeholder_image_content(self,td,fn,s=(1280,720)): # Shortened
53
+ img=Image.new('RGB',s,color=(20,20,40));d=ImageDraw.Draw(img);p=25;max_w=s[0]-(2*p);ls=[];
54
+ if not td: td="(Placeholder)"
55
+ ws=td.split();cl=""
56
+ for w in ws:
57
+ tl=cl+w+" ";
58
+ if self._get_text_dimensions(tl,self.font)[0]<=max_w: cl=tl
59
+ else:
60
+ if cl:ls.append(cl.strip())
61
+ cl=w+" "
62
+ if cl:ls.append(cl.strip())
63
+ if not ls:ls.append("(Text err)")
64
+ _,sh=self._get_text_dimensions("Ay",self.font);sh=sh if sh>0 else self.font_size_pil+2
65
+ max_ls=min(len(ls),(s[1]-2*p)//(sh+2));
66
+ yt=p+(s[1]-2*p-max_ls*(sh+2))/2.0
67
+ for i in range(max_ls):
68
+ l=ls[i];lw,_=self._get_text_dimensions(l,self.font);xt=(s[0]-lw)/2.0
69
+ d.text((xt,yt),l,font=self.font,fill=(200,200,180));yt+=sh+2
70
+ if i==6 and max_ls>7:d.text((xt,yt),"...",font=self.font,fill=(200,200,180));break
71
+ fp=os.path.join(self.output_dir,fn);
72
+ try:img.save(fp);return fp
73
+ except Exception as e:print(f"Err placeholder save: {e}");return None
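
A quick usage sketch of the placeholder path above (scene text and filename are invented for illustration; engine is the configured VisualEngine from the earlier sketch):

# Hypothetical call - returns the saved PNG path, or None if saving fails
placeholder_path = engine._create_placeholder_image_content(
    "Scene 3: The detective studies the wall of photographs as rain hits the window.",
    "scene_03_placeholder.png",
    s=(1280, 720))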
 
+     # ... (_search_pexels_image - same logic, ensure query is good) ...
      def _search_pexels_image(self, query, output_filename):
          if not self.USE_PEXELS or not self.pexels_api_key: return None
          headers = {"Authorization": self.pexels_api_key}
+         # Use a broader query, let Pexels do its magic, then maybe allow user to pick from a few
+         params = {"query": query, "per_page": 3, "orientation": "landscape", "size": "large"}
+         pexels_filename = output_filename.replace(".png", f"_pexels_{random.randint(100,999)}.jpg")
          filepath = os.path.join(self.output_dir, pexels_filename)
          try:
+             print(f"Searching Pexels for: '{query}'")
              response = requests.get("https://api.pexels.com/v1/search", headers=headers, params=params, timeout=15)
+             response.raise_for_status(); data = response.json()
              if data.get("photos"):
+                 # For now, just take the first one. UI could let user pick.
                  photo_url = data["photos"][0]["src"]["large2x"]
+                 image_response = requests.get(photo_url, timeout=45); image_response.raise_for_status()
                  img_data = Image.open(io.BytesIO(image_response.content))
                  if img_data.mode != 'RGB': img_data = img_data.convert('RGB')
+                 img_data.save(filepath); print(f"Pexels image saved: {filepath}"); return filepath
+             else: print(f"No photos on Pexels for: '{query}'")
+         except Exception as e: print(f"Pexels error for '{query}': {e}")
          return None
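
The per_page=3 request above still takes photos[0]; if the UI never grows a picker, one hedged alternative is to auto-select the widest result. A sketch of that tweak, in place of the photo_url line (illustrative, not part of this commit; Pexels search results expose "width" alongside "src"):

# Illustrative: prefer the widest of the returned photos instead of always photos[0]
photos = data.get("photos", [])
if photos:
    best = max(photos, key=lambda p: p.get("width", 0))
    photo_url = best["src"]["large2x"]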

+     # generate_image_visual - The Pexels fallback query should use the specific pexels_search_query_감독
+     def generate_image_visual(self, image_prompt_text, scene_data, scene_identifier_filename):
+         # ... (DALL-E logic same as previous version including retries) ...
+         # Fallback logic:
+         # print("DALL-E failed. Trying Pexels...")
+         # pexels_query = scene_data.get('pexels_search_query_감독', "abstract background") # Use Gemini's suggestion
+         # pexels_path = self._search_pexels_image(pexels_query, scene_identifier_filename)
+         # if pexels_path: return pexels_path
+         # return self._create_placeholder_image_content(...)
+         # For brevity, pasting the core DALL-E logic again:
          filepath = os.path.join(self.output_dir, scene_identifier_filename)
          if self.USE_AI_IMAGE_GENERATION and self.openai_api_key:
              max_retries = 2
              for attempt in range(max_retries):
                  try:
                      print(f"Attempt {attempt+1}: DALL-E ({self.dalle_model}) for: {image_prompt_text[:120]}...")
+                     client = openai.OpenAI(api_key=self.openai_api_key, timeout=90.0)
+                     response = client.images.generate(model=self.dalle_model, prompt=image_prompt_text, n=1, size=self.image_size_dalle3, quality="hd", response_format="url", style="vivid")
+                     image_url = response.data[0].url; revised_prompt = getattr(response.data[0], 'revised_prompt', None)
                      if revised_prompt: print(f"DALL-E 3 revised_prompt: {revised_prompt[:100]}...")
+                     image_response = requests.get(image_url, timeout=120); image_response.raise_for_status()
                      img_data = Image.open(io.BytesIO(image_response.content))
                      if img_data.mode != 'RGB': img_data = img_data.convert('RGB')
                      img_data.save(filepath); print(f"AI Image (DALL-E) saved: {filepath}"); return filepath
+                 except openai.RateLimitError as e:
+                     print(f"OpenAI Rate Limit: {e}. Retrying..."); time.sleep(5*(attempt+1))
+                     if attempt == max_retries - 1: print("Max retries for RateLimitError."); break
+                     else: continue
                  except openai.APIError as e: print(f"OpenAI API Error: {e}"); break
                  except requests.exceptions.RequestException as e: print(f"Requests Error (DALL-E download): {e}"); break
                  except Exception as e: print(f"Generic error (DALL-E gen): {e}"); break

              print("DALL-E generation failed. Trying Pexels fallback...")
+             # Use the specific Pexels query from Gemini's scene breakdown
+             pexels_query_text = scene_data.get('pexels_search_query_감독', f"{scene_data.get('emotional_beat','')} {scene_data.get('setting_description','')}")
+             pexels_path = self._search_pexels_image(pexels_query_text, scene_identifier_filename)
              if pexels_path: return pexels_path

              print("Pexels also failed/disabled. Using placeholder.")
+             return self._create_placeholder_image_content(f"[AI/Pexels Failed] Prompt: {image_prompt_text[:100]}...", scene_identifier_filename, size=self.video_frame_size)
          else: # AI image generation not enabled
+             return self._create_placeholder_image_content(image_prompt_text, scene_identifier_filename, size=self.video_frame_size)
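
For context, the fallback above reads a handful of optional keys from scene_data, including the Gemini-suggested Pexels query; a hypothetical example of the fields it consumes (values invented):

# Hypothetical scene_data entry - only these keys are read by the fallback path
scene_data = {
    "emotional_beat": "quiet dread",
    "setting_description": "abandoned orbital station corridor",
    "pexels_search_query_감독": "dark sci-fi corridor",  # optional, from Gemini's scene breakdown
}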
 
 
+
+     def generate_narration_audio(self, text_to_narrate, output_filename="narration_overall.mp3"): # Remains same logic
          if not self.USE_ELEVENLABS or not self.elevenlabs_api_key or not text_to_narrate:
+             print("ElevenLabs disabled/no text. Skipping audio."); return None
          audio_filepath = os.path.join(self.output_dir, output_filename)
          try:
              print(f"Generating ElevenLabs audio (Voice: {self.elevenlabs_voice_id}) for: {text_to_narrate[:70]}...")
+             # This is where the actual call to elevenlabs library happens
+             # elevenlabs_set_api_key_func(self.elevenlabs_api_key) # Ensure key is set for the library
+             audio_data = elevenlabs_generate_audio(text=text_to_narrate, voice=self.elevenlabs_voice_id, model="eleven_multilingual_v2")
+             with open(audio_filepath, "wb") as f: f.write(audio_data)
+             print(f"ElevenLabs audio saved: {audio_filepath}"); return audio_filepath
+         except ImportError: print("ElevenLabs library not found. Install it.")
+         except Exception as e: print(f"Error ElevenLabs audio: {e}")
          return None
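
Putting the methods together, a minimal driver sketch under the assumption that each scene dict carries the fields referenced above (all scene content here is invented, and engine is the configured VisualEngine from the earlier sketch):

# Illustrative end-to-end driver
scenes = [
    {"scene_num": 1, "key_action": "The crew scales the rooftop",
     "emotional_beat": "tension", "setting_description": "rain-soaked rooftop at night"},
    {"scene_num": 2, "key_action": "Alarms start to blare",
     "emotional_beat": "panic", "setting_description": "neon-lit server vault"},
]
image_data = []
for sc in scenes:
    prompt = f"Cinematic wide shot, {sc['setting_description']}, {sc['emotional_beat']}"
    path = engine.generate_image_visual(prompt, sc, f"scene_{sc['scene_num']:02d}.png")
    image_data.append({"path": path, "scene_num": sc["scene_num"], "key_action": sc["key_action"]})

narration_path = engine.generate_narration_audio("Night falls over the city as the crew moves in.")
video_path = engine.create_video_from_images(image_data, overall_narration_path=narration_path)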
 
+     def create_video_from_images(self, image_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24, duration_per_image=4.5): # Slightly longer duration
+         # ... (Image processing, Ken Burns, Text Overlay from previous full version) ...
+         # Add slight random variation to Ken Burns
          if not image_data_list: return None
          processed_clips = []
+         narration_audio_clip = None; final_video_clip_obj = None

          for i, data in enumerate(image_data_list):
              img_path, scene_num, key_action = data.get('path'), data.get('scene_num', i+1), data.get('key_action', '')
+             if not (img_path and os.path.exists(img_path)): print(f"Img not found: {img_path}"); continue
              try:
+                 pil_img = Image.open(img_path)
+                 if pil_img.mode != 'RGB': pil_img = pil_img.convert('RGB')
+
+                 # Ensure image fits within video_frame_size, letter/pillarboxing
+                 img_copy = pil_img.copy()
+                 img_copy.thumbnail(self.video_frame_size, Image.Resampling.LANCZOS)
+                 canvas = Image.new('RGB', self.video_frame_size, (random.randint(0,15), random.randint(0,15), random.randint(0,15))) # Slightly off-black bg
+                 xo, yo = (self.video_frame_size[0]-img_copy.width)//2, (self.video_frame_size[1]-img_copy.height)//2
+                 canvas.paste(img_copy, (xo,yo))
                  frame_np = np.array(canvas)
+
                  img_clip = ImageClip(frame_np).set_duration(duration_per_image)
+
+                 # Enhanced Ken Burns: Random start/end zoom & slight pan
+                 start_scale = 1.0
+                 end_scale = random.uniform(1.05, 1.15) # Random zoom between 5% and 15%
+
+                 # Subtle random panning (values between -0.05 and 0.05 relative to image dimension)
+                 # Pan is (fraction_of_width, fraction_of_height)
+                 # For this, it's easier if the image is slightly larger than the crop area initially.
+                 # A simpler way is to resize and then use set_position with a lambda for movement.
+                 # Let's simplify to just zoom for now to avoid overcomplicating the resize lambda.
+                 img_clip = img_clip.fx(vfx.resize, lambda t, es=end_scale: 1 + (es - 1) * (t / duration_per_image)) # default arg binds this clip's zoom
+                 img_clip = img_clip.set_position('center')
+
                  if key_action:
+                     txt_clip = TextClip(f"Scene {scene_num}\n{key_action}", fontsize=self.video_overlay_font_size,
                                          color=self.video_overlay_font_color, font=self.video_overlay_font,
+                                         bg_color='rgba(10,10,20,0.75)', method='caption', align='West',
+                                         size=(self.video_frame_size[0]*0.9, None), kerning=-1, stroke_color='black', stroke_width=1
+                                         ).set_duration(duration_per_image - 1.0).set_start(0.5).set_position(('center', 0.9), relative=True) # Slightly higher
                      final_scene_clip = CompositeVideoClip([img_clip, txt_clip], size=self.video_frame_size)
                  else: final_scene_clip = img_clip
                  processed_clips.append(final_scene_clip)
+             except Exception as e: print(f"Error clip for {img_path}: {e}.")

+         if not processed_clips: print("No clips for video."); return None

+         transition = 0.8 # Slightly longer crossfade
+         final_video_clip_obj = concatenate_videoclips(processed_clips, padding=-transition, method="compose")
+         if final_video_clip_obj.duration > transition*2:
+             final_video_clip_obj = final_video_clip_obj.fx(vfx.fadein, transition).fx(vfx.fadeout, transition)

          if overall_narration_path and os.path.exists(overall_narration_path):
              try:
                  narration_audio_clip = AudioFileClip(overall_narration_path)
                  final_video_clip_obj = final_video_clip_obj.set_audio(narration_audio_clip)
+                 # Adjust video duration to match audio if audio is shorter.
                  if narration_audio_clip.duration < final_video_clip_obj.duration:
                      final_video_clip_obj = final_video_clip_obj.subclip(0, narration_audio_clip.duration)
+                 print("Overall narration added.")
+             except Exception as e: print(f"Error adding narration: {e}.")

          output_path = os.path.join(self.output_dir, output_filename)
          try:
+             final_video_clip_obj.write_videofile(output_path, fps=fps, codec='libx264', preset='slow', audio_codec='aac', # 'slow' for better quality
                                                   temp_audiofile=os.path.join(self.output_dir, f'temp-audio-{os.urandom(4).hex()}.m4a'),
+                                                  remove_temp=True, threads=os.cpu_count() or 2, logger='bar', bitrate="5000k") # Higher bitrate
              print(f"Video created: {output_path}"); return output_path
+         except Exception as e: print(f"Error writing video: {e}"); return None
+         finally:
+             for c in processed_clips: c.close()
+             if narration_audio_clip: narration_audio_clip.close()
+             if final_video_clip_obj: final_video_clip_obj.close()
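
The Ken Burns comments in create_video_from_images defer panning to keep the resize lambda simple. A minimal sketch of that deferred idea, zoom plus a small drift via a position lambda, pulled out as a standalone helper (the function name, parameters, and drift ranges are assumptions, not part of this commit):

# Illustrative helper - assumes img_clip already matches frame_size, as in create_video_from_images
import random
import moviepy.video.fx.all as vfx

def apply_ken_burns(img_clip, frame_size, duration, max_zoom=1.15, max_drift_px=30):
    end_scale = random.uniform(1.05, max_zoom)
    dx = random.uniform(-max_drift_px, max_drift_px)  # total horizontal drift over the clip
    dy = random.uniform(-max_drift_px, max_drift_px)  # total vertical drift over the clip
    W, H = frame_size
    scale = lambda t, es=end_scale: 1 + (es - 1) * (t / duration)
    clip = img_clip.fx(vfx.resize, scale)
    # Keep the enlarged frame centered, then add the linear drift on top of the centering offset.
    return clip.set_position(lambda t: ((W - W * scale(t)) / 2 + dx * t / duration,
                                        (H - H * scale(t)) / 2 + dy * t / duration))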