mgbam commited on
Commit
f13d4b2
·
verified ·
1 Parent(s): 39aa8e3

Update core/visual_engine.py

Browse files
Files changed (1) hide show
  1. core/visual_engine.py +202 -125
core/visual_engine.py CHANGED
@@ -10,86 +10,164 @@ import requests
10
  import io
11
  import time
12
  import random
13
- #from elevenlabs import generate as elevenlabs_generate_audio, set_api_key as elevenlabs_set_api_key_func
14
- # Import ElevenLabs client class and methods:
15
- from elevenlabs.client import ElevenLabs
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  class VisualEngine:
17
  def __init__(self, output_dir="temp_cinegen_media"):
18
- self.output_dir = output_dir; os.makedirs(self.output_dir, exist_ok=True)
19
- self.font_filename="arial.ttf"; self.font_path_in_container=f"/usr/local/share/fonts/truetype/mycustomfonts/{self.font_filename}"
20
- self.font_size_pil=20; self.video_overlay_font_size=30; self.video_overlay_font_color='white'; self.video_overlay_font='Arial-Bold'
21
- try: self.font = ImageFont.truetype(self.font_path_in_container, self.font_size_pil); print(f"Placeholder font: {self.font_path_in_container}.")
22
- except IOError: print(f"Warn: Placeholder font '{self.font_path_in_container}' fail. Default."); self.font = ImageFont.load_default(); self.font_size_pil = 10
23
- self.openai_api_key = None; self.USE_AI_IMAGE_GENERATION = False
24
- self.dalle_model = "dall-e-3"; self.image_size_dalle3 = "1792x1024"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
  self.video_frame_size = (1280, 720)
26
- self.elevenlabs_api_key = None; self.USE_ELEVENLABS = False; self.elevenlabs_voice_id = "Rachel"
27
- self.pexels_api_key = None; self.USE_PEXELS = False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
 
29
  def set_openai_api_key(self,k):
30
- self.openai_api_key=k; self.USE_AI_IMAGE_GENERATION=bool(k)
31
- print(f"DALL-E ({self.dalle_model}) {'Ready' if k else 'Disabled'}.")
32
- def set_elevenlabs_api_key(self,k):
33
- self.elevenlabs_api_key=k
34
- if k:
35
- try: elevenlabs_set_api_key_func(k); self.USE_ELEVENLABS=True; print("ElevenLabs Ready.")
36
- except Exception as e: print(f"ElevenLabs key set error: {e}. Disabled."); self.USE_ELEVENLABS=False
37
- else: self.USE_ELEVENLABS=False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
  def set_pexels_api_key(self,k):
39
  self.pexels_api_key=k; self.USE_PEXELS=bool(k)
40
- print(f"Pexels {'Ready' if k else 'Disabled'}.")
41
-
42
- def _get_text_dimensions(self,t,f):
43
- if not t: return 0,self.font_size_pil
44
  try:
45
- if hasattr(f,'getbbox'): bb=f.getbbox(t);w=bb[2]-bb[0];h=bb[3]-bb[1];return w,h if h>0 else self.font_size_pil
46
- elif hasattr(f,'getsize'): w,h=f.getsize(t);return w,h if h>0 else self.font_size_pil
47
- else: return int(len(t)*self.font_size_pil*.6),int(self.font_size_pil*1.2 if self.font_size_pil*1.2>0 else self.font_size_pil)
48
- except: return int(len(t)*self.font_size_pil*.6),int(self.font_size_pil*1.2)
 
 
 
 
 
 
49
 
50
- def _create_placeholder_image_content(self,td,fn,s=(1280,720)):
51
- img=Image.new('RGB',s,color=(20,20,40));d=ImageDraw.Draw(img);p=25;max_w=s[0]-(2*p);ls=[];
52
- if not td: td="(Placeholder)"
53
- ws=td.split();cl=""
54
- for w in ws:
55
- tl=cl+w+" ";
56
- if self._get_text_dimensions(tl,self.font)[0]<=max_w: cl=tl
57
  else:
58
- if cl:ls.append(cl.strip())
59
- cl=w+" "
60
- if cl:ls.append(cl.strip())
61
- if not ls:ls.append("(Text err)")
62
- _,sh=self._get_text_dimensions("Ay",self.font);sh=sh if sh>0 else self.font_size_pil+2
63
- max_ls=min(len(ls),(s[1]-2*p)//(sh+2));
64
- yt=p+(s[1]-2*p-max_ls*(sh+2))/2.0
65
- for i in range(max_ls):
66
- line=ls[i];lw,_=self._get_text_dimensions(line,self.font);xt=(s[0]-lw)/2.0
67
- d.text((xt,yt),line,font=self.font,fill=(200,200,180));yt+=sh+2
68
- if i==6 and max_ls>7:d.text((xt,yt),"...",font=self.font,fill=(200,200,180));break
69
- fp=os.path.join(self.output_dir,fn);
70
- try:img.save(fp);return fp
71
- except Exception as e:print(f"Err placeholder save: {e}");return None
72
-
73
- def _search_pexels_image(self, query, output_filename):
 
 
 
 
 
 
74
  if not self.USE_PEXELS or not self.pexels_api_key: return None
75
  headers = {"Authorization": self.pexels_api_key}
76
- params = {"query": query, "per_page": 3, "orientation": "landscape", "size": "large"}
77
- pexels_filename = output_filename.replace(".png", f"_pexels_{random.randint(100,999)}.jpg")
78
  filepath = os.path.join(self.output_dir, pexels_filename)
79
  try:
80
  print(f"Searching Pexels for: '{query}'")
81
- query_parts = query.split(); effective_query = " ".join(query_parts[:5])
82
  params["query"] = effective_query
83
- response = requests.get("https://api.pexels.com/v1/search", headers=headers, params=params, timeout=15)
 
84
  response.raise_for_status(); data = response.json()
85
- if data.get("photos"):
86
- photo_url = data["photos"][0]["src"]["large2x"]
87
- image_response = requests.get(photo_url, timeout=45); image_response.raise_for_status()
88
  img_data = Image.open(io.BytesIO(image_response.content))
89
  if img_data.mode != 'RGB': img_data = img_data.convert('RGB')
90
  img_data.save(filepath); print(f"Pexels image saved: {filepath}"); return filepath
91
- else: print(f"No photos on Pexels for: '{effective_query}'")
92
- except Exception as e: print(f"Pexels error for '{query}': {e}")
93
  return None
94
 
95
  def generate_image_visual(self, image_prompt_text, scene_data, scene_identifier_filename):
@@ -101,87 +179,82 @@ class VisualEngine:
101
  print(f"Attempt {attempt+1}: DALL-E ({self.dalle_model}) for: {image_prompt_text[:120]}...")
102
  client = openai.OpenAI(api_key=self.openai_api_key, timeout=90.0)
103
  response = client.images.generate(
104
- model=self.dalle_model,
105
- prompt=image_prompt_text,
106
- n=1,
107
- size=self.image_size_dalle3,
108
- quality="hd",
109
- response_format="url",
110
- style="vivid"
111
  )
112
  image_url = response.data[0].url
113
  revised_prompt = getattr(response.data[0], 'revised_prompt', None)
114
- if revised_prompt:
115
- print(f"DALL-E 3 revised_prompt: {revised_prompt[:100]}...")
116
 
117
  image_response = requests.get(image_url, timeout=120)
118
  image_response.raise_for_status()
119
  img_data = Image.open(io.BytesIO(image_response.content))
120
- if img_data.mode != 'RGB':
121
- img_data = img_data.convert('RGB')
122
 
123
- img_data.save(filepath)
124
- print(f"AI Image (DALL-E) saved: {filepath}")
125
- return filepath
126
-
127
  except openai.RateLimitError as e:
128
  print(f"OpenAI Rate Limit: {e}. Retrying after {5*(attempt+1)}s...")
129
  time.sleep(5 * (attempt + 1))
130
- # CORRECTED INDENTATION FOR THIS BLOCK
131
- if attempt == max_retries - 1:
132
- print("Max retries reached for RateLimitError.")
133
- break # Break from the for loop if max retries hit for RateLimitError
134
- else:
135
- continue # Go to the next attempt in the for loop
136
-
137
- except openai.APIError as e:
138
- print(f"OpenAI API Error: {e}")
139
- break # Break from loop, will try Pexels/placeholder
140
- except requests.exceptions.RequestException as e:
141
- print(f"Requests Error (DALL-E image download): {e}")
142
- break # Break from loop
143
- except Exception as e:
144
- print(f"Generic error (DALL-E gen): {e}")
145
- break # Break from loop
146
 
147
- # This code block is reached if the 'for' loop completes (max retries)
148
- # or if it 'break's due to an error other than RateLimitError (where it 'continue's)
149
- print("DALL-E generation failed or max retries reached. Trying Pexels fallback...")
150
- pexels_query_text = scene_data.get('pexels_search_query_감독', f"{scene_data.get('emotional_beat','')} {scene_data.get('setting_description','')}")
151
  pexels_path = self._search_pexels_image(pexels_query_text, scene_identifier_filename)
152
- if pexels_path:
153
- return pexels_path
154
 
155
  print("Pexels also failed/disabled. Using placeholder.")
156
  return self._create_placeholder_image_content(
157
  f"[AI/Pexels Failed] Original Prompt: {image_prompt_text[:100]}...",
158
  scene_identifier_filename, size=self.video_frame_size
159
  )
160
- else: # AI image generation not enabled
161
  return self._create_placeholder_image_content(
162
  image_prompt_text, scene_identifier_filename, size=self.video_frame_size
163
  )
164
 
165
  def generate_narration_audio(self, text_to_narrate, output_filename="narration_overall.mp3"):
166
- if not self.USE_ELEVENLABS or not self.elevenlabs_api_key or not text_to_narrate:
167
- print("ElevenLabs disabled/no text. Skipping audio."); return None
 
 
168
  audio_filepath = os.path.join(self.output_dir, output_filename)
169
  try:
170
  print(f"Generating ElevenLabs audio (Voice: {self.elevenlabs_voice_id}) for: {text_to_narrate[:70]}...")
171
- # elevenlabs_set_api_key_func(self.elevenlabs_api_key) # Set key if library requires it per call
172
- audio_data = elevenlabs_generate_audio(text=text_to_narrate, voice=self.elevenlabs_voice_id, model="eleven_multilingual_v2")
173
- with open(audio_filepath, "wb") as f: f.write(audio_data)
174
- print(f"ElevenLabs audio saved: {audio_filepath}"); return audio_filepath
175
- except ImportError: print("ElevenLabs library not found. Install it.")
176
- except Exception as e: print(f"Error ElevenLabs audio: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
177
  return None
178
 
179
  def create_video_from_images(self, image_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24, duration_per_image=4.5):
180
- if not image_data_list: return None
181
- print(f"Creating video from {len(image_data_list)} image sets.")
182
- processed_clips = []
183
- narration_audio_clip = None
184
- final_video_clip_obj = None
185
 
186
  for i, data in enumerate(image_data_list):
187
  img_path, scene_num, key_action = data.get('path'), data.get('scene_num', i+1), data.get('key_action', '')
@@ -191,12 +264,12 @@ class VisualEngine:
191
  if pil_img.mode != 'RGB': pil_img = pil_img.convert('RGB')
192
  img_copy = pil_img.copy()
193
  img_copy.thumbnail(self.video_frame_size, Image.Resampling.LANCZOS)
194
- canvas = Image.new('RGB', self.video_frame_size, (random.randint(0,15), random.randint(0,15), random.randint(0,15)))
195
  xo, yo = (self.video_frame_size[0]-img_copy.width)//2, (self.video_frame_size[1]-img_copy.height)//2
196
  canvas.paste(img_copy, (xo,yo))
197
  frame_np = np.array(canvas)
198
  img_clip = ImageClip(frame_np).set_duration(duration_per_image)
199
- end_scale = random.uniform(1.05, 1.12) # Ken Burns zoom
200
  img_clip = img_clip.fx(vfx.resize, lambda t: 1 + (end_scale - 1) * (t / duration_per_image))
201
  img_clip = img_clip.set_position('center')
202
  if key_action:
@@ -208,9 +281,9 @@ class VisualEngine:
208
  final_scene_clip = CompositeVideoClip([img_clip, txt_clip], size=self.video_frame_size)
209
  else: final_scene_clip = img_clip
210
  processed_clips.append(final_scene_clip)
211
- except Exception as e: print(f"Error clip for {img_path}: {e}.")
212
 
213
- if not processed_clips: print("No clips for video."); return None
214
  transition = 0.8
215
  final_video_clip_obj = concatenate_videoclips(processed_clips, padding=-transition, method="compose")
216
  if final_video_clip_obj.duration > transition*2:
@@ -219,22 +292,26 @@ class VisualEngine:
219
  if overall_narration_path and os.path.exists(overall_narration_path):
220
  try:
221
  narration_audio_clip = AudioFileClip(overall_narration_path)
222
- final_video_clip_obj = final_video_clip_obj.set_audio(narration_audio_clip)
223
- if narration_audio_clip.duration < final_video_clip_obj.duration:
 
224
  final_video_clip_obj = final_video_clip_obj.subclip(0, narration_audio_clip.duration)
225
- print("Overall narration added.")
226
- except Exception as e: print(f"Error adding narration: {e}.")
 
 
227
 
228
  output_path = os.path.join(self.output_dir, output_filename)
229
  try:
230
- final_video_clip_obj.write_videofile(output_path, fps=fps, codec='libx264', preset='medium', # 'medium' or 'slow'
 
231
  audio_codec='aac',
232
  temp_audiofile=os.path.join(self.output_dir, f'temp-audio-{os.urandom(4).hex()}.m4a'),
233
- remove_temp=True, threads=os.cpu_count() or 2, logger='bar', bitrate="5000k")
234
- print(f"Video created: {output_path}"); return output_path
235
- except Exception as e: print(f"Error writing video: {e}"); return None
236
  finally:
237
- for c in processed_clips:
238
- if hasattr(c, 'close'): c.close()
239
  if narration_audio_clip and hasattr(narration_audio_clip, 'close'): narration_audio_clip.close()
240
  if final_video_clip_obj and hasattr(final_video_clip_obj, 'close'): final_video_clip_obj.close()
 
10
  import io
11
  import time
12
  import random
13
import subprocess  # kept available for a dummy-video fallback path

# --- Optional ElevenLabs SDK import ---
# The module must remain importable even when the `elevenlabs` package is not
# installed, so the import is guarded and its outcome recorded in module-level
# flags that the rest of the engine checks before using the SDK.
ELEVENLABS_CLIENT_IMPORTED = False
ElevenLabsAPIClient = None  # client class (set on successful import)
Voice = None                # voice descriptor class (set on successful import)
VoiceSettings = None        # per-voice tuning class (set on successful import)

try:
    from elevenlabs.client import ElevenLabs as ImportedElevenLabsClient
    from elevenlabs import Voice as ImportedVoice, VoiceSettings as ImportedVoiceSettings

    ElevenLabsAPIClient = ImportedElevenLabsClient
    Voice = ImportedVoice
    VoiceSettings = ImportedVoiceSettings
    ELEVENLABS_CLIENT_IMPORTED = True
    print("Successfully imported ElevenLabs client components (SDK v1.x.x pattern).")
except ImportError as e_eleven:
    print(f"WARNING: Could not import ElevenLabs client components: {e_eleven}. ElevenLabs audio generation will be disabled.")
except Exception as e_gen_eleven:
    # Any other failure during import (bad install, version clash) also disables audio.
    print(f"WARNING: General error importing ElevenLabs: {e_gen_eleven}. ElevenLabs audio generation will be disabled.")
36
  class VisualEngine:
37
  def __init__(self, output_dir="temp_cinegen_media"):
38
+ self.output_dir = output_dir
39
+ os.makedirs(self.output_dir, exist_ok=True)
40
+
41
+ self.font_filename = "arial.ttf"
42
+ self.font_path_in_container = f"/usr/local/share/fonts/truetype/mycustomfonts/{self.font_filename}"
43
+ self.font_size_pil = 20
44
+ self.video_overlay_font_size = 30
45
+ self.video_overlay_font_color = 'white'
46
+ self.video_overlay_font = 'Arial-Bold'
47
+
48
+ try:
49
+ self.font = ImageFont.truetype(self.font_path_in_container, self.font_size_pil)
50
+ # print(f"Placeholder font loaded: {self.font_path_in_container}.") # Less verbose
51
+ except IOError:
52
+ print(f"Warning: Placeholder font '{self.font_path_in_container}' not loaded. Using default.")
53
+ self.font = ImageFont.load_default()
54
+ self.font_size_pil = 10
55
+
56
+ self.openai_api_key = None
57
+ self.USE_AI_IMAGE_GENERATION = False
58
+ self.dalle_model = "dall-e-3"
59
+ self.image_size_dalle3 = "1792x1024"
60
  self.video_frame_size = (1280, 720)
61
+
62
+ # ElevenLabs Client
63
+ self.elevenlabs_api_key = None
64
+ self.USE_ELEVENLABS = False
65
+ self.elevenlabs_client = None
66
+ self.elevenlabs_voice_id = "Rachel" # Default, can be name or ID
67
+ if VoiceSettings: # Check if VoiceSettings was successfully imported
68
+ self.elevenlabs_voice_settings = VoiceSettings(
69
+ stability=0.65, similarity_boost=0.75,
70
+ style=0.1, use_speaker_boost=True
71
+ )
72
+ else:
73
+ self.elevenlabs_voice_settings = None
74
+
75
+ self.pexels_api_key = None
76
+ self.USE_PEXELS = False
77
 
78
  def set_openai_api_key(self,k):
79
+ self.openai_api_key=k
80
+ self.USE_AI_IMAGE_GENERATION=bool(k)
81
+ # print(f"DALL-E ({self.dalle_model}) {'Ready' if k else 'Disabled'}.")
82
+
83
+ def set_elevenlabs_api_key(self,api_key):
84
+ self.elevenlabs_api_key=api_key
85
+ if api_key and ELEVENLABS_CLIENT_IMPORTED and ElevenLabsAPIClient:
86
+ try:
87
+ self.elevenlabs_client = ElevenLabsAPIClient(api_key=api_key)
88
+ # Optional: Test client (e.g., fetch voices) can be added here for robust init
89
+ # voices_test = self.elevenlabs_client.voices.get_all() # This makes an API call
90
+ # if voices_test and voices_test.voices: print("ElevenLabs client connected.")
91
+ self.USE_ELEVENLABS=True
92
+ # print("ElevenLabs Client Ready.")
93
+ except Exception as e:
94
+ print(f"Error initializing ElevenLabs client with API key: {e}. ElevenLabs Disabled.");
95
+ self.USE_ELEVENLABS=False; self.elevenlabs_client = None
96
+ else:
97
+ self.USE_ELEVENLABS=False; self.elevenlabs_client = None
98
+ # if not ELEVENLABS_CLIENT_IMPORTED or not ElevenLabsAPIClient:
99
+ # print("ElevenLabs Client class was not imported. ElevenLabs Disabled.") # Already printed at import
100
+ # else:
101
+ # print("ElevenLabs API Key not provided. ElevenLabs Disabled.") # Less verbose
102
+
103
  def set_pexels_api_key(self,k):
104
  self.pexels_api_key=k; self.USE_PEXELS=bool(k)
105
+ # print(f"Pexels {'Ready' if k else 'Disabled'}.")
106
+
107
+ def _get_text_dimensions(self,text_content,font_obj):
108
+ if not text_content: return 0,self.font_size_pil
109
  try:
110
+ if hasattr(font_obj,'getbbox'):
111
+ bbox=font_obj.getbbox(text_content);w=bbox[2]-bbox[0];h=bbox[3]-bbox[1]
112
+ return w, h if h > 0 else self.font_size_pil
113
+ elif hasattr(font_obj,'getsize'):
114
+ w,h=font_obj.getsize(text_content)
115
+ return w, h if h > 0 else self.font_size_pil
116
+ else: # Fallback
117
+ return int(len(text_content)*self.font_size_pil*0.6),int(self.font_size_pil*1.2 if self.font_size_pil*1.2>0 else self.font_size_pil)
118
+ except Exception: # Generic fallback on error
119
+ return int(len(text_content)*self.font_size_pil*0.6),int(self.font_size_pil*1.2)
120
 
121
+ def _create_placeholder_image_content(self,text_description,filename,size=(1280,720)):
122
+ img=Image.new('RGB',size,color=(20,20,40));d=ImageDraw.Draw(img);padding=25;max_w=size[0]-(2*padding);lines=[];
123
+ if not text_description: text_description="(Placeholder: No prompt text)"
124
+ words=text_description.split();current_line=""
125
+ for word in words:
126
+ test_line=current_line+word+" "
127
+ if self._get_text_dimensions(test_line,self.font)[0] <= max_w: current_line=test_line
128
  else:
129
+ if current_line: lines.append(current_line.strip())
130
+ current_line=word+" "
131
+ if current_line: lines.append(current_line.strip())
132
+ if not lines: lines.append("(Text error or too long for placeholder)")
133
+
134
+ _,single_line_h=self._get_text_dimensions("Ay",self.font)
135
+ single_line_h = single_line_h if single_line_h > 0 else self.font_size_pil + 2
136
+
137
+ max_lines_to_display=min(len(lines),(size[1]-(2*padding))//(single_line_h+2)) # Max lines based on height
138
+
139
+ y_text=padding + (size[1]-(2*padding) - max_lines_to_display*(single_line_h+2))/2.0
140
+
141
+ for i in range(max_lines_to_display):
142
+ line_content=lines[i];line_w,_=self._get_text_dimensions(line_content,self.font);x_text=(size[0]-line_w)/2.0
143
+ d.text((x_text,y_text),line_content,font=self.font,fill=(200,200,180));y_text+=single_line_h+2
144
+ if i==6 and max_lines_to_display > 7: # Show ellipsis if more text
145
+ d.text((x_text,y_text),"...",font=self.font,fill=(200,200,180));break
146
+ filepath=os.path.join(self.output_dir,filename)
147
+ try:img.save(filepath);return filepath
148
+ except Exception as e:print(f"Error saving placeholder image {filepath}: {e}");return None
149
+
150
+ def _search_pexels_image(self, query, output_filename_base):
151
  if not self.USE_PEXELS or not self.pexels_api_key: return None
152
  headers = {"Authorization": self.pexels_api_key}
153
+ params = {"query": query, "per_page": 1, "orientation": "landscape", "size": "large"} # Get only 1 relevant image
154
+ pexels_filename = output_filename_base.replace(".png", f"_pexels_{random.randint(100,999)}.jpg")
155
  filepath = os.path.join(self.output_dir, pexels_filename)
156
  try:
157
  print(f"Searching Pexels for: '{query}'")
158
+ effective_query = " ".join(query.split()[:5]) # Use first 5 words for Pexels query
159
  params["query"] = effective_query
160
+
161
+ response = requests.get("https://api.pexels.com/v1/search", headers=headers, params=params, timeout=20)
162
  response.raise_for_status(); data = response.json()
163
+ if data.get("photos") and len(data["photos"]) > 0:
164
+ photo_url = data["photos"][0]["src"]["large2x"] # High quality
165
+ image_response = requests.get(photo_url, timeout=60); image_response.raise_for_status()
166
  img_data = Image.open(io.BytesIO(image_response.content))
167
  if img_data.mode != 'RGB': img_data = img_data.convert('RGB')
168
  img_data.save(filepath); print(f"Pexels image saved: {filepath}"); return filepath
169
+ else: print(f"No photos found on Pexels for query: '{effective_query}'")
170
+ except Exception as e: print(f"Pexels search/download error for query '{query}': {e}")
171
  return None
172
 
173
  def generate_image_visual(self, image_prompt_text, scene_data, scene_identifier_filename):
 
179
  print(f"Attempt {attempt+1}: DALL-E ({self.dalle_model}) for: {image_prompt_text[:120]}...")
180
  client = openai.OpenAI(api_key=self.openai_api_key, timeout=90.0)
181
  response = client.images.generate(
182
+ model=self.dalle_model, prompt=image_prompt_text, n=1,
183
+ size=self.image_size_dalle3, quality="hd", response_format="url", style="vivid"
 
 
 
 
 
184
  )
185
  image_url = response.data[0].url
186
  revised_prompt = getattr(response.data[0], 'revised_prompt', None)
187
+ if revised_prompt: print(f"DALL-E 3 revised_prompt: {revised_prompt[:100]}...")
 
188
 
189
  image_response = requests.get(image_url, timeout=120)
190
  image_response.raise_for_status()
191
  img_data = Image.open(io.BytesIO(image_response.content))
192
+ if img_data.mode != 'RGB': img_data = img_data.convert('RGB')
 
193
 
194
+ img_data.save(filepath); print(f"AI Image (DALL-E) saved: {filepath}"); return filepath
 
 
 
195
  except openai.RateLimitError as e:
196
  print(f"OpenAI Rate Limit: {e}. Retrying after {5*(attempt+1)}s...")
197
  time.sleep(5 * (attempt + 1))
198
+ if attempt == max_retries - 1: print("Max retries for RateLimitError."); break
199
+ else: continue
200
+ except openai.APIError as e: print(f"OpenAI API Error: {e}"); break
201
+ except requests.exceptions.RequestException as e: print(f"Requests Error (DALL-E download): {e}"); break
202
+ except Exception as e: print(f"Generic error (DALL-E gen): {e}"); break
 
 
 
 
 
 
 
 
 
 
 
203
 
204
+ print("DALL-E generation failed. Trying Pexels fallback...")
205
+ pexels_query_text = scene_data.get('pexels_search_query_감독',
206
+ f"{scene_data.get('emotional_beat','')} {scene_data.get('setting_description','')}")
 
207
  pexels_path = self._search_pexels_image(pexels_query_text, scene_identifier_filename)
208
+ if pexels_path: return pexels_path
 
209
 
210
  print("Pexels also failed/disabled. Using placeholder.")
211
  return self._create_placeholder_image_content(
212
  f"[AI/Pexels Failed] Original Prompt: {image_prompt_text[:100]}...",
213
  scene_identifier_filename, size=self.video_frame_size
214
  )
215
+ else:
216
  return self._create_placeholder_image_content(
217
  image_prompt_text, scene_identifier_filename, size=self.video_frame_size
218
  )
219
 
220
  def generate_narration_audio(self, text_to_narrate, output_filename="narration_overall.mp3"):
221
+ if not self.USE_ELEVENLABS or not self.elevenlabs_client or not text_to_narrate:
222
+ # print("ElevenLabs not enabled, client not initialized, or no text. Skipping audio.") # Less verbose
223
+ return None
224
+
225
  audio_filepath = os.path.join(self.output_dir, output_filename)
226
  try:
227
  print(f"Generating ElevenLabs audio (Voice: {self.elevenlabs_voice_id}) for: {text_to_narrate[:70]}...")
228
+
229
+ voice_param = self.elevenlabs_voice_id # Default to string ID
230
+ if Voice and self.elevenlabs_voice_settings: # Check if Voice & VoiceSettings were imported
231
+ voice_param = Voice(
232
+ voice_id=self.elevenlabs_voice_id,
233
+ settings=self.elevenlabs_voice_settings
234
+ )
235
+
236
+ audio_data_iterator = self.elevenlabs_client.generate(
237
+ text=text_to_narrate,
238
+ voice=voice_param,
239
+ model="eleven_multilingual_v2" # Or other models e.g. "eleven_turbo_v2"
240
+ )
241
+
242
+ with open(audio_filepath, "wb") as f:
243
+ for chunk in audio_data_iterator:
244
+ if chunk: f.write(chunk)
245
+
246
+ print(f"ElevenLabs audio saved: {audio_filepath}")
247
+ return audio_filepath
248
+ except AttributeError as ae:
249
+ print(f"AttributeError with ElevenLabs client (method name like 'generate' might differ): {ae}")
250
+ except Exception as e:
251
+ print(f"Error generating ElevenLabs audio: {e}")
252
  return None
253
 
254
  def create_video_from_images(self, image_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24, duration_per_image=4.5):
255
+ if not image_data_list: print("No image data for video."); return None
256
+ # print(f"Creating video from {len(image_data_list)} image sets.") # Less verbose
257
+ processed_clips = []; narration_audio_clip = None; final_video_clip_obj = None
 
 
258
 
259
  for i, data in enumerate(image_data_list):
260
  img_path, scene_num, key_action = data.get('path'), data.get('scene_num', i+1), data.get('key_action', '')
 
264
  if pil_img.mode != 'RGB': pil_img = pil_img.convert('RGB')
265
  img_copy = pil_img.copy()
266
  img_copy.thumbnail(self.video_frame_size, Image.Resampling.LANCZOS)
267
+ canvas = Image.new('RGB', self.video_frame_size, (random.randint(0,10), random.randint(0,10), random.randint(0,10)))
268
  xo, yo = (self.video_frame_size[0]-img_copy.width)//2, (self.video_frame_size[1]-img_copy.height)//2
269
  canvas.paste(img_copy, (xo,yo))
270
  frame_np = np.array(canvas)
271
  img_clip = ImageClip(frame_np).set_duration(duration_per_image)
272
+ end_scale = random.uniform(1.05, 1.12)
273
  img_clip = img_clip.fx(vfx.resize, lambda t: 1 + (end_scale - 1) * (t / duration_per_image))
274
  img_clip = img_clip.set_position('center')
275
  if key_action:
 
281
  final_scene_clip = CompositeVideoClip([img_clip, txt_clip], size=self.video_frame_size)
282
  else: final_scene_clip = img_clip
283
  processed_clips.append(final_scene_clip)
284
+ except Exception as e: print(f"Error creating video clip for {img_path}: {e}.")
285
 
286
+ if not processed_clips: print("No clips processed for video."); return None
287
  transition = 0.8
288
  final_video_clip_obj = concatenate_videoclips(processed_clips, padding=-transition, method="compose")
289
  if final_video_clip_obj.duration > transition*2:
 
292
  if overall_narration_path and os.path.exists(overall_narration_path):
293
  try:
294
  narration_audio_clip = AudioFileClip(overall_narration_path)
295
+ current_video_duration = final_video_clip_obj.duration
296
+ # If narration is shorter, trim video. If narration is longer, audio will be cut by video duration.
297
+ if narration_audio_clip.duration < current_video_duration:
298
  final_video_clip_obj = final_video_clip_obj.subclip(0, narration_audio_clip.duration)
299
+
300
+ final_video_clip_obj = final_video_clip_obj.set_audio(narration_audio_clip)
301
+ print("Overall narration added to video.")
302
+ except Exception as e: print(f"Error adding overall narration: {e}.")
303
 
304
  output_path = os.path.join(self.output_dir, output_filename)
305
  try:
306
+ print(f"Writing final video to: {output_path}")
307
+ final_video_clip_obj.write_videofile(output_path, fps=fps, codec='libx264', preset='medium',
308
  audio_codec='aac',
309
  temp_audiofile=os.path.join(self.output_dir, f'temp-audio-{os.urandom(4).hex()}.m4a'),
310
+ remove_temp=True, threads=os.cpu_count() or 2, logger='bar', bitrate="5000k") # Consider 'medium' preset
311
+ print(f"Video successfully created: {output_path}"); return output_path
312
+ except Exception as e: print(f"Error writing video file: {e}"); return None
313
  finally:
314
+ for c_item in processed_clips:
315
+ if hasattr(c_item, 'close'): c_item.close()
316
  if narration_audio_clip and hasattr(narration_audio_clip, 'close'): narration_audio_clip.close()
317
  if final_video_clip_obj and hasattr(final_video_clip_obj, 'close'): final_video_clip_obj.close()