Update app.py
Browse files
app.py
CHANGED
@@ -6,45 +6,34 @@ import logging
|
|
6 |
# --- Streamlit PermissionError Mitigation Attempts ---
|
7 |
if "STREAMLIT_CLIENT_GATHER_USAGE_STATS" not in os.environ:
|
8 |
os.environ["STREAMLIT_CLIENT_GATHER_USAGE_STATS"] = "false"
|
9 |
-
|
10 |
-
if "STREAMLIT_BROWSER_GATHERUSAGESTATS" not in os.environ: # For newer versions
|
11 |
os.environ["STREAMLIT_BROWSER_GATHERUSAGESTATS"] = "false"
|
12 |
-
|
13 |
-
streamlit_home_path_app = "/app/.streamlit_cai_config_v2" # Changed name slightly just in case
|
14 |
if "STREAMLIT_HOME" not in os.environ and os.getcwd().startswith("/app"):
|
15 |
os.environ["STREAMLIT_HOME"] = streamlit_home_path_app
|
16 |
-
try:
|
17 |
-
|
18 |
-
# print(f"INFO: app.py - Set STREAMLIT_HOME to: {streamlit_home_path_app}")
|
19 |
-
except Exception as e_mkdir_sh:
|
20 |
-
print(f"WARNING: app.py - Could not create STREAMLIT_HOME '{streamlit_home_path_app}': {e_mkdir_sh}")
|
21 |
|
22 |
from core.gemini_handler import GeminiHandler
|
23 |
from core.visual_engine import VisualEngine
|
24 |
from core.prompt_engineering import (
|
25 |
-
create_cinematic_treatment_prompt,
|
26 |
-
|
27 |
-
|
28 |
-
create_narration_script_prompt_enhanced,
|
29 |
-
create_scene_regeneration_prompt,
|
30 |
-
create_visual_regeneration_prompt
|
31 |
)
|
32 |
|
33 |
st.set_page_config(page_title="CineGen AI Ultra+", layout="wide", initial_sidebar_state="expanded")
|
34 |
-
logging.basicConfig(
|
35 |
-
level=logging.DEBUG,
|
36 |
-
format='%(asctime)s - %(name)s [%(levelname)s] - %(message)s (%(module)s.%(funcName)s:%(lineno)d)'
|
37 |
-
)
|
38 |
logger = logging.getLogger(__name__)
|
39 |
|
40 |
SHOT_TYPES_OPTIONS = [ "Director's Choice", "Establishing Shot", "Long Shot", "Full Shot", "Medium Long Shot (Cowboy)", "Medium Shot", "Medium Close-up", "Close-up", "Extreme Close-up", "Point of View (POV)", "Over the Shoulder", "Tracking Shot", "Dolly Zoom", "Crane Shot", "Aerial Shot", "Static Shot", "Dutch Angle", "Whip Pan"]
|
41 |
DEFAULT_SCENE_DURATION_SECS = 5; DEFAULT_SHOT_TYPE = "Director's Choice"; ASSET_TYPE_OPTIONS = ["Auto (Director's Choice)", "Image", "Video Clip"]
|
42 |
|
43 |
-
def load_api_key(key_name_st, key_name_e, service_n):
|
44 |
key_val = None; secrets_avail = hasattr(st, 'secrets')
|
45 |
try:
|
46 |
if secrets_avail and key_name_st in st.secrets: key_val = st.secrets.get(key_name_st);
|
47 |
-
if key_val: logger.info(f"API Key for {service_n} found in
|
48 |
except Exception as e: logger.warning(f"No st.secrets for {key_name_st} ({service_n}): {e}")
|
49 |
if not key_val and key_name_e in os.environ: key_val = os.environ.get(key_name_e);
|
50 |
if key_val: logger.info(f"API Key for {service_n} found in env var '{key_name_e}'.")
|
@@ -53,15 +42,17 @@ def load_api_key(key_name_st, key_name_e, service_n): # Renamed for clarity
|
|
53 |
|
54 |
if 'services_initialized_flag' not in st.session_state:
|
55 |
logger.info("APP_INIT: Initializing services and API keys...")
|
|
|
56 |
st.session_state.API_KEY_GEMINI = load_api_key("GEMINI_API_KEY", "GEMINI_API_KEY", "Gemini")
|
57 |
st.session_state.API_KEY_OPENAI = load_api_key("OPENAI_API_KEY", "OPENAI_API_KEY", "OpenAI/DALL-E")
|
58 |
st.session_state.API_KEY_ELEVENLABS = load_api_key("ELEVENLABS_API_KEY", "ELEVENLABS_API_KEY", "ElevenLabs")
|
59 |
st.session_state.API_KEY_PEXELS = load_api_key("PEXELS_API_KEY", "PEXELS_API_KEY", "Pexels")
|
60 |
st.session_state.CONFIG_ELEVENLABS_VOICE_ID = load_api_key("ELEVENLABS_VOICE_ID", "ELEVENLABS_VOICE_ID", "ElevenLabs Voice ID")
|
61 |
st.session_state.API_KEY_RUNWAYML = load_api_key("RUNWAY_API_KEY", "RUNWAY_API_KEY", "RunwayML")
|
|
|
62 |
if not st.session_state.API_KEY_GEMINI: st.error("CRITICAL: Gemini API Key missing!"); logger.critical("Gemini API Key missing."); st.stop()
|
63 |
try: st.session_state.gemini_service_handler = GeminiHandler(api_key=st.session_state.API_KEY_GEMINI); logger.info("GeminiHandler initialized.")
|
64 |
-
except Exception as e: st.error(f"CRITICAL:
|
65 |
try:
|
66 |
el_def_voice = "Rachel"; el_res_voice_id = st.session_state.CONFIG_ELEVENLABS_VOICE_ID or el_def_voice
|
67 |
st.session_state.visual_content_engine = VisualEngine(output_dir="temp_cinegen_media", default_elevenlabs_voice_id=el_res_voice_id)
|
@@ -69,8 +60,8 @@ if 'services_initialized_flag' not in st.session_state:
|
|
69 |
st.session_state.visual_content_engine.set_elevenlabs_api_key(st.session_state.API_KEY_ELEVENLABS, voice_id_from_secret=st.session_state.CONFIG_ELEVENLABS_VOICE_ID)
|
70 |
st.session_state.visual_content_engine.set_pexels_api_key(st.session_state.API_KEY_PEXELS)
|
71 |
st.session_state.visual_content_engine.set_runway_api_key(st.session_state.API_KEY_RUNWAYML)
|
72 |
-
logger.info("VisualEngine initialized and
|
73 |
-
except Exception as e: st.error(f"CRITICAL:
|
74 |
st.session_state.services_initialized_flag = True; logger.info("APP_INIT: Service initialization complete.")
|
75 |
|
76 |
PROJECT_SS_DEFAULTS = {'project_story_treatment_scenes_list':[],'project_scene_generation_prompts_list':[],'project_generated_assets_info_list':[],'project_final_video_path':None,'project_character_definitions_map':{},'project_global_style_keywords_str':"",'project_overall_narration_audio_path':None,'project_narration_script_text':""}
|
@@ -83,9 +74,9 @@ def initialize_new_project_data_in_session():
|
|
83 |
logger.info("PROJECT_DATA: New project data re-initialized.")
|
84 |
|
85 |
def generate_asset_for_scene_in_app(sc_idx, sc_data, asset_v=1, user_asset_type_ui="Auto (Director's Choice)"):
|
86 |
-
# (
|
87 |
logger.debug(f"APP: generate_asset_for_scene_in_app for S_idx {sc_idx}, ver {asset_v}, user_type: {user_asset_type_ui}")
|
88 |
-
gen_as_vid_final = False; gemini_sugg_type = sc_data.get('suggested_asset_type_κ°λ
','image').lower()
|
89 |
if user_asset_type_ui=="Image": gen_as_vid_final=False
|
90 |
elif user_asset_type_ui=="Video Clip": gen_as_vid_final=True
|
91 |
elif user_asset_type_ui=="Auto (Director's Choice)": gen_as_vid_final=(gemini_sugg_type=="video_clip")
|
@@ -93,13 +84,13 @@ def generate_asset_for_scene_in_app(sc_idx, sc_data, asset_v=1, user_asset_type_
|
|
93 |
prompt_base_img = construct_dalle_prompt(sc_data,st.session_state.project_character_definitions_map,st.session_state.project_global_style_keywords_str)
|
94 |
prompt_motion_vid = ""
|
95 |
if gen_as_vid_final: prompt_motion_vid=construct_text_to_video_prompt_for_gen4(sc_data,st.session_state.project_global_style_keywords_str) or sc_data.get('video_clip_motion_description_κ°λ
',"subtle motion")
|
96 |
-
if not prompt_base_img: logger.error(f"Base
|
97 |
while len(st.session_state.project_scene_generation_prompts_list)<=sc_idx:st.session_state.project_scene_generation_prompts_list.append("")
|
98 |
while len(st.session_state.project_generated_assets_info_list)<=sc_idx:st.session_state.project_generated_assets_info_list.append(None)
|
99 |
st.session_state.project_scene_generation_prompts_list[sc_idx]=prompt_motion_vid if gen_as_vid_final else prompt_base_img
|
100 |
fn_base_asset=f"scene_{sc_data.get('scene_number',sc_idx+1)}_asset_v{asset_v}"
|
101 |
rwy_dur=sc_data.get('video_clip_duration_estimate_secs_κ°λ
',sc_data.get('user_scene_duration_secs',DEFAULT_SCENE_DURATION_SECS));rwy_dur=max(1,rwy_dur)
|
102 |
-
asset_res_dict=st.session_state.visual_content_engine.generate_scene_asset(image_generation_prompt_text=prompt_base_img,motion_prompt_text_for_video=prompt_motion_vid,scene_data_dict=sc_data,scene_identifier_fn_base=fn_base_asset,generate_as_video_clip_flag=gen_as_vid_final,runway_target_dur_val=rwy_dur)
|
103 |
st.session_state.project_generated_assets_info_list[sc_idx]=asset_res_dict
|
104 |
if asset_res_dict and asset_res_dict.get('prompt_used')and st.session_state.project_scene_generation_prompts_list[sc_idx]!=asset_res_dict['prompt_used']:st.session_state.project_scene_generation_prompts_list[sc_idx]=asset_res_dict['prompt_used']
|
105 |
if asset_res_dict and not asset_res_dict['error']and asset_res_dict.get('path')and os.path.exists(asset_res_dict['path']):logger.info(f"APP: Asset ({asset_res_dict.get('type')}) OK S{sc_data.get('scene_number',sc_idx+1)}:{os.path.basename(asset_res_dict['path'])}");return True
|
@@ -109,228 +100,210 @@ with st.sidebar: # Sidebar UI
|
|
109 |
if os.path.exists("assets/logo.png"): st.image("assets/logo.png", width=150)
|
110 |
else: st.sidebar.markdown("## π¬ CineGen AI Ultra+"); logger.warning("assets/logo.png not found.")
|
111 |
st.markdown("### Creative Seed")
|
112 |
-
sb_user_idea = st.text_area("Core Idea:", "Lone wanderer, mythical oasis, post-apocalyptic desert, mirages, mechanical scavengers.", height=100, key="
|
113 |
-
sb_genre = st.selectbox("Genre:", ["Cyberpunk", "Sci-Fi", "Fantasy", "Noir", "Thriller", "Western", "Post-Apocalyptic", "Historical Drama", "Surreal"], index=6, key="
|
114 |
-
sb_mood = st.selectbox("Mood:", ["Hopeful yet Desperate", "Mysterious & Eerie", "Gritty & Tense", "Epic & Awe-Inspiring", "Melancholy & Reflective", "Whimsical"], index=0, key="
|
115 |
-
sb_num_scenes = st.slider("Key Scenes:", 1, 10, 1, key="
|
116 |
sb_guidance_opts = {"Standard": "standard", "Artistic": "more_artistic", "Experimental": "experimental_narrative"}
|
117 |
-
sb_guidance_key = st.selectbox("AI Director Style:", list(sb_guidance_opts.keys()), key="
|
118 |
sb_actual_guidance = sb_guidance_opts[sb_guidance_key]
|
119 |
|
120 |
-
if st.button("π Generate Cinematic Treatment", type="primary", key="
|
121 |
initialize_new_project_data_in_session()
|
122 |
if not sb_user_idea.strip(): st.warning("Please provide a story idea.")
|
123 |
else:
|
124 |
-
|
125 |
-
with st.status("AI Director is envisioning your masterpiece...", expanded=True) as main_status_operation:
|
126 |
try:
|
127 |
-
|
128 |
prompt_treat = create_cinematic_treatment_prompt(sb_user_idea, sb_genre, sb_mood, sb_num_scenes, sb_actual_guidance)
|
129 |
raw_treat_list = st.session_state.gemini_service_handler.generate_story_breakdown(prompt_treat)
|
130 |
if not isinstance(raw_treat_list, list) or not raw_treat_list: raise ValueError("Gemini invalid scene list.")
|
131 |
-
|
132 |
init_scenes = []
|
133 |
for scene_gemini in raw_treat_list:
|
134 |
-
gem_dur = scene_gemini.get('video_clip_duration_estimate_secs_κ°λ
', 0)
|
135 |
-
scene_gemini['
|
136 |
-
scene_gemini['user_shot_type'] = scene_gemini.get('PROACTIVE_camera_work_κ°λ
', DEFAULT_SHOT_TYPE)
|
137 |
-
scene_gemini['user_selected_asset_type'] = "Auto (Director's Choice)"
|
138 |
-
init_scenes.append(scene_gemini)
|
139 |
st.session_state.project_story_treatment_scenes_list = init_scenes
|
140 |
-
|
141 |
-
num_gen_sc =
|
142 |
-
|
143 |
-
st.session_state.project_generated_assets_info_list = [None]*num_gen_sc
|
144 |
-
logger.info(f"APP: P1 done. {num_gen_sc} scenes."); main_status_operation.update(label="Treatment complete! β
Generating assets...", state="running")
|
145 |
-
|
146 |
-
main_status_operation.write("Phase 2: Creating assets..."); logger.info("APP: P2 - Asset Gen.")
|
147 |
success_assets = 0
|
148 |
for i, scene_item in enumerate(st.session_state.project_story_treatment_scenes_list):
|
149 |
-
sc_num_log = scene_item.get('scene_number', i+1)
|
150 |
-
main_status_operation.write(f" Asset for Scene {sc_num_log}..."); logger.info(f" APP: Asset S{sc_num_log}.")
|
151 |
if generate_asset_for_scene_in_app(i, scene_item, asset_v=1): success_assets += 1
|
152 |
-
|
153 |
lbl_p2 = "Assets generated! "; next_state = "running"
|
154 |
-
if success_assets == 0 and num_gen_sc > 0: logger.error("APP: Asset gen FAIL all."); lbl_p2 = "Asset gen FAIL all."; next_state="error";
|
155 |
elif success_assets < num_gen_sc: logger.warning(f"APP: Assets partial ({success_assets}/{num_gen_sc})."); lbl_p2 = f"Assets partial ({success_assets}/{num_gen_sc}). "
|
156 |
-
|
157 |
if next_state == "error": st.stop()
|
158 |
-
|
159 |
-
main_status_operation.write("Phase 3: Narration script..."); logger.info("APP: P3 - Narration Script.") # Corrected var
|
160 |
voice_style = st.session_state.get("selected_voice_style_for_generation", "cinematic_trailer")
|
161 |
prompt_narr = create_narration_script_prompt_enhanced(st.session_state.project_story_treatment_scenes_list, sb_mood, sb_genre, voice_style)
|
162 |
st.session_state.project_narration_script_text = st.session_state.gemini_service_handler.generate_image_prompt(prompt_narr)
|
163 |
-
logger.info("APP: Narration script OK.");
|
164 |
-
|
165 |
-
main_status_operation.write("Phase 4: Synthesizing voice..."); logger.info("APP: P4 - Voice Synth.") # <<< CORRECTED VARIABLE NAME >>>
|
166 |
st.session_state.project_overall_narration_audio_path = st.session_state.visual_content_engine.generate_narration_audio(st.session_state.project_narration_script_text)
|
167 |
-
|
168 |
final_lbl = "All components ready! Review storyboard. π"; final_state = "complete"
|
169 |
if not st.session_state.project_overall_narration_audio_path: final_lbl = f"{lbl_p2}Storyboard ready (Voiceover failed)."; logger.warning("APP: Narration audio fail.")
|
170 |
else: logger.info("APP: Narration audio OK.")
|
171 |
-
|
172 |
-
|
173 |
-
except
|
174 |
-
except
|
175 |
-
except Exception as e_unhandled_main_flow: # Renamed e_gen
|
176 |
-
logger.error(f"APP_MAIN_FLOW: Unhandled Exception: {e_unhandled_main_flow}", exc_info=True)
|
177 |
-
main_status_operation.update(label=f"Unexpected Error: {e_unhandled_main_flow}", state="error", expanded=True) # <<< CORRECTED VARIABLE NAME >>>
|
178 |
|
179 |
-
# (Sidebar Fine-Tuning Options - ensure unique keys and correct session state access)
|
180 |
-
# ... (Characters, Global Style, Voice sections - use project_character_definitions_map, etc.)
|
181 |
with st.expander("Define Characters", expanded=False):
|
182 |
-
|
183 |
-
if st.button("Save Character", key="
|
184 |
-
if
|
185 |
else: st.warning("Name and description needed.")
|
186 |
if st.session_state.project_character_definitions_map: st.caption("Defined Characters:"); [st.markdown(f"**{k.title()}:** _{v}_") for k,v in st.session_state.project_character_definitions_map.items()]
|
187 |
with st.expander("Global Style Overrides", expanded=False):
|
188 |
-
|
189 |
-
|
190 |
-
|
191 |
-
|
192 |
-
if st.button("Apply Global Styles", key="
|
193 |
-
|
194 |
-
if
|
195 |
-
st.session_state.project_global_style_keywords_str =
|
196 |
-
if
|
197 |
else: st.info("Global styles cleared.")
|
198 |
-
if
|
199 |
with st.expander("Voice & Narration Style", expanded=False):
|
200 |
-
|
201 |
-
if hasattr(st.session_state, 'visual_content_engine')
|
202 |
-
|
203 |
-
|
204 |
-
|
205 |
-
if st.button("Set Narrator Voice & Style", key="
|
206 |
-
|
207 |
-
if hasattr(st.session_state, 'visual_content_engine'): st.session_state.visual_content_engine.elevenlabs_voice_id =
|
208 |
-
st.session_state.selected_voice_style_for_generation =
|
209 |
-
st.success(f"Narrator Voice
|
210 |
-
logger.info(f"User updated 11L Voice ID: {
|
211 |
|
212 |
-
# --- Main Content Area ---
|
213 |
st.header("π¬ Cinematic Storyboard & Treatment")
|
214 |
if st.session_state.project_narration_script_text:
|
215 |
with st.expander("π View Full Narration Script", expanded=False): st.markdown(f"> _{st.session_state.project_narration_script_text}_")
|
216 |
|
217 |
if not st.session_state.project_story_treatment_scenes_list: st.info("Use the sidebar to generate your cinematic treatment.")
|
218 |
else:
|
219 |
-
for
|
220 |
-
|
221 |
-
|
222 |
-
|
223 |
-
|
224 |
-
|
225 |
-
|
226 |
-
|
227 |
-
|
|
|
|
|
|
|
228 |
with st.expander("π Scene Treatment & Controls", expanded=True):
|
229 |
-
# (Display scene
|
230 |
-
st.markdown(f"**Beat:** {
|
231 |
st.markdown("##### Shot, Pacing & Asset Controls")
|
232 |
-
|
233 |
-
|
234 |
-
|
235 |
-
|
236 |
-
|
237 |
-
|
238 |
-
|
239 |
-
|
240 |
-
|
241 |
-
|
242 |
-
|
243 |
-
|
244 |
-
|
245 |
-
if new_asset_type_ui != curr_asset_type_ui: st.session_state.project_story_treatment_scenes_list[i_main_display]['user_selected_asset_type'] = new_asset_type_ui
|
246 |
st.markdown("---")
|
247 |
-
|
248 |
-
if
|
249 |
-
with st.popover("ποΈ View Asset Generation Prompt"): st.markdown(f"**Prompt used:**"); st.code(
|
250 |
-
|
251 |
-
if
|
252 |
-
|
253 |
-
|
254 |
-
|
255 |
-
if
|
256 |
-
|
257 |
-
if
|
258 |
-
elif
|
259 |
try:
|
260 |
-
with open(
|
261 |
-
st.video(
|
262 |
-
except Exception as
|
263 |
-
else: st.warning(f"Unknown asset type '{
|
264 |
else:
|
265 |
if st.session_state.project_story_treatment_scenes_list:
|
266 |
-
|
267 |
-
st.caption(
|
268 |
-
|
269 |
-
|
270 |
-
|
271 |
-
if st.button(f"π Update S{
|
272 |
-
if
|
273 |
-
with st.status(f"Updating S{
|
274 |
-
|
275 |
-
|
276 |
-
|
277 |
-
|
278 |
try:
|
279 |
-
|
280 |
-
|
281 |
-
|
282 |
-
st.session_state.project_story_treatment_scenes_list[
|
283 |
-
|
284 |
-
|
285 |
-
if
|
286 |
-
try:
|
287 |
-
except:
|
288 |
-
if generate_asset_for_scene_in_app(
|
289 |
-
else:
|
290 |
st.rerun()
|
291 |
-
except Exception as
|
292 |
-
else: st.warning("Please provide feedback
|
293 |
-
|
294 |
-
|
295 |
-
|
296 |
-
st.caption("Current Asset Generation Prompt:"); st.code(
|
297 |
-
|
298 |
-
if st.button(f"π Update S{
|
299 |
-
if
|
300 |
-
with st.status(f"Refining prompt & asset for S{
|
301 |
-
|
302 |
-
|
303 |
-
|
304 |
-
if not
|
305 |
-
|
306 |
-
try:
|
307 |
-
except Exception as
|
308 |
else:
|
309 |
-
|
310 |
-
if not
|
311 |
-
|
312 |
-
if
|
313 |
-
try:
|
314 |
-
except:
|
315 |
-
if generate_asset_for_scene_in_app(
|
316 |
-
else:
|
317 |
st.rerun()
|
318 |
else: st.warning("Please provide feedback for visual asset regeneration.")
|
319 |
st.markdown("---")
|
320 |
|
321 |
-
if st.session_state.project_story_treatment_scenes_list and any(
|
322 |
if st.button("π¬ Assemble Narrated Cinematic Animatic", key="assemble_video_main_area_btn_final_unique_3", type="primary", use_container_width=True):
|
323 |
with st.status("Assembling Ultra Animatic...", expanded=True) as status_video_assembly_final_op_main:
|
324 |
-
|
325 |
-
for
|
326 |
-
|
327 |
-
if
|
328 |
-
|
329 |
-
status_video_assembly_final_op_main.write(f"Adding S{
|
330 |
-
else: logger.warning(f"Skipping S{
|
331 |
-
if
|
332 |
status_video_assembly_final_op_main.write("Calling video engine..."); logger.info("APP: Calling visual_engine.assemble_animatic_from_assets")
|
333 |
-
st.session_state.project_final_video_path = st.session_state.visual_content_engine.assemble_animatic_from_assets(asset_data_list=
|
334 |
if st.session_state.project_final_video_path and os.path.exists(st.session_state.project_final_video_path): status_video_assembly_final_op_main.update(label="Ultra animatic assembled! π", state="complete", expanded=False); st.balloons()
|
335 |
else: status_video_assembly_final_op_main.update(label="Video assembly failed. Check logs.", state="error", expanded=True); logger.error("APP: Video assembly returned None or file does not exist.")
|
336 |
else: status_video_assembly_final_op_main.update(label="No valid assets for video assembly.", state="error", expanded=True); logger.warning("APP: No valid assets found for video assembly.")
|
|
|
6 |
# --- Streamlit PermissionError Mitigation Attempts ---
|
7 |
if "STREAMLIT_CLIENT_GATHER_USAGE_STATS" not in os.environ:
|
8 |
os.environ["STREAMLIT_CLIENT_GATHER_USAGE_STATS"] = "false"
|
9 |
+
if "STREAMLIT_BROWSER_GATHERUSAGESTATS" not in os.environ:
|
|
|
10 |
os.environ["STREAMLIT_BROWSER_GATHERUSAGESTATS"] = "false"
|
11 |
+
streamlit_home_path_app = "/app/.streamlit_cai_config_v2"
|
|
|
12 |
if "STREAMLIT_HOME" not in os.environ and os.getcwd().startswith("/app"):
|
13 |
os.environ["STREAMLIT_HOME"] = streamlit_home_path_app
|
14 |
+
try: os.makedirs(streamlit_home_path_app, exist_ok=True)
|
15 |
+
except Exception: pass # Ignore if fails, Dockerfile ENV is primary
|
|
|
|
|
|
|
16 |
|
17 |
from core.gemini_handler import GeminiHandler
|
18 |
from core.visual_engine import VisualEngine
|
19 |
from core.prompt_engineering import (
|
20 |
+
create_cinematic_treatment_prompt, construct_dalle_prompt,
|
21 |
+
construct_text_to_video_prompt_for_gen4, create_narration_script_prompt_enhanced,
|
22 |
+
create_scene_regeneration_prompt, create_visual_regeneration_prompt
|
|
|
|
|
|
|
23 |
)
|
24 |
|
25 |
st.set_page_config(page_title="CineGen AI Ultra+", layout="wide", initial_sidebar_state="expanded")
|
26 |
+
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s [%(levelname)s] - %(message)s (%(module)s.%(funcName)s:%(lineno)d)')
|
|
|
|
|
|
|
27 |
logger = logging.getLogger(__name__)
|
28 |
|
29 |
SHOT_TYPES_OPTIONS = [ "Director's Choice", "Establishing Shot", "Long Shot", "Full Shot", "Medium Long Shot (Cowboy)", "Medium Shot", "Medium Close-up", "Close-up", "Extreme Close-up", "Point of View (POV)", "Over the Shoulder", "Tracking Shot", "Dolly Zoom", "Crane Shot", "Aerial Shot", "Static Shot", "Dutch Angle", "Whip Pan"]
|
30 |
DEFAULT_SCENE_DURATION_SECS = 5; DEFAULT_SHOT_TYPE = "Director's Choice"; ASSET_TYPE_OPTIONS = ["Auto (Director's Choice)", "Image", "Video Clip"]
|
31 |
|
32 |
+
def load_api_key(key_name_st, key_name_e, service_n):
|
33 |
key_val = None; secrets_avail = hasattr(st, 'secrets')
|
34 |
try:
|
35 |
if secrets_avail and key_name_st in st.secrets: key_val = st.secrets.get(key_name_st);
|
36 |
+
if key_val: logger.info(f"API Key for {service_n} found in St secrets.")
|
37 |
except Exception as e: logger.warning(f"No st.secrets for {key_name_st} ({service_n}): {e}")
|
38 |
if not key_val and key_name_e in os.environ: key_val = os.environ.get(key_name_e);
|
39 |
if key_val: logger.info(f"API Key for {service_n} found in env var '{key_name_e}'.")
|
|
|
42 |
|
43 |
if 'services_initialized_flag' not in st.session_state:
|
44 |
logger.info("APP_INIT: Initializing services and API keys...")
|
45 |
+
# (API Key Loading as before)
|
46 |
st.session_state.API_KEY_GEMINI = load_api_key("GEMINI_API_KEY", "GEMINI_API_KEY", "Gemini")
|
47 |
st.session_state.API_KEY_OPENAI = load_api_key("OPENAI_API_KEY", "OPENAI_API_KEY", "OpenAI/DALL-E")
|
48 |
st.session_state.API_KEY_ELEVENLABS = load_api_key("ELEVENLABS_API_KEY", "ELEVENLABS_API_KEY", "ElevenLabs")
|
49 |
st.session_state.API_KEY_PEXELS = load_api_key("PEXELS_API_KEY", "PEXELS_API_KEY", "Pexels")
|
50 |
st.session_state.CONFIG_ELEVENLABS_VOICE_ID = load_api_key("ELEVENLABS_VOICE_ID", "ELEVENLABS_VOICE_ID", "ElevenLabs Voice ID")
|
51 |
st.session_state.API_KEY_RUNWAYML = load_api_key("RUNWAY_API_KEY", "RUNWAY_API_KEY", "RunwayML")
|
52 |
+
|
53 |
if not st.session_state.API_KEY_GEMINI: st.error("CRITICAL: Gemini API Key missing!"); logger.critical("Gemini API Key missing."); st.stop()
|
54 |
try: st.session_state.gemini_service_handler = GeminiHandler(api_key=st.session_state.API_KEY_GEMINI); logger.info("GeminiHandler initialized.")
|
55 |
+
except Exception as e: st.error(f"CRITICAL: GeminiHandler init fail: {e}"); logger.critical(f"GeminiHandler init fail: {e}", exc_info=True); st.stop()
|
56 |
try:
|
57 |
el_def_voice = "Rachel"; el_res_voice_id = st.session_state.CONFIG_ELEVENLABS_VOICE_ID or el_def_voice
|
58 |
st.session_state.visual_content_engine = VisualEngine(output_dir="temp_cinegen_media", default_elevenlabs_voice_id=el_res_voice_id)
|
|
|
60 |
st.session_state.visual_content_engine.set_elevenlabs_api_key(st.session_state.API_KEY_ELEVENLABS, voice_id_from_secret=st.session_state.CONFIG_ELEVENLABS_VOICE_ID)
|
61 |
st.session_state.visual_content_engine.set_pexels_api_key(st.session_state.API_KEY_PEXELS)
|
62 |
st.session_state.visual_content_engine.set_runway_api_key(st.session_state.API_KEY_RUNWAYML)
|
63 |
+
logger.info("VisualEngine initialized and keys set.")
|
64 |
+
except Exception as e: st.error(f"CRITICAL: VisualEngine init fail: {e}"); logger.critical(f"VisualEngine init fail: {e}", exc_info=True); st.warning("VisualEngine critical setup issue."); st.stop()
|
65 |
st.session_state.services_initialized_flag = True; logger.info("APP_INIT: Service initialization complete.")
|
66 |
|
67 |
PROJECT_SS_DEFAULTS = {'project_story_treatment_scenes_list':[],'project_scene_generation_prompts_list':[],'project_generated_assets_info_list':[],'project_final_video_path':None,'project_character_definitions_map':{},'project_global_style_keywords_str':"",'project_overall_narration_audio_path':None,'project_narration_script_text':""}
|
|
|
74 |
logger.info("PROJECT_DATA: New project data re-initialized.")
|
75 |
|
76 |
def generate_asset_for_scene_in_app(sc_idx, sc_data, asset_v=1, user_asset_type_ui="Auto (Director's Choice)", asset_ver_num=None):
    """Generate the visual asset (image or video clip) for one scene.

    Args:
        sc_idx: Zero-based index of the scene in the session-state lists.
        sc_data: The scene's treatment dict (Gemini output plus user overrides).
        asset_v: Version number embedded in the generated asset's filename.
        user_asset_type_ui: UI override -- "Image", "Video Clip", or
            "Auto (Director's Choice)" (defer to Gemini's suggestion).
        asset_ver_num: Backward-compatible alias for ``asset_v`` -- some call
            sites in this file use this keyword; it wins when provided.

    Returns:
        True if an asset file was produced on disk, False otherwise.
    """
    # BUG FIX: call sites pass asset_ver_num=..., which previously raised
    # TypeError because the signature only accepted asset_v.
    if asset_ver_num is not None:
        asset_v = asset_ver_num
    logger.debug(f"APP: generate_asset_for_scene_in_app for S_idx {sc_idx}, ver {asset_v}, user_type: {user_asset_type_ui}")

    # Decide image vs. video clip: explicit UI choice wins; "Auto" follows
    # Gemini's suggestion.
    # NOTE(review): the '_감독' key suffix renders as mojibake in this file --
    # confirm it matches the keys emitted by core.prompt_engineering.
    gemini_sugg_type = sc_data.get('suggested_asset_type_감독', 'image').lower()
    if user_asset_type_ui == "Image":
        gen_as_vid_final = False
    elif user_asset_type_ui == "Video Clip":
        gen_as_vid_final = True
    else:  # "Auto (Director's Choice)"
        gen_as_vid_final = (gemini_sugg_type == "video_clip")

    # The base image prompt is always needed (video generation starts from it).
    prompt_base_img = construct_dalle_prompt(
        sc_data,
        st.session_state.project_character_definitions_map,
        st.session_state.project_global_style_keywords_str)
    prompt_motion_vid = ""
    if gen_as_vid_final:
        prompt_motion_vid = (
            construct_text_to_video_prompt_for_gen4(sc_data, st.session_state.project_global_style_keywords_str)
            or sc_data.get('video_clip_motion_description_감독', "subtle motion"))
    if not prompt_base_img:
        logger.error(f"Base image prompt construction failed for S{sc_data.get('scene_number', sc_idx + 1)}")
        return False

    # Grow the parallel session-state lists until sc_idx is addressable.
    while len(st.session_state.project_scene_generation_prompts_list) <= sc_idx:
        st.session_state.project_scene_generation_prompts_list.append("")
    while len(st.session_state.project_generated_assets_info_list) <= sc_idx:
        st.session_state.project_generated_assets_info_list.append(None)
    st.session_state.project_scene_generation_prompts_list[sc_idx] = prompt_motion_vid if gen_as_vid_final else prompt_base_img

    fn_base_asset = f"scene_{sc_data.get('scene_number', sc_idx + 1)}_asset_v{asset_v}"
    # Runway clip duration: Gemini estimate, then user override, then default;
    # clamp to at least one second.
    rwy_dur = sc_data.get('video_clip_duration_estimate_secs_감독',
                          sc_data.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS))
    rwy_dur = max(1, rwy_dur)

    asset_res_dict = st.session_state.visual_content_engine.generate_scene_asset(
        image_generation_prompt_text=prompt_base_img,
        motion_prompt_text_for_video=prompt_motion_vid,
        scene_data_dict=sc_data,
        scene_identifier_fn_base=fn_base_asset,
        generate_as_video_clip_flag=gen_as_vid_final,
        runway_target_dur_val=rwy_dur)
    st.session_state.project_generated_assets_info_list[sc_idx] = asset_res_dict

    # Keep the stored prompt in sync with what the engine actually used.
    if (asset_res_dict and asset_res_dict.get('prompt_used')
            and st.session_state.project_scene_generation_prompts_list[sc_idx] != asset_res_dict['prompt_used']):
        st.session_state.project_scene_generation_prompts_list[sc_idx] = asset_res_dict['prompt_used']

    # .get('error') instead of ['error'] so a result dict without the key
    # cannot raise KeyError here.
    if (asset_res_dict and not asset_res_dict.get('error')
            and asset_res_dict.get('path') and os.path.exists(asset_res_dict['path'])):
        logger.info(f"APP: Asset ({asset_res_dict.get('type')}) OK S{sc_data.get('scene_number', sc_idx + 1)}:{os.path.basename(asset_res_dict['path'])}")
        return True
    # Failure tail (original continuation not visible in this chunk):
    # report and signal failure to the caller.
    logger.warning(f"APP: Asset generation failed for S{sc_data.get('scene_number', sc_idx + 1)}.")
    return False
|
|
100 |
# ---------------- Sidebar: creative seed & full generation pipeline ----------------
# NOTE(review): in the full file this section sits inside `with st.sidebar:`
# (the context-manager header is outside this chunk); emoji in the labels are
# reconstructed from a mojibaked rendering -- confirm against the original.
if os.path.exists("assets/logo.png"):
    st.image("assets/logo.png", width=150)
else:
    st.sidebar.markdown("## 🎬 CineGen AI Ultra+")
    logger.warning("assets/logo.png not found.")

st.markdown("### Creative Seed")
sb_user_idea = st.text_area("Core Idea:", "Lone wanderer, mythical oasis, post-apocalyptic desert, mirages, mechanical scavengers.", height=100, key="sb_user_idea_unique")
sb_genre = st.selectbox("Genre:", ["Cyberpunk", "Sci-Fi", "Fantasy", "Noir", "Thriller", "Western", "Post-Apocalyptic", "Historical Drama", "Surreal"], index=6, key="sb_genre_unique")
sb_mood = st.selectbox("Mood:", ["Hopeful yet Desperate", "Mysterious & Eerie", "Gritty & Tense", "Epic & Awe-Inspiring", "Melancholy & Reflective", "Whimsical"], index=0, key="sb_mood_unique")
sb_num_scenes = st.slider("Key Scenes:", 1, 10, 1, key="sb_num_scenes_unique")
sb_guidance_opts = {"Standard": "standard", "Artistic": "more_artistic", "Experimental": "experimental_narrative"}
sb_guidance_key = st.selectbox("AI Director Style:", list(sb_guidance_opts.keys()), key="sb_guidance_unique")
sb_actual_guidance = sb_guidance_opts[sb_guidance_key]

if st.button("🚀 Generate Cinematic Treatment", type="primary", key="sb_btn_gen_treat_unique", use_container_width=True):
    # Reset project state first, even if the idea turns out to be empty
    # (matches the original control flow).
    initialize_new_project_data_in_session()
    if not sb_user_idea.strip():
        st.warning("Please provide a story idea.")
    else:
        with st.status("AI Director is envisioning your masterpiece...", expanded=True) as main_status_op:
            try:
                # Phase 1: story treatment from Gemini.
                main_status_op.write("Phase 1: Crafting treatment...")
                logger.info("APP: P1 - Treatment Gen.")
                prompt_treat = create_cinematic_treatment_prompt(sb_user_idea, sb_genre, sb_mood, sb_num_scenes, sb_actual_guidance)
                raw_treat_list = st.session_state.gemini_service_handler.generate_story_breakdown(prompt_treat)
                if not isinstance(raw_treat_list, list) or not raw_treat_list:
                    raise ValueError("Gemini invalid scene list.")

                init_scenes = []
                for scene_gemini in raw_treat_list:
                    # Seed user-adjustable fields from Gemini's suggestions.
                    # NOTE(review): '_감독' key suffixes reconstructed from
                    # mojibake -- confirm against core.prompt_engineering.
                    gem_dur = scene_gemini.get('video_clip_duration_estimate_secs_감독', 0)
                    scene_gemini['user_scene_duration_secs'] = gem_dur if gem_dur > 0 else DEFAULT_SCENE_DURATION_SECS
                    scene_gemini['user_shot_type'] = scene_gemini.get('PROACTIVE_camera_work_감독', DEFAULT_SHOT_TYPE)
                    scene_gemini['user_selected_asset_type'] = "Auto (Director's Choice)"
                    init_scenes.append(scene_gemini)
                st.session_state.project_story_treatment_scenes_list = init_scenes
                num_gen_sc = len(init_scenes)
                st.session_state.project_scene_generation_prompts_list = [""] * num_gen_sc
                st.session_state.project_generated_assets_info_list = [None] * num_gen_sc
                logger.info(f"APP: P1 done. {num_gen_sc} scenes.")
                main_status_op.update(label="Treatment complete! Generating assets...", state="running")

                # Phase 2: one visual asset per scene.
                main_status_op.write("Phase 2: Creating assets...")
                logger.info("APP: P2 - Asset Gen.")
                success_assets = 0
                for i, scene_item in enumerate(st.session_state.project_story_treatment_scenes_list):
                    sc_num_log = scene_item.get('scene_number', i + 1)
                    main_status_op.write(f"  Asset for Scene {sc_num_log}...")
                    logger.info(f"  APP: Asset S{sc_num_log}.")
                    if generate_asset_for_scene_in_app(i, scene_item, asset_v=1):
                        success_assets += 1
                lbl_p2 = "Assets generated! "
                next_state = "running"
                if success_assets == 0 and num_gen_sc > 0:
                    logger.error("APP: Asset gen FAIL all.")
                    lbl_p2 = "Asset gen FAIL all."
                    next_state = "error"
                    main_status_op.update(label=lbl_p2, state=next_state, expanded=True)
                    st.stop()
                elif success_assets < num_gen_sc:
                    logger.warning(f"APP: Assets partial ({success_assets}/{num_gen_sc}).")
                    lbl_p2 = f"Assets partial ({success_assets}/{num_gen_sc}). "
                main_status_op.update(label=f"{lbl_p2}Generating narration...", state=next_state)
                if next_state == "error":  # defensive; the all-fail branch above already stopped
                    st.stop()

                # Phase 3: narration script.
                main_status_op.write("Phase 3: Narration script...")
                logger.info("APP: P3 - Narration Script.")
                voice_style = st.session_state.get("selected_voice_style_for_generation", "cinematic_trailer")
                prompt_narr = create_narration_script_prompt_enhanced(st.session_state.project_story_treatment_scenes_list, sb_mood, sb_genre, voice_style)
                # NOTE(review): generate_image_prompt is reused here to produce
                # plain narration text -- confirm GeminiHandler has no dedicated
                # text-generation method.
                st.session_state.project_narration_script_text = st.session_state.gemini_service_handler.generate_image_prompt(prompt_narr)
                logger.info("APP: Narration script OK.")
                main_status_op.update(label="Narration ready! Synthesizing voice...", state="running")

                # Phase 4: ElevenLabs voice synthesis (best effort).
                main_status_op.write("Phase 4: Synthesizing voice...")
                logger.info("APP: P4 - Voice Synth.")
                st.session_state.project_overall_narration_audio_path = st.session_state.visual_content_engine.generate_narration_audio(st.session_state.project_narration_script_text)

                final_lbl = "All components ready! Review storyboard. 🎉"
                final_state = "complete"
                if not st.session_state.project_overall_narration_audio_path:
                    final_lbl = f"{lbl_p2}Storyboard ready (Voiceover failed)."
                    logger.warning("APP: Narration audio fail.")
                else:
                    logger.info("APP: Narration audio OK.")
                main_status_op.update(label=final_lbl, state=final_state, expanded=False)
            except ValueError as e_val_main:
                logger.error(f"APP: ValueError: {e_val_main}", exc_info=True)
                main_status_op.update(label=f"Data/Response Error: {e_val_main}", state="error", expanded=True)
            except TypeError as e_type_main:
                logger.error(f"APP: TypeError: {e_type_main}", exc_info=True)
                main_status_op.update(label=f"Type Error: {e_type_main}", state="error", expanded=True)
            except Exception as e_unhandled_main_flow:
                logger.error(f"APP_MAIN_FLOW: Unhandled Exception: {e_unhandled_main_flow}", exc_info=True)
                main_status_op.update(label=f"Unexpected Error: {e_unhandled_main_flow}", state="error", expanded=True)
|
153 |
# ---------------- Sidebar: character, style and voice configuration ----------------
with st.expander("Define Characters", expanded=False):
    sb_char_name = st.text_input("Character Name", key="sb_char_name_unique_char_main")
    sb_char_desc = st.text_area("Visual Description", key="sb_char_desc_unique_char_main", height=100)
    if st.button("Save Character", key="sb_add_char_unique_char_main"):
        if sb_char_name and sb_char_desc:
            # Keys are lower-cased so prompt construction can match
            # case-insensitively.
            st.session_state.project_character_definitions_map[sb_char_name.strip().lower()] = sb_char_desc.strip()
            st.success(f"Char '{sb_char_name.strip()}' saved.")
        else:
            st.warning("Name and description needed.")
    if st.session_state.project_character_definitions_map:
        st.caption("Defined Characters:")
        # Plain loop instead of a side-effect list comprehension.
        for char_key, char_desc in st.session_state.project_character_definitions_map.items():
            st.markdown(f"**{char_key.title()}:** _{char_desc}_")

with st.expander("Global Style Overrides", expanded=False):
    sb_style_presets = {"Default": "", "Noir": "gritty neo-noir...", "Fantasy": "epic fantasy...", "Sci-Fi": "analog sci-fi..."}
    sb_selected_preset = st.selectbox("Base Style Preset:", list(sb_style_presets.keys()), key="sb_style_preset_unique_global_main")
    sb_custom_keywords = st.text_area("Additional Custom Keywords:", key="sb_custom_style_unique_global_main", height=80)
    sb_current_global_style = st.session_state.project_global_style_keywords_str
    if st.button("Apply Global Styles", key="sb_apply_styles_unique_global_main"):
        # Preset first, then user keywords appended after a comma.
        final_style = sb_style_presets[sb_selected_preset]
        if sb_custom_keywords.strip():
            final_style = f"{final_style}, {sb_custom_keywords.strip()}" if final_style else sb_custom_keywords.strip()
        st.session_state.project_global_style_keywords_str = final_style.strip()
        sb_current_global_style = final_style.strip()
        if sb_current_global_style:
            st.success("Global styles applied!")
        else:
            st.info("Global styles cleared.")
    if sb_current_global_style:
        st.caption(f"Active: \"{sb_current_global_style}\"")

with st.expander("Voice & Narration Style", expanded=False):
    # Default the text field to the engine's current voice when available.
    sb_engine_default_voice = "Rachel"
    if hasattr(st.session_state, 'visual_content_engine'):
        sb_engine_default_voice = st.session_state.visual_content_engine.elevenlabs_voice_id
    sb_user_voice_id = st.text_input("11L Voice ID (override):", value=sb_engine_default_voice, key="sb_el_voice_id_override_unique_global_main")
    sb_narration_styles = {"Cinematic Trailer": "cinematic_trailer", "Neutral Documentary": "documentary_neutral", "Character Introspection": "introspective_character"}
    sb_selected_narr_style = st.selectbox("Narration Script Style:", list(sb_narration_styles.keys()), key="sb_narr_style_sel_unique_global_main", index=0)
    if st.button("Set Narrator Voice & Style", key="sb_set_voice_btn_unique_global_main"):
        final_el_voice_id = sb_user_voice_id.strip() or st.session_state.get("CONFIG_ELEVENLABS_VOICE_ID", "Rachel")
        if hasattr(st.session_state, 'visual_content_engine'):
            st.session_state.visual_content_engine.elevenlabs_voice_id = final_el_voice_id
        st.session_state.selected_voice_style_for_generation = sb_narration_styles[sb_selected_narr_style]
        st.success(f"Narrator Voice: {final_el_voice_id}. Script Style: {sb_selected_narr_style}")
        logger.info(f"User updated 11L Voice ID: {final_el_voice_id}, Narr Style: {sb_selected_narr_style}")
|
|
184 |
# ---------------- Main area: storyboard display & per-scene editing ----------------
st.header("🎬 Cinematic Storyboard & Treatment")
if st.session_state.project_narration_script_text:
    with st.expander("📜 View Full Narration Script", expanded=False):
        st.markdown(f"> _{st.session_state.project_narration_script_text}_")

if not st.session_state.project_story_treatment_scenes_list:
    st.info("Use the sidebar to generate your cinematic treatment.")
else:
    for i_main_loop_content, scene_content_item_display in enumerate(st.session_state.project_story_treatment_scenes_list):
        scene_num_for_display = scene_content_item_display.get('scene_number', i_main_loop_content + 1)
        scene_title_for_display_main = scene_content_item_display.get('scene_title', 'Untitled Scene')
        # Unique widget-key base per scene to avoid Streamlit key clashes.
        key_base_main_area_widgets = f"s{scene_num_for_display}_main_widgets_loop_v2_{i_main_loop_content}"

        if scene_content_item_display.get('director_note'):
            st.info(f"🎬 Director Note S{scene_num_for_display}: {scene_content_item_display['director_note']}")
        st.subheader(f"SCENE {scene_num_for_display}: {scene_title_for_display_main.upper()}")

        treatment_display_col, visual_display_col = st.columns([0.45, 0.55])

        with treatment_display_col:
            with st.expander("📝 Scene Treatment & Controls", expanded=True):
                # Textual scene details from the treatment dict.
                # NOTE(review): '_감독' key suffixes reconstructed from
                # mojibake -- confirm against core.prompt_engineering.
                st.markdown(f"**Beat:** {scene_content_item_display.get('emotional_beat', 'N/A')}")
                st.markdown(f"**Setting:** {scene_content_item_display.get('setting_description', 'N/A')}")
                st.markdown(f"**Chars:** {', '.join(scene_content_item_display.get('characters_involved', ['N/A']))}")
                st.markdown(f"**Focus Moment:** _{scene_content_item_display.get('character_focus_moment', 'N/A')}_")
                st.markdown(f"**Plot Beat:** {scene_content_item_display.get('key_plot_beat', 'N/A')}")
                st.markdown(f"**Dialogue Hook:** `\"{scene_content_item_display.get('suggested_dialogue_hook', '...')}\"`")
                st.markdown("---")
                st.markdown(f"**Dir. Visual Style:** _{scene_content_item_display.get('PROACTIVE_visual_style_감독', 'N/A')}_")
                st.markdown(f"**Dir. Camera:** _{scene_content_item_display.get('PROACTIVE_camera_work_감독', 'N/A')}_")
                st.markdown(f"**Dir. Sound:** _{scene_content_item_display.get('PROACTIVE_sound_design_감독', 'N/A')}_")
                st.markdown("---")

                st.markdown("##### Shot, Pacing & Asset Controls")
                # Mutate the canonical scene dict in session state so edits
                # survive reruns.
                scene_state_ref = st.session_state.project_story_treatment_scenes_list[i_main_loop_content]
                ui_shot_type_current = scene_state_ref.get('user_shot_type', DEFAULT_SHOT_TYPE)
                try:
                    ui_shot_type_idx_val = SHOT_TYPES_OPTIONS.index(ui_shot_type_current)
                except ValueError:
                    ui_shot_type_idx_val = SHOT_TYPES_OPTIONS.index(DEFAULT_SHOT_TYPE)
                ui_shot_type_new = st.selectbox("Dominant Shot Type:", options=SHOT_TYPES_OPTIONS, index=ui_shot_type_idx_val, key=f"shot_type_{key_base_main_area_widgets}")
                if ui_shot_type_new != ui_shot_type_current:
                    scene_state_ref['user_shot_type'] = ui_shot_type_new
                ui_duration_current = scene_state_ref.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)
                ui_duration_new = st.number_input("Scene Duration (seconds):", min_value=1, max_value=300, value=ui_duration_current, step=1, key=f"duration_{key_base_main_area_widgets}")
                if ui_duration_new != ui_duration_current:
                    scene_state_ref['user_scene_duration_secs'] = ui_duration_new
                ui_asset_type_override_current = scene_state_ref.get('user_selected_asset_type', "Auto (Director's Choice)")
                try:
                    ui_asset_type_idx_val = ASSET_TYPE_OPTIONS.index(ui_asset_type_override_current)
                except ValueError:
                    ui_asset_type_idx_val = 0
                ui_asset_type_override_new = st.selectbox("Asset Type Override:", ASSET_TYPE_OPTIONS, index=ui_asset_type_idx_val, key=f"asset_type_{key_base_main_area_widgets}", help="Choose asset type. 'Auto' uses AI suggestion.")
                if ui_asset_type_override_new != ui_asset_type_override_current:
                    scene_state_ref['user_selected_asset_type'] = ui_asset_type_override_new

                st.markdown("---")
                prompt_for_asset_to_display = st.session_state.project_scene_generation_prompts_list[i_main_loop_content] if i_main_loop_content < len(st.session_state.project_scene_generation_prompts_list) else None
                if prompt_for_asset_to_display:
                    with st.popover("👁️ View Asset Generation Prompt"):
                        st.markdown(f"**Prompt used for current asset:**")
                        st.code(prompt_for_asset_to_display, language='text')
                pexels_query_to_display = scene_content_item_display.get('pexels_search_query_감독', None)
                if pexels_query_to_display:
                    st.caption(f"Pexels Fallback: `{pexels_query_to_display}`")

        with visual_display_col:
            current_asset_info_to_display = st.session_state.project_generated_assets_info_list[i_main_loop_content] if i_main_loop_content < len(st.session_state.project_generated_assets_info_list) else None
            if (current_asset_info_to_display and not current_asset_info_to_display.get('error')
                    and current_asset_info_to_display.get('path') and os.path.exists(current_asset_info_to_display['path'])):
                path_of_asset_for_display = current_asset_info_to_display['path']
                type_of_asset_for_display = current_asset_info_to_display.get('type', 'image')
                if type_of_asset_for_display == 'image':
                    st.image(path_of_asset_for_display, caption=f"S{scene_num_for_display} ({type_of_asset_for_display}): {scene_title_for_display_main}")
                elif type_of_asset_for_display == 'video':
                    try:
                        with open(path_of_asset_for_display, 'rb') as vid_file_obj_read:
                            video_bytes_for_st_video = vid_file_obj_read.read()
                        st.video(video_bytes_for_st_video, format="video/mp4", start_time=0)
                        st.caption(f"S{scene_num_for_display} ({type_of_asset_for_display}): {scene_title_for_display_main}")
                    except Exception as e_vid_display_main_loop:
                        st.error(f"Error displaying video {path_of_asset_for_display}: {e_vid_display_main_loop}")
                        logger.error(f"Error displaying video: {e_vid_display_main_loop}", exc_info=True)
                else:
                    st.warning(f"Unknown asset type '{type_of_asset_for_display}' for S{scene_num_for_display}.")
            else:
                if st.session_state.project_story_treatment_scenes_list:
                    error_msg_for_asset_display = current_asset_info_to_display.get('error_message', 'Visual pending or failed.') if current_asset_info_to_display else 'Visual pending or failed.'
                    st.caption(error_msg_for_asset_display)

        with st.popover(f"✏️ Edit S{scene_num_for_display} Treatment"):
            feedback_input_for_treatment_regen = st.text_area("Changes to treatment:", key=f"treat_fb_input_pop_{key_base_main_area_widgets}", height=150)
            if st.button(f"🔄 Update S{scene_num_for_display} Treatment", key=f"regen_treat_btn_pop_{key_base_main_area_widgets}"):
                if feedback_input_for_treatment_regen:
                    with st.status(f"Updating S{scene_num_for_display} Treatment & Asset...", expanded=True) as status_treatment_update_op_pop:
                        # Preserve the user's per-scene overrides across the regen.
                        preserved_user_shot_type = st.session_state.project_story_treatment_scenes_list[i_main_loop_content]['user_shot_type']
                        preserved_user_duration = st.session_state.project_story_treatment_scenes_list[i_main_loop_content]['user_scene_duration_secs']
                        preserved_user_asset_type = st.session_state.project_story_treatment_scenes_list[i_main_loop_content]['user_selected_asset_type']
                        # BUG FIX: original referenced undefined `scene_item_for_display`
                        # (NameError); the loop variable is scene_content_item_display.
                        prompt_for_gemini_scene_regen_pop = create_scene_regeneration_prompt(scene_content_item_display, feedback_input_for_treatment_regen, st.session_state.project_story_treatment_scenes_list)
                        try:
                            updated_scene_data_from_gemini_pop = st.session_state.gemini_service_handler.regenerate_scene_script_details(prompt_for_gemini_scene_regen_pop)
                            final_merged_updated_scene_data_pop = {**updated_scene_data_from_gemini_pop}
                            final_merged_updated_scene_data_pop['user_shot_type'] = preserved_user_shot_type
                            final_merged_updated_scene_data_pop['user_scene_duration_secs'] = preserved_user_duration
                            final_merged_updated_scene_data_pop['user_selected_asset_type'] = preserved_user_asset_type
                            st.session_state.project_story_treatment_scenes_list[i_main_loop_content] = final_merged_updated_scene_data_pop
                            status_treatment_update_op_pop.update(label="Treatment updated! Regenerating asset...", state="running")
                            # Bump the filename version so the new asset does not
                            # overwrite (or collide with a cached copy of) the old one.
                            version_for_regenerated_asset_pop = 1
                            if current_asset_info_to_display and current_asset_info_to_display.get('path') and os.path.exists(current_asset_info_to_display['path']):
                                try:
                                    base_fn_asset_regen_pop, _ = os.path.splitext(os.path.basename(current_asset_info_to_display['path']))
                                    version_for_regenerated_asset_pop = int(base_fn_asset_regen_pop.split('_v')[-1]) + 1 if '_v' in base_fn_asset_regen_pop else 2
                                except (ValueError, IndexError):
                                    version_for_regenerated_asset_pop = 2
                            # BUG FIX: keyword matches the function signature (asset_v);
                            # the original passed asset_ver_num= -> TypeError.
                            if generate_asset_for_scene_in_app(i_main_loop_content, final_merged_updated_scene_data_pop, asset_v=version_for_regenerated_asset_pop, user_asset_type_ui=preserved_user_asset_type):
                                status_treatment_update_op_pop.update(label="Treatment & Asset Updated! 🎉", state="complete", expanded=False)
                            else:
                                status_treatment_update_op_pop.update(label="Treatment updated, but asset regeneration failed.", state="complete", expanded=False)
                            st.rerun()
                        except Exception as e_treatment_regen_op_pop:
                            status_treatment_update_op_pop.update(label=f"Error during treatment regen: {e_treatment_regen_op_pop}", state="error")
                            logger.error(f"Scene treatment regeneration error: {e_treatment_regen_op_pop}", exc_info=True)
                else:
                    st.warning("Please provide feedback to update the treatment.")

        with st.popover(f"🎨 Edit S{scene_num_for_display} Visual Prompt/Asset"):
            prompt_to_edit_display_pop = st.session_state.project_scene_generation_prompts_list[i_main_loop_content] if i_main_loop_content < len(st.session_state.project_scene_generation_prompts_list) else "No prompt."
            st.caption("Current Asset Generation Prompt:")
            st.code(prompt_to_edit_display_pop, language='text')
            feedback_for_visual_asset_regen_input = st.text_area("Describe changes for visual asset:", key=f"visual_fb_input_pop_{key_base_main_area_widgets}", height=150)
            if st.button(f"🔄 Update S{scene_num_for_display} Asset", key=f"regen_visual_btn_pop_{key_base_main_area_widgets}"):
                if feedback_for_visual_asset_regen_input:
                    with st.status(f"Refining prompt & asset for S{scene_num_for_display}...", expanded=True) as status_visual_asset_regen_op_pop:
                        user_selected_asset_type_for_regen_pop = st.session_state.project_story_treatment_scenes_list[i_main_loop_content]['user_selected_asset_type']
                        # BUG FIX: `scene_item_for_display` was undefined here too.
                        is_video_type_for_regen_pop = (user_selected_asset_type_for_regen_pop == "Video Clip") or (
                            user_selected_asset_type_for_regen_pop == "Auto (Director's Choice)"
                            and scene_content_item_display.get('suggested_asset_type_감독') == 'video_clip')
                        newly_constructed_asset_prompt_regen_pop = ""
                        if not is_video_type_for_regen_pop:
                            gemini_refinement_prompt_viz_pop = create_visual_regeneration_prompt(prompt_to_edit_display_pop, feedback_for_visual_asset_regen_input, scene_content_item_display, st.session_state.project_character_definitions_map, st.session_state.project_global_style_keywords_str)
                            try:
                                newly_constructed_asset_prompt_regen_pop = st.session_state.gemini_service_handler.refine_image_prompt_from_feedback(gemini_refinement_prompt_viz_pop)
                                st.session_state.project_scene_generation_prompts_list[i_main_loop_content] = newly_constructed_asset_prompt_regen_pop
                                status_visual_asset_regen_op_pop.update(label="Image prompt refined! Regenerating asset...", state="running")
                            except Exception as e_gemini_refine_viz_pop:
                                status_visual_asset_regen_op_pop.update(label=f"Error refining prompt: {e_gemini_refine_viz_pop}", state="error")
                                logger.error(f"Visual prompt refinement error: {e_gemini_refine_viz_pop}", exc_info=True)
                                continue  # abandon this scene; move to the next in the outer loop
                        else:
                            newly_constructed_asset_prompt_regen_pop = construct_text_to_video_prompt_for_gen4(scene_content_item_display, st.session_state.project_global_style_keywords_str)
                            st.session_state.project_scene_generation_prompts_list[i_main_loop_content] = newly_constructed_asset_prompt_regen_pop
                            status_visual_asset_regen_op_pop.update(label="Video prompt reconstructed! Regenerating asset...", state="running")
                        if not newly_constructed_asset_prompt_regen_pop:
                            status_visual_asset_regen_op_pop.update(label="Prompt construction failed.", state="error")
                            continue
                        version_for_regenerated_visual_asset_pop = 1
                        if current_asset_info_to_display and current_asset_info_to_display.get('path') and os.path.exists(current_asset_info_to_display['path']):
                            try:
                                base_fn_viz_regen_pop, _ = os.path.splitext(os.path.basename(current_asset_info_to_display['path']))
                                version_for_regenerated_visual_asset_pop = int(base_fn_viz_regen_pop.split('_v')[-1]) + 1 if '_v' in base_fn_viz_regen_pop else 2
                            except (ValueError, IndexError):
                                version_for_regenerated_visual_asset_pop = 2
                        # BUG FIX: keyword matches the function signature (asset_v).
                        if generate_asset_for_scene_in_app(i_main_loop_content, st.session_state.project_story_treatment_scenes_list[i_main_loop_content], asset_v=version_for_regenerated_visual_asset_pop, user_asset_type_ui=user_selected_asset_type_for_regen_pop):
                            status_visual_asset_regen_op_pop.update(label="Asset Updated! 🎉", state="complete", expanded=False)
                        else:
                            status_visual_asset_regen_op_pop.update(label="Prompt updated, asset regen failed.", state="complete", expanded=False)
                    st.rerun()
                else:
                    st.warning("Please provide feedback for visual asset regeneration.")
        st.markdown("---")
+
# ---------------- Final assembly: stitch assets + narration into an animatic ----------------
# Button is shown only when at least one scene has a usable asset.
if st.session_state.project_story_treatment_scenes_list and any(
        asset_info and not asset_info.get('error') and asset_info.get('path')
        for asset_info in st.session_state.project_generated_assets_info_list
        if asset_info is not None):
    if st.button("🎬 Assemble Narrated Cinematic Animatic", key="assemble_video_main_area_btn_final_unique_3", type="primary", use_container_width=True):
        with st.status("Assembling Ultra Animatic...", expanded=True) as status_video_assembly_final_op_main:
            # Collect every scene whose asset actually exists on disk.
            assets_for_final_video_assembly_list_main = []
            for i_vid, scene_data_vid in enumerate(st.session_state.project_story_treatment_scenes_list):
                asset_info_vid = st.session_state.project_generated_assets_info_list[i_vid] if i_vid < len(st.session_state.project_generated_assets_info_list) else None
                if asset_info_vid and not asset_info_vid.get('error') and asset_info_vid.get('path') and os.path.exists(asset_info_vid['path']):
                    assets_for_final_video_assembly_list_main.append({
                        'path': asset_info_vid['path'],
                        'type': asset_info_vid.get('type', 'image'),
                        'scene_num': scene_data_vid.get('scene_number', i_vid + 1),
                        'key_action': scene_data_vid.get('key_plot_beat', ''),
                        'duration': scene_data_vid.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS),
                    })
                    status_video_assembly_final_op_main.write(f"Adding S{scene_data_vid.get('scene_number', i_vid + 1)} ({asset_info_vid.get('type')}).")
                else:
                    logger.warning(f"Skipping S{scene_data_vid.get('scene_number', i_vid + 1)} for video: No valid asset.")
            if assets_for_final_video_assembly_list_main:
                status_video_assembly_final_op_main.write("Calling video engine...")
                logger.info("APP: Calling visual_engine.assemble_animatic_from_assets")
                st.session_state.project_final_video_path = st.session_state.visual_content_engine.assemble_animatic_from_assets(
                    asset_data_list=assets_for_final_video_assembly_list_main,
                    overall_narration_path=st.session_state.project_overall_narration_audio_path,
                    output_filename="cinegen_ultra_animatic.mp4",
                    fps=24)
                if st.session_state.project_final_video_path and os.path.exists(st.session_state.project_final_video_path):
                    status_video_assembly_final_op_main.update(label="Ultra animatic assembled! 🎉", state="complete", expanded=False)
                    st.balloons()
                else:
                    status_video_assembly_final_op_main.update(label="Video assembly failed. Check logs.", state="error", expanded=True)
                    logger.error("APP: Video assembly returned None or file does not exist.")
            else:
                status_video_assembly_final_op_main.update(label="No valid assets for video assembly.", state="error", expanded=True)
                logger.warning("APP: No valid assets found for video assembly.")
|