# NOTE: extraction artifacts removed here — a file-size banner, a column of
# git-blame commit hashes, and a line-number gutter were prepended to this
# file by the export tool and are not part of the source.
# app.py
import streamlit as st
import os
import logging
# --- Streamlit PermissionError Mitigation Attempts ---
# Opt out of telemetry before Streamlit initializes, so it never attempts to
# write usage-stats/config files into an unwritable container path.
if "STREAMLIT_CLIENT_GATHER_USAGE_STATS" not in os.environ:
    os.environ["STREAMLIT_CLIENT_GATHER_USAGE_STATS"] = "false"
    print("INFO: app.py - Disabled Streamlit client usage stats gathering via env var.")
if "STREAMLIT_BROWSER_GATHERUSAGESTATS" not in os.environ:
    os.environ["STREAMLIT_BROWSER_GATHERUSAGESTATS"] = "false"
    print("INFO: app.py - Set STREAMLIT_BROWSER_GATHERUSAGESTATS to false.")

# When running inside the container (cwd under /app), point Streamlit's home
# directory at a location we can actually create and write to.
streamlit_home_path_app = "/app/.streamlit_cai_config"
if "STREAMLIT_HOME" not in os.environ and os.getcwd().startswith("/app"):
    os.environ["STREAMLIT_HOME"] = streamlit_home_path_app
    try:
        os.makedirs(streamlit_home_path_app, exist_ok=True)
        print(f"INFO: app.py - Set STREAMLIT_HOME to: {streamlit_home_path_app}")
    except Exception as e_mkdir_sh:
        # Best effort only: Streamlit falls back to its defaults if this fails.
        print(f"WARNING: app.py - Could not create STREAMLIT_HOME '{streamlit_home_path_app}': {e_mkdir_sh}")
from core.gemini_handler import GeminiHandler
from core.visual_engine import VisualEngine
from core.prompt_engineering import (
create_cinematic_treatment_prompt,
construct_dalle_prompt,
construct_text_to_video_prompt_for_gen4,
create_narration_script_prompt_enhanced,
create_scene_regeneration_prompt,
create_visual_regeneration_prompt
)
# Page chrome must be configured before any other Streamlit call renders.
st.set_page_config(
    page_title="CineGen AI Ultra+",
    layout="wide",
    initial_sidebar_state="expanded",
)

# DEBUG-level root logging; the format embeds module/function/line for tracing.
logging.basicConfig(
    format='%(asctime)s - %(name)s [%(levelname)s] - %(message)s (%(module)s.%(funcName)s:%(lineno)d)',
    level=logging.DEBUG,
)
logger = logging.getLogger(__name__)
# Camera-shot vocabulary offered by the per-scene "Dominant Shot Type" selector.
SHOT_TYPES_OPTIONS = [
    "Director's Choice",
    "Establishing Shot",
    "Long Shot",
    "Full Shot",
    "Medium Long Shot (Cowboy)",
    "Medium Shot",
    "Medium Close-up",
    "Close-up",
    "Extreme Close-up",
    "Point of View (POV)",
    "Over the Shoulder",
    "Tracking Shot",
    "Dolly Zoom",
    "Crane Shot",
    "Aerial Shot",
    "Static Shot",
    "Dutch Angle",
    "Whip Pan",
]
# Fallback scene duration (seconds) when no usable estimate is available.
DEFAULT_SCENE_DURATION_SECS = 5
DEFAULT_SHOT_TYPE = "Director's Choice"
# Per-scene asset-type override choices; "Auto" defers to the AI suggestion.
ASSET_TYPE_OPTIONS = ["Auto (Director's Choice)", "Image", "Video Clip"]
def load_api_key(key_name_streamlit, key_name_env, service_name):
    """Resolve an API key, preferring Streamlit secrets over environment vars.

    Looks up ``key_name_streamlit`` in ``st.secrets`` first (when available),
    then falls back to the ``key_name_env`` environment variable. Returns the
    key string, or None when neither source provides one; a warning is logged
    in that case so missing credentials are visible at startup.
    """
    key_value = None
    secrets_available = hasattr(st, 'secrets')
    try:
        if secrets_available and key_name_streamlit in st.secrets:
            key_value = st.secrets.get(key_name_streamlit)
            if key_value:
                logger.info(f"API Key for {service_name} found in Streamlit secrets.")
    except Exception as e:
        # st.secrets can raise when no secrets file exists; treat as "not found".
        logger.warning(f"Could not access st.secrets for {key_name_streamlit} ({service_name}): {e}")
    if not key_value and key_name_env in os.environ:
        key_value = os.environ.get(key_name_env)
        if key_value:
            logger.info(f"API Key for {service_name} found in env var '{key_name_env}'.")
    if not key_value:
        logger.warning(f"API Key for {service_name} (Key: {key_name_streamlit}/{key_name_env}) NOT FOUND.")
    return key_value
# One-time service bootstrap, guarded by a session flag so Streamlit reruns
# skip it: resolve every API key, then construct the Gemini and Visual engines.
if 'services_initialized_flag' not in st.session_state:
    logger.info("APP_INIT: Initializing services and API keys...")
    st.session_state.API_KEY_GEMINI = load_api_key("GEMINI_API_KEY", "GEMINI_API_KEY", "Gemini")
    st.session_state.API_KEY_OPENAI = load_api_key("OPENAI_API_KEY", "OPENAI_API_KEY", "OpenAI/DALL-E")
    st.session_state.API_KEY_ELEVENLABS = load_api_key("ELEVENLABS_API_KEY", "ELEVENLABS_API_KEY", "ElevenLabs")
    st.session_state.API_KEY_PEXELS = load_api_key("PEXELS_API_KEY", "PEXELS_API_KEY", "Pexels")
    st.session_state.CONFIG_ELEVENLABS_VOICE_ID = load_api_key("ELEVENLABS_VOICE_ID", "ELEVENLABS_VOICE_ID", "ElevenLabs Voice ID")
    st.session_state.API_KEY_RUNWAYML = load_api_key("RUNWAY_API_KEY", "RUNWAY_API_KEY", "RunwayML")
    # Gemini is the only hard requirement; halt the app without it.
    if not st.session_state.API_KEY_GEMINI:
        st.error("CRITICAL: Gemini API Key missing!")
        logger.critical("Gemini API Key missing.")
        st.stop()
    try:
        st.session_state.gemini_service_handler = GeminiHandler(api_key=st.session_state.API_KEY_GEMINI)
        logger.info("GeminiHandler initialized.")
    except Exception as e:
        st.error(f"CRITICAL: Failed to init GeminiHandler: {e}")
        logger.critical(f"GeminiHandler init failed: {e}", exc_info=True)
        st.stop()
    try:
        # Missing optional keys are passed through as None; the engine copes.
        el_default_voice = "Rachel"
        el_resolved_voice_id = st.session_state.CONFIG_ELEVENLABS_VOICE_ID or el_default_voice
        st.session_state.visual_content_engine = VisualEngine(output_dir="temp_cinegen_media", default_elevenlabs_voice_id=el_resolved_voice_id)
        st.session_state.visual_content_engine.set_openai_api_key(st.session_state.API_KEY_OPENAI)
        st.session_state.visual_content_engine.set_elevenlabs_api_key(st.session_state.API_KEY_ELEVENLABS, voice_id_from_secret=st.session_state.CONFIG_ELEVENLABS_VOICE_ID)
        st.session_state.visual_content_engine.set_pexels_api_key(st.session_state.API_KEY_PEXELS)
        st.session_state.visual_content_engine.set_runway_api_key(st.session_state.API_KEY_RUNWAYML)
        logger.info("VisualEngine initialized and API keys set.")
    except Exception as e:
        st.error(f"CRITICAL: Failed to init VisualEngine: {e}")
        logger.critical(f"VisualEngine init/key setting failed: {e}", exc_info=True)
        st.warning("VisualEngine critical setup issue.")
        st.stop()
    st.session_state.services_initialized_flag = True
    logger.info("APP_INIT: Service initialization complete.")
# Per-project session-state keys and their initial values; filled in lazily so
# a Streamlit rerun never clobbers data from an in-progress project.
PROJECT_SESSION_STATE_DEFAULTS = {
    'project_story_treatment_scenes_list': [],
    'project_scene_generation_prompts_list': [],
    'project_generated_assets_info_list': [],
    'project_final_video_path': None,
    'project_character_definitions_map': {},
    'project_global_style_keywords_str': "",
    'project_overall_narration_audio_path': None,
    'project_narration_script_text': ""
}
for _state_key, _state_default in PROJECT_SESSION_STATE_DEFAULTS.items():
    if _state_key not in st.session_state:
        st.session_state[_state_key] = _state_default
def initialize_new_project_data_in_session():
    """Reset the per-project session-state entries for a brand-new project.

    Character definitions and global style keywords are deliberately NOT
    cleared here, so they carry over across generations.
    """
    reset_values = (
        ('project_story_treatment_scenes_list', []),
        ('project_scene_generation_prompts_list', []),
        ('project_generated_assets_info_list', []),
        ('project_final_video_path', None),
        ('project_overall_narration_audio_path', None),
        ('project_narration_script_text', ""),
    )
    for state_key, fresh_value in reset_values:
        st.session_state[state_key] = fresh_value
    logger.info("PROJECT_DATA: New project data initialized.")
def generate_asset_for_scene_in_app(scene_idx_num, scene_data_obj, asset_ver_num=1, user_asset_type_choice_ui="Auto (Director's Choice)"):
logger.debug(f"APP: generate_asset_for_scene_in_app for S_idx {scene_idx_num}, ver {asset_ver_num}, user_type: {user_asset_type_choice_ui}")
final_decision_generate_as_video = False
gemini_suggested_type = scene_data_obj.get('suggested_asset_type_κ°λ
', 'image').lower()
if user_asset_type_choice_ui == "Image": final_decision_generate_as_video = False
elif user_asset_type_choice_ui == "Video Clip": final_decision_generate_as_video = True
elif user_asset_type_choice_ui == "Auto (Director's Choice)": final_decision_generate_as_video = (gemini_suggested_type == "video_clip")
logger.debug(f"APP: Final asset type decision: {'Video' if final_decision_generate_as_video else 'Image'}")
prompt_for_base_img_gen = construct_dalle_prompt(scene_data_obj, st.session_state.project_character_definitions_map, st.session_state.project_global_style_keywords_str)
prompt_for_video_motion = ""
if final_decision_generate_as_video:
prompt_for_video_motion = construct_text_to_video_prompt_for_gen4(scene_data_obj, st.session_state.project_global_style_keywords_str)
if not prompt_for_video_motion: prompt_for_video_motion = scene_data_obj.get('video_clip_motion_description_κ°λ
', "subtle ambient cinematic motion"); logger.warning(f"S{scene_data_obj.get('scene_number', scene_idx_num+1)}: Empty motion prompt, default.")
if not prompt_for_base_img_gen: logger.error(f"Base image prompt construction failed for S{scene_data_obj.get('scene_number', scene_idx_num+1)}"); return False
while len(st.session_state.project_scene_generation_prompts_list) <= scene_idx_num: st.session_state.project_scene_generation_prompts_list.append("")
while len(st.session_state.project_generated_assets_info_list) <= scene_idx_num: st.session_state.project_generated_assets_info_list.append(None)
st.session_state.project_scene_generation_prompts_list[scene_idx_num] = prompt_for_video_motion if final_decision_generate_as_video else prompt_for_base_img_gen
filename_base_for_output_asset = f"scene_{scene_data_obj.get('scene_number', scene_idx_num+1)}_asset_v{asset_ver_num}"
duration_for_rwy_vid = scene_data_obj.get('video_clip_duration_estimate_secs_κ°λ
', scene_data_obj.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS))
if duration_for_rwy_vid <= 0 : duration_for_rwy_vid = DEFAULT_SCENE_DURATION_SECS
generated_asset_info_dict = st.session_state.visual_content_engine.generate_scene_asset(
image_generation_prompt_text=prompt_for_base_img_gen,
motion_prompt_text_for_video=prompt_for_video_motion,
scene_data_dict=scene_data_obj, # CORRECTED
scene_identifier_fn_base=filename_base_for_output_asset,
generate_as_video_clip_flag=final_decision_generate_as_video,
runway_target_dur_val=duration_for_rwy_vid
)
st.session_state.project_generated_assets_info_list[scene_idx_num] = generated_asset_info_dict
if generated_asset_info_dict and generated_asset_info_dict.get('prompt_used') and st.session_state.project_scene_generation_prompts_list[scene_idx_num] != generated_asset_info_dict['prompt_used']:
st.session_state.project_scene_generation_prompts_list[scene_idx_num] = generated_asset_info_dict['prompt_used']
if generated_asset_info_dict and not generated_asset_info_dict['error'] and generated_asset_info_dict.get('path') and os.path.exists(generated_asset_info_dict['path']):
logger.info(f"APP: Asset ({generated_asset_info_dict.get('type')}) generated for S{scene_data_obj.get('scene_number', scene_idx_num+1)}: {os.path.basename(generated_asset_info_dict['path'])}")
return True
else:
error_msg_from_asset_gen = generated_asset_info_dict.get('error_message', 'Unknown error') if generated_asset_info_dict else 'Asset result None'
logger.warning(f"APP: Asset gen FAILED S{scene_data_obj.get('scene_number', scene_idx_num+1)}. Type: {'Video' if final_decision_generate_as_video else 'Image'}. Err: {error_msg_from_asset_gen}")
current_prompt = st.session_state.project_scene_generation_prompts_list[scene_idx_num]
st.session_state.project_generated_assets_info_list[scene_idx_num] = {'path': None, 'type': 'none', 'error': True, 'error_message': error_msg_from_asset_gen, 'prompt_used': current_prompt}
return False
# --- Sidebar UI ---
with st.sidebar:
# (Logo and rest of sidebar UI - keeping variables distinct as in last app.py version)
# Make sure "assets/logo.png" exists in your repository
if os.path.exists("assets/logo.png"):
st.image("assets/logo.png", width=150)
else:
st.sidebar.markdown("## π¬ CineGen AI Ultra+") # Fallback title
logger.warning("assets/logo.png not found. Displaying text title instead.")
# ... (rest of sidebar UI as in the last full app.py version, with distinct variable names for UI elements) ...
st.markdown("### Creative Seed")
ui_user_idea = st.text_area("Core Story Idea / Theme:", "A lone wanderer searches for a mythical oasis in a vast, post-apocalyptic desert, haunted by mirages and mechanical scavengers.", height=100, key="ui_user_idea_unique")
ui_genre = st.selectbox("Primary Genre:", ["Cyberpunk", "Sci-Fi", "Fantasy", "Noir", "Thriller", "Western", "Post-Apocalyptic", "Historical Drama", "Surreal"], index=6, key="ui_genre_unique")
ui_mood = st.selectbox("Overall Mood:", ["Hopeful yet Desperate", "Mysterious & Eerie", "Gritty & Tense", "Epic & Awe-Inspiring", "Melancholy & Reflective", "Whimsical & Lighthearted"], index=0, key="ui_mood_unique")
ui_num_scenes = st.slider("Number of Key Scenes:", 1, 10, 1, key="ui_num_scenes_unique")
ui_creative_guidance_map = {"Standard Director": "standard", "Artistic Visionary": "more_artistic", "Experimental Storyteller": "experimental_narrative"}
ui_selected_guidance_key = st.selectbox("AI Creative Director Style:", options=list(ui_creative_guidance_map.keys()), key="ui_creative_guidance_unique")
ui_actual_guidance = ui_creative_guidance_map[ui_selected_guidance_key]
if st.button("π Generate Cinematic Treatment", type="primary", key="btn_generate_treatment_unique", use_container_width=True):
initialize_new_project_data_in_session() # Corrected function name
if not ui_user_idea.strip(): st.warning("Please provide a story idea.")
else:
with st.status("AI Director is envisioning your masterpiece...", expanded=True) as main_status_operation:
try:
main_status_operation.write("Phase 1: Gemini crafting cinematic treatment... π"); logger.info("APP_MAIN_FLOW: Phase 1 - Treatment Gen.")
prompt_for_treatment = create_cinematic_treatment_prompt(ui_user_idea, ui_genre, ui_mood, ui_num_scenes, ui_actual_guidance)
list_of_raw_treatment_scenes = st.session_state.gemini_service_handler.generate_story_breakdown(prompt_for_treatment)
if not isinstance(list_of_raw_treatment_scenes, list) or not list_of_raw_treatment_scenes: raise ValueError("Gemini returned invalid scene list format.")
temp_processed_scenes = []
for scene_data_gemini in list_of_raw_treatment_scenes:
gemini_dur_est = scene_data_gemini.get('video_clip_duration_estimate_secs_κ°λ
', 0)
scene_data_gemini['user_scene_duration_secs'] = gemini_dur_est if gemini_dur_est > 0 else DEFAULT_SCENE_DURATION_SECS
scene_data_gemini['user_shot_type'] = scene_data_gemini.get('PROACTIVE_camera_work_κ°λ
', DEFAULT_SHOT_TYPE)
scene_data_gemini['user_selected_asset_type'] = "Auto (Director's Choice)"
temp_processed_scenes.append(scene_data_gemini)
st.session_state.project_story_treatment_scenes_list = temp_processed_scenes # Corrected key
num_scenes_generated = len(st.session_state.project_story_treatment_scenes_list) # Corrected key
st.session_state.project_scene_generation_prompts_list = [""]*num_scenes_generated # Corrected key
st.session_state.project_generated_assets_info_list = [None]*num_scenes_generated # Corrected key
logger.info(f"APP_MAIN_FLOW: Phase 1 complete. {num_scenes_generated} scenes."); main_status_operation.update(label="Treatment complete! β
Generating visual assets...", state="running")
main_status_operation.write("Phase 2: Creating visual assets..."); logger.info("APP_MAIN_FLOW: Phase 2 - Asset Gen.")
num_successful_assets = 0
for idx, scene_item_data in enumerate(st.session_state.project_story_treatment_scenes_list): # Corrected key
scene_num_log = scene_item_data.get('scene_number', idx+1)
main_status_operation.write(f" Processing asset for Scene {scene_num_log}..."); logger.info(f" APP_MAIN_FLOW: Processing asset for Scene {scene_num_log}.")
if generate_asset_for_scene_in_app(idx, scene_item_data, asset_ver_num=1): num_successful_assets += 1
status_label_p2 = "Visual assets generated! "; next_state_p2 = "running"
if num_successful_assets == 0 and num_scenes_generated > 0: logger.error("APP_MAIN_FLOW: Asset gen FAILED for all scenes."); status_label_p2 = "Asset gen FAILED for all scenes."; next_state_p2="error"; main_status_operation.update(label=status_label_p2, state=next_state_p2, expanded=True); st.stop()
elif num_successful_assets < num_scenes_generated: logger.warning(f"APP_MAIN_FLOW: Assets partially generated ({num_successful_assets}/{num_scenes_generated})."); status_label_p2 = f"Assets partially done ({num_successful_assets}/{num_scenes_generated}). "
main_status_operation.update(label=f"{status_label_p2}Generating narration...", state=next_state_p2)
if next_state_p2 == "error": st.stop()
main_status_operation.write("Phase 3: Generating narration script..."); logger.info("APP_MAIN_FLOW: Phase 3 - Narration Script.")
voice_style_narr = st.session_state.get("selected_voice_style_for_generation", "cinematic_trailer")
prompt_narr_script = create_narration_script_prompt_enhanced(st.session_state.project_story_treatment_scenes_list, ui_mood, ui_genre, voice_style_narr) # Corrected key
st.session_state.project_narration_script_text = st.session_state.gemini_service_handler.generate_image_prompt(prompt_narr_script) # Corrected key
logger.info("APP_MAIN_FLOW: Narration script generated."); main_status_operation.update(label="Narration script ready! Synthesizing voice...", state="running")
status_operation.write("Phase 4: Synthesizing voice (ElevenLabs)..."); logger.info("APP_MAIN_FLOW: Phase 4 - Voice Synthesis.")
st.session_state.project_overall_narration_audio_path = st.session_state.visual_content_engine.generate_narration_audio(st.session_state.project_narration_script_text) # Corrected key
final_status_msg = "All initial components ready! Review storyboard. π"; final_op_status_val = "complete"
if not st.session_state.project_overall_narration_audio_path: final_status_msg = f"{status_label_p2}Storyboard ready (Voiceover failed/skipped)."; logger.warning("APP_MAIN_FLOW: Narration audio failed/skipped.") # Corrected key
else: logger.info("APP_MAIN_FLOW: Narration audio generated.")
status_operation.update(label=final_status_msg, state=final_op_status_val, expanded=False)
except ValueError as e_val: logger.error(f"APP_MAIN_FLOW: ValueError: {e_val}", exc_info=True); status_operation.update(label=f"Data/Response Error: {e_val}", state="error", expanded=True);
except TypeError as e_type: logger.error(f"APP_MAIN_FLOW: TypeError: {e_type}", exc_info=True); status_operation.update(label=f"Type Error: {e_type}", state="error", expanded=True);
except Exception as e_gen: logger.error(f"APP_MAIN_FLOW: Unhandled Exception: {e_gen}", exc_info=True); status_operation.update(label=f"Unexpected Error: {e_gen}", state="error", expanded=True);
with st.expander("Define Characters", expanded=False):
sb_char_name = st.text_input("Character Name", key="sb_char_name_unique_3"); sb_char_desc = st.text_area("Visual Description", key="sb_char_desc_unique_3", height=100)
if st.button("Save Character", key="sb_add_char_unique_3"):
if sb_char_name and sb_char_desc: st.session_state.project_character_definitions_map[sb_char_name.strip().lower()] = sb_char_desc.strip(); st.success(f"Character '{sb_char_name.strip()}' saved.") # Corrected key
else: st.warning("Name and description needed.")
if st.session_state.project_character_definitions_map: st.caption("Defined Characters:"); [st.markdown(f"**{k.title()}:** _{v}_") for k,v in st.session_state.project_character_definitions_map.items()] # Corrected key
with st.expander("Global Style Overrides", expanded=False):
sb_style_presets = { "Default (Director's Choice)": "", "Hyper-Realistic Gritty Noir": "hyper-realistic gritty neo-noir...", "Surreal Dreamscape Fantasy": "surreal dreamscape...", "Vintage Analog Sci-Fi": "70s/80s analog sci-fi..."}
sb_selected_preset = st.selectbox("Base Style Preset:", options=list(sb_style_presets.keys()), key="sb_style_preset_unique_3")
sb_custom_keywords = st.text_area("Additional Custom Style Keywords:", key="sb_custom_style_unique_3", height=80)
sb_current_global_style = st.session_state.project_global_style_keywords_str # Corrected key
if st.button("Apply Global Styles", key="sb_apply_styles_unique_3"):
final_style = sb_style_presets[sb_selected_preset];
if sb_custom_keywords.strip(): final_style = f"{final_style}, {sb_custom_keywords.strip()}" if final_style else sb_custom_keywords.strip()
st.session_state.project_global_style_keywords_str = final_style.strip(); sb_current_global_style = final_style.strip() # Corrected key
if sb_current_global_style: st.success("Global styles applied!")
else: st.info("Global style additions cleared.")
if sb_current_global_style: st.caption(f"Active: \"{sb_current_global_style}\"")
with st.expander("Voice & Narration Style", expanded=False):
sb_engine_default_voice = "Rachel"
if hasattr(st.session_state, 'visual_content_engine') and st.session_state.visual_content_engine: sb_engine_default_voice = st.session_state.visual_content_engine.elevenlabs_voice_id
sb_user_voice_id = st.text_input("ElevenLabs Voice ID (override):", value=sb_engine_default_voice, key="sb_el_voice_id_override_unique_3")
sb_narration_styles = {"Cinematic Trailer": "cinematic_trailer", "Neutral Documentary": "documentary_neutral", "Character Introspection": "introspective_character"}
sb_selected_narr_style = st.selectbox("Narration Script Style:", list(sb_narration_styles.keys()), key="sb_narr_style_sel_unique_3", index=0)
if st.button("Set Narrator Voice & Style", key="sb_set_voice_btn_unique_3"):
final_el_voice_id = sb_user_voice_id.strip() or st.session_state.get("CONFIG_ELEVENLABS_VOICE_ID", "Rachel")
if hasattr(st.session_state, 'visual_content_engine'): st.session_state.visual_content_engine.elevenlabs_voice_id = final_el_voice_id
st.session_state.selected_voice_style_for_generation = sb_narration_styles[sb_selected_narr_style]
st.success(f"Narrator Voice ID: {final_el_voice_id}. Script Style: {sb_selected_narr_style}")
logger.info(f"User updated 11L Voice ID: {final_el_voice_id}, Narration Script Style: {sb_selected_narr_style}")
# --- Main Content Area ---
st.header("π¬ Cinematic Storyboard & Treatment")
# Surface the full narration script in a collapsible section once it exists.
if st.session_state.project_narration_script_text:
    with st.expander("π View Full Narration Script", expanded=False):
        st.markdown(f"> _{st.session_state.project_narration_script_text}_")
# <<< CHECK THIS KEY FOR THE MAIN LOOP >>>
if not st.session_state.project_story_treatment_scenes_list:
st.info("Use the sidebar to generate your cinematic treatment.")
else:
# (Loop through project_story_treatment_scenes_list and display - ensure all st.session_state access uses corrected keys)
# ... (The rest of your main content display loop, ensure keys like
# st.session_state.project_scene_generation_prompts_list[i_loop_main_display]
# st.session_state.project_generated_assets_info_list[i_loop_main_display]
# are used correctly)
for i_main_loop_content, scene_content_item_display in enumerate(st.session_state.project_story_treatment_scenes_list):
scene_num_for_display = scene_content_item_display.get('scene_number', i_main_loop_content + 1)
scene_title_for_display_main = scene_content_item_display.get('scene_title', 'Untitled Scene')
key_base_main_area_widgets = f"s{scene_num_for_display}_main_widgets_loop_{i_main_loop_content}" # Ensure unique keys
if "director_note" in scene_content_item_display and scene_content_item_display['director_note']: st.info(f"π¬ Director Note S{scene_num_for_display}: {scene_content_item_display['director_note']}")
st.subheader(f"SCENE {scene_num_for_display}: {scene_title_for_display_main.upper()}"); main_col_treatment_area, main_col_visual_area = st.columns([0.45, 0.55])
with main_col_treatment_area:
with st.expander("π Scene Treatment & Controls", expanded=True):
st.markdown(f"**Beat:** {scene_content_item_display.get('emotional_beat', 'N/A')}"); st.markdown(f"**Setting:** {scene_content_item_display.get('setting_description', 'N/A')}"); st.markdown(f"**Chars:** {', '.join(scene_content_item_display.get('characters_involved', ['N/A']))}"); st.markdown(f"**Focus Moment:** _{scene_content_item_display.get('character_focus_moment', 'N/A')}_"); st.markdown(f"**Plot Beat:** {scene_content_item_display.get('key_plot_beat', 'N/A')}"); st.markdown(f"**Dialogue Hook:** `\"{scene_content_item_display.get('suggested_dialogue_hook', '...')}\"`"); st.markdown("---"); st.markdown(f"**Dir. Visual Style:** _{scene_content_item_display.get('PROACTIVE_visual_style_κ°λ
', 'N/A')}_"); st.markdown(f"**Dir. Camera:** _{scene_content_item_display.get('PROACTIVE_camera_work_κ°λ
', 'N/A')}_"); st.markdown(f"**Dir. Sound:** _{scene_content_item_display.get('PROACTIVE_sound_design_κ°λ
', 'N/A')}_"); st.markdown("---")
st.markdown("##### Shot, Pacing & Asset Controls")
ui_shot_type_current = st.session_state.project_story_treatment_scenes_list[i_main_loop_content].get('user_shot_type', DEFAULT_SHOT_TYPE)
try: ui_shot_type_idx_val = SHOT_TYPES_OPTIONS.index(ui_shot_type_current)
except ValueError: ui_shot_type_idx_val = SHOT_TYPES_OPTIONS.index(DEFAULT_SHOT_TYPE)
ui_shot_type_new = st.selectbox("Dominant Shot Type:", options=SHOT_TYPES_OPTIONS, index=ui_shot_type_idx_val, key=f"shot_type_{key_base_main_area_widgets}")
if ui_shot_type_new != ui_shot_type_current: st.session_state.project_story_treatment_scenes_list[i_main_loop_content]['user_shot_type'] = ui_shot_type_new
ui_duration_current = st.session_state.project_story_treatment_scenes_list[i_main_loop_content].get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)
ui_duration_new = st.number_input("Scene Duration (seconds):", min_value=1, max_value=300, value=ui_duration_current, step=1, key=f"duration_{key_base_main_area_widgets}")
if ui_duration_new != ui_duration_current: st.session_state.project_story_treatment_scenes_list[i_main_loop_content]['user_scene_duration_secs'] = ui_duration_new
ui_asset_type_override_current = st.session_state.project_story_treatment_scenes_list[i_main_loop_content].get('user_selected_asset_type', "Auto (Director's Choice)")
try: ui_asset_type_idx_val = ASSET_TYPE_OPTIONS.index(ui_asset_type_override_current)
except ValueError: ui_asset_type_idx_val = 0
ui_asset_type_override_new = st.selectbox("Asset Type Override:", ASSET_TYPE_OPTIONS, index=ui_asset_type_idx_val, key=f"asset_type_{key_base_main_area_widgets}", help="Choose asset type. 'Auto' uses AI suggestion.")
if ui_asset_type_override_new != ui_asset_type_override_current: st.session_state.project_story_treatment_scenes_list[i_main_loop_content]['user_selected_asset_type'] = ui_asset_type_override_new
st.markdown("---")
prompt_for_asset_to_display = st.session_state.project_scene_generation_prompts_list[i_main_loop_content] if i_main_loop_content < len(st.session_state.project_scene_generation_prompts_list) else None
if prompt_for_asset_to_display:
with st.popover("ποΈ View Asset Generation Prompt"): st.markdown(f"**Prompt used for current asset:**"); st.code(prompt_for_asset_to_display, language='text')
pexels_query_to_display = scene_content_item_display.get('pexels_search_query_κ°λ
', None)
if pexels_query_to_display: st.caption(f"Pexels Fallback: `{pexels_query_to_display}`")
with main_col_visual_area:
current_asset_info_to_display = st.session_state.project_generated_assets_info_list[i_main_loop_content] if i_main_loop_content < len(st.session_state.project_generated_assets_info_list) else None
if current_asset_info_to_display and not current_asset_info_to_display.get('error') and current_asset_info_to_display.get('path') and os.path.exists(current_asset_info_to_display['path']):
path_of_asset_for_display = current_asset_info_to_display['path']; type_of_asset_for_display = current_asset_info_to_display.get('type', 'image')
if type_of_asset_for_display == 'image': st.image(path_of_asset_for_display, caption=f"S{scene_num_for_display} ({type_of_asset_for_display}): {scene_title_for_display_main}")
elif type_of_asset_for_display == 'video':
try:
with open(path_of_asset_for_display, 'rb') as vid_file_obj_read: video_bytes_for_st_video = vid_file_obj_read.read()
st.video(video_bytes_for_st_video, format="video/mp4", start_time=0); st.caption(f"S{scene_num_for_display} ({type_of_asset_for_display}): {scene_title_for_display_main}")
except Exception as e_vid_display_main_loop: st.error(f"Error displaying video {path_of_asset_for_display}: {e_vid_display_main_loop}"); logger.error(f"Error displaying video: {e_vid_display_main_loop}", exc_info=True)
else: st.warning(f"Unknown asset type '{type_of_asset_for_display}' for S{scene_num_for_display}.")
else:
if st.session_state.project_story_treatment_scenes_list: # Corrected key
error_msg_for_asset_display = current_asset_info_to_display.get('error_message', 'Visual pending or failed.') if current_asset_info_to_display else 'Visual pending or failed.'
st.caption(error_msg_for_asset_display)
# Popover: let the user revise this scene's treatment text via Gemini, then
# regenerate the scene's visual asset so it matches the updated treatment.
with st.popover(f"βοΈ Edit S{scene_num_for_display} Treatment"):
    feedback_input_for_treatment_regen = st.text_area(
        "Changes to treatment:",
        key=f"treat_fb_input_pop_{widget_key_base_main_area}",
        height=150,
    )
    if st.button(f"π Update S{scene_num_for_display} Treatment",
                 key=f"regen_treat_btn_pop_{widget_key_base_main_area}"):
        if feedback_input_for_treatment_regen:
            with st.status(f"Updating S{scene_num_for_display} Treatment & Asset...", expanded=True) as status_treatment_update_op_pop:
                # Preserve the per-scene user overrides so Gemini's rewrite
                # cannot clobber them.
                scene_state = st.session_state.project_story_treatment_scenes_list[i_loop_main_display]
                preserved_user_shot_type = scene_state['user_shot_type']
                preserved_user_duration = scene_state['user_scene_duration_secs']
                preserved_user_asset_type = scene_state['user_selected_asset_type']
                prompt_for_gemini_scene_regen_pop = create_scene_regeneration_prompt(
                    scene_item_for_display,
                    feedback_input_for_treatment_regen,
                    st.session_state.project_story_treatment_scenes_list,
                )
                try:
                    updated_scene_data_from_gemini_pop = st.session_state.gemini_service_handler.regenerate_scene_script_details(prompt_for_gemini_scene_regen_pop)
                    final_merged_updated_scene_data_pop = {**updated_scene_data_from_gemini_pop}
                    # Re-apply the preserved user selections on top of Gemini's output.
                    final_merged_updated_scene_data_pop['user_shot_type'] = preserved_user_shot_type
                    final_merged_updated_scene_data_pop['user_scene_duration_secs'] = preserved_user_duration
                    final_merged_updated_scene_data_pop['user_selected_asset_type'] = preserved_user_asset_type
                    st.session_state.project_story_treatment_scenes_list[i_loop_main_display] = final_merged_updated_scene_data_pop
                    status_treatment_update_op_pop.update(label="Treatment updated! Regenerating asset...", state="running")
                    # Bump the asset version so the regenerated file does not
                    # overwrite the previous one (filenames end in "_v<N>").
                    version_for_regenerated_asset_pop = 1
                    if current_asset_info_to_display and current_asset_info_to_display.get('path') and os.path.exists(current_asset_info_to_display['path']):
                        try:
                            base_fn_asset_regen_pop, _ = os.path.splitext(os.path.basename(current_asset_info_to_display['path']))
                            version_for_regenerated_asset_pop = int(base_fn_asset_regen_pop.split('_v')[-1]) + 1 if '_v' in base_fn_asset_regen_pop else 2
                        except (ValueError, IndexError):
                            # Was a bare `except:` — narrowed to the parse
                            # failures a malformed filename can actually raise.
                            version_for_regenerated_asset_pop = 2
                    if generate_asset_for_scene_in_app(i_loop_main_display, final_merged_updated_scene_data_pop, asset_ver_num=version_for_regenerated_asset_pop, user_selected_asset_type_override=preserved_user_asset_type):
                        status_treatment_update_op_pop.update(label="Treatment & Asset Updated! π", state="complete", expanded=False)
                    else:
                        status_treatment_update_op_pop.update(label="Treatment updated, but asset regeneration failed.", state="complete", expanded=False)
                except Exception as e_treatment_regen_op_pop:
                    status_treatment_update_op_pop.update(label=f"Error during treatment regen: {e_treatment_regen_op_pop}", state="error")
                    logger.error(f"Scene treatment regeneration error: {e_treatment_regen_op_pop}", exc_info=True)
                else:
                    # st.rerun() signals a rerun by raising a control-flow
                    # exception; it was previously inside the `try`, where the
                    # broad `except Exception` could swallow it. The `else`
                    # branch runs only on success and is not covered by the
                    # handler.
                    st.rerun()
        else:
            st.warning("Please provide feedback to update the treatment.")
with st.popover(f"π¨ Edit S{scene_num_for_display} Visual Prompt/Asset"):
prompt_to_edit_display_pop = st.session_state.project_scene_generation_prompts_list[i_loop_main_display] if i_loop_main_display < len(st.session_state.project_scene_generation_prompts_list) else "No prompt." # Corrected
st.caption("Current Asset Generation Prompt:"); st.code(prompt_to_edit_display_pop, language='text')
feedback_for_visual_asset_regen_input = st.text_area("Describe changes for visual asset:", key=f"visual_fb_input_pop_{widget_key_base_main_area}", height=150)
if st.button(f"π Update S{scene_num_for_display} Asset", key=f"regen_visual_btn_pop_{widget_key_base_main_area}"):
if feedback_for_visual_asset_regen_input:
with st.status(f"Refining prompt & asset for S{scene_num_for_display}...", expanded=True) as status_visual_asset_regen_op_pop:
user_selected_asset_type_for_regen_pop = st.session_state.project_story_treatment_scenes_list[i_loop_main_display]['user_selected_asset_type'] # Corrected
is_video_type_for_regen_pop = (user_selected_asset_type_for_regen_pop == "Video Clip") or (user_selected_asset_type_for_regen_pop == "Auto (Director's Choice)" and scene_item_for_display.get('suggested_asset_type_κ°λ
') == 'video_clip')
newly_constructed_asset_prompt_regen_pop = ""
if not is_video_type_for_regen_pop:
gemini_refinement_prompt_viz_pop = create_visual_regeneration_prompt(prompt_to_edit_display_pop, feedback_for_visual_asset_regen_input, scene_item_for_display, st.session_state.project_character_definitions_map, st.session_state.project_global_style_keywords_str) # Corrected
try: newly_constructed_asset_prompt_regen_pop = st.session_state.gemini_service_handler.refine_image_prompt_from_feedback(gemini_refinement_prompt_viz_pop); st.session_state.project_scene_generation_prompts_list[i_loop_main_display] = newly_constructed_asset_prompt_regen_pop; status_visual_asset_regen_op_pop.update(label="Image prompt refined! Regenerating asset...", state="running") # Corrected
except Exception as e_gemini_refine_viz_pop: status_visual_asset_regen_op_pop.update(label=f"Error refining prompt: {e_gemini_refine_viz_pop}", state="error"); logger.error(f"Visual prompt refinement error: {e_gemini_refine_viz_pop}", exc_info=True); continue
else:
newly_constructed_asset_prompt_regen_pop = construct_text_to_video_prompt_for_gen4(scene_item_for_display, st.session_state.project_global_style_keywords_str); st.session_state.project_scene_generation_prompts_list[i_loop_main_display] = newly_constructed_asset_prompt_regen_pop; status_visual_asset_regen_op_pop.update(label="Video prompt reconstructed! Regenerating asset...", state="running") # Corrected
if not newly_constructed_asset_prompt_regen_pop: status_visual_asset_regen_op_pop.update(label="Prompt construction failed.", state="error"); continue
version_for_regenerated_visual_asset_pop = 1
if current_asset_info_to_display and current_asset_info_to_display.get('path') and os.path.exists(current_asset_info_to_display['path']):
try: base_fn_viz_regen_pop,_=os.path.splitext(os.path.basename(current_asset_info_to_display['path'])); version_for_regenerated_visual_asset_pop = int(base_fn_viz_regen_pop.split('_v')[-1])+1 if '_v' in base_fn_viz_regen_pop else 2
except: version_for_regenerated_visual_asset_pop = 2
if generate_asset_for_scene_in_app(i_loop_main_display, st.session_state.project_story_treatment_scenes_list[i_loop_main_display], asset_ver_num=version_for_regenerated_visual_asset_pop, user_selected_asset_type_override=user_selected_asset_type_for_regen_pop): status_visual_asset_regen_op_pop.update(label="Asset Updated! π", state="complete", expanded=False) # Corrected
else: status_visual_asset_regen_op_pop.update(label="Prompt updated, asset regen failed.", state="complete", expanded=False)
st.rerun()
else: st.warning("Please provide feedback for visual asset regeneration.")
st.markdown("---")
# Assembly section: offer the "assemble animatic" action only when at least
# one scene reports a successfully generated asset.
if st.session_state.project_story_treatment_scenes_list and any(
    info and not info.get('error') and info.get('path')
    for info in st.session_state.project_generated_assets_info_list
    if info is not None
):
    if st.button("π¬ Assemble Narrated Cinematic Animatic",
                 key="assemble_video_main_area_btn_final_unique_2",
                 type="primary", use_container_width=True):
        with st.status("Assembling Ultra Animatic...", expanded=True) as status_video_assembly_final_op:
            scenes_for_assembly = st.session_state.project_story_treatment_scenes_list
            assets_info_for_assembly = st.session_state.project_generated_assets_info_list
            assets_for_final_video_assembly_list_main = []
            for scene_idx, scene_payload in enumerate(scenes_for_assembly):
                asset_info = assets_info_for_assembly[scene_idx] if scene_idx < len(assets_info_for_assembly) else None
                scene_no = scene_payload.get('scene_number', scene_idx + 1)
                # Only scenes whose asset file actually exists make it into the cut.
                if asset_info and not asset_info.get('error') and asset_info.get('path') and os.path.exists(asset_info['path']):
                    assets_for_final_video_assembly_list_main.append({
                        'path': asset_info['path'],
                        'type': asset_info.get('type', 'image'),
                        'scene_num': scene_no,
                        'key_action': scene_payload.get('key_plot_beat', ''),
                        'duration': scene_payload.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS),
                    })
                    status_video_assembly_final_op.write(f"Adding S{scene_no} ({asset_info.get('type')}).")
                else:
                    logger.warning(f"Skipping S{scene_no} for video: No valid asset.")
            if assets_for_final_video_assembly_list_main:
                status_video_assembly_final_op.write("Calling video engine...")
                logger.info("APP: Calling visual_engine.assemble_animatic_from_assets")
                st.session_state.project_final_video_path = st.session_state.visual_content_engine.assemble_animatic_from_assets(
                    asset_data_list=assets_for_final_video_assembly_list_main,
                    overall_narration_path=st.session_state.project_overall_narration_audio_path,
                    output_filename="cinegen_ultra_animatic.mp4",
                    fps=24,
                )
                if st.session_state.project_final_video_path and os.path.exists(st.session_state.project_final_video_path):
                    status_video_assembly_final_op.update(label="Ultra animatic assembled! π", state="complete", expanded=False)
                    st.balloons()
                else:
                    status_video_assembly_final_op.update(label="Video assembly failed. Check logs.", state="error", expanded=True)
                    logger.error("APP: Video assembly returned None or file does not exist.")
            else:
                status_video_assembly_final_op.update(label="No valid assets for video assembly.", state="error", expanded=True)
                logger.warning("APP: No valid assets found for video assembly.")
elif st.session_state.project_story_treatment_scenes_list:
    st.info("Generate visual assets for your scenes before attempting to assemble the animatic.")
# Playback section: once an assembled animatic exists on disk, embed it and
# offer a download of the same bytes.
if st.session_state.project_final_video_path and os.path.exists(st.session_state.project_final_video_path):
    st.header("π¬ Generated Cinematic Animatic")
    try:
        with open(st.session_state.project_final_video_path, 'rb') as final_video_file_obj_display:
            final_video_bytes_for_display = final_video_file_obj_display.read()
        st.video(final_video_bytes_for_display, format="video/mp4")
        st.download_button(
            label="Download Ultra Animatic",
            data=final_video_bytes_for_display,
            file_name=os.path.basename(st.session_state.project_final_video_path),
            mime="video/mp4",
            use_container_width=True,
            key="download_video_main_area_btn_final_unique_2",
        )
    except Exception as e_final_video_display_op_main:
        st.error(f"Error displaying final animatic video: {e_final_video_display_op_main}")
        logger.error(f"Error displaying final animatic video: {e_final_video_display_op_main}", exc_info=True)
st.sidebar.markdown("---"); st.sidebar.caption("CineGen AI Ultra+ | Visionary Cinematic Pre-Production") |