# app.py
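# Streamlit front-end for CineGen AI Ultra+: drafts a cinematic treatment with Gemini, generates
# per-scene image/video assets, synthesizes narration audio, and assembles a narrated animatic.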
import streamlit as st
from core.gemini_handler import GeminiHandler
from core.visual_engine import VisualEngine
from core.prompt_engineering import (
create_cinematic_treatment_prompt,
construct_dalle_prompt,
construct_text_to_video_prompt_for_gen4,
create_narration_script_prompt_enhanced,
create_scene_regeneration_prompt,
create_visual_regeneration_prompt
)
import os
import logging
# --- Mitigate Streamlit PermissionError on Hugging Face Spaces ---
# Streamlit tries to write to ~/.streamlit or /.streamlit.
# By setting a global STREAMLIT_CONFIG_DIR or specific telemetry flags,
# we can sometimes avoid this. A common fix is to disable telemetry.
# However, the error specifically mentions '/.streamlit', which is root.
# Setting STREAMLIT_HOME in Dockerfile is a more robust fix for this specific path.
# If that's not enough, disabling usage stats collection is a good fallback.
if "STREAMLIT_CLIENT_GATHER_USAGE_STATS" not in os.environ:
os.environ["STREAMLIT_CLIENT_GATHER_USAGE_STATS"] = "false"
# Also, ensure Streamlit doesn't try to create a global config dir if HOME is weird
if "STREAMLIT_HOME" not in os.environ and os.getcwd().startswith("/app"): # Common for HF
os.environ["STREAMLIT_HOME"] = "/app/.streamlit_config" # Use a writable path within the app dir
# --- Configuration & Initialization ---
st.set_page_config(page_title="CineGen AI Ultra+", layout="wide", initial_sidebar_state="expanded")
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# --- Global Definitions ---
SHOT_TYPES_OPTIONS = [ "Director's Choice", "Establishing Shot", "Long Shot", "Full Shot", "Medium Long Shot (Cowboy)", "Medium Shot", "Medium Close-up", "Close-up", "Extreme Close-up", "Point of View (POV)", "Over the Shoulder", "Tracking Shot", "Dolly Zoom", "Crane Shot", "Aerial Shot", "Static Shot", "Dutch Angle", "Whip Pan"]
DEFAULT_SCENE_DURATION_SECS = 5
DEFAULT_SHOT_TYPE = "Director's Choice"
ASSET_TYPE_OPTIONS = ["Auto (Director's Choice)", "Image", "Video Clip"]
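# Resolve an API key, preferring Streamlit secrets over environment variables; log where it was found.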
def load_api_key(key_name_streamlit, key_name_env, service_name):
key = None; secrets_available = hasattr(st, 'secrets')
try:
if secrets_available and key_name_streamlit in st.secrets:
key = st.secrets[key_name_streamlit]
if key: logger.info(f"{service_name} API Key found in Streamlit secrets.")
except Exception as e: logger.warning(f"Could not access st.secrets for {key_name_streamlit}: {e}")
if not key and key_name_env in os.environ:
key = os.environ[key_name_env]
if key: logger.info(f"{service_name} API Key found in environment variable.")
if not key: logger.warning(f"{service_name} API Key NOT FOUND. Related features may be disabled or use fallbacks.")
return key
if 'services_initialized' not in st.session_state:
logger.info("Initializing services and API keys for the first time this session...")
st.session_state.GEMINI_API_KEY = load_api_key("GEMINI_API_KEY", "GEMINI_API_KEY", "Gemini")
st.session_state.OPENAI_API_KEY = load_api_key("OPENAI_API_KEY", "OPENAI_API_KEY", "OpenAI/DALL-E")
st.session_state.ELEVENLABS_API_KEY = load_api_key("ELEVENLABS_API_KEY", "ELEVENLABS_API_KEY", "ElevenLabs")
st.session_state.PEXELS_API_KEY = load_api_key("PEXELS_API_KEY", "PEXELS_API_KEY", "Pexels")
st.session_state.ELEVENLABS_VOICE_ID_CONFIG = load_api_key("ELEVENLABS_VOICE_ID", "ELEVENLABS_VOICE_ID", "ElevenLabs Voice ID")
st.session_state.RUNWAY_API_KEY = load_api_key("RUNWAY_API_KEY", "RUNWAY_API_KEY", "RunwayML")
if not st.session_state.GEMINI_API_KEY: st.error("CRITICAL: Gemini API Key missing!"); logger.critical("Gemini API Key missing."); st.stop()
try: st.session_state.gemini_handler = GeminiHandler(api_key=st.session_state.GEMINI_API_KEY); logger.info("GeminiHandler initialized.")
except Exception as e: st.error(f"Failed to init GeminiHandler: {e}"); logger.critical(f"GeminiHandler init failed: {e}", exc_info=True); st.stop()
try:
default_voice_id_el = "Rachel"; configured_voice_id_el = st.session_state.ELEVENLABS_VOICE_ID_CONFIG or default_voice_id_el
st.session_state.visual_engine = VisualEngine(output_dir="temp_cinegen_media", default_elevenlabs_voice_id=configured_voice_id_el)
st.session_state.visual_engine.set_openai_api_key(st.session_state.OPENAI_API_KEY)
st.session_state.visual_engine.set_elevenlabs_api_key(st.session_state.ELEVENLABS_API_KEY, voice_id_from_secret=st.session_state.ELEVENLABS_VOICE_ID_CONFIG)
st.session_state.visual_engine.set_pexels_api_key(st.session_state.PEXELS_API_KEY)
st.session_state.visual_engine.set_runway_api_key(st.session_state.RUNWAY_API_KEY)
logger.info("VisualEngine initialized and API keys set.")
except Exception as e: st.error(f"Failed to init VisualEngine: {e}"); logger.critical(f"VisualEngine init/key setting failed: {e}", exc_info=True); st.warning("VisualEngine critical setup issue.")
st.session_state.services_initialized = True; logger.info("Service initialization complete.")
for key_ss, default_val_ss in [ ('story_treatment_scenes', []), ('scene_generation_prompts', []), ('generated_scene_assets_info', []), ('video_path', None), ('character_definitions', {}), ('global_style_additions', ""), ('overall_narration_audio_path', None), ('narration_script_display', "")]:
if key_ss not in st.session_state: st.session_state[key_ss] = default_val_ss
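# Clear per-project state (scenes, prompts, generated assets, video, narration) ahead of a fresh treatment run.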
def initialize_new_project_state():
st.session_state.story_treatment_scenes = []; st.session_state.scene_generation_prompts = []; st.session_state.generated_scene_assets_info = []
st.session_state.video_path, st.session_state.overall_narration_audio_path, st.session_state.narration_script_display = None, None, ""
logger.info("New project state initialized.")
def generate_asset_for_scene_wrapper(scene_idx, scene_data_item_for_asset, version_num=1, user_selected_asset_type_override="Auto (Director's Choice)"): # Renamed scene_data
generate_as_video_clip_final = False
gemini_suggested_asset_type = scene_data_item_for_asset.get('suggested_asset_type_감독', 'image').lower()
if user_selected_asset_type_override == "Image": generate_as_video_clip_final = False
elif user_selected_asset_type_override == "Video Clip": generate_as_video_clip_final = True
elif user_selected_asset_type_override == "Auto (Director's Choice)": generate_as_video_clip_final = (gemini_suggested_asset_type == "video_clip")
image_gen_prompt_text = construct_dalle_prompt(scene_data_item_for_asset, st.session_state.character_definitions, st.session_state.global_style_additions)
motion_gen_prompt_text = ""
if generate_as_video_clip_final:
motion_gen_prompt_text = construct_text_to_video_prompt_for_gen4(scene_data_item_for_asset, st.session_state.global_style_additions)
if not motion_gen_prompt_text: motion_gen_prompt_text = scene_data_item_for_asset.get('video_clip_motion_description_감독', "subtle ambient motion"); logger.warning(f"S{scene_data_item_for_asset.get('scene_number', scene_idx+1)}: Empty motion prompt, using default.")
if not image_gen_prompt_text: logger.error(f"Base image prompt construction failed for S{scene_data_item_for_asset.get('scene_number', scene_idx+1)}"); return False
while len(st.session_state.scene_generation_prompts) <= scene_idx: st.session_state.scene_generation_prompts.append("")
while len(st.session_state.generated_scene_assets_info) <= scene_idx: st.session_state.generated_scene_assets_info.append(None)
st.session_state.scene_generation_prompts[scene_idx] = motion_gen_prompt_text if generate_as_video_clip_final else image_gen_prompt_text
filename_base_for_asset = f"scene_{scene_data_item_for_asset.get('scene_number', scene_idx+1)}_asset_v{version_num}"
runway_dur_for_scene = scene_data_item_for_asset.get('video_clip_duration_estimate_secs_감독', scene_data_item_for_asset.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS))
if runway_dur_for_scene <= 0 : runway_dur_for_scene = DEFAULT_SCENE_DURATION_SECS
asset_result_dict = st.session_state.visual_engine.generate_scene_asset(
image_generation_prompt_text=image_gen_prompt_text,
motion_prompt_text_for_video=motion_gen_prompt_text,
scene_data_dict=scene_data_item_for_asset, # <<< CHANGED 'scene_data' TO 'scene_data_dict'
scene_identifier_fn_base=filename_base_for_asset,
generate_as_video_clip_flag=generate_as_video_clip_final, # Renamed for clarity
runway_target_dur_val=runway_dur_for_scene # Renamed for clarity
)
st.session_state.generated_scene_assets_info[scene_idx] = asset_result_dict
if asset_result_dict and asset_result_dict.get('prompt_used'): st.session_state.scene_generation_prompts[scene_idx] = asset_result_dict['prompt_used']
if asset_result_dict and not asset_result_dict['error'] and asset_result_dict.get('path') and os.path.exists(asset_result_dict['path']):
logger.info(f"Asset ({asset_result_dict.get('type')}) generated for S{scene_data_item_for_asset.get('scene_number', scene_idx+1)}: {os.path.basename(asset_result_dict['path'])}")
return True
else:
err_msg_asset = asset_result_dict.get('error_message', 'Unknown error') if asset_result_dict else 'Asset result is None'
logger.warning(f"Asset gen FAILED for S{scene_data_item_for_asset.get('scene_number', scene_idx+1)}. Type: {'Video' if generate_as_video_clip_final else 'Image'}. Err: {err_msg_asset}")
if not st.session_state.generated_scene_assets_info[scene_idx] or not st.session_state.generated_scene_assets_info[scene_idx]['error']:
st.session_state.generated_scene_assets_info[scene_idx] = {'path': None, 'type': 'none', 'error': True, 'error_message': err_msg_asset, 'prompt_used': st.session_state.scene_generation_prompts[scene_idx]}
return False
# --- UI Sidebar ---
with st.sidebar:
st.title("🎬 CineGen AI Ultra+")
st.markdown("### Creative Seed")
user_idea_input = st.text_area("Core Story Idea / Theme:", "A lone wanderer searches for a mythical oasis in a vast, post-apocalyptic desert, haunted by mirages and mechanical scavengers.", height=120, key="user_idea_main_v5_sb_unique")
genre_selection = st.selectbox("Primary Genre:", ["Cyberpunk", "Sci-Fi", "Fantasy", "Noir", "Thriller", "Western", "Post-Apocalyptic", "Historical Drama", "Surreal"], index=6, key="genre_main_v5_sb_unique")
mood_selection = st.selectbox("Overall Mood:", ["Hopeful yet Desperate", "Mysterious & Eerie", "Gritty & Tense", "Epic & Awe-Inspiring", "Melancholy & Reflective", "Whimsical & Lighthearted"], index=0, key="mood_main_v5_sb_unique")
num_scenes_slider = st.slider("Number of Key Scenes:", 1, 10, 1, key="num_scenes_main_v5_sb_unique") # Default to 1 for faster testing
creative_guidance_options_map = {"Standard Director": "standard", "Artistic Visionary": "more_artistic", "Experimental Storyteller": "experimental_narrative"}
selected_creative_guidance_key_sb = st.selectbox("AI Creative Director Style:", options=list(creative_guidance_options_map.keys()), key="creative_guidance_select_v5_sb_unique")
actual_creative_guidance_val = creative_guidance_options_map[selected_creative_guidance_key_sb]
if st.button("π Generate Cinematic Treatment", type="primary", key="generate_treatment_btn_v5_sb_unique", use_container_width=True):
initialize_new_project_state()
if not user_idea_input.strip(): st.warning("Please provide a story idea.")
else:
with st.status("AI Director is envisioning your masterpiece...", expanded=True) as status_op_main:
try:
status_op_main.write("Phase 1: Gemini crafting cinematic treatment... π"); logger.info("Phase 1: Cinematic Treatment Gen.")
treatment_gen_prompt_gemini = create_cinematic_treatment_prompt(user_idea_input, genre_selection, mood_selection, num_scenes_slider, actual_creative_guidance_val)
raw_treatment_result_list = st.session_state.gemini_handler.generate_story_breakdown(treatment_gen_prompt_gemini)
if not isinstance(raw_treatment_result_list, list) or not raw_treatment_result_list: raise ValueError("Gemini returned invalid scene list format.")
processed_scene_list_init = []
for scene_from_gemini_init in raw_treatment_result_list:
scene_from_gemini_init['user_shot_type'] = scene_from_gemini_init.get('PROACTIVE_camera_work_감독', DEFAULT_SHOT_TYPE)
gemini_dur_est_init = scene_from_gemini_init.get('video_clip_duration_estimate_secs_감독', 0)
scene_from_gemini_init['user_scene_duration_secs'] = gemini_dur_est_init if gemini_dur_est_init > 0 else DEFAULT_SCENE_DURATION_SECS
scene_from_gemini_init['user_selected_asset_type'] = "Auto (Director's Choice)"
processed_scene_list_init.append(scene_from_gemini_init)
st.session_state.story_treatment_scenes = processed_scene_list_init
num_generated_scenes_val = len(st.session_state.story_treatment_scenes)
st.session_state.scene_generation_prompts = [""]*num_generated_scenes_val; st.session_state.generated_scene_assets_info = [None]*num_generated_scenes_val
logger.info(f"Phase 1 complete. {num_generated_scenes_val} scenes."); status_op_main.update(label="Treatment complete! β
Generating visuals...", state="running")
status_op_main.write("Phase 2: Creating visual assets (Image/Video)... πΌοΈπ¬"); logger.info("Phase 2: Visual Asset Gen.")
successful_asset_count_val = 0
for i_scene_init, scene_data_item_init in enumerate(st.session_state.story_treatment_scenes):
scene_num_display_init = scene_data_item_init.get('scene_number', i_scene_init+1)
status_op_main.write(f" Asset for Scene {scene_num_display_init}..."); logger.info(f" Processing asset for Scene {scene_num_display_init}.")
if generate_asset_for_scene_wrapper(i_scene_init, scene_data_item_init, version_num=1): successful_asset_count_val += 1
status_label_phase2_val = "Visual assets ready! "; next_op_state_val = "running"
if successful_asset_count_val == 0 and num_generated_scenes_val > 0:
logger.error("Asset gen failed for all scenes."); status_label_phase2_val = "Asset gen FAILED for all scenes."; next_op_state_val="error";
status_op_main.update(label=status_label_phase2_val, state=next_op_state_val, expanded=True); st.stop()
elif successful_asset_count_val < num_generated_scenes_val: logger.warning(f"Assets partially generated ({successful_asset_count_val}/{num_generated_scenes_val})."); status_label_phase2_val = f"Assets partially generated ({successful_asset_count_val}/{num_generated_scenes_val}). "
status_op_main.update(label=f"{status_label_phase2_val}Generating narration script...", state=next_op_state_val)
if next_op_state_val == "error": st.stop()
status_op_main.write("Phase 3: Generating narration script..."); logger.info("Phase 3: Narration Script Gen.")
voice_style_for_narration_prompt_val = st.session_state.get("selected_voice_style_for_generation", "cinematic_trailer")
narration_gen_prompt_val = create_narration_script_prompt_enhanced(st.session_state.story_treatment_scenes, mood_selection, genre_selection, voice_style_for_narration_prompt_val)
st.session_state.narration_script_display = st.session_state.gemini_handler.generate_image_prompt(narration_gen_prompt_val)
logger.info("Narration script generated."); status_op_main.update(label="Narration script ready! Synthesizing voice...", state="running")
status_op_main.write("Phase 4: Synthesizing voice (ElevenLabs)... π"); logger.info("Phase 4: Voice Synthesis.")
st.session_state.overall_narration_audio_path = st.session_state.visual_engine.generate_narration_audio(st.session_state.narration_script_display)
final_status_label_val = "All components ready! Storyboard below. π"; final_op_state_val = "complete"
if not st.session_state.overall_narration_audio_path: final_status_label_val = f"{status_label_phase2_val}Storyboard ready (Voiceover skipped or failed)."; logger.warning("Voiceover generation skipped/failed.")
else: logger.info("Voiceover generated successfully.")
status_op_main.update(label=final_status_label_val, state=final_op_state_val, expanded=False)
except ValueError as ve_err_main: logger.error(f"ValueError in main gen: {ve_err_main}", exc_info=True); status_op_main.update(label=f"Input/Gemini response error: {ve_err_main}", state="error", expanded=True);
except Exception as e_unhandled_main: logger.error(f"Unhandled Exception in main gen: {e_unhandled_main}", exc_info=True); status_op_main.update(label=f"Unexpected error: {e_unhandled_main}", state="error", expanded=True);
with st.expander("Define Characters", expanded=False):
# ... (Keep as before)
char_name_input_sidebar = st.text_input("Character Name", key="char_name_sidebar_unique"); char_desc_input_sidebar = st.text_area("Visual Description", key="char_desc_sidebar_unique", height=100)
if st.button("Save Character", key="add_char_sidebar_unique"):
if char_name_input_sidebar and char_desc_input_sidebar: st.session_state.character_definitions[char_name_input_sidebar.strip().lower()] = char_desc_input_sidebar.strip(); st.success(f"Char '{char_name_input_sidebar.strip()}' saved.")
else: st.warning("Name and description needed.")
if st.session_state.character_definitions: st.caption("Current Characters:"); [st.markdown(f"**{k.title()}:** _{v}_") for k,v in st.session_state.character_definitions.items()]
with st.expander("Global Style Overrides", expanded=False):
# ... (Keep as before)
style_presets_dict_sidebar = { "Default (Director's Choice)": "", "Hyper-Realistic Gritty Noir": "hyper-realistic gritty neo-noir...", "Surreal Dreamscape Fantasy": "surreal dreamscape...", "Vintage Analog Sci-Fi": "70s/80s analog sci-fi..."}
selected_style_key_sidebar = st.selectbox("Base Style Preset:", options=list(style_presets_dict_sidebar.keys()), key="style_preset_sidebar_unique")
custom_kw_input_sidebar = st.text_area("Additional Custom Style Keywords:", key="custom_style_sidebar_unique", height=80)
current_global_style_val_sidebar = st.session_state.global_style_additions
if st.button("Apply Global Styles", key="apply_styles_sidebar_unique"):
final_style_str_sidebar = style_presets_dict_sidebar[selected_style_key_sidebar];
if custom_kw_input_sidebar.strip(): final_style_str_sidebar = f"{final_style_str_sidebar}, {custom_kw_input_sidebar.strip()}" if final_style_str_sidebar else custom_kw_input_sidebar.strip()
st.session_state.global_style_additions = final_style_str_sidebar.strip(); current_global_style_val_sidebar = final_style_str_sidebar.strip()
if current_global_style_val_sidebar: st.success("Global styles applied!")
else: st.info("Global style additions cleared.")
if current_global_style_val_sidebar: st.caption(f"Active: \"{current_global_style_val_sidebar}\"")
with st.expander("Voice & Narration Style", expanded=False):
# ... (Keep as before)
engine_default_voice_val = "Rachel"
if hasattr(st.session_state, 'visual_engine') and st.session_state.visual_engine: engine_default_voice_val = st.session_state.visual_engine.elevenlabs_voice_id
user_voice_id_override_input = st.text_input("ElevenLabs Voice ID (override):", value=engine_default_voice_val, key="el_voice_id_sidebar_unique")
narration_styles_map_sidebar = {"Cinematic Trailer": "cinematic_trailer", "Neutral Documentary": "documentary_neutral", "Character Introspection": "introspective_character"}
selected_narration_style_key_sidebar = st.selectbox("Narration Script Style:", list(narration_styles_map_sidebar.keys()), key="narr_style_sidebar_unique", index=0)
if st.button("Set Narrator Voice & Style", key="set_voice_btn_sidebar_unique"):
final_voice_id_el_sidebar = user_voice_id_override_input.strip() or st.session_state.get("ELEVENLABS_VOICE_ID_CONFIG", "Rachel")
if hasattr(st.session_state, 'visual_engine'): st.session_state.visual_engine.elevenlabs_voice_id = final_voice_id_el_sidebar
st.session_state.selected_voice_style_for_generation = narration_styles_map_sidebar[selected_narration_style_key_sidebar]
st.success(f"Narrator Voice ID: {final_voice_id_el_sidebar}. Script Style: {selected_narration_style_key_sidebar}")
logger.info(f"User updated 11L Voice ID: {final_voice_id_el_sidebar}, Script Style: {selected_narration_style_key_sidebar}")
# --- Main Content Area ---
st.header("🎬 Cinematic Storyboard & Treatment")
if st.session_state.narration_script_display:
with st.expander("π View Full Narration Script", expanded=False): st.markdown(f"> _{st.session_state.narration_script_display}_")
if not st.session_state.story_treatment_scenes: st.info("Use the sidebar to generate your cinematic treatment.")
else:
for i_main_loop_content, scene_content_item_display in enumerate(st.session_state.story_treatment_scenes): # Renamed loop variables
scene_num_display_val = scene_content_item_display.get('scene_number', i_main_loop_content + 1)
scene_title_display_val = scene_content_item_display.get('scene_title', 'Untitled Scene')
key_base_for_widgets_main = f"s{scene_num_display_val}_{''.join(filter(str.isalnum, scene_title_display_val[:10]))}_main_content_{i_main_loop_content}" # Renamed for uniqueness
if "director_note" in scene_content_item_display and scene_content_item_display['director_note']: st.info(f"π¬ Director Note S{scene_num_display_val}: {scene_content_item_display['director_note']}")
st.subheader(f"SCENE {scene_num_display_val}: {scene_title_display_val.upper()}"); col_treatment_main, col_visual_main = st.columns([0.45, 0.55])
with col_treatment_main:
with st.expander("π Scene Treatment & Controls", expanded=True):
# (Display textual scene details - as before)
st.markdown(f"**Beat:** {scene_content_item_display.get('emotional_beat', 'N/A')}"); st.markdown(f"**Setting:** {scene_content_item_display.get('setting_description', 'N/A')}"); st.markdown(f"**Chars:** {', '.join(scene_content_item_display.get('characters_involved', ['N/A']))}"); st.markdown(f"**Focus Moment:** _{scene_content_item_display.get('character_focus_moment', 'N/A')}_"); st.markdown(f"**Plot Beat:** {scene_content_item_display.get('key_plot_beat', 'N/A')}"); st.markdown(f"**Dialogue Hook:** `\"{scene_content_item_display.get('suggested_dialogue_hook', '...')}\"`"); st.markdown("---"); st.markdown(f"**Dir. Visual Style:** _{scene_content_item_display.get('PROACTIVE_visual_style_κ°λ
', 'N/A')}_"); st.markdown(f"**Dir. Camera:** _{scene_content_item_display.get('PROACTIVE_camera_work_κ°λ
', 'N/A')}_"); st.markdown(f"**Dir. Sound:** _{scene_content_item_display.get('PROACTIVE_sound_design_κ°λ
', 'N/A')}_"); st.markdown("---")
st.markdown("##### Shot, Pacing & Asset Controls")
# (Shot Type, Scene Duration, Asset Type Override selectboxes - as before)
current_user_shot_type_widget = st.session_state.story_treatment_scenes[i_main_loop_content].get('user_shot_type', DEFAULT_SHOT_TYPE)
try: shot_type_idx_widget = SHOT_TYPES_OPTIONS.index(current_user_shot_type_widget)
except ValueError: shot_type_idx_widget = SHOT_TYPES_OPTIONS.index(DEFAULT_SHOT_TYPE)
new_user_shot_type_widget = st.selectbox("Dominant Shot Type:", options=SHOT_TYPES_OPTIONS, index=shot_type_idx_widget, key=f"shot_type_widget_{key_base_for_widgets_main}")
if new_user_shot_type_widget != current_user_shot_type_widget: st.session_state.story_treatment_scenes[i_main_loop_content]['user_shot_type'] = new_user_shot_type_widget
current_user_duration_widget = st.session_state.story_treatment_scenes[i_main_loop_content].get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)
new_user_duration_widget = st.number_input("Scene Duration (seconds):", min_value=1, max_value=300, value=current_user_duration_widget, step=1, key=f"duration_widget_{key_base_for_widgets_main}")
if new_user_duration_widget != current_user_duration_widget: st.session_state.story_treatment_scenes[i_main_loop_content]['user_scene_duration_secs'] = new_user_duration_widget
current_user_asset_type_widget = st.session_state.story_treatment_scenes[i_main_loop_content].get('user_selected_asset_type', "Auto (Director's Choice)")
try: asset_type_idx_widget = ASSET_TYPE_OPTIONS.index(current_user_asset_type_widget)
except ValueError: asset_type_idx_widget = 0
new_user_asset_type_widget = st.selectbox("Asset Type Override:", ASSET_TYPE_OPTIONS, index=asset_type_idx_widget, key=f"asset_type_sel_{key_base_for_widgets_main}", help="Choose asset type. 'Auto' uses AI suggestion.")
if new_user_asset_type_widget != current_user_asset_type_widget: st.session_state.story_treatment_scenes[i_main_loop_content]['user_selected_asset_type'] = new_user_asset_type_widget
st.markdown("---")
prompt_for_current_asset_display = st.session_state.scene_generation_prompts[i_main_loop_content] if i_main_loop_content < len(st.session_state.scene_generation_prompts) else None
if prompt_for_current_asset_display:
with st.popover("👁️ View Asset Generation Prompt"): st.markdown(f"**Prompt used:**"); st.code(prompt_for_current_asset_display, language='text')
pexels_query_display_val = scene_content_item_display.get('pexels_search_query_감독', None)
if pexels_query_display_val: st.caption(f"Pexels Fallback: `{pexels_query_display_val}`")
with col_visual_main:
# (Display logic for different asset types - as before)
asset_info_for_display = st.session_state.generated_scene_assets_info[i_main_loop_content] if i_main_loop_content < len(st.session_state.generated_scene_assets_info) else None
if asset_info_for_display and not asset_info_for_display.get('error') and asset_info_for_display.get('path') and os.path.exists(asset_info_for_display['path']):
path_to_asset_display = asset_info_for_display['path']; type_of_asset_display = asset_info_for_display.get('type', 'image')
if type_of_asset_display == 'image': st.image(path_to_asset_display, caption=f"S{scene_num_display_val} ({type_of_asset_display}): {scene_title_display_val}")
elif type_of_asset_display == 'video':
try:
with open(path_to_asset_display, 'rb') as vf_read_main: video_bytes_content_main = vf_read_main.read()
st.video(video_bytes_content_main, format="video/mp4", start_time=0); st.caption(f"S{scene_num_display_val} ({type_of_asset_display}): {scene_title_display_val}")
except Exception as e_vid_disp_main: st.error(f"Error displaying video {path_to_asset_display}: {e_vid_disp_main}"); logger.error(f"Error displaying video: {e_vid_disp_main}", exc_info=True)
else: st.warning(f"Unknown asset type '{type_of_asset_display}' for S{scene_num_display_val}.")
else:
if st.session_state.story_treatment_scenes:
error_msg_display_main = asset_info_for_display.get('error_message', 'Visual pending/failed.') if asset_info_for_display else 'Visual pending/failed.'
st.caption(error_msg_display_main)
with st.popover(f"βοΈ Edit S{scene_num_display_val} Treatment"):
feedback_for_treatment_input = st.text_area("Changes to treatment:", key=f"treat_fb_{key_base_for_widgets_main}", height=150)
if st.button(f"π Update S{scene_num_display_val} Treatment", key=f"regen_treat_btn_{key_base_for_widgets_main}"):
if feedback_for_treatment_input:
with st.status(f"Updating S{scene_num_display_val} Treatment & Asset...", expanded=True) as status_treat_regen_main:
user_shot_type_pref_treat = st.session_state.story_treatment_scenes[i_main_loop_content]['user_shot_type']
user_duration_pref_treat = st.session_state.story_treatment_scenes[i_main_loop_content]['user_scene_duration_secs']
user_asset_type_pref_treat = st.session_state.story_treatment_scenes[i_main_loop_content]['user_selected_asset_type']
regen_prompt_gemini_treat = create_scene_regeneration_prompt(scene_content_item_display, feedback_for_treatment_input, st.session_state.story_treatment_scenes)
try:
updated_scene_data_from_gemini_treat = st.session_state.gemini_handler.regenerate_scene_script_details(regen_prompt_gemini_treat)
final_updated_scene_data_treat = {**updated_scene_data_from_gemini_treat}
final_updated_scene_data_treat['user_shot_type'] = user_shot_type_pref_treat; final_updated_scene_data_treat['user_scene_duration_secs'] = user_duration_pref_treat; final_updated_scene_data_treat['user_selected_asset_type'] = user_asset_type_pref_treat
st.session_state.story_treatment_scenes[i_main_loop_content] = final_updated_scene_data_treat
status_treat_regen_main.update(label="Treatment updated! Regenerating asset...", state="running")
version_num_asset_treat = 1
if asset_info_for_display and asset_info_for_display.get('path') and os.path.exists(asset_info_for_display['path']):
try: base_fn_treat,_=os.path.splitext(os.path.basename(asset_info_for_display['path'])); version_num_asset_treat = int(base_fn_treat.split('_v')[-1])+1 if '_v' in base_fn_treat else 2
except: version_num_asset_treat = 2
if generate_asset_for_scene_wrapper(i_main_loop_content, final_updated_scene_data_treat, version_num=version_num_asset_treat, user_selected_asset_type_override=user_asset_type_pref_treat): status_treat_regen_main.update(label="Treatment & Asset Updated! π", state="complete", expanded=False)
else: status_treat_regen_main.update(label="Treatment updated, asset failed.", state="complete", expanded=False)
st.rerun()
except Exception as e_treat_regen_loop: status_treat_regen_main.update(label=f"Error: {e_treat_regen_loop}", state="error"); logger.error(f"Scene treatment regen error: {e_treat_regen_loop}", exc_info=True)
else: st.warning("Please provide feedback for treatment.")
with st.popover(f"π¨ Edit S{scene_num_display_val} Visual Prompt/Asset"):
current_gen_prompt_edit_display = st.session_state.scene_generation_prompts[i_main_loop_content] if i_main_loop_content < len(st.session_state.scene_generation_prompts) else "No prompt."
st.caption("Current Asset Generation Prompt:"); st.code(current_gen_prompt_edit_display, language='text')
feedback_for_visual_input = st.text_area("Describe changes for the visual asset:", key=f"visual_fb_{key_base_for_widgets_main}", height=150)
if st.button(f"π Update S{scene_num_display_val} Asset", key=f"regen_visual_btn_{key_base_for_widgets_main}"):
if feedback_for_visual_input:
with st.status(f"Refining prompt & asset for S{scene_num_display_val}...", expanded=True) as status_visual_regen_main:
user_asset_type_choice_viz = st.session_state.story_treatment_scenes[i_main_loop_content]['user_selected_asset_type']
is_video_type_for_regen = (user_asset_type_choice_viz == "Video Clip") or (user_asset_type_choice_viz == "Auto (Director's Choice)" and scene_content_item_display.get('suggested_asset_type_감독') == 'video_clip')
newly_constructed_asset_prompt_regen = ""
if not is_video_type_for_regen:
gemini_refinement_prompt_viz = create_visual_regeneration_prompt(current_gen_prompt_edit_display, feedback_for_visual_input, scene_content_item_display, st.session_state.character_definitions, st.session_state.global_style_additions)
try: newly_constructed_asset_prompt_regen = st.session_state.gemini_handler.refine_image_prompt_from_feedback(gemini_refinement_prompt_viz); st.session_state.scene_generation_prompts[i_main_loop_content] = newly_constructed_asset_prompt_regen; status_visual_regen_main.update(label="Image prompt refined! Regenerating asset...", state="running")
except Exception as e_gemini_refine_viz: status_visual_regen_main.update(label=f"Error refining prompt: {e_gemini_refine_viz}", state="error"); logger.error(f"Visual prompt refinement error: {e_gemini_refine_viz}", exc_info=True); continue
else:
logger.info(f"Reconstructing video motion prompt for S{scene_num_display_val}. Feedback (indirect): {feedback_for_visual_input}")
newly_constructed_asset_prompt_regen = construct_text_to_video_prompt_for_gen4(scene_content_item_display, st.session_state.global_style_additions); st.session_state.scene_generation_prompts[i_main_loop_content] = newly_constructed_asset_prompt_regen; status_visual_regen_main.update(label="Video prompt reconstructed! Regenerating asset...", state="running")
if not newly_constructed_asset_prompt_regen: status_visual_regen_main.update(label="Prompt construction failed.", state="error"); continue
version_num_viz_asset_regen = 1
if asset_info_for_display and asset_info_for_display.get('path') and os.path.exists(asset_info_for_display['path']):
try: base_fn_viz_regen,_=os.path.splitext(os.path.basename(asset_info_for_display['path'])); version_num_viz_asset_regen = int(base_fn_viz_regen.split('_v')[-1])+1 if '_v' in base_fn_viz_regen else 2
except: version_num_viz_asset_regen = 2
if generate_asset_for_scene_wrapper(i_main_loop_content, st.session_state.story_treatment_scenes[i_main_loop_content], version_num=version_num_viz_asset_regen, user_selected_asset_type_override=user_asset_type_choice_viz): status_visual_regen_main.update(label="Asset Updated! π", state="complete", expanded=False)
else: status_visual_regen_main.update(label="Prompt updated, asset regen failed.", state="complete", expanded=False)
st.rerun()
else: st.warning("Please provide feedback for visual asset.")
st.markdown("---")
if st.session_state.story_treatment_scenes and any(asset_info_item_loop and not asset_info_item_loop.get('error') and asset_info_item_loop.get('path') for asset_info_item_loop in st.session_state.generated_scene_assets_info if asset_info_item_loop is not None):
if st.button("π¬ Assemble Narrated Cinematic Animatic", key="assemble_video_main_btn_unique", type="primary", use_container_width=True):
with st.status("Assembling Ultra Animatic...", expanded=True) as status_vid_assembly_main:
assets_for_final_vid_assembly = []
for i_vid_assembly_loop, scene_data_for_vid_loop in enumerate(st.session_state.story_treatment_scenes):
asset_info_curr_scene_vid = st.session_state.generated_scene_assets_info[i_vid_assembly_loop] if i_vid_assembly_loop < len(st.session_state.generated_scene_assets_info) else None
if asset_info_curr_scene_vid and not asset_info_curr_scene_vid.get('error') and asset_info_curr_scene_vid.get('path') and os.path.exists(asset_info_curr_scene_vid['path']):
assets_for_final_vid_assembly.append({'path': asset_info_curr_scene_vid['path'], 'type': asset_info_curr_scene_vid.get('type', 'image'), 'scene_num': scene_data_for_vid_loop.get('scene_number', i_vid_assembly_loop + 1), 'key_action': scene_data_for_vid_loop.get('key_plot_beat', ''), 'duration': scene_data_for_vid_loop.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)})
status_vid_assembly_main.write(f"Adding S{scene_data_for_vid_loop.get('scene_number', i_vid_assembly_loop + 1)} ({asset_info_curr_scene_vid.get('type')}).")
else: logger.warning(f"Skipping S{scene_data_for_vid_loop.get('scene_number', i_vid_assembly_loop+1)} for video: No valid asset.")
if assets_for_final_vid_assembly:
status_vid_assembly_main.write("Calling video engine...");
st.session_state.video_path = st.session_state.visual_engine.assemble_animatic_from_assets(asset_data_list=assets_for_final_vid_assembly, overall_narration_path=st.session_state.overall_narration_audio_path, output_filename="cinegen_ultra_animatic.mp4", fps=24)
if st.session_state.video_path and os.path.exists(st.session_state.video_path): status_vid_assembly_main.update(label="Ultra animatic assembled! π", state="complete", expanded=False); st.balloons()
else: status_vid_assembly_main.update(label="Video assembly failed. Check logs.", state="error", expanded=False); logger.error("Video assembly returned None or file does not exist.")
else: status_vid_assembly_main.update(label="No valid assets for video assembly.", state="error", expanded=False); logger.warning("No valid assets found for video assembly.")
elif st.session_state.story_treatment_scenes: st.info("Generate visual assets before assembling the animatic.")
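# --- Final Animatic Display & Download ---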
if st.session_state.video_path and os.path.exists(st.session_state.video_path):
st.header("🎬 Generated Cinematic Animatic");
try:
with open(st.session_state.video_path, 'rb') as vf_obj_read_final: video_bytes_final_content = vf_obj_read_final.read()
st.video(video_bytes_final_content, format="video/mp4")
st.download_button(label="Download Ultra Animatic", data=video_bytes_final_content, file_name=os.path.basename(st.session_state.video_path), mime="video/mp4", use_container_width=True, key="download_video_main_btn_unique" )
except Exception as e_vid_final_disp_main: st.error(f"Error displaying final video: {e_vid_final_disp_main}"); logger.error(f"Error displaying final video: {e_vid_final_disp_main}", exc_info=True)
st.sidebar.markdown("---"); st.sidebar.caption("CineGen AI Ultra+ | Visionary Cinematic Pre-Production") |