mgbam committed
Commit 1c22261 · verified · 1 Parent(s): b2b5e0a

Update app.py

Files changed (1)
  1. app.py +175 -212
app.py CHANGED
@@ -3,29 +3,22 @@ import streamlit as st
3
  import os
4
  import logging
5
 
6
- # --- Streamlit PermissionError Mitigation Attempts (Python level) ---
7
- # These are fallbacks; Dockerfile ENV STREAMLIT_HOME is the primary fix for '/.streamlit'
8
  if "STREAMLIT_CLIENT_GATHER_USAGE_STATS" not in os.environ:
9
  os.environ["STREAMLIT_CLIENT_GATHER_USAGE_STATS"] = "false"
10
  print("INFO: app.py - Disabled Streamlit client usage stats gathering via env var.")
11
-
12
- if "STREAMLIT_BROWSER_GATHERUSAGESTATS" not in os.environ: # Another potential flag
13
  os.environ["STREAMLIT_BROWSER_GATHERUSAGESTATS"] = "false"
14
  print("INFO: app.py - Set STREAMLIT_BROWSER_GATHERUSAGESTATS to false.")
15
-
16
- # If running in /app and STREAMLIT_HOME isn't set, try to define a writable one.
17
- # The Dockerfile's ENV STREAMLIT_HOME should ideally handle this.
18
- streamlit_home_path_app = "/app/.streamlit_cai_config" # Using a unique name
19
  if "STREAMLIT_HOME" not in os.environ and os.getcwd().startswith("/app"):
20
  os.environ["STREAMLIT_HOME"] = streamlit_home_path_app
21
  try:
22
  os.makedirs(streamlit_home_path_app, exist_ok=True)
23
- # In Docker, this path should already be writable by the app user if Dockerfile is set up correctly.
24
  print(f"INFO: app.py - Set STREAMLIT_HOME to: {streamlit_home_path_app}")
25
  except Exception as e_mkdir_sh:
26
  print(f"WARNING: app.py - Could not create STREAMLIT_HOME '{streamlit_home_path_app}': {e_mkdir_sh}")
27
 
28
-
29
  from core.gemini_handler import GeminiHandler
30
  from core.visual_engine import VisualEngine
31
  from core.prompt_engineering import (
@@ -37,40 +30,33 @@ from core.prompt_engineering import (
37
  create_visual_regeneration_prompt
38
  )
39
 
40
- # --- Page Config & Logging ---
41
  st.set_page_config(page_title="CineGen AI Ultra+", layout="wide", initial_sidebar_state="expanded")
42
- # Configure logging (level can be adjusted for production vs. development)
43
  logging.basicConfig(
44
- level=logging.DEBUG, # DEBUG for development, INFO for production
45
  format='%(asctime)s - %(name)s [%(levelname)s] - %(message)s (%(module)s.%(funcName)s:%(lineno)d)'
46
  )
47
  logger = logging.getLogger(__name__)
48
 
49
- # --- Global Definitions ---
50
  SHOT_TYPES_OPTIONS = [ "Director's Choice", "Establishing Shot", "Long Shot", "Full Shot", "Medium Long Shot (Cowboy)", "Medium Shot", "Medium Close-up", "Close-up", "Extreme Close-up", "Point of View (POV)", "Over the Shoulder", "Tracking Shot", "Dolly Zoom", "Crane Shot", "Aerial Shot", "Static Shot", "Dutch Angle", "Whip Pan"]
51
  DEFAULT_SCENE_DURATION_SECS = 5
52
  DEFAULT_SHOT_TYPE = "Director's Choice"
53
  ASSET_TYPE_OPTIONS = ["Auto (Director's Choice)", "Image", "Video Clip"]
54
 
55
- # --- API Key Loading ---
56
  def load_api_key(key_name_streamlit, key_name_env, service_name):
57
- key_value = None
58
- secrets_available = hasattr(st, 'secrets')
59
  try:
60
  if secrets_available and key_name_streamlit in st.secrets:
61
- key_value = st.secrets.get(key_name_streamlit) # Use .get for safety
62
  if key_value: logger.info(f"API Key for {service_name} found in Streamlit secrets.")
63
- except Exception as e_secrets: logger.warning(f"Could not access st.secrets for {key_name_streamlit} ({service_name}): {e_secrets}")
64
  if not key_value and key_name_env in os.environ:
65
- key_value = os.environ.get(key_name_env) # Use .get for safety
66
  if key_value: logger.info(f"API Key for {service_name} found in env var '{key_name_env}'.")
67
- if not key_value: logger.warning(f"API Key for {service_name} (Key: {key_name_streamlit}/{key_name_env}) NOT FOUND. Service may be disabled.")
68
  return key_value
69
 
70
- # --- Service Initialization (Singleton Pattern using Session State) ---
71
- if 'services_initialized_flag' not in st.session_state: # Renamed flag for clarity
72
  logger.info("APP_INIT: Initializing services and API keys...")
73
- # Load all keys first
74
  st.session_state.API_KEY_GEMINI = load_api_key("GEMINI_API_KEY", "GEMINI_API_KEY", "Gemini")
75
  st.session_state.API_KEY_OPENAI = load_api_key("OPENAI_API_KEY", "OPENAI_API_KEY", "OpenAI/DALL-E")
76
  st.session_state.API_KEY_ELEVENLABS = load_api_key("ELEVENLABS_API_KEY", "ELEVENLABS_API_KEY", "ElevenLabs")
@@ -78,86 +64,65 @@ if 'services_initialized_flag' not in st.session_state: # Renamed flag for clari
78
  st.session_state.CONFIG_ELEVENLABS_VOICE_ID = load_api_key("ELEVENLABS_VOICE_ID", "ELEVENLABS_VOICE_ID", "ElevenLabs Voice ID")
79
  st.session_state.API_KEY_RUNWAYML = load_api_key("RUNWAY_API_KEY", "RUNWAY_API_KEY", "RunwayML")
80
 
81
- # Critical services check
82
- if not st.session_state.API_KEY_GEMINI:
83
- st.error("CRITICAL FAILURE: Gemini API Key is missing! Application cannot function without it."); logger.critical("Gemini API Key missing."); st.stop()
84
-
85
- # Initialize Gemini Handler
86
  try:
87
- st.session_state.gemini_service_handler = GeminiHandler(api_key=st.session_state.API_KEY_GEMINI) # Renamed
88
- logger.info("APP_INIT: GeminiHandler component initialized successfully.")
89
- except Exception as e_gem_init:
90
- st.error(f"CRITICAL FAILURE: Failed to initialize GeminiHandler: {e_gem_init}"); logger.critical(f"GeminiHandler init failed: {e_gem_init}", exc_info=True); st.stop()
91
-
92
- # Initialize Visual Engine
93
- try:
94
- el_default_voice = "Rachel" # Hardcoded fallback default
95
- el_resolved_voice_id = st.session_state.CONFIG_ELEVENLABS_VOICE_ID or el_default_voice
96
-
97
- # <<< THIS IS WHERE VisualEngine IS INSTANTIATED >>>
98
- st.session_state.visual_content_engine = VisualEngine( # Renamed
99
- output_dir="temp_cinegen_media",
100
- default_elevenlabs_voice_id=el_resolved_voice_id
101
- )
102
- # Set keys for individual services within VisualEngine
103
  st.session_state.visual_content_engine.set_openai_api_key(st.session_state.API_KEY_OPENAI)
104
  st.session_state.visual_content_engine.set_elevenlabs_api_key(st.session_state.API_KEY_ELEVENLABS, voice_id_from_secret=st.session_state.CONFIG_ELEVENLABS_VOICE_ID)
105
  st.session_state.visual_content_engine.set_pexels_api_key(st.session_state.API_KEY_PEXELS)
106
  st.session_state.visual_content_engine.set_runway_api_key(st.session_state.API_KEY_RUNWAYML)
107
- logger.info("APP_INIT: VisualEngine component initialized and all sub-service API keys configured.")
108
- except Exception as e_vis_eng_init:
109
- st.error(f"CRITICAL FAILURE: Failed to initialize VisualEngine: {e_vis_eng_init}"); logger.critical(f"VisualEngine init or key setting failed: {e_vis_eng_init}", exc_info=True); st.warning("VisualEngine encountered a critical setup issue. Many features will be disabled or will fail."); st.stop()
110
-
111
- st.session_state.services_initialized_flag = True
112
- logger.info("APP_INIT: All services initialized successfully.")
113
-
114
- # Initialize project-specific session state variables
115
- for project_ss_key, project_ss_default_val in [
116
- ('project_treatment_scenes_list', []),
117
- ('project_scene_generation_prompts_list', []),
118
- ('project_generated_assets_info_list', []),
119
- ('project_final_video_path', None),
120
- ('project_character_definitions_map', {}),
121
- ('project_global_style_keywords_str', ""),
122
- ('project_narration_audio_filepath', None),
123
- ('project_narration_script_text', "")
124
- ]:
125
- if project_ss_key not in st.session_state: st.session_state[project_ss_key] = project_ss_default_val
126
 
127
  def initialize_new_project_data_in_session():
128
- st.session_state.project_treatment_scenes_list = []
 
129
  st.session_state.project_scene_generation_prompts_list = []
130
  st.session_state.project_generated_assets_info_list = []
131
  st.session_state.project_final_video_path = None
132
- st.session_state.project_overall_narration_audio_path = None # Corrected key
133
- st.session_state.project_narration_script_text = ""
134
- logger.info("PROJECT_DATA: New project data initialized (treatment, assets, narration). Character defs and global styles persist.")
135
 
136
- # --- Asset Generation Wrapper ---
137
  def generate_asset_for_scene_in_app(scene_idx_num, scene_data_obj, asset_ver_num=1, user_asset_type_choice_ui="Auto (Director's Choice)"):
138
- logger.debug(f"APP: generate_asset_for_scene_in_app called for scene index {scene_idx_num}, version {asset_ver_num}, user type: {user_asset_type_choice_ui}")
139
-
140
  final_decision_generate_as_video = False
141
  gemini_suggested_type = scene_data_obj.get('suggested_asset_type_감독', 'image').lower()
142
-
143
  if user_asset_type_choice_ui == "Image": final_decision_generate_as_video = False
144
  elif user_asset_type_choice_ui == "Video Clip": final_decision_generate_as_video = True
145
  elif user_asset_type_choice_ui == "Auto (Director's Choice)": final_decision_generate_as_video = (gemini_suggested_type == "video_clip")
146
- logger.debug(f"APP: Final decision for asset type: {'Video' if final_decision_generate_as_video else 'Image'}")
147
 
148
  prompt_for_base_img_gen = construct_dalle_prompt(scene_data_obj, st.session_state.project_character_definitions_map, st.session_state.project_global_style_keywords_str)
149
- prompt_for_runway_motion = ""
150
  if final_decision_generate_as_video:
151
- prompt_for_runway_motion = construct_text_to_video_prompt_for_gen4(scene_data_obj, st.session_state.project_global_style_keywords_str)
152
- if not prompt_for_runway_motion: prompt_for_runway_motion = scene_data_obj.get('video_clip_motion_description_감독', "subtle ambient cinematic motion"); logger.warning(f"S{scene_data_obj.get('scene_number', scene_idx_num+1)}: Empty motion prompt, using default for Runway.")
153
-
154
  if not prompt_for_base_img_gen: logger.error(f"Base image prompt construction failed for S{scene_data_obj.get('scene_number', scene_idx_num+1)}"); return False
155
 
156
- # Ensure session state lists are long enough
157
  while len(st.session_state.project_scene_generation_prompts_list) <= scene_idx_num: st.session_state.project_scene_generation_prompts_list.append("")
158
  while len(st.session_state.project_generated_assets_info_list) <= scene_idx_num: st.session_state.project_generated_assets_info_list.append(None)
159
-
160
- st.session_state.project_scene_generation_prompts_list[scene_idx_num] = prompt_for_runway_motion if final_decision_generate_as_video else prompt_for_base_img_gen
161
 
162
  filename_base_for_output_asset = f"scene_{scene_data_obj.get('scene_number', scene_idx_num+1)}_asset_v{asset_ver_num}"
163
  duration_for_rwy_vid = scene_data_obj.get('video_clip_duration_estimate_secs_감독', scene_data_obj.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS))
@@ -165,51 +130,56 @@ def generate_asset_for_scene_in_app(scene_idx_num, scene_data_obj, asset_ver_num
165
 
166
  generated_asset_info_dict = st.session_state.visual_content_engine.generate_scene_asset(
167
  image_generation_prompt_text=prompt_for_base_img_gen,
168
- motion_prompt_text_for_video=prompt_for_runway_motion,
169
- scene_data_dict=scene_data_obj, # <<< CORRECTED KEYWORD ARGUMENT NAME
170
  scene_identifier_fn_base=filename_base_for_output_asset,
171
  generate_as_video_clip_flag=final_decision_generate_as_video,
172
  runway_target_dur_val=duration_for_rwy_vid
173
  )
174
  st.session_state.project_generated_assets_info_list[scene_idx_num] = generated_asset_info_dict
175
- if generated_asset_info_dict and generated_asset_info_dict.get('prompt_used') and \
176
- st.session_state.project_scene_generation_prompts_list[scene_idx_num] != generated_asset_info_dict['prompt_used']:
177
  st.session_state.project_scene_generation_prompts_list[scene_idx_num] = generated_asset_info_dict['prompt_used']
178
 
179
  if generated_asset_info_dict and not generated_asset_info_dict['error'] and generated_asset_info_dict.get('path') and os.path.exists(generated_asset_info_dict['path']):
180
  logger.info(f"APP: Asset ({generated_asset_info_dict.get('type')}) generated for S{scene_data_obj.get('scene_number', scene_idx_num+1)}: {os.path.basename(generated_asset_info_dict['path'])}")
181
  return True
182
  else:
183
- error_msg_from_asset_gen = generated_asset_info_dict.get('error_message', 'Unknown error') if generated_asset_info_dict else 'Asset result dictionary is None'
184
- logger.warning(f"APP: Asset gen FAILED for S{scene_data_obj.get('scene_number', scene_idx_num+1)}. Type: {'Video' if final_decision_generate_as_video else 'Image'}. Err: {error_msg_from_asset_gen}")
185
  current_prompt = st.session_state.project_scene_generation_prompts_list[scene_idx_num]
186
  st.session_state.project_generated_assets_info_list[scene_idx_num] = {'path': None, 'type': 'none', 'error': True, 'error_message': error_msg_from_asset_gen, 'prompt_used': current_prompt}
187
  return False
188
 
189
  # --- Sidebar UI ---
190
  with st.sidebar:
191
- st.image("assets/logo.png", width=150) # Display logo
192
- st.title("CineGen AI Ultra+")
193
  st.markdown("### Creative Seed")
194
- ui_user_idea = st.text_area("Core Story Idea / Theme:", "A lone wanderer searches for a mythical oasis in a vast, post-apocalyptic desert, haunted by mirages and mechanical scavengers.", height=100, key="ui_user_idea")
195
- ui_genre = st.selectbox("Primary Genre:", ["Cyberpunk", "Sci-Fi", "Fantasy", "Noir", "Thriller", "Western", "Post-Apocalyptic", "Historical Drama", "Surreal"], index=6, key="ui_genre")
196
- ui_mood = st.selectbox("Overall Mood:", ["Hopeful yet Desperate", "Mysterious & Eerie", "Gritty & Tense", "Epic & Awe-Inspiring", "Melancholy & Reflective", "Whimsical & Lighthearted"], index=0, key="ui_mood")
197
- ui_num_scenes = st.slider("Number of Key Scenes:", 1, 10, 1, key="ui_num_scenes")
198
  ui_creative_guidance_map = {"Standard Director": "standard", "Artistic Visionary": "more_artistic", "Experimental Storyteller": "experimental_narrative"}
199
- ui_selected_guidance_key = st.selectbox("AI Creative Director Style:", options=list(ui_creative_guidance_map.keys()), key="ui_creative_guidance")
200
  ui_actual_guidance = ui_creative_guidance_map[ui_selected_guidance_key]
201
 
202
- if st.button("🌌 Generate Cinematic Treatment", type="primary", key="btn_generate_treatment", use_container_width=True):
203
- initialize_new_project_data_in_session()
204
  if not ui_user_idea.strip(): st.warning("Please provide a story idea.")
205
  else:
206
- with st.status("AI Director is envisioning your masterpiece...", expanded=True) as status_operation:
207
  try:
208
- status_operation.write("Phase 1: Gemini crafting cinematic treatment... 📜"); logger.info("APP_MAIN_FLOW: Phase 1 - Treatment Gen.")
209
  prompt_for_treatment = create_cinematic_treatment_prompt(ui_user_idea, ui_genre, ui_mood, ui_num_scenes, ui_actual_guidance)
210
  list_of_raw_treatment_scenes = st.session_state.gemini_service_handler.generate_story_breakdown(prompt_for_treatment)
211
  if not isinstance(list_of_raw_treatment_scenes, list) or not list_of_raw_treatment_scenes: raise ValueError("Gemini returned invalid scene list format.")
212
-
213
  temp_processed_scenes = []
214
  for scene_data_gemini in list_of_raw_treatment_scenes:
215
  gemini_dur_est = scene_data_gemini.get('video_clip_duration_estimate_secs_감독', 0)
@@ -217,37 +187,31 @@ with st.sidebar:
217
  scene_data_gemini['user_shot_type'] = scene_data_gemini.get('PROACTIVE_camera_work_감독', DEFAULT_SHOT_TYPE)
218
  scene_data_gemini['user_selected_asset_type'] = "Auto (Director's Choice)"
219
  temp_processed_scenes.append(scene_data_gemini)
220
- st.session_state.project_story_treatment_scenes = temp_processed_scenes
221
-
222
- num_scenes_generated = len(st.session_state.project_story_treatment_scenes)
223
- st.session_state.project_scene_generation_prompts_list = [""]*num_scenes_generated
224
- st.session_state.project_generated_assets_info_list = [None]*num_scenes_generated
225
- logger.info(f"APP_MAIN_FLOW: Phase 1 complete. {num_scenes_generated} scenes."); status_operation.update(label="Treatment complete! ✅ Generating visual assets...", state="running")
226
-
227
- status_operation.write("Phase 2: Creating visual assets..."); logger.info("APP_MAIN_FLOW: Phase 2 - Asset Gen.")
228
  num_successful_assets = 0
229
- for idx, scene_item_data in enumerate(st.session_state.project_story_treatment_scenes):
230
  scene_num_log = scene_item_data.get('scene_number', idx+1)
231
- status_operation.write(f" Processing asset for Scene {scene_num_log}..."); logger.info(f" APP_MAIN_FLOW: Processing asset for Scene {scene_num_log}.")
232
  if generate_asset_for_scene_in_app(idx, scene_item_data, asset_ver_num=1): num_successful_assets += 1
233
-
234
  status_label_p2 = "Visual assets generated! "; next_state_p2 = "running"
235
- if num_successful_assets == 0 and num_scenes_generated > 0: logger.error("APP_MAIN_FLOW: Asset gen FAILED for all scenes."); status_label_p2 = "Asset gen FAILED for all scenes."; next_state_p2="error"; status_operation.update(label=status_label_p2, state=next_state_p2, expanded=True); st.stop()
236
  elif num_successful_assets < num_scenes_generated: logger.warning(f"APP_MAIN_FLOW: Assets partially generated ({num_successful_assets}/{num_scenes_generated})."); status_label_p2 = f"Assets partially done ({num_successful_assets}/{num_scenes_generated}). "
237
- status_operation.update(label=f"{status_label_p2}Generating narration...", state=next_state_p2)
238
  if next_state_p2 == "error": st.stop()
239
-
240
- status_operation.write("Phase 3: Generating narration script..."); logger.info("APP_MAIN_FLOW: Phase 3 - Narration Script.")
241
  voice_style_narr = st.session_state.get("selected_voice_style_for_generation", "cinematic_trailer")
242
- prompt_narr_script = create_narration_script_prompt_enhanced(st.session_state.project_story_treatment_scenes, ui_mood, ui_genre, voice_style_narr)
243
- st.session_state.project_narration_script_text = st.session_state.gemini_service_handler.generate_image_prompt(prompt_narr_script) # generate_image_prompt for general text
244
- logger.info("APP_MAIN_FLOW: Narration script generated."); status_operation.update(label="Narration script ready! Synthesizing voice...", state="running")
245
-
246
  status_operation.write("Phase 4: Synthesizing voice (ElevenLabs)..."); logger.info("APP_MAIN_FLOW: Phase 4 - Voice Synthesis.")
247
- st.session_state.project_overall_narration_audio_path = st.session_state.visual_content_engine.generate_narration_audio(st.session_state.project_narration_script_text)
248
-
249
  final_status_msg = "All initial components ready! Review storyboard. 🚀"; final_op_status_val = "complete"
250
- if not st.session_state.project_overall_narration_audio_path: final_status_msg = f"{status_label_p2}Storyboard ready (Voiceover failed/skipped)."; logger.warning("APP_MAIN_FLOW: Narration audio failed/skipped.")
251
  else: logger.info("APP_MAIN_FLOW: Narration audio generated.")
252
  status_operation.update(label=final_status_msg, state=final_op_status_val, expanded=False)
253
  except ValueError as e_val: logger.error(f"APP_MAIN_FLOW: ValueError: {e_val}", exc_info=True); status_operation.update(label=f"Data/Response Error: {e_val}", state="error", expanded=True);
@@ -255,85 +219,88 @@ with st.sidebar:
255
  except Exception as e_gen: logger.error(f"APP_MAIN_FLOW: Unhandled Exception: {e_gen}", exc_info=True); status_operation.update(label=f"Unexpected Error: {e_gen}", state="error", expanded=True);
256
 
257
  with st.expander("Define Characters", expanded=False):
258
- # (Keep as before - unique keys for inputs)
259
- sb_char_name_input = st.text_input("Character Name", key="sb_char_name_unique_2"); sb_char_desc_input = st.text_area("Visual Description", key="sb_char_desc_unique_2", height=100, placeholder="e.g., Jax: rugged astronaut...")
260
- if st.button("Save Character", key="sb_add_char_unique_2"):
261
- if sb_char_name_input and sb_char_desc_input: st.session_state.project_character_definitions_map[sb_char_name_input.strip().lower()] = sb_char_desc_input.strip(); st.success(f"Character '{sb_char_name_input.strip()}' saved.")
262
- else: st.warning("Both character name and description are required.")
263
- if st.session_state.project_character_definitions_map: st.caption("Defined Characters:"); [st.markdown(f"**{char_key.title()}:** _{char_val}_") for char_key,char_val in st.session_state.project_character_definitions_map.items()]
264
 
265
  with st.expander("Global Style Overrides", expanded=False):
266
- # (Keep as before - unique keys for inputs)
267
- sb_style_presets_map = { "Default (Director's Choice)": "", "Hyper-Realistic Gritty Noir": "hyper-realistic gritty neo-noir, extreme detail...", "Surreal Dreamscape Fantasy": "surreal dreamscape, epic fantasy elements...", "Vintage Analog Sci-Fi": "70s/80s analog sci-fi film aesthetic..."}
268
- sb_selected_preset_key_style = st.selectbox("Base Style Preset:", options=list(sb_style_presets_map.keys()), key="sb_style_preset_unique_2")
269
- sb_custom_keywords_style = st.text_area("Additional Custom Style Keywords:", key="sb_custom_style_unique_2", height=80, placeholder="e.g., 'Dutch angle', 'lens flare'")
270
- sb_current_global_style_display = st.session_state.project_global_style_keywords_str
271
- if st.button("Apply Global Styles", key="sb_apply_styles_unique_2"):
272
- final_style_str_global = sb_style_presets_map[sb_selected_preset_key_style];
273
- if sb_custom_keywords_style.strip(): final_style_str_global = f"{final_style_str_global}, {sb_custom_keywords_style.strip()}" if final_style_str_global else sb_custom_keywords_style.strip()
274
- st.session_state.project_global_style_keywords_str = final_style_str_global.strip(); sb_current_global_style_display = final_style_str_global.strip()
275
- if sb_current_global_style_display: st.success("Global visual styles applied!")
276
- else: st.info("Global visual style additions cleared.")
277
- if sb_current_global_style_display: st.caption(f"Active global styles: \"{sb_current_global_style_display}\"")
278
 
279
  with st.expander("Voice & Narration Style", expanded=False):
280
- # (Keep as before - unique keys for inputs)
281
- sb_engine_default_voice_val_narr = "Rachel"
282
- if hasattr(st.session_state, 'visual_content_engine') and st.session_state.visual_content_engine: sb_engine_default_voice_val_narr = st.session_state.visual_content_engine.elevenlabs_voice_id
283
- sb_user_voice_id_input_narr = st.text_input("ElevenLabs Voice ID (override):", value=sb_engine_default_voice_val_narr, key="sb_el_voice_id_override_unique_2", help=f"Defaulting to '{sb_engine_default_voice_val_narr}'.")
284
- sb_narration_styles_map_narr = {"Cinematic Trailer": "cinematic_trailer", "Neutral Documentary": "documentary_neutral", "Character Introspection": "introspective_character"}
285
- sb_selected_narration_style_key_narr = st.selectbox("Narration Script Style:", list(sb_narration_styles_map_narr.keys()), key="sb_narr_style_sel_unique_2", index=0)
286
- if st.button("Set Narrator Voice & Style", key="sb_set_voice_btn_unique_2"):
287
- final_voice_id_to_use_el_narr = sb_user_voice_id_input_narr.strip() or st.session_state.get("CONFIG_ELEVENLABS_VOICE_ID", "Rachel")
288
- if hasattr(st.session_state, 'visual_content_engine'): st.session_state.visual_content_engine.elevenlabs_voice_id = final_voice_id_to_use_el_narr
289
- st.session_state.selected_voice_style_for_generation = sb_narration_styles_map_narr[sb_selected_narration_style_key_narr]
290
- st.success(f"Narrator Voice ID set to: {final_voice_id_to_use_el_narr}. Script Style: {sb_selected_narration_style_key_narr}")
291
- logger.info(f"User updated 11L Voice ID: {final_voice_id_to_use_el_narr}, Narration Script Style: {sb_selected_narration_style_key_narr}")
292
 
293
  # --- Main Content Area ---
 
294
  st.header("🎬 Cinematic Storyboard & Treatment")
295
  if st.session_state.project_narration_script_text:
296
  with st.expander("📜 View Full Narration Script", expanded=False): st.markdown(f"> _{st.session_state.project_narration_script_text}_")
297
 
298
- if not st.session_state.project_story_treatment_scenes: st.info("Use the sidebar to generate your cinematic treatment.")
 
 
299
  else:
300
- for i_loop_main_display, scene_item_for_display in enumerate(st.session_state.project_story_treatment_scenes):
301
- scene_num_for_display = scene_item_for_display.get('scene_number', i_loop_main_display + 1)
302
- scene_title_for_display_main = scene_item_for_display.get('scene_title', 'Untitled Scene')
303
- key_base_main_area_widgets = f"s{scene_num_for_display}_main_widgets_{i_loop_main_display}"
304
-
305
- if "director_note" in scene_item_for_display and scene_item_for_display['director_note']: st.info(f"🎬 Director Note S{scene_num_for_display}: {scene_item_for_display['director_note']}")
306
  st.subheader(f"SCENE {scene_num_for_display}: {scene_title_for_display_main.upper()}"); main_col_treatment_area, main_col_visual_area = st.columns([0.45, 0.55])
307
 
308
  with main_col_treatment_area:
309
  with st.expander("📝 Scene Treatment & Controls", expanded=True):
310
- # (Display textual scene details - as before)
311
- st.markdown(f"**Beat:** {scene_item_for_display.get('emotional_beat', 'N/A')}"); st.markdown(f"**Setting:** {scene_item_for_display.get('setting_description', 'N/A')}"); st.markdown(f"**Chars:** {', '.join(scene_item_for_display.get('characters_involved', ['N/A']))}"); st.markdown(f"**Focus Moment:** _{scene_item_for_display.get('character_focus_moment', 'N/A')}_"); st.markdown(f"**Plot Beat:** {scene_item_for_display.get('key_plot_beat', 'N/A')}"); st.markdown(f"**Dialogue Hook:** `\"{scene_item_for_display.get('suggested_dialogue_hook', '...')}\"`"); st.markdown("---"); st.markdown(f"**Dir. Visual Style:** _{scene_item_for_display.get('PROACTIVE_visual_style_감독', 'N/A')}_"); st.markdown(f"**Dir. Camera:** _{scene_item_for_display.get('PROACTIVE_camera_work_감독', 'N/A')}_"); st.markdown(f"**Dir. Sound:** _{scene_item_for_display.get('PROACTIVE_sound_design_감독', 'N/A')}_"); st.markdown("---")
312
  st.markdown("##### Shot, Pacing & Asset Controls")
313
- # (Shot Type, Scene Duration, Asset Type Override selectboxes - as before, using unique keys)
314
- ui_shot_type_current = st.session_state.project_story_treatment_scenes[i_loop_main_display].get('user_shot_type', DEFAULT_SHOT_TYPE)
315
  try: ui_shot_type_idx_val = SHOT_TYPES_OPTIONS.index(ui_shot_type_current)
316
  except ValueError: ui_shot_type_idx_val = SHOT_TYPES_OPTIONS.index(DEFAULT_SHOT_TYPE)
317
  ui_shot_type_new = st.selectbox("Dominant Shot Type:", options=SHOT_TYPES_OPTIONS, index=ui_shot_type_idx_val, key=f"shot_type_{key_base_main_area_widgets}")
318
- if ui_shot_type_new != ui_shot_type_current: st.session_state.project_story_treatment_scenes[i_loop_main_display]['user_shot_type'] = ui_shot_type_new
319
- ui_duration_current = st.session_state.project_story_treatment_scenes[i_loop_main_display].get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)
320
  ui_duration_new = st.number_input("Scene Duration (seconds):", min_value=1, max_value=300, value=ui_duration_current, step=1, key=f"duration_{key_base_main_area_widgets}")
321
- if ui_duration_new != ui_duration_current: st.session_state.project_story_treatment_scenes[i_loop_main_display]['user_scene_duration_secs'] = ui_duration_new
322
- ui_asset_type_override_current = st.session_state.project_story_treatment_scenes[i_loop_main_display].get('user_selected_asset_type', "Auto (Director's Choice)")
323
  try: ui_asset_type_idx_val = ASSET_TYPE_OPTIONS.index(ui_asset_type_override_current)
324
  except ValueError: ui_asset_type_idx_val = 0
325
  ui_asset_type_override_new = st.selectbox("Asset Type Override:", ASSET_TYPE_OPTIONS, index=ui_asset_type_idx_val, key=f"asset_type_{key_base_main_area_widgets}", help="Choose asset type. 'Auto' uses AI suggestion.")
326
- if ui_asset_type_override_new != ui_asset_type_override_current: st.session_state.project_story_treatment_scenes[i_loop_main_display]['user_selected_asset_type'] = ui_asset_type_override_new
327
  st.markdown("---")
328
- prompt_for_asset_to_display = st.session_state.project_scene_generation_prompts_list[i_loop_main_display] if i_loop_main_display < len(st.session_state.project_scene_generation_prompts_list) else None
329
  if prompt_for_asset_to_display:
330
  with st.popover("👁️ View Asset Generation Prompt"): st.markdown(f"**Prompt used for current asset:**"); st.code(prompt_for_asset_to_display, language='text')
331
- pexels_query_to_display = scene_item_for_display.get('pexels_search_query_감독', None)
332
  if pexels_query_to_display: st.caption(f"Pexels Fallback: `{pexels_query_to_display}`")
333
 
334
  with main_col_visual_area:
335
- # (Display logic for different asset types - as before)
336
- current_asset_info_to_display = st.session_state.project_generated_assets_info_list[i_loop_main_display] if i_loop_main_display < len(st.session_state.project_generated_assets_info_list) else None
337
  if current_asset_info_to_display and not current_asset_info_to_display.get('error') and current_asset_info_to_display.get('path') and os.path.exists(current_asset_info_to_display['path']):
338
  path_of_asset_for_display = current_asset_info_to_display['path']; type_of_asset_for_display = current_asset_info_to_display.get('type', 'image')
339
  if type_of_asset_for_display == 'image': st.image(path_of_asset_for_display, caption=f"S{scene_num_for_display} ({type_of_asset_for_display}): {scene_title_for_display_main}")
@@ -344,90 +311,86 @@ else:
344
  except Exception as e_vid_display_main_loop: st.error(f"Error displaying video {path_of_asset_for_display}: {e_vid_display_main_loop}"); logger.error(f"Error displaying video: {e_vid_display_main_loop}", exc_info=True)
345
  else: st.warning(f"Unknown asset type '{type_of_asset_for_display}' for S{scene_num_for_display}.")
346
  else:
347
- if st.session_state.project_story_treatment_scenes:
348
- error_message_for_asset_display = current_asset_info_to_display.get('error_message', 'Visual pending or failed.') if current_asset_info_to_display else 'Visual pending or failed.'
349
- st.caption(error_message_for_asset_display)
350
 
351
  with st.popover(f"✏️ Edit S{scene_num_for_display} Treatment"):
352
- # (Treatment Regeneration Popover - using corrected generate_asset_for_scene_in_app call)
353
- feedback_for_treatment_regen_input = st.text_area("Changes to treatment:", key=f"treat_fb_input_pop_{widget_key_base_main_area}", height=150)
354
  if st.button(f"🔄 Update S{scene_num_for_display} Treatment", key=f"regen_treat_btn_pop_{widget_key_base_main_area}"):
355
- if feedback_for_treatment_regen_input:
356
  with st.status(f"Updating S{scene_num_for_display} Treatment & Asset...", expanded=True) as status_treatment_update_op_pop:
357
- preserved_user_shot_type_pop = st.session_state.project_story_treatment_scenes[i_loop_main_display]['user_shot_type']
358
- preserved_user_duration_pop = st.session_state.project_story_treatment_scenes[i_loop_main_display]['user_scene_duration_secs']
359
- preserved_user_asset_type_pop = st.session_state.project_story_treatment_scenes[i_loop_main_display]['user_selected_asset_type']
360
- prompt_for_gemini_scene_regen_pop = create_scene_regeneration_prompt(scene_item_for_display, feedback_for_treatment_regen_input, st.session_state.project_story_treatment_scenes)
361
  try:
362
  updated_scene_data_from_gemini_pop = st.session_state.gemini_service_handler.regenerate_scene_script_details(prompt_for_gemini_scene_regen_pop)
363
  final_merged_updated_scene_data_pop = {**updated_scene_data_from_gemini_pop}
364
- final_merged_updated_scene_data_pop['user_shot_type'] = preserved_user_shot_type_pop; final_merged_updated_scene_data_pop['user_scene_duration_secs'] = preserved_user_duration_pop; final_merged_updated_scene_data_pop['user_selected_asset_type'] = preserved_user_asset_type_pop
365
- st.session_state.project_story_treatment_scenes[i_loop_main_display] = final_merged_updated_scene_data_pop
366
  status_treatment_update_op_pop.update(label="Treatment updated! Regenerating asset...", state="running")
367
  version_for_regenerated_asset_pop = 1
368
  if current_asset_info_to_display and current_asset_info_to_display.get('path') and os.path.exists(current_asset_info_to_display['path']):
369
  try: base_fn_asset_regen_pop,_=os.path.splitext(os.path.basename(current_asset_info_to_display['path'])); version_for_regenerated_asset_pop = int(base_fn_asset_regen_pop.split('_v')[-1])+1 if '_v' in base_fn_asset_regen_pop else 2
370
  except: version_for_regenerated_asset_pop = 2
371
- if generate_asset_for_scene_in_app(i_loop_main_display, final_merged_updated_scene_data_pop, asset_ver_num=version_for_regenerated_asset_pop, user_selected_asset_type_override=preserved_user_asset_type_pop): status_treatment_update_op_pop.update(label="Treatment & Asset Updated! 🎉", state="complete", expanded=False)
372
  else: status_treatment_update_op_pop.update(label="Treatment updated, but asset regeneration failed.", state="complete", expanded=False)
373
  st.rerun()
374
  except Exception as e_treatment_regen_op_pop: status_treatment_update_op_pop.update(label=f"Error during treatment regen: {e_treatment_regen_op_pop}", state="error"); logger.error(f"Scene treatment regeneration error: {e_treatment_regen_op_pop}", exc_info=True)
375
  else: st.warning("Please provide feedback to update the treatment.")
376
 
377
  with st.popover(f"🎨 Edit S{scene_num_for_display} Visual Prompt/Asset"):
378
- # (Visual Asset Regeneration Popover - using corrected generate_asset_for_scene_in_app call)
379
- prompt_to_edit_display_pop = st.session_state.project_scene_generation_prompts_list[i_loop_main_display] if i_loop_main_display < len(st.session_state.project_scene_generation_prompts_list) else "No prompt available."
380
  st.caption("Current Asset Generation Prompt:"); st.code(prompt_to_edit_display_pop, language='text')
381
- feedback_for_visual_asset_regen_input = st.text_area("Describe changes for the visual asset:", key=f"visual_fb_input_pop_{widget_key_base_main_area}", height=150)
382
  if st.button(f"🔄 Update S{scene_num_for_display} Asset", key=f"regen_visual_btn_pop_{widget_key_base_main_area}"):
383
  if feedback_for_visual_asset_regen_input:
384
- with st.status(f"Refining prompt & regenerating asset for S{scene_num_for_display}...", expanded=True) as status_visual_asset_regen_op_pop:
385
- user_selected_asset_type_for_regen_pop = st.session_state.project_story_treatment_scenes[i_loop_main_display]['user_selected_asset_type']
386
  is_video_type_for_regen_pop = (user_selected_asset_type_for_regen_pop == "Video Clip") or (user_selected_asset_type_for_regen_pop == "Auto (Director's Choice)" and scene_item_for_display.get('suggested_asset_type_감독') == 'video_clip')
387
  newly_constructed_asset_prompt_regen_pop = ""
388
- if not is_video_type_for_regen_pop: # IMAGE
389
- gemini_refinement_prompt_viz_pop = create_visual_regeneration_prompt(prompt_to_edit_display_pop, feedback_for_visual_asset_regen_input, scene_item_for_display, st.session_state.project_character_definitions_map, st.session_state.project_global_style_keywords_str)
390
- try: newly_constructed_asset_prompt_regen_pop = st.session_state.gemini_service_handler.refine_image_prompt_from_feedback(gemini_refinement_prompt_viz_pop); st.session_state.project_scene_generation_prompts_list[i_loop_main_display] = newly_constructed_asset_prompt_regen_pop; status_visual_asset_regen_op_pop.update(label="Image prompt refined! Regenerating asset...", state="running")
391
- except Exception as e_gemini_refine_viz_pop: status_visual_asset_regen_op_pop.update(label=f"Error refining prompt: {e_gemini_refine_viz_pop}", state="error"); logger.error(f"Visual prompt refinement (Gemini) error: {e_gemini_refine_viz_pop}", exc_info=True); continue
392
- else: # VIDEO
393
- logger.info(f"Reconstructing video motion prompt for S{scene_num_for_display}. Feedback (indirect): {feedback_for_visual_asset_regen_input}")
394
- newly_constructed_asset_prompt_regen_pop = construct_text_to_video_prompt_for_gen4(scene_item_for_display, st.session_state.project_global_style_keywords_str); st.session_state.project_scene_generation_prompts_list[i_loop_main_display] = newly_constructed_asset_prompt_regen_pop; status_visual_asset_regen_op_pop.update(label="Video prompt reconstructed! Regenerating asset...", state="running")
395
- if not newly_constructed_asset_prompt_regen_pop: status_visual_asset_regen_op_pop.update(label="Prompt construction/refinement failed.", state="error"); continue
396
  version_for_regenerated_visual_asset_pop = 1
397
  if current_asset_info_to_display and current_asset_info_to_display.get('path') and os.path.exists(current_asset_info_to_display['path']):
398
  try: base_fn_viz_regen_pop,_=os.path.splitext(os.path.basename(current_asset_info_to_display['path'])); version_for_regenerated_visual_asset_pop = int(base_fn_viz_regen_pop.split('_v')[-1])+1 if '_v' in base_fn_viz_regen_pop else 2
399
  except: version_for_regenerated_visual_asset_pop = 2
400
- if generate_asset_for_scene_in_app(i_loop_main_display, st.session_state.project_story_treatment_scenes[i_loop_main_display], asset_ver_num=version_for_regenerated_visual_asset_pop, user_selected_asset_type_override=user_selected_asset_type_for_regen_pop): status_visual_asset_regen_op_pop.update(label="Asset Updated! 🎉", state="complete", expanded=False)
401
  else: status_visual_asset_regen_op_pop.update(label="Prompt updated, asset regen failed.", state="complete", expanded=False)
402
  st.rerun()
403
  else: st.warning("Please provide feedback for visual asset regeneration.")
404
  st.markdown("---")
405
 
406
- # Video Assembly Button Logic
407
- if st.session_state.project_story_treatment_scenes and any(asset_info_item_vid_assembly and not asset_info_item_vid_assembly.get('error') and asset_info_item_vid_assembly.get('path') for asset_info_item_vid_assembly in st.session_state.project_generated_assets_info_list if asset_info_item_vid_assembly is not None):
408
- if st.button("🎬 Assemble Narrated Cinematic Animatic", key="assemble_video_main_area_btn_final_unique", type="primary", use_container_width=True): # Unique key
409
- with st.status("Assembling Ultra Animatic (this may take a few minutes)...", expanded=True) as status_video_assembly_final_op:
410
  assets_for_final_video_assembly_list_main = []
411
- for i_vid_assembly_main_loop, scene_data_for_vid_assembly_main in enumerate(st.session_state.project_story_treatment_scenes):
412
- asset_info_current_scene_for_vid_main = st.session_state.project_generated_assets_info_list[i_vid_assembly_main_loop] if i_vid_assembly_main_loop < len(st.session_state.project_generated_assets_info_list) else None
413
- if asset_info_current_scene_for_vid_main and not asset_info_current_scene_for_vid_main.get('error') and asset_info_current_scene_for_vid_main.get('path') and os.path.exists(asset_info_current_scene_for_vid_main['path']):
414
- assets_for_final_video_assembly_list_main.append({'path': asset_info_current_scene_for_vid_main['path'], 'type': asset_info_current_scene_for_vid_main.get('type', 'image'), 'scene_num': scene_data_for_vid_assembly_main.get('scene_number', i_vid_assembly_main_loop + 1), 'key_action': scene_data_for_vid_assembly_main.get('key_plot_beat', ''), 'duration': scene_data_for_vid_assembly_main.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)})
415
- status_video_assembly_final_op.write(f"Adding S{scene_data_for_vid_assembly_main.get('scene_number', i_vid_assembly_main_loop + 1)} ({asset_info_current_scene_for_vid_main.get('type')}).")
416
  else: logger.warning(f"Skipping S{scene_data_for_vid_assembly_main.get('scene_number', i_vid_assembly_main_loop+1)} for video: No valid asset.")
417
  if assets_for_final_video_assembly_list_main:
418
  status_video_assembly_final_op.write("Calling video engine..."); logger.info("APP: Calling visual_engine.assemble_animatic_from_assets")
419
- st.session_state.project_final_video_path = st.session_state.visual_content_engine.assemble_animatic_from_assets(asset_data_list=assets_for_final_video_assembly_list_main, overall_narration_path=st.session_state.project_overall_narration_audio_path, output_filename="cinegen_ultra_animatic.mp4", fps=24)
420
- if st.session_state.project_final_video_path and os.path.exists(st.session_state.project_final_video_path): status_video_assembly_final_op.update(label="Ultra animatic assembled! 🎉", state="complete", expanded=False); st.balloons()
421
  else: status_video_assembly_final_op.update(label="Video assembly failed. Check logs.", state="error", expanded=True); logger.error("APP: Video assembly returned None or file does not exist.")
422
  else: status_video_assembly_final_op.update(label="No valid assets for video assembly.", state="error", expanded=True); logger.warning("APP: No valid assets found for video assembly.")
423
- elif st.session_state.project_story_treatment_scenes: st.info("Generate visual assets for your scenes before attempting to assemble the animatic.")
424
 
425
- if st.session_state.project_final_video_path and os.path.exists(st.session_state.project_final_video_path):
426
  st.header("🎬 Generated Cinematic Animatic");
427
  try:
428
- with open(st.session_state.project_final_video_path, 'rb') as final_video_file_obj_display: final_video_bytes_for_display = final_video_file_obj_display.read()
429
  st.video(final_video_bytes_for_display, format="video/mp4")
430
- st.download_button(label="Download Ultra Animatic", data=final_video_bytes_for_display, file_name=os.path.basename(st.session_state.project_final_video_path), mime="video/mp4", use_container_width=True, key="download_video_main_area_btn_final_unique" )
431
  except Exception as e_final_video_display_op_main: st.error(f"Error displaying final animatic video: {e_final_video_display_op_main}"); logger.error(f"Error displaying final animatic video: {e_final_video_display_op_main}", exc_info=True)
432
 
433
  st.sidebar.markdown("---"); st.sidebar.caption("CineGen AI Ultra+ | Visionary Cinematic Pre-Production")
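
For reference, the key-lookup order that load_api_key implements in both the old and the new version of app.py (Streamlit secrets first, then environment variables) condenses to the standalone sketch below; the function body mirrors the diff, the exact log wording is illustrative, and only documented Streamlit behavior (st.secrets acting as a read-only mapping) is assumed.

import os
import logging
import streamlit as st

logger = logging.getLogger(__name__)

def load_api_key(secret_name: str, env_name: str, service_name: str):
    """Return an API key, preferring Streamlit secrets and falling back to the environment."""
    key_value = None
    try:
        # st.secrets behaves like a mapping; reading it can raise if no secrets are configured.
        if hasattr(st, "secrets") and secret_name in st.secrets:
            key_value = st.secrets.get(secret_name)
    except Exception as exc:
        logger.warning("Could not read st.secrets for %s (%s): %s", secret_name, service_name, exc)
    if not key_value:
        key_value = os.environ.get(env_name)
    if not key_value:
        logger.warning("API key for %s (%s/%s) not found; the service stays disabled.", service_name, secret_name, env_name)
    return key_value

# Usage, as in the diff: gemini_key = load_api_key("GEMINI_API_KEY", "GEMINI_API_KEY", "Gemini")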
 
3
  import os
4
  import logging
5
 
6
+ # --- Streamlit PermissionError Mitigation Attempts ---
 
7
  if "STREAMLIT_CLIENT_GATHER_USAGE_STATS" not in os.environ:
8
  os.environ["STREAMLIT_CLIENT_GATHER_USAGE_STATS"] = "false"
9
  print("INFO: app.py - Disabled Streamlit client usage stats gathering via env var.")
10
+ if "STREAMLIT_BROWSER_GATHERUSAGESTATS" not in os.environ:
 
11
  os.environ["STREAMLIT_BROWSER_GATHERUSAGESTATS"] = "false"
12
  print("INFO: app.py - Set STREAMLIT_BROWSER_GATHERUSAGESTATS to false.")
13
+ streamlit_home_path_app = "/app/.streamlit_cai_config"
14
  if "STREAMLIT_HOME" not in os.environ and os.getcwd().startswith("/app"):
15
  os.environ["STREAMLIT_HOME"] = streamlit_home_path_app
16
  try:
17
  os.makedirs(streamlit_home_path_app, exist_ok=True)
 
18
  print(f"INFO: app.py - Set STREAMLIT_HOME to: {streamlit_home_path_app}")
19
  except Exception as e_mkdir_sh:
20
  print(f"WARNING: app.py - Could not create STREAMLIT_HOME '{streamlit_home_path_app}': {e_mkdir_sh}")
21
 
 
22
  from core.gemini_handler import GeminiHandler
23
  from core.visual_engine import VisualEngine
24
  from core.prompt_engineering import (
 
30
  create_visual_regeneration_prompt
31
  )
32
 
 
33
  st.set_page_config(page_title="CineGen AI Ultra+", layout="wide", initial_sidebar_state="expanded")
 
34
  logging.basicConfig(
35
+ level=logging.DEBUG,
36
  format='%(asctime)s - %(name)s [%(levelname)s] - %(message)s (%(module)s.%(funcName)s:%(lineno)d)'
37
  )
38
  logger = logging.getLogger(__name__)
39
 
 
40
  SHOT_TYPES_OPTIONS = [ "Director's Choice", "Establishing Shot", "Long Shot", "Full Shot", "Medium Long Shot (Cowboy)", "Medium Shot", "Medium Close-up", "Close-up", "Extreme Close-up", "Point of View (POV)", "Over the Shoulder", "Tracking Shot", "Dolly Zoom", "Crane Shot", "Aerial Shot", "Static Shot", "Dutch Angle", "Whip Pan"]
41
  DEFAULT_SCENE_DURATION_SECS = 5
42
  DEFAULT_SHOT_TYPE = "Director's Choice"
43
  ASSET_TYPE_OPTIONS = ["Auto (Director's Choice)", "Image", "Video Clip"]
44
 
 
45
  def load_api_key(key_name_streamlit, key_name_env, service_name):
46
+ key_value = None; secrets_available = hasattr(st, 'secrets')
 
47
  try:
48
  if secrets_available and key_name_streamlit in st.secrets:
49
+ key_value = st.secrets.get(key_name_streamlit)
50
  if key_value: logger.info(f"API Key for {service_name} found in Streamlit secrets.")
51
+ except Exception as e: logger.warning(f"Could not access st.secrets for {key_name_streamlit} ({service_name}): {e}")
52
  if not key_value and key_name_env in os.environ:
53
+ key_value = os.environ.get(key_name_env)
54
  if key_value: logger.info(f"API Key for {service_name} found in env var '{key_name_env}'.")
55
+ if not key_value: logger.warning(f"API Key for {service_name} (Key: {key_name_streamlit}/{key_name_env}) NOT FOUND.")
56
  return key_value
57
 
58
+ if 'services_initialized_flag' not in st.session_state:
 
59
  logger.info("APP_INIT: Initializing services and API keys...")
 
60
  st.session_state.API_KEY_GEMINI = load_api_key("GEMINI_API_KEY", "GEMINI_API_KEY", "Gemini")
61
  st.session_state.API_KEY_OPENAI = load_api_key("OPENAI_API_KEY", "OPENAI_API_KEY", "OpenAI/DALL-E")
62
  st.session_state.API_KEY_ELEVENLABS = load_api_key("ELEVENLABS_API_KEY", "ELEVENLABS_API_KEY", "ElevenLabs")
 
64
  st.session_state.CONFIG_ELEVENLABS_VOICE_ID = load_api_key("ELEVENLABS_VOICE_ID", "ELEVENLABS_VOICE_ID", "ElevenLabs Voice ID")
65
  st.session_state.API_KEY_RUNWAYML = load_api_key("RUNWAY_API_KEY", "RUNWAY_API_KEY", "RunwayML")
66
 
67
+ if not st.session_state.API_KEY_GEMINI: st.error("CRITICAL: Gemini API Key missing!"); logger.critical("Gemini API Key missing."); st.stop()
68
+ try: st.session_state.gemini_service_handler = GeminiHandler(api_key=st.session_state.API_KEY_GEMINI); logger.info("GeminiHandler initialized.")
69
+ except Exception as e: st.error(f"CRITICAL: Failed to init GeminiHandler: {e}"); logger.critical(f"GeminiHandler init failed: {e}", exc_info=True); st.stop()
 
 
70
  try:
71
+ el_default_voice = "Rachel"; el_resolved_voice_id = st.session_state.CONFIG_ELEVENLABS_VOICE_ID or el_default_voice
72
+ st.session_state.visual_content_engine = VisualEngine(output_dir="temp_cinegen_media", default_elevenlabs_voice_id=el_resolved_voice_id)
 
73
  st.session_state.visual_content_engine.set_openai_api_key(st.session_state.API_KEY_OPENAI)
74
  st.session_state.visual_content_engine.set_elevenlabs_api_key(st.session_state.API_KEY_ELEVENLABS, voice_id_from_secret=st.session_state.CONFIG_ELEVENLABS_VOICE_ID)
75
  st.session_state.visual_content_engine.set_pexels_api_key(st.session_state.API_KEY_PEXELS)
76
  st.session_state.visual_content_engine.set_runway_api_key(st.session_state.API_KEY_RUNWAYML)
77
+ logger.info("VisualEngine initialized and API keys set.")
78
+ except Exception as e: st.error(f"CRITICAL: Failed to init VisualEngine: {e}"); logger.critical(f"VisualEngine init/key setting failed: {e}", exc_info=True); st.warning("VisualEngine critical setup issue."); st.stop()
79
+ st.session_state.services_initialized_flag = True; logger.info("APP_INIT: Service initialization complete.")
80
+
81
+ # <<< CORRECTED SESSION STATE INITIALIZATION >>>
82
+ PROJECT_SESSION_STATE_DEFAULTS = {
83
+ 'project_story_treatment_scenes_list': [],
84
+ 'project_scene_generation_prompts_list': [],
85
+ 'project_generated_assets_info_list': [],
86
+ 'project_final_video_path': None,
87
+ 'project_character_definitions_map': {},
88
+ 'project_global_style_keywords_str': "",
89
+ 'project_overall_narration_audio_path': None,
90
+ 'project_narration_script_text': ""
91
+ }
92
+ for key_name, default_value in PROJECT_SESSION_STATE_DEFAULTS.items():
93
+ if key_name not in st.session_state:
94
+ st.session_state[key_name] = default_value
95
+ # <<< END CORRECTION >>>
96
 
97
  def initialize_new_project_data_in_session():
98
+ # <<< USE CORRECTED KEYS HERE TOO >>>
99
+ st.session_state.project_story_treatment_scenes_list = []
100
  st.session_state.project_scene_generation_prompts_list = []
101
  st.session_state.project_generated_assets_info_list = []
102
  st.session_state.project_final_video_path = None
103
+ st.session_state.project_overall_narration_audio_path = None # Was correct
104
+ st.session_state.project_narration_script_text = "" # Was correct
105
+ logger.info("PROJECT_DATA: New project data initialized.")
106
 
 
107
  def generate_asset_for_scene_in_app(scene_idx_num, scene_data_obj, asset_ver_num=1, user_asset_type_choice_ui="Auto (Director's Choice)"):
108
+ logger.debug(f"APP: generate_asset_for_scene_in_app for S_idx {scene_idx_num}, ver {asset_ver_num}, user_type: {user_asset_type_choice_ui}")
 
109
  final_decision_generate_as_video = False
110
  gemini_suggested_type = scene_data_obj.get('suggested_asset_type_감독', 'image').lower()
 
111
  if user_asset_type_choice_ui == "Image": final_decision_generate_as_video = False
112
  elif user_asset_type_choice_ui == "Video Clip": final_decision_generate_as_video = True
113
  elif user_asset_type_choice_ui == "Auto (Director's Choice)": final_decision_generate_as_video = (gemini_suggested_type == "video_clip")
114
+ logger.debug(f"APP: Final asset type decision: {'Video' if final_decision_generate_as_video else 'Image'}")
115
 
116
  prompt_for_base_img_gen = construct_dalle_prompt(scene_data_obj, st.session_state.project_character_definitions_map, st.session_state.project_global_style_keywords_str)
117
+ prompt_for_video_motion = ""
118
  if final_decision_generate_as_video:
119
+ prompt_for_video_motion = construct_text_to_video_prompt_for_gen4(scene_data_obj, st.session_state.project_global_style_keywords_str)
120
+ if not prompt_for_video_motion: prompt_for_video_motion = scene_data_obj.get('video_clip_motion_description_감독', "subtle ambient cinematic motion"); logger.warning(f"S{scene_data_obj.get('scene_number', scene_idx_num+1)}: Empty motion prompt, default.")
 
121
  if not prompt_for_base_img_gen: logger.error(f"Base image prompt construction failed for S{scene_data_obj.get('scene_number', scene_idx_num+1)}"); return False
122
 
 
123
  while len(st.session_state.project_scene_generation_prompts_list) <= scene_idx_num: st.session_state.project_scene_generation_prompts_list.append("")
124
  while len(st.session_state.project_generated_assets_info_list) <= scene_idx_num: st.session_state.project_generated_assets_info_list.append(None)
125
+ st.session_state.project_scene_generation_prompts_list[scene_idx_num] = prompt_for_video_motion if final_decision_generate_as_video else prompt_for_base_img_gen
 
126
 
127
  filename_base_for_output_asset = f"scene_{scene_data_obj.get('scene_number', scene_idx_num+1)}_asset_v{asset_ver_num}"
128
  duration_for_rwy_vid = scene_data_obj.get('video_clip_duration_estimate_secs_감독', scene_data_obj.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS))
 
130
 
131
  generated_asset_info_dict = st.session_state.visual_content_engine.generate_scene_asset(
132
  image_generation_prompt_text=prompt_for_base_img_gen,
133
+ motion_prompt_text_for_video=prompt_for_video_motion,
134
+ scene_data_dict=scene_data_obj, # CORRECTED
135
  scene_identifier_fn_base=filename_base_for_output_asset,
136
  generate_as_video_clip_flag=final_decision_generate_as_video,
137
  runway_target_dur_val=duration_for_rwy_vid
138
  )
139
  st.session_state.project_generated_assets_info_list[scene_idx_num] = generated_asset_info_dict
140
+ if generated_asset_info_dict and generated_asset_info_dict.get('prompt_used') and st.session_state.project_scene_generation_prompts_list[scene_idx_num] != generated_asset_info_dict['prompt_used']:
 
141
  st.session_state.project_scene_generation_prompts_list[scene_idx_num] = generated_asset_info_dict['prompt_used']
142
 
143
  if generated_asset_info_dict and not generated_asset_info_dict['error'] and generated_asset_info_dict.get('path') and os.path.exists(generated_asset_info_dict['path']):
144
  logger.info(f"APP: Asset ({generated_asset_info_dict.get('type')}) generated for S{scene_data_obj.get('scene_number', scene_idx_num+1)}: {os.path.basename(generated_asset_info_dict['path'])}")
145
  return True
146
  else:
147
+ error_msg_from_asset_gen = generated_asset_info_dict.get('error_message', 'Unknown error') if generated_asset_info_dict else 'Asset result None'
148
+ logger.warning(f"APP: Asset gen FAILED S{scene_data_obj.get('scene_number', scene_idx_num+1)}. Type: {'Video' if final_decision_generate_as_video else 'Image'}. Err: {error_msg_from_asset_gen}")
149
  current_prompt = st.session_state.project_scene_generation_prompts_list[scene_idx_num]
150
  st.session_state.project_generated_assets_info_list[scene_idx_num] = {'path': None, 'type': 'none', 'error': True, 'error_message': error_msg_from_asset_gen, 'prompt_used': current_prompt}
151
  return False
152
 
153
  # --- Sidebar UI ---
154
  with st.sidebar:
155
+ # (Logo and rest of sidebar UI - keeping variables distinct as in last app.py version)
156
+ # Make sure "assets/logo.png" exists in your repository
157
+ if os.path.exists("assets/logo.png"):
158
+ st.image("assets/logo.png", width=150)
159
+ else:
160
+ st.sidebar.markdown("## 🎬 CineGen AI Ultra+") # Fallback title
161
+ logger.warning("assets/logo.png not found. Displaying text title instead.")
162
+
163
+ # ... (rest of sidebar UI as in the last full app.py version, with distinct variable names for UI elements) ...
164
  st.markdown("### Creative Seed")
165
+ ui_user_idea = st.text_area("Core Story Idea / Theme:", "A lone wanderer searches for a mythical oasis in a vast, post-apocalyptic desert, haunted by mirages and mechanical scavengers.", height=100, key="ui_user_idea_unique")
166
+ ui_genre = st.selectbox("Primary Genre:", ["Cyberpunk", "Sci-Fi", "Fantasy", "Noir", "Thriller", "Western", "Post-Apocalyptic", "Historical Drama", "Surreal"], index=6, key="ui_genre_unique")
167
+ ui_mood = st.selectbox("Overall Mood:", ["Hopeful yet Desperate", "Mysterious & Eerie", "Gritty & Tense", "Epic & Awe-Inspiring", "Melancholy & Reflective", "Whimsical & Lighthearted"], index=0, key="ui_mood_unique")
168
+ ui_num_scenes = st.slider("Number of Key Scenes:", 1, 10, 1, key="ui_num_scenes_unique")
169
  ui_creative_guidance_map = {"Standard Director": "standard", "Artistic Visionary": "more_artistic", "Experimental Storyteller": "experimental_narrative"}
170
+ ui_selected_guidance_key = st.selectbox("AI Creative Director Style:", options=list(ui_creative_guidance_map.keys()), key="ui_creative_guidance_unique")
171
  ui_actual_guidance = ui_creative_guidance_map[ui_selected_guidance_key]
172
 
173
+ if st.button("🌌 Generate Cinematic Treatment", type="primary", key="btn_generate_treatment_unique", use_container_width=True):
174
+ initialize_new_project_data_in_session() # Corrected function name
175
  if not ui_user_idea.strip(): st.warning("Please provide a story idea.")
176
  else:
177
+ with st.status("AI Director is envisioning your masterpiece...", expanded=True) as main_status_operation:
178
  try:
179
+ main_status_operation.write("Phase 1: Gemini crafting cinematic treatment... 📜"); logger.info("APP_MAIN_FLOW: Phase 1 - Treatment Gen.")
180
  prompt_for_treatment = create_cinematic_treatment_prompt(ui_user_idea, ui_genre, ui_mood, ui_num_scenes, ui_actual_guidance)
181
  list_of_raw_treatment_scenes = st.session_state.gemini_service_handler.generate_story_breakdown(prompt_for_treatment)
182
  if not isinstance(list_of_raw_treatment_scenes, list) or not list_of_raw_treatment_scenes: raise ValueError("Gemini returned invalid scene list format.")
 
183
  temp_processed_scenes = []
184
  for scene_data_gemini in list_of_raw_treatment_scenes:
185
  gemini_dur_est = scene_data_gemini.get('video_clip_duration_estimate_secs_감독', 0)
 
187
  scene_data_gemini['user_shot_type'] = scene_data_gemini.get('PROACTIVE_camera_work_감독', DEFAULT_SHOT_TYPE)
188
  scene_data_gemini['user_selected_asset_type'] = "Auto (Director's Choice)"
189
  temp_processed_scenes.append(scene_data_gemini)
190
+ st.session_state.project_story_treatment_scenes_list = temp_processed_scenes # Corrected key
191
+ num_scenes_generated = len(st.session_state.project_story_treatment_scenes_list) # Corrected key
192
+ st.session_state.project_scene_generation_prompts_list = [""]*num_scenes_generated # Corrected key
193
+ st.session_state.project_generated_assets_info_list = [None]*num_scenes_generated # Corrected key
194
+ logger.info(f"APP_MAIN_FLOW: Phase 1 complete. {num_scenes_generated} scenes."); main_status_operation.update(label="Treatment complete! ✅ Generating visual assets...", state="running")
195
+ main_status_operation.write("Phase 2: Creating visual assets..."); logger.info("APP_MAIN_FLOW: Phase 2 - Asset Gen.")
 
 
196
  num_successful_assets = 0
197
+ for idx, scene_item_data in enumerate(st.session_state.project_story_treatment_scenes_list): # Corrected key
198
  scene_num_log = scene_item_data.get('scene_number', idx+1)
199
+ main_status_operation.write(f" Processing asset for Scene {scene_num_log}..."); logger.info(f" APP_MAIN_FLOW: Processing asset for Scene {scene_num_log}.")
200
  if generate_asset_for_scene_in_app(idx, scene_item_data, asset_ver_num=1): num_successful_assets += 1
 
201
  status_label_p2 = "Visual assets generated! "; next_state_p2 = "running"
202
+ if num_successful_assets == 0 and num_scenes_generated > 0: logger.error("APP_MAIN_FLOW: Asset gen FAILED for all scenes."); status_label_p2 = "Asset gen FAILED for all scenes."; next_state_p2="error"; main_status_operation.update(label=status_label_p2, state=next_state_p2, expanded=True); st.stop()
203
  elif num_successful_assets < num_scenes_generated: logger.warning(f"APP_MAIN_FLOW: Assets partially generated ({num_successful_assets}/{num_scenes_generated})."); status_label_p2 = f"Assets partially done ({num_successful_assets}/{num_scenes_generated}). "
204
+ main_status_operation.update(label=f"{status_label_p2}Generating narration...", state=next_state_p2)
205
  if next_state_p2 == "error": st.stop()
206
+ main_status_operation.write("Phase 3: Generating narration script..."); logger.info("APP_MAIN_FLOW: Phase 3 - Narration Script.")
 
207
  voice_style_narr = st.session_state.get("selected_voice_style_for_generation", "cinematic_trailer")
208
+ prompt_narr_script = create_narration_script_prompt_enhanced(st.session_state.project_story_treatment_scenes_list, ui_mood, ui_genre, voice_style_narr) # Corrected key
209
+ st.session_state.project_narration_script_text = st.session_state.gemini_service_handler.generate_image_prompt(prompt_narr_script) # Corrected key
210
+ logger.info("APP_MAIN_FLOW: Narration script generated."); main_status_operation.update(label="Narration script ready! Synthesizing voice...", state="running")
 
211
  status_operation.write("Phase 4: Synthesizing voice (ElevenLabs)..."); logger.info("APP_MAIN_FLOW: Phase 4 - Voice Synthesis.")
212
+ st.session_state.project_overall_narration_audio_path = st.session_state.visual_content_engine.generate_narration_audio(st.session_state.project_narration_script_text) # Corrected key
 
213
  final_status_msg = "All initial components ready! Review storyboard. πŸš€"; final_op_status_val = "complete"
214
+ if not st.session_state.project_overall_narration_audio_path: final_status_msg = f"{status_label_p2}Storyboard ready (Voiceover failed/skipped)."; logger.warning("APP_MAIN_FLOW: Narration audio failed/skipped.") # Corrected key
215
  else: logger.info("APP_MAIN_FLOW: Narration audio generated.")
216
  status_operation.update(label=final_status_msg, state=final_op_status_val, expanded=False)
217
  except ValueError as e_val: logger.error(f"APP_MAIN_FLOW: ValueError: {e_val}", exc_info=True); status_operation.update(label=f"Data/Response Error: {e_val}", state="error", expanded=True);
 
219
  except Exception as e_gen: logger.error(f"APP_MAIN_FLOW: Unhandled Exception: {e_gen}", exc_info=True); status_operation.update(label=f"Unexpected Error: {e_gen}", state="error", expanded=True);
220
 
221
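+ # Sidebar expander: named character descriptions, kept in session state and folded into visual prompt construction for consistency across scenes.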
  with st.expander("Define Characters", expanded=False):
222
+ sb_char_name = st.text_input("Character Name", key="sb_char_name_unique_3"); sb_char_desc = st.text_area("Visual Description", key="sb_char_desc_unique_3", height=100)
223
+ if st.button("Save Character", key="sb_add_char_unique_3"):
224
+ if sb_char_name and sb_char_desc: st.session_state.project_character_definitions_map[sb_char_name.strip().lower()] = sb_char_desc.strip(); st.success(f"Character '{sb_char_name.strip()}' saved.") # Corrected key
225
+ else: st.warning("Name and description needed.")
226
+ if st.session_state.project_character_definitions_map: st.caption("Defined Characters:"); [st.markdown(f"**{k.title()}:** _{v}_") for k,v in st.session_state.project_character_definitions_map.items()] # Corrected key
 
227
 
228
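+ # Sidebar expander: a base style preset plus optional custom keywords, stored as one global style string used when building scene visual prompts.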
  with st.expander("Global Style Overrides", expanded=False):
229
+ sb_style_presets = { "Default (Director's Choice)": "", "Hyper-Realistic Gritty Noir": "hyper-realistic gritty neo-noir...", "Surreal Dreamscape Fantasy": "surreal dreamscape...", "Vintage Analog Sci-Fi": "70s/80s analog sci-fi..."}
230
+ sb_selected_preset = st.selectbox("Base Style Preset:", options=list(sb_style_presets.keys()), key="sb_style_preset_unique_3")
231
+ sb_custom_keywords = st.text_area("Additional Custom Style Keywords:", key="sb_custom_style_unique_3", height=80)
232
+ sb_current_global_style = st.session_state.project_global_style_keywords_str # Corrected key
233
+ if st.button("Apply Global Styles", key="sb_apply_styles_unique_3"):
234
+ final_style = sb_style_presets[sb_selected_preset];
235
+ if sb_custom_keywords.strip(): final_style = f"{final_style}, {sb_custom_keywords.strip()}" if final_style else sb_custom_keywords.strip()
236
+ st.session_state.project_global_style_keywords_str = final_style.strip(); sb_current_global_style = final_style.strip() # Corrected key
237
+ if sb_current_global_style: st.success("Global styles applied!")
238
+ else: st.info("Global style additions cleared.")
239
+ if sb_current_global_style: st.caption(f"Active: \"{sb_current_global_style}\"")
 
240
 
241
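+ # Sidebar expander: lets the user override the ElevenLabs narrator voice ID and pick the narration script style used in Phase 3.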
  with st.expander("Voice & Narration Style", expanded=False):
242
+ sb_engine_default_voice = "Rachel"
243
+ if hasattr(st.session_state, 'visual_content_engine') and st.session_state.visual_content_engine: sb_engine_default_voice = st.session_state.visual_content_engine.elevenlabs_voice_id
244
+ sb_user_voice_id = st.text_input("ElevenLabs Voice ID (override):", value=sb_engine_default_voice, key="sb_el_voice_id_override_unique_3")
245
+ sb_narration_styles = {"Cinematic Trailer": "cinematic_trailer", "Neutral Documentary": "documentary_neutral", "Character Introspection": "introspective_character"}
246
+ sb_selected_narr_style = st.selectbox("Narration Script Style:", list(sb_narration_styles.keys()), key="sb_narr_style_sel_unique_3", index=0)
247
+ if st.button("Set Narrator Voice & Style", key="sb_set_voice_btn_unique_3"):
248
+ final_el_voice_id = sb_user_voice_id.strip() or st.session_state.get("CONFIG_ELEVENLABS_VOICE_ID", "Rachel")
249
+ if hasattr(st.session_state, 'visual_content_engine'): st.session_state.visual_content_engine.elevenlabs_voice_id = final_el_voice_id
250
+ st.session_state.selected_voice_style_for_generation = sb_narration_styles[sb_selected_narr_style]
251
+ st.success(f"Narrator Voice ID: {final_el_voice_id}. Script Style: {sb_selected_narr_style}")
252
+ logger.info(f"User updated 11L Voice ID: {final_el_voice_id}, Narration Script Style: {sb_selected_narr_style}")
253
+
254
 
255
 # --- Main Content Area ---
 st.header("🎬 Cinematic Storyboard & Treatment")
 if st.session_state.project_narration_script_text:
 with st.expander("πŸ“œ View Full Narration Script", expanded=False): st.markdown(f"> _{st.session_state.project_narration_script_text}_")

+ if not st.session_state.project_story_treatment_scenes_list:
+ st.info("Use the sidebar to generate your cinematic treatment.")
 else:
+ # Main display loop below reads the project_* session-state lists initialized during generation:
+ # project_story_treatment_scenes_list, project_scene_generation_prompts_list, project_generated_assets_info_list.
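+ # Per-scene layout: treatment details and shot/pacing/asset controls in the left column, the generated asset (image or video) in the right column, followed by edit popovers for the treatment and the visual prompt.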
+ for i_main_loop_content, scene_content_item_display in enumerate(st.session_state.project_story_treatment_scenes_list):
+ scene_num_for_display = scene_content_item_display.get('scene_number', i_main_loop_content + 1)
+ scene_title_for_display_main = scene_content_item_display.get('scene_title', 'Untitled Scene')
+ key_base_main_area_widgets = f"s{scene_num_for_display}_main_widgets_loop_{i_main_loop_content}" # Ensure unique keys
+
+ if "director_note" in scene_content_item_display and scene_content_item_display['director_note']: st.info(f"🎬 Director Note S{scene_num_for_display}: {scene_content_item_display['director_note']}")
 st.subheader(f"SCENE {scene_num_for_display}: {scene_title_for_display_main.upper()}"); main_col_treatment_area, main_col_visual_area = st.columns([0.45, 0.55])

 with main_col_treatment_area:
 with st.expander("πŸ“ Scene Treatment & Controls", expanded=True):
+ st.markdown(f"**Beat:** {scene_content_item_display.get('emotional_beat', 'N/A')}"); st.markdown(f"**Setting:** {scene_content_item_display.get('setting_description', 'N/A')}"); st.markdown(f"**Chars:** {', '.join(scene_content_item_display.get('characters_involved', ['N/A']))}"); st.markdown(f"**Focus Moment:** _{scene_content_item_display.get('character_focus_moment', 'N/A')}_"); st.markdown(f"**Plot Beat:** {scene_content_item_display.get('key_plot_beat', 'N/A')}"); st.markdown(f"**Dialogue Hook:** `\"{scene_content_item_display.get('suggested_dialogue_hook', '...')}\"`"); st.markdown("---"); st.markdown(f"**Dir. Visual Style:** _{scene_content_item_display.get('PROACTIVE_visual_style_감독', 'N/A')}_"); st.markdown(f"**Dir. Camera:** _{scene_content_item_display.get('PROACTIVE_camera_work_감독', 'N/A')}_"); st.markdown(f"**Dir. Sound:** _{scene_content_item_display.get('PROACTIVE_sound_design_감독', 'N/A')}_"); st.markdown("---")
 st.markdown("##### Shot, Pacing & Asset Controls")
+ ui_shot_type_current = st.session_state.project_story_treatment_scenes_list[i_main_loop_content].get('user_shot_type', DEFAULT_SHOT_TYPE)
 try: ui_shot_type_idx_val = SHOT_TYPES_OPTIONS.index(ui_shot_type_current)
 except ValueError: ui_shot_type_idx_val = SHOT_TYPES_OPTIONS.index(DEFAULT_SHOT_TYPE)
 ui_shot_type_new = st.selectbox("Dominant Shot Type:", options=SHOT_TYPES_OPTIONS, index=ui_shot_type_idx_val, key=f"shot_type_{key_base_main_area_widgets}")
+ if ui_shot_type_new != ui_shot_type_current: st.session_state.project_story_treatment_scenes_list[i_main_loop_content]['user_shot_type'] = ui_shot_type_new
+ ui_duration_current = st.session_state.project_story_treatment_scenes_list[i_main_loop_content].get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)
 ui_duration_new = st.number_input("Scene Duration (seconds):", min_value=1, max_value=300, value=ui_duration_current, step=1, key=f"duration_{key_base_main_area_widgets}")
+ if ui_duration_new != ui_duration_current: st.session_state.project_story_treatment_scenes_list[i_main_loop_content]['user_scene_duration_secs'] = ui_duration_new
+ ui_asset_type_override_current = st.session_state.project_story_treatment_scenes_list[i_main_loop_content].get('user_selected_asset_type', "Auto (Director's Choice)")
 try: ui_asset_type_idx_val = ASSET_TYPE_OPTIONS.index(ui_asset_type_override_current)
 except ValueError: ui_asset_type_idx_val = 0
 ui_asset_type_override_new = st.selectbox("Asset Type Override:", ASSET_TYPE_OPTIONS, index=ui_asset_type_idx_val, key=f"asset_type_{key_base_main_area_widgets}", help="Choose asset type. 'Auto' uses AI suggestion.")
+ if ui_asset_type_override_new != ui_asset_type_override_current: st.session_state.project_story_treatment_scenes_list[i_main_loop_content]['user_selected_asset_type'] = ui_asset_type_override_new
 st.markdown("---")
+ prompt_for_asset_to_display = st.session_state.project_scene_generation_prompts_list[i_main_loop_content] if i_main_loop_content < len(st.session_state.project_scene_generation_prompts_list) else None
 if prompt_for_asset_to_display:
 with st.popover("πŸ‘οΈ View Asset Generation Prompt"): st.markdown(f"**Prompt used for current asset:**"); st.code(prompt_for_asset_to_display, language='text')
+ pexels_query_to_display = scene_content_item_display.get('pexels_search_query_감독', None)
 if pexels_query_to_display: st.caption(f"Pexels Fallback: `{pexels_query_to_display}`")

 with main_col_visual_area:
+ current_asset_info_to_display = st.session_state.project_generated_assets_info_list[i_main_loop_content] if i_main_loop_content < len(st.session_state.project_generated_assets_info_list) else None
 if current_asset_info_to_display and not current_asset_info_to_display.get('error') and current_asset_info_to_display.get('path') and os.path.exists(current_asset_info_to_display['path']):
 path_of_asset_for_display = current_asset_info_to_display['path']; type_of_asset_for_display = current_asset_info_to_display.get('type', 'image')
 if type_of_asset_for_display == 'image': st.image(path_of_asset_for_display, caption=f"S{scene_num_for_display} ({type_of_asset_for_display}): {scene_title_for_display_main}")
 elif type_of_asset_for_display == 'video':
 try: st.video(path_of_asset_for_display, format="video/mp4", start_time=0)
 except Exception as e_vid_display_main_loop: st.error(f"Error displaying video {path_of_asset_for_display}: {e_vid_display_main_loop}"); logger.error(f"Error displaying video: {e_vid_display_main_loop}", exc_info=True)
 else: st.warning(f"Unknown asset type '{type_of_asset_for_display}' for S{scene_num_for_display}.")
 else:
+ if st.session_state.project_story_treatment_scenes_list: # Corrected key
+ error_msg_for_asset_display = current_asset_info_to_display.get('error_message', 'Visual pending or failed.') if current_asset_info_to_display else 'Visual pending or failed.'
+ st.caption(error_msg_for_asset_display)

  with st.popover(f"✏️ Edit S{scene_num_for_display} Treatment"):
319
+ feedback_input_for_treatment_regen = st.text_area("Changes to treatment:", key=f"treat_fb_input_pop_{widget_key_base_main_area}", height=150)
 
320
  if st.button(f"πŸ”„ Update S{scene_num_for_display} Treatment", key=f"regen_treat_btn_pop_{widget_key_base_main_area}"):
321
+ if feedback_input_for_treatment_regen:
322
  with st.status(f"Updating S{scene_num_for_display} Treatment & Asset...", expanded=True) as status_treatment_update_op_pop:
323
+ preserved_user_shot_type = st.session_state.project_story_treatment_scenes_list[i_loop_main_display]['user_shot_type'] # Corrected
324
+ preserved_user_duration = st.session_state.project_story_treatment_scenes_list[i_loop_main_display]['user_scene_duration_secs'] # Corrected
325
+ preserved_user_asset_type = st.session_state.project_story_treatment_scenes_list[i_loop_main_display]['user_selected_asset_type'] # Corrected
326
+ prompt_for_gemini_scene_regen_pop = create_scene_regeneration_prompt(scene_item_for_display, feedback_input_for_treatment_regen, st.session_state.project_story_treatment_scenes_list) # Corrected
327
  try:
328
  updated_scene_data_from_gemini_pop = st.session_state.gemini_service_handler.regenerate_scene_script_details(prompt_for_gemini_scene_regen_pop)
329
  final_merged_updated_scene_data_pop = {**updated_scene_data_from_gemini_pop}
330
+ final_merged_updated_scene_data_pop['user_shot_type'] = preserved_user_shot_type; final_merged_updated_scene_data_pop['user_scene_duration_secs'] = preserved_user_duration; final_merged_updated_scene_data_pop['user_selected_asset_type'] = preserved_user_asset_type
331
+ st.session_state.project_story_treatment_scenes_list[i_loop_main_display] = final_merged_updated_scene_data_pop # Corrected
332
  status_treatment_update_op_pop.update(label="Treatment updated! Regenerating asset...", state="running")
333
  version_for_regenerated_asset_pop = 1
334
  if current_asset_info_to_display and current_asset_info_to_display.get('path') and os.path.exists(current_asset_info_to_display['path']):
335
  try: base_fn_asset_regen_pop,_=os.path.splitext(os.path.basename(current_asset_info_to_display['path'])); version_for_regenerated_asset_pop = int(base_fn_asset_regen_pop.split('_v')[-1])+1 if '_v' in base_fn_asset_regen_pop else 2
336
  except: version_for_regenerated_asset_pop = 2
337
+ if generate_asset_for_scene_in_app(i_loop_main_display, final_merged_updated_scene_data_pop, asset_ver_num=version_for_regenerated_asset_pop, user_selected_asset_type_override=preserved_user_asset_type): status_treatment_update_op_pop.update(label="Treatment & Asset Updated! πŸŽ‰", state="complete", expanded=False)
338
  else: status_treatment_update_op_pop.update(label="Treatment updated, but asset regeneration failed.", state="complete", expanded=False)
339
  st.rerun()
340
  except Exception as e_treatment_regen_op_pop: status_treatment_update_op_pop.update(label=f"Error during treatment regen: {e_treatment_regen_op_pop}", state="error"); logger.error(f"Scene treatment regeneration error: {e_treatment_regen_op_pop}", exc_info=True)
341
  else: st.warning("Please provide feedback to update the treatment.")
342
 
343
  with st.popover(f"🎨 Edit S{scene_num_for_display} Visual Prompt/Asset"):
344
+ prompt_to_edit_display_pop = st.session_state.project_scene_generation_prompts_list[i_loop_main_display] if i_loop_main_display < len(st.session_state.project_scene_generation_prompts_list) else "No prompt." # Corrected
 
345
  st.caption("Current Asset Generation Prompt:"); st.code(prompt_to_edit_display_pop, language='text')
346
+ feedback_for_visual_asset_regen_input = st.text_area("Describe changes for visual asset:", key=f"visual_fb_input_pop_{widget_key_base_main_area}", height=150)
347
  if st.button(f"πŸ”„ Update S{scene_num_for_display} Asset", key=f"regen_visual_btn_pop_{widget_key_base_main_area}"):
348
  if feedback_for_visual_asset_regen_input:
349
+ with st.status(f"Refining prompt & asset for S{scene_num_for_display}...", expanded=True) as status_visual_asset_regen_op_pop:
350
+ user_selected_asset_type_for_regen_pop = st.session_state.project_story_treatment_scenes_list[i_loop_main_display]['user_selected_asset_type'] # Corrected
351
  is_video_type_for_regen_pop = (user_selected_asset_type_for_regen_pop == "Video Clip") or (user_selected_asset_type_for_regen_pop == "Auto (Director's Choice)" and scene_item_for_display.get('suggested_asset_type_감독') == 'video_clip')
352
  newly_constructed_asset_prompt_regen_pop = ""
353
+ if not is_video_type_for_regen_pop:
354
+ gemini_refinement_prompt_viz_pop = create_visual_regeneration_prompt(prompt_to_edit_display_pop, feedback_for_visual_asset_regen_input, scene_item_for_display, st.session_state.project_character_definitions_map, st.session_state.project_global_style_keywords_str) # Corrected
355
+ try: newly_constructed_asset_prompt_regen_pop = st.session_state.gemini_service_handler.refine_image_prompt_from_feedback(gemini_refinement_prompt_viz_pop); st.session_state.project_scene_generation_prompts_list[i_loop_main_display] = newly_constructed_asset_prompt_regen_pop; status_visual_asset_regen_op_pop.update(label="Image prompt refined! Regenerating asset...", state="running") # Corrected
356
+ except Exception as e_gemini_refine_viz_pop: status_visual_asset_regen_op_pop.update(label=f"Error refining prompt: {e_gemini_refine_viz_pop}", state="error"); logger.error(f"Visual prompt refinement error: {e_gemini_refine_viz_pop}", exc_info=True); continue
357
+ else:
358
+ newly_constructed_asset_prompt_regen_pop = construct_text_to_video_prompt_for_gen4(scene_item_for_display, st.session_state.project_global_style_keywords_str); st.session_state.project_scene_generation_prompts_list[i_loop_main_display] = newly_constructed_asset_prompt_regen_pop; status_visual_asset_regen_op_pop.update(label="Video prompt reconstructed! Regenerating asset...", state="running") # Corrected
359
+ if not newly_constructed_asset_prompt_regen_pop: status_visual_asset_regen_op_pop.update(label="Prompt construction failed.", state="error"); continue
 
360
  version_for_regenerated_visual_asset_pop = 1
361
  if current_asset_info_to_display and current_asset_info_to_display.get('path') and os.path.exists(current_asset_info_to_display['path']):
362
  try: base_fn_viz_regen_pop,_=os.path.splitext(os.path.basename(current_asset_info_to_display['path'])); version_for_regenerated_visual_asset_pop = int(base_fn_viz_regen_pop.split('_v')[-1])+1 if '_v' in base_fn_viz_regen_pop else 2
363
  except: version_for_regenerated_visual_asset_pop = 2
364
+ if generate_asset_for_scene_in_app(i_loop_main_display, st.session_state.project_story_treatment_scenes_list[i_loop_main_display], asset_ver_num=version_for_regenerated_visual_asset_pop, user_selected_asset_type_override=user_selected_asset_type_for_regen_pop): status_visual_asset_regen_op_pop.update(label="Asset Updated! πŸŽ‰", state="complete", expanded=False) # Corrected
365
  else: status_visual_asset_regen_op_pop.update(label="Prompt updated, asset regen failed.", state="complete", expanded=False)
366
  st.rerun()
367
  else: st.warning("Please provide feedback for visual asset regeneration.")
368
  st.markdown("---")
369
 
370
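+ # Animatic assembly: gather every scene that has a valid generated asset (with its user-set duration) and pass the list to the visual engine for rendering.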
+ if st.session_state.project_story_treatment_scenes_list and any(asset_info_item_vid_assembly and not asset_info_item_vid_assembly.get('error') and asset_info_item_vid_assembly.get('path') for asset_info_item_vid_assembly in st.session_state.project_generated_assets_info_list if asset_info_item_vid_assembly is not None): # Corrected
+ if st.button("🎬 Assemble Narrated Cinematic Animatic", key="assemble_video_main_area_btn_final_unique_2", type="primary", use_container_width=True):
+ with st.status("Assembling Ultra Animatic...", expanded=True) as status_video_assembly_final_op:
 assets_for_final_video_assembly_list_main = []
+ for i_vid_assembly_main_loop, scene_data_for_vid_assembly_main in enumerate(st.session_state.project_story_treatment_scenes_list): # Corrected
+ asset_info_current_scene_for_vid = st.session_state.project_generated_assets_info_list[i_vid_assembly_main_loop] if i_vid_assembly_main_loop < len(st.session_state.project_generated_assets_info_list) else None # Corrected
+ if asset_info_current_scene_for_vid and not asset_info_current_scene_for_vid.get('error') and asset_info_current_scene_for_vid.get('path') and os.path.exists(asset_info_current_scene_for_vid['path']):
+ assets_for_final_video_assembly_list_main.append({'path': asset_info_current_scene_for_vid['path'], 'type': asset_info_current_scene_for_vid.get('type', 'image'), 'scene_num': scene_data_for_vid_assembly_main.get('scene_number', i_vid_assembly_main_loop + 1), 'key_action': scene_data_for_vid_assembly_main.get('key_plot_beat', ''), 'duration': scene_data_for_vid_assembly_main.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)})
+ status_video_assembly_final_op.write(f"Adding S{scene_data_for_vid_assembly_main.get('scene_number', i_vid_assembly_main_loop + 1)} ({asset_info_current_scene_for_vid.get('type')}).")
 else: logger.warning(f"Skipping S{scene_data_for_vid_assembly_main.get('scene_number', i_vid_assembly_main_loop+1)} for video: No valid asset.")
 if assets_for_final_video_assembly_list_main:
 status_video_assembly_final_op.write("Calling video engine..."); logger.info("APP: Calling visual_engine.assemble_animatic_from_assets")
+ st.session_state.project_final_video_path = st.session_state.visual_content_engine.assemble_animatic_from_assets(asset_data_list=assets_for_final_video_assembly_list_main, overall_narration_path=st.session_state.project_overall_narration_audio_path, output_filename="cinegen_ultra_animatic.mp4", fps=24) # Corrected
+ if st.session_state.project_final_video_path and os.path.exists(st.session_state.project_final_video_path): status_video_assembly_final_op.update(label="Ultra animatic assembled! πŸŽ‰", state="complete", expanded=False); st.balloons() # Corrected
 else: status_video_assembly_final_op.update(label="Video assembly failed. Check logs.", state="error", expanded=True); logger.error("APP: Video assembly returned None or file does not exist.")
 else: status_video_assembly_final_op.update(label="No valid assets for video assembly.", state="error", expanded=True); logger.warning("APP: No valid assets found for video assembly.")
+ elif st.session_state.project_story_treatment_scenes_list: st.info("Generate visual assets for your scenes before attempting to assemble the animatic.") # Corrected

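+ # Playback and download of the assembled animatic, shown only once the rendered file exists on disk.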
+ if st.session_state.project_final_video_path and os.path.exists(st.session_state.project_final_video_path): # Corrected
 st.header("🎬 Generated Cinematic Animatic")
 try:
+ with open(st.session_state.project_final_video_path, 'rb') as final_video_file_obj_display: final_video_bytes_for_display = final_video_file_obj_display.read() # Corrected
 st.video(final_video_bytes_for_display, format="video/mp4")
+ st.download_button(label="Download Ultra Animatic", data=final_video_bytes_for_display, file_name=os.path.basename(st.session_state.project_final_video_path), mime="video/mp4", use_container_width=True, key="download_video_main_area_btn_final_unique_2" ) # Corrected
 except Exception as e_final_video_display_op_main: st.error(f"Error displaying final animatic video: {e_final_video_display_op_main}"); logger.error(f"Error displaying final animatic video: {e_final_video_display_op_main}", exc_info=True)

 st.sidebar.markdown("---"); st.sidebar.caption("CineGen AI Ultra+ | Visionary Cinematic Pre-Production")