mgbam committed
Commit 4afbb07 · verified · 1 Parent(s): 6668867

Update core/gemini_handler.py

Files changed (1):
  1. core/gemini_handler.py +49 -21
core/gemini_handler.py CHANGED
@@ -1,7 +1,7 @@
 # core/gemini_handler.py
 import google.generativeai as genai
 import json
-import re # For cleaning JSON output
+import re
 
 class GeminiHandler:
     def __init__(self, api_key):
@@ -9,29 +9,57 @@ class GeminiHandler:
         self.model = genai.GenerativeModel('gemini-1.5-flash-latest') # Or your preferred Gemini model
 
     def _clean_json_response(self, text_response):
-        # Gemini might sometimes wrap JSON in ```json ... ```
-        match = re.search(r"```json\s*([\s\S]*?)\s*```", text_response)
+        # Attempt to find JSON within backticks or directly
+        match = re.search(r"```json\s*([\s\S]*?)\s*```", text_response, re.DOTALL)
         if match:
-            return match.group(1).strip()
-        return text_response.strip()
+            json_str = match.group(1).strip()
+        else:
+            # Try to find the start of a list or object
+            json_start_list = text_response.find('[')
+            json_start_obj = text_response.find('{')
 
-    def generate_story_breakdown(self, prompt_text):
-        try:
-            response = self.model.generate_content(prompt_text)
-            cleaned_response = self._clean_json_response(response.text)
-            # print(f"Debug: Cleaned Story Breakdown JSON: {cleaned_response}") # For debugging
-            story_scenes = json.loads(cleaned_response)
-            return story_scenes
-        except Exception as e:
-            print(f"Error generating story breakdown: {e}")
-            print(f"Problematic Gemini Response: {response.text if 'response' in locals() else 'No response object'}")
-            return None
+            if json_start_list != -1 and (json_start_obj == -1 or json_start_list < json_start_obj):
+                json_str = text_response[json_start_list:]
+            elif json_start_obj != -1:
+                json_str = text_response[json_start_obj:]
+            else:
+                return text_response # Not clearly JSON
+
+        # Remove trailing characters that might break parsing if JSON is incomplete
+        # This is a bit aggressive, might need refinement
+        # Find last '}' or ']'
+        last_bracket = max(json_str.rfind('}'), json_str.rfind(']'))
+        if last_bracket != -1:
+            json_str = json_str[:last_bracket+1]
+        return json_str.strip()
 
-    def generate_image_prompt(self, prompt_text):
+    def _execute_gemini_call(self, prompt_text, expect_json=False):
         try:
             response = self.model.generate_content(prompt_text)
-            # Image prompts are usually just text, no need for JSON cleaning unless specified
-            return response.text.strip()
+            text_content = response.text
+            if expect_json:
+                cleaned_response = self._clean_json_response(text_content)
+                # print(f"DEBUG: Cleaned JSON for prompt:\n{prompt_text[:200]}...\nResponse:\n{cleaned_response}") # Debug
+                return json.loads(cleaned_response)
+            return text_content.strip()
+        except json.JSONDecodeError as e:
+            print(f"Error decoding JSON from Gemini response: {e}")
+            print(f"Problematic Gemini Raw Response:\n{text_content if 'text_content' in locals() else 'No response object'}")
+            print(f"Cleaned attempt was:\n{cleaned_response if 'cleaned_response' in locals() else 'N/A'}")
+            raise # Re-raise to be caught by caller
         except Exception as e:
-            print(f"Error generating image prompt: {e}")
-            return None
+            print(f"Error in Gemini call: {e}")
+            print(f"Problematic Gemini Raw Response (if available):\n{response.text if 'response' in locals() else 'No response object'}")
+            raise # Re-raise
+
+    def generate_story_breakdown(self, prompt_text):
+        return self._execute_gemini_call(prompt_text, expect_json=True)
+
+    def generate_image_prompt(self, prompt_text): # This is for generating a new image prompt string
+        return self._execute_gemini_call(prompt_text, expect_json=False)
+
+    def regenerate_scene_script_details(self, prompt_text): # Expects JSON for a single scene
+        return self._execute_gemini_call(prompt_text, expect_json=True)
+
+    def regenerate_image_prompt_from_feedback(self, prompt_text): # Expects a string (the new prompt)
+        return self._execute_gemini_call(prompt_text, expect_json=False)
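
For reviewers, a minimal caller sketch (not part of this commit) of how the refactored handler might be driven; the GEMINI_API_KEY environment variable and the prompt wording below are assumptions, not code from this repository. The relevant behavioral change is that _execute_gemini_call now re-raises failures (including json.JSONDecodeError) instead of returning None, so recovery belongs to the caller.

# Hypothetical usage sketch; GEMINI_API_KEY and the prompt strings are assumptions.
import os

from core.gemini_handler import GeminiHandler

handler = GeminiHandler(api_key=os.environ["GEMINI_API_KEY"])

breakdown_prompt = (
    "Break this story idea into scenes and reply ONLY with a JSON list of "
    "objects, each with 'scene_number' and 'description': a lighthouse keeper "
    "finds a message in a bottle."
)

try:
    # expect_json=True path: the response text is cleaned, then json.loads()-ed.
    scenes = handler.generate_story_breakdown(breakdown_prompt)
    for scene in scenes:
        print(scene.get("scene_number"), scene.get("description"))

    # expect_json=False path: the stripped response text is returned as a string.
    image_prompt = handler.generate_image_prompt(
        f"Write one vivid image-generation prompt for this scene: {scenes[0]}"
    )
    print(image_prompt)
except Exception as e:
    # json.JSONDecodeError and API errors now propagate out of the handler,
    # so retries, fallbacks, or user-facing messages are decided here.
    print(f"Gemini call failed: {e}")

Centralizing the try/except and JSON cleanup in _execute_gemini_call keeps the four public methods as one-line wrappers that differ only in expect_json.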