Update core/gemini_handler.py
core/gemini_handler.py  +37 -0
CHANGED
@@ -0,0 +1,37 @@
+# core/gemini_handler.py
+import google.generativeai as genai
+import json
+import re  # For cleaning JSON output
+
+class GeminiHandler:
+    def __init__(self, api_key):
+        genai.configure(api_key=api_key)
+        self.model = genai.GenerativeModel('gemini-1.5-flash-latest')  # Or your preferred Gemini model
+
+    def _clean_json_response(self, text_response):
+        # Gemini might sometimes wrap JSON in ```json ... ```
+        match = re.search(r"```json\s*([\s\S]*?)\s*```", text_response)
+        if match:
+            return match.group(1).strip()
+        return text_response.strip()
+
+    def generate_story_breakdown(self, prompt_text):
+        try:
+            response = self.model.generate_content(prompt_text)
+            cleaned_response = self._clean_json_response(response.text)
+            # print(f"Debug: Cleaned Story Breakdown JSON: {cleaned_response}")  # For debugging
+            story_scenes = json.loads(cleaned_response)
+            return story_scenes
+        except Exception as e:
+            print(f"Error generating story breakdown: {e}")
+            print(f"Problematic Gemini Response: {response.text if 'response' in locals() else 'No response object'}")
+            return None
+
+    def generate_image_prompt(self, prompt_text):
+        try:
+            response = self.model.generate_content(prompt_text)
+            # Image prompts are usually just text, no need for JSON cleaning unless specified
+            return response.text.strip()
+        except Exception as e:
+            print(f"Error generating image prompt: {e}")
+            return None
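For context, a minimal usage sketch of the new class. This is not part of the change itself: the `GEMINI_API_KEY` environment variable, the prompt wording, and the expected JSON scene shape are illustrative assumptions, and it presumes `core/` is importable as a package.

```python
# Hypothetical usage sketch for GeminiHandler (not part of this diff).
# GEMINI_API_KEY and the prompt text below are placeholder assumptions.
import os

from core.gemini_handler import GeminiHandler

handler = GeminiHandler(api_key=os.environ["GEMINI_API_KEY"])

# generate_story_breakdown() strips any ```json ... ``` fencing from the
# model output, parses it with json.loads, and returns None on failure.
scenes = handler.generate_story_breakdown(
    "Break the following story into 3 scenes. Respond only with a JSON array "
    "of objects with 'title' and 'description' keys: "
    "A lighthouse keeper befriends a lost whale."
)

if scenes:
    for scene in scenes:
        # generate_image_prompt() returns plain text (or None on error).
        image_prompt = handler.generate_image_prompt(
            f"Write a single vivid image-generation prompt for this scene: {scene}"
        )
        print(image_prompt)
```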