import json
import os
import re

from dotenv import load_dotenv
from g4f.client import Client
from google import genai
from google.genai.types import GenerateContentConfig

load_dotenv()
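
# Expected .env contents (illustrative values; the model name below is an
# assumption, use whichever Gemini model your key can access):
#   GEMINI_API_KEY=your-api-key
#   GEMINI_MODEL=gemini-2.0-flash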

class Model:
    def __init__(self):
        # API key and model name are read from the environment (.env file).
        self.gemini_api_key = os.getenv("GEMINI_API_KEY")
        self.gemini_model = os.getenv("GEMINI_MODEL")
        self.client = genai.Client(api_key=self.gemini_api_key)

    def fall_back_llm(self, prompt):
        """Fallback method using gpt-4o-mini (via g4f) when Gemini fails."""
        try:
            response = Client().chat.completions.create(
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": prompt}],
                web_search=False,
            )
            return response.choices[0].message.content
        except Exception as e:
            return f"Both primary and fallback models failed. Error: {str(e)}"

    def send_message_openrouter(self, prompt):
        """Send a plain prompt to Gemini, falling back to gpt-4o-mini on failure."""
        try:
            response = self.client.models.generate_content(
                model=self.gemini_model,
                contents=prompt,
            )
            return response.text
        except Exception as e:
            print(f"Gemini failed: {str(e)}. Trying fallback model...")
            return self.fall_back_llm(prompt)

    def llm(self, prompt, query):
        """Answer a user query under the DermaAI system instruction."""
        combined_content = f"{prompt}\n\n{query}"
        try:
            response = self.client.models.generate_content(
                model=self.gemini_model,
                contents=combined_content,
                config=GenerateContentConfig(
                    system_instruction=[
                        "You're Mr DermaAI, a friendly AI-based dermatologist.",
                        "Your mission is to help people based on their queries.",
                    ]
                ),
            )
            return response.text
        except Exception as e:
            print(f"Gemini failed: {str(e)}. Trying fallback model...")
            return self.fall_back_llm(combined_content)

    def llm_image(self, text, image):
        """Answer a text query about an image."""
        try:
            response = self.client.models.generate_content(
                model=self.gemini_model,
                contents=[image, text],
            )
            return response.text
        except Exception as e:
            # No g4f fallback here: the text-only fallback cannot accept an image.
            print(f"Error in llm_image: {str(e)}")
            return f"Error: {str(e)}"

    def clean_json_response(self, response_text):
        """Extract the JSON array from the model's response."""
        start = response_text.find('[')
        end = response_text.rfind(']')
        # Check rfind's result before adding 1, otherwise a missing ']'
        # (rfind returns -1) would slip past the -1 check below.
        if start != -1 and end != -1:
            # Strip a trailing comma before the closing bracket so json.loads succeeds.
            return re.sub(r",\s*]", "]", response_text[start:end + 1])
        return response_text
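
    # Worked example (hypothetical model output):
    #   clean_json_response('Here is the plan:\n[{"day": 1, "routine": "cleanse"},]')
    #   -> '[{"day": 1, "routine": "cleanse"}]'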

    def skinScheduler(self, prompt, max_retries=3):
        """Generate a skincare schedule as parsed JSON, with retries and cleaning."""
        for attempt in range(max_retries):
            try:
                response = self.client.models.generate_content(
                    model=self.gemini_model,
                    contents=prompt,
                )
                cleaned_response = self.clean_json_response(response.text)
                return json.loads(cleaned_response)
            except json.JSONDecodeError:
                if attempt == max_retries - 1:
                    # All Gemini retries produced invalid JSON; try the fallback model.
                    print(f"Gemini failed to produce valid JSON after {max_retries} retries. Trying fallback model...")
                    fallback_response = self.fall_back_llm(prompt)
                    try:
                        cleaned_fallback = self.clean_json_response(fallback_response)
                        return json.loads(cleaned_fallback)
                    except json.JSONDecodeError:
                        return {"error": "Both models failed to produce valid JSON"}
            except Exception as e:
                # For other exceptions, go directly to the fallback model.
                print(f"Gemini API Error: {str(e)}. Trying fallback model...")
                fallback_response = self.fall_back_llm(prompt)
                try:
                    cleaned_fallback = self.clean_json_response(fallback_response)
                    return json.loads(cleaned_fallback)
                except json.JSONDecodeError:
                    return {"error": "Both models failed to produce valid JSON"}
        return {"error": "Max retries reached"}