import json
import os
import re

from dotenv import load_dotenv
from g4f.client import Client
from google import genai
from google.genai.types import GenerateContentConfig

load_dotenv()
class Model:
    """Wrapper around the Gemini API with a g4f gpt-4o-mini fallback."""

    def __init__(self):
        self.gemini_api_key = os.getenv("GEMINI_API_KEY")
        self.gemini_model = os.getenv("GEMINI_MODEL")
        self.client = genai.Client(api_key=self.gemini_api_key)
    def fall_back_llm(self, prompt):
        """Fallback method using gpt-4o-mini (via g4f) when Gemini fails."""
        try:
            response = Client().chat.completions.create(
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": prompt}],
                web_search=False,
            )
            return response.choices[0].message.content
        except Exception as e:
            return f"Both primary and fallback models failed. Error: {str(e)}"
    def send_message_openrouter(self, prompt):
        """Send a plain prompt to Gemini (despite the name, this does not call
        OpenRouter), falling back to gpt-4o-mini on failure."""
        try:
            response = self.client.models.generate_content(
                model=self.gemini_model,
                contents=prompt,
            )
            return response.text
        except Exception as e:
            print(f"Gemini failed: {str(e)}. Trying fallback model...")
            return self.fall_back_llm(prompt)
    def llm(self, prompt, query):
        """Answer a user query under the DermaAI persona, with fallback."""
        try:
            combined_content = f"{prompt}\n\n{query}"
            response = self.client.models.generate_content(
                model=self.gemini_model,
                contents=combined_content,
                config=GenerateContentConfig(
                    system_instruction=[
                        "You are Mr DermaAI, a friendly AI-based dermatologist.",
                        "Your mission is to help people based on their queries.",
                    ]
                ),
            )
            return response.text
        except Exception as e:
            print(f"Gemini failed: {str(e)}. Trying fallback model...")
            return self.fall_back_llm(f"{prompt}\n\n{query}")
    def llm_image(self, text, image):
        """Send an image plus a text prompt to Gemini."""
        try:
            response = self.client.models.generate_content(
                model=self.gemini_model,
                contents=[image, text],
            )
            return response.text
        except Exception as e:
            print(f"Error in llm_image: {str(e)}")
            return f"Error: {str(e)}"
    def clean_json_response(self, response_text):
        """Clean the model's response to extract valid JSON."""
        start = response_text.find('[')
        end = response_text.rfind(']') + 1  # 0 when ']' is absent, so test end > start
        if start != -1 and end > start:
            # Drop trailing commas before a closing bracket, which break json.loads.
            json_str = re.sub(r",\s*]", "]", response_text[start:end])
            return json_str
        return response_text
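
    # Illustrative behavior of clean_json_response (hypothetical strings, not
    # from the app): surrounding prose is stripped and the trailing comma
    # removed, so json.loads can parse the result.
    #   clean_json_response('Sure! [{"day": 1, "step": "cleanser"},] Done.')
    #   -> '[{"day": 1, "step": "cleanser"}]'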
    def skinScheduler(self, prompt, max_retries=3):
        """Generate a skincare schedule with retries and JSON cleaning."""
        for attempt in range(max_retries):
            try:
                response = self.client.models.generate_content(
                    model=self.gemini_model,
                    contents=prompt,
                )
                cleaned_response = self.clean_json_response(response.text)
                return json.loads(cleaned_response)
            except json.JSONDecodeError:
                if attempt == max_retries - 1:
                    # If all Gemini retries fail, try the fallback model.
                    print(f"Gemini failed to produce valid JSON after {max_retries} retries. Trying fallback model...")
                    fallback_response = self.fall_back_llm(prompt)
                    try:
                        cleaned_fallback = self.clean_json_response(fallback_response)
                        return json.loads(cleaned_fallback)
                    except json.JSONDecodeError:
                        return {"error": "Both models failed to produce valid JSON"}
            except Exception as e:
                # For other exceptions, go directly to the fallback model.
                print(f"Gemini API Error: {str(e)}. Trying fallback model...")
                fallback_response = self.fall_back_llm(prompt)
                try:
                    cleaned_fallback = self.clean_json_response(fallback_response)
                    return json.loads(cleaned_fallback)
                except json.JSONDecodeError:
                    return {"error": "Both models failed to produce valid JSON"}
        return {"error": "Max retries reached"}