"""A thin wrapper around the google.generativeai SDK for single-turn and multi-turn Gemini calls."""

import os

import google.generativeai as genai


class GoogleGeminiWrapper:

    def __init__(self, api_key: str):
        """
        Initialize the GoogleGeminiWrapper with the API key.

        :param api_key: Your Google Gemini API key.
        """
        self.api_key = api_key
        genai.configure(api_key=self.api_key)
        self.conversation_history = []
        self.chat_session = None

    def ask(self, prompt: str, model: str = "gemini-2.0-flash", max_tokens: int = 150, temperature: float = 0.7) -> str:
        """
        Send a prompt to the Google Gemini model and get a response (single turn).

        :param prompt: The input prompt to send to the model.
        :param model: The model to use (default is "gemini-2.0-flash").
        :param max_tokens: The maximum number of tokens to include in the response.
        :param temperature: Sampling temperature (higher values mean more randomness).
        :return: The response from the model as a string.
        """
        try:
            generation_config = {
                "temperature": temperature,
                "max_output_tokens": max_tokens,
            }
            model_instance = genai.GenerativeModel(model_name=model, generation_config=generation_config)
            response = model_instance.generate_content(prompt)
            return response.text.strip()
        except Exception as e:
            return f"An error occurred: {e}"

    def start_chat_session(self, model: str = "gemini-2.0-flash", temperature: float = 0.7, max_tokens: int = 150):
        """
        Start a new chat session, seeded with any stored conversation history.
        """
        generation_config = {
            "temperature": temperature,
            "max_output_tokens": max_tokens,
        }
        model_instance = genai.GenerativeModel(model_name=model, generation_config=generation_config)
        self.chat_session = model_instance.start_chat(history=self.conversation_history)

    def chat(self, prompt: str, model: str = "gemini-2.0-flash", max_tokens: int = 150, temperature: float = 0.7) -> str:
        """
        Send a prompt to the Google Gemini model, maintaining conversation history for context.

        :param prompt: The input prompt to send to the model.
        :param model: The model to use (default is "gemini-2.0-flash").
        :param max_tokens: The maximum number of tokens to include in the response.
        :param temperature: Sampling temperature (higher values mean more randomness).
        :return: The response from the model as a string.
        """
        try:
            # Lazily create the chat session on the first call.
            if self.chat_session is None:
                self.start_chat_session(model=model, temperature=temperature, max_tokens=max_tokens)

            # The chat session object keeps the turn-by-turn history internally,
            # so each send_message call already sees the earlier turns.
            response = self.chat_session.send_message(prompt)
            assistant_response = response.text.strip()

            return assistant_response
        except Exception as e:
            # Drop the session so the next call starts fresh instead of reusing a broken one.
            self.chat_session = None
            return f"An error occurred: {e}"

    def reset_conversation(self):
        """
        Reset the conversation history and the chat session.
        """
        self.conversation_history = []
        self.chat_session = None

    def list_available_models(self):
        """
        Lists available Gemini models that support content generation.

        :return: A list of available model names.
        """
        try:
            print("Available Gemini Models:")
            # Collect the names in a single pass rather than calling list_models() twice.
            available = []
            for m in genai.list_models():
                if 'generateContent' in m.supported_generation_methods:
                    print(m.name)
                    available.append(m.name)
            return available
        except Exception as e:
            return f"An error occurred while listing models: {e}"


if __name__ == "__main__":
|
|
api_key = "AIzaSyBisxoehBz8UF0i9kX42f1V3jp-9RNq04g"
|
|
wrapper = GoogleGeminiWrapper(api_key)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
response_ask = wrapper.ask("What is the largest planet in our solar system?")
|
|
print(f"Ask response: {response_ask}")
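
    # A sketch of a multi-turn exchange with chat(): unlike ask(), the wrapper keeps a
    # chat session alive between calls, so the follow-up question can rely on the first
    # answer. The prompts are illustrative examples.
    response_chat_1 = wrapper.chat("Who wrote 'Pride and Prejudice'?")
    print(f"Chat response 1: {response_chat_1}")

    response_chat_2 = wrapper.chat("What other novels did she write?")
    print(f"Chat response 2: {response_chat_2}")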
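
    # After reset_conversation() the stored history and session are cleared, so the same
    # follow-up question is now asked without any context; shown here as an illustration.
    wrapper.reset_conversation()
    response_fresh = wrapper.chat("What other novels did she write?")
    print(f"Chat response after reset (no context): {response_fresh}")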
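
    # list_available_models() prints the models that support generateContent and also
    # returns their names, so the result can be inspected programmatically.
    models = wrapper.list_available_models()
    print(f"Model names returned: {models}")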