# AZAA / main.py
# NOTE(review): the following lines were Hugging Face page chrome accidentally
# captured with the source (uploader "rkihacker", commit bcdcebb verified,
# "raw / history blame", file size 1.81 kB). They are kept here as comments
# because as bare text they made the file a syntax error.
import os

import requests
# Endpoint for the OpenAI-compatible chat completions API.
API_URL = "https://api.typegpt.net/v1/chat/completions"

# SECURITY NOTE(review): an API key was committed in plain text here. Prefer
# the TYPEGPT_API_KEY environment variable; the committed key should be
# treated as leaked and rotated. The hardcoded value is kept only as a
# backward-compatible fallback.
API_KEY = os.environ.get(
    "TYPEGPT_API_KEY",
    "sk-XzS5hhsa3vpIcRLz3prQirBQXOx2hPydPzSpzdRcE1YddnNm",
)

# The single real model every request is routed to; the "virtual models"
# below differ only in the system prompt sent along with each request.
BACKEND_MODEL = "pixtral-large-latest"

# Virtual-model label -> persona system prompt.
SYSTEM_PROMPTS = {
    "gpt-3.5": "You are ChatGPT, a helpful assistant.",
    "claude": "You are Claude, known for being careful, ethical, and thoughtful.",
    "mistral": "You are Mistral, known for giving concise and smart answers.",
    "bard": "You are Bard, a friendly and knowledgeable Google AI assistant."
}
def generate_response(model_label: str, user_prompt: str, timeout: float = 60.0) -> str:
    """Send *user_prompt* to the backend model under a virtual-model persona.

    Args:
        model_label: Key into ``SYSTEM_PROMPTS`` selecting the persona.
        user_prompt: The user's message text.
        timeout: Seconds to wait for the HTTP response. Without a timeout,
            ``requests.post`` can block forever on a stalled connection.

    Returns:
        The assistant's reply text from the first completion choice.

    Raises:
        ValueError: If *model_label* is not a known virtual model.
        Exception: If the API returns a non-200 status code.
        requests.RequestException: On connection errors or timeout.
    """
    if model_label not in SYSTEM_PROMPTS:
        raise ValueError(
            f"Unknown model label '{model_label}'. "
            f"Valid options: {', '.join(SYSTEM_PROMPTS.keys())}"
        )
    headers = {
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json",
    }
    payload = {
        "model": BACKEND_MODEL,
        "messages": [
            {"role": "system", "content": SYSTEM_PROMPTS[model_label]},
            {"role": "user", "content": user_prompt},
        ],
    }
    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    if response.status_code != 200:
        raise Exception(f"API error {response.status_code}: {response.text}")
    # Assumes the OpenAI-compatible response shape: choices[0].message.content.
    return response.json()["choices"][0]["message"]["content"]
if __name__ == "__main__":
    # Interactive CLI: pick a persona, send one message, print the reply.
    print("🧠 Choose a virtual model:")
    print("\n".join(f" - {label}" for label in SYSTEM_PROMPTS))
    chosen_model = input("Model: ").strip()

    print("\n👤 Enter your message:")
    message = input("> ").strip()

    try:
        print("\n🤖 AI Response:")
        print(generate_response(chosen_model, message))
    except Exception as exc:
        print(f"❌ Error: {exc}")