# Spaces: Sleeping — Hugging Face Spaces page-status header captured by the
# scrape; preserved as a comment so the file remains valid Python.
import gradio as gr
import requests
from huggingface_hub import InferenceClient
def query_huggingface(api_key, prompt):
    """Query Mistral-7B-Instruct via the raw HF Inference API over HTTP.

    Args:
        api_key: Hugging Face API token, sent as a Bearer header.
        prompt: User prompt forwarded as the model input.

    Returns:
        The generated text on success, or an ``"Error: ..."`` string on any
        failure (network error, HTTP error, unexpected payload shape).
    """
    url = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"
    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
    # The HF text-generation API's parameter is "max_new_tokens";
    # "max_tokens" is not a recognized key, so the original cap never applied.
    data = {"inputs": prompt, "parameters": {"max_new_tokens": 500}}
    try:
        # Timeout keeps the Gradio worker from hanging forever on a stalled
        # request; raise_for_status surfaces 401 / 503 ("model loading") as a
        # readable error instead of a confusing parse failure below.
        response = requests.post(url, headers=headers, json=data, timeout=60)
        response.raise_for_status()
        response_json = response.json()
        # Successful responses are a list of {"generated_text": ...} dicts;
        # guard against an empty list or a missing key as well.
        if isinstance(response_json, list) and response_json:
            return response_json[0].get("generated_text", "Error: Unexpected response format")
        return "Error: Unexpected response format"
    except Exception as e:
        return f"Error: {str(e)}"
def query_huggingfacehub(api_key, prompt):
    """Query Mistral-7B-Instruct through the huggingface_hub InferenceClient.

    Args:
        api_key: Hugging Face API token.
        prompt: User prompt to send to the model.

    Returns:
        The generated text on success, or an ``"Error: ..."`` string on
        failure or an empty completion.
    """
    # InferenceClient.text_generation() takes the *prompt* as its first
    # positional argument; the model must be given as a keyword (or bound on
    # the client, as here). The original call passed the model id in the
    # prompt slot and the prompt as a second positional argument, which the
    # keyword-only signature rejects.
    client = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.3", token=api_key)
    try:
        completion = client.text_generation(prompt, max_new_tokens=500)
        return completion if completion else "Error: No response"
    except Exception as e:
        return f"Error: {str(e)}"
def chatbot(api_key, user_input, advanced_reasoning, chat_history):
    """Handle one chat turn and thread the transcript through Gradio state.

    Returns a ``(response_text, updated_history)`` pair: the model's reply
    (or an error string) and the transcript with both sides of this turn
    appended. With no input yet, returns the history unchanged.
    """
    # Guard clauses: an API key is mandatory, and with live mode the handler
    # fires before anything is typed, so an empty input is a no-op.
    if not api_key:
        return "Error: API Key is required.", chat_history
    if not user_input:
        return "", chat_history

    transcript = chat_history + f"You: {user_input}\n"
    # Dispatch to the hub client when "Advanced Reasoning" is checked,
    # otherwise the raw HTTP endpoint.
    backend = query_huggingfacehub if advanced_reasoning else query_huggingface
    reply = backend(api_key, user_input)
    transcript += f"VarunGPT: {reply}\n"
    return reply, transcript
def clear_chat():
    """Return an empty string, i.e. a blank transcript for resetting the UI."""
    return ""
# Wire the UI: the gr.State input/output pair threads the running transcript
# between calls of `chatbot`.
# NOTE(review): live=True re-invokes the handler on every input change, so
# each keystroke can trigger a model API call — confirm this is intended.
demo = gr.Interface(
    fn=chatbot,
    inputs=[
        gr.Textbox(label="Enter API Key", type="password"),
        gr.Textbox(label="Ask anything..."),
        gr.Checkbox(label="Enable Advanced Reasoning"),
        gr.State(""),
    ],
    outputs=[
        gr.Textbox(label="VarunGPT Response"),
        gr.State(""),
    ],
    title="🤖 VarunGPT - AI Chatbot",
    description="A simple AI chatbot powered by Hugging Face models with chat history.",
    live=True,
)
demo.launch()