import gradio as gr
import requests
import os
def chat(message, history):
    API_URL = "https://api-inference.huggingface.co/models/bitext/Mistral-7B-Customer-Support"
    # The token is read from the environment (set as a secret on the Space)
    headers = {"Authorization": f"Bearer {os.getenv('HF_TOKEN')}"}
    # Format the message for the API
    payload = {"inputs": message}
    try:
        response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
        if response.status_code == 200:
            result = response.json()
            # The Inference API returns a list of generations for text-generation models
            if isinstance(result, list) and len(result) > 0:
                return result[0]["generated_text"]
            else:
                return "Sorry, I couldn't generate a response."
        else:
            return f"API Error: {response.status_code}"
    except Exception as e:
        return f"Error: {str(e)}"
# Create Gradio interface
demo = gr.ChatInterface(
    fn=chat,
    title="AI Customer Service Chatbot",
    description="Powered by Mistral-7B Customer Support (via Hugging Face API)",
    examples=[
        ["How can I reset my password?"],
        ["What are your return policies?"],
        ["I need help with my order"],
        ["What payment methods do you accept?"]
    ]
)
demo.launch()
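
# A minimal sketch of running this file locally, assuming it is saved as app.py
# (the Hugging Face Spaces default) and that HF_TOKEN holds a valid access token:
#
#   export HF_TOKEN=hf_xxxxxxxxxxxxxxxx   # placeholder, not a real token
#   pip install gradio requests
#   python app.py
#
# On a Space, HF_TOKEN should instead be set as a repository secret so it is
# injected into the environment without appearing in the code.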