from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
import gradio as gr
import torch
# Agent 1: Intent Classifier (zero-shot NLI over hand-written hypotheses)
intent_classifier = pipeline("zero-shot-classification",
                             model="MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli")
def detect_intent(text):
    # Each intent is described by a full hypothesis sentence.
    labels = {
        "weather": "The user wants to know the weather.",
        "faq": "The user is asking for help.",
        "smalltalk": "The user is making casual conversation."
    }
    # Use the hypothesis sentences themselves as candidate labels; the "{}"
    # template keeps them verbatim instead of wrapping them in a stock phrase.
    result = intent_classifier(text,
                               candidate_labels=list(labels.values()),
                               hypothesis_template="{}")
    best_hypothesis = result["labels"][0]  # sorted by descending entailment score
    return next(label for label, hypothesis in labels.items()
                if hypothesis == best_hypothesis)
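# Example (illustrative prompt, not from the original app): an input like
# "Will it rain tomorrow?" should map to the "weather" intent.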
# Agent 2: Domain Logic
def handle_logic(intent):
    if intent == "weather":
        return "It’s currently sunny and 26°C."
    elif intent == "faq":
        return "Please click on 'Forgot Password' to reset it."
    else:
        return "Haha, that’s funny! Tell me more."
# Agent 3: NLG with DialoGPT
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")
def generate_reply(text):
    input_ids = tokenizer.encode(text + tokenizer.eos_token, return_tensors='pt')
    output = model.generate(input_ids, max_length=100, pad_token_id=tokenizer.eos_token_id)
    # Decode only the newly generated tokens, skipping the prompt.
    reply = tokenizer.decode(output[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
    return reply
# Combined chatbot: intent detection -> canned answer -> DialoGPT continuation
def chatbot(user_input):
    intent = detect_intent(user_input)
    logic = handle_logic(intent)
    reply = generate_reply(logic)
    return reply
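# Optional smoke test (a sketch; the sample prompts are illustrative and not
# part of the original app). Uncomment to try the pipeline before launching the UI:
# for msg in ["What's the weather like today?", "I forgot my password", "Hi there!"]:
#     print(msg, "->", chatbot(msg))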
# Gradio interface
uiface = gr.Interface(
    fn=chatbot,
    inputs=gr.Textbox(lines=2, placeholder="Type your message..."),
    outputs="text",
    title="Three-Agent Hugging Face Chatbot",
    description="Intent detection + domain logic + natural generation",
)
uiface.launch()