# Sujata1 / app.py
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM
import wikipedia
import speech_recognition as sr
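# Assumed runtime dependencies (not pinned here), matching the imports above:
# gradio, torch, transformers, wikipedia, SpeechRecognition.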
# ✅ New model (BanglaBERT)
MODEL_NAME = "csebuetnlp/banglabert"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForMaskedLM.from_pretrained(MODEL_NAME)
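# Note: the checkpoint is loaded with a masked-LM head; generate_response below
# uses it for rough token-level reconstruction of the input, not free-form
# text generation.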
memory = {}  # in-memory store for taught question/answer pairs
# ✅ Wikipedia search
def search_wiki(query):
    try:
        wikipedia.set_lang("bn")  # query the Bengali Wikipedia
        result = wikipedia.summary(query, sentences=2)
        return result
    except Exception:
        return None
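# Usage sketch (hypothetical query): search_wiki("রবীন্দ্রনাথ ঠাকুর") returns a
# two-sentence summary string, or None if the lookup or disambiguation fails.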
# ✅ Question-answering system (masked-LM decoding)
def generate_response(user_input):
    inputs = tokenizer(user_input, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    # Take the most likely token at each position and decode back to text
    predicted_ids = torch.argmax(logits[0], dim=-1)
    decoded = tokenizer.decode(predicted_ids, skip_special_tokens=True)
    return decoded.strip()
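# This decode mostly reconstructs the input unless the text contains
# tokenizer.mask_token. A minimal fill-in-the-blank sketch, assuming the
# checkpoint exposes a usable MLM head:
#   masked = f"ঢাকা বাংলাদেশের {tokenizer.mask_token}।"
#   print(generate_response(masked))  # ideally fills in the masked word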
# ✅ Chatbot logic
def sujata_chat(user_input):
    # "শেখাও:" ("teach:") lets the user store a new question/answer pair
    if user_input.startswith("শেখাও:"):
        try:
            parts = user_input.replace("শেখাও:", "").split("=")
            q, a = parts[0].strip(), parts[1].strip()
            memory[q] = a
            return f"✅ শেখা সম্পন্ন:\n❓ {q}\n✅ {a}"  # "Learning complete"
        except Exception:
            return "ভুল ফরম্যাট। লিখুন: শেখাও: প্রশ্ন = উত্তর"  # "Wrong format. Write: teach: question = answer"
    # Answer from taught memory first
    elif user_input in memory:
        return memory[user_input]
    # Simple heuristic for "why"-style reasoning questions
    elif "কেন" in user_input or "হয়" in user_input:
        return "এটি একটি যুক্তির প্রশ্ন। আমি ব্যাখ্যা করতে চেষ্টা করছি..."  # "This is a reasoning question. I am trying to explain..."
    # Fall back to Wikipedia, then to the masked-LM response
    result = search_wiki(user_input)
    if result:
        return result
    return generate_response(user_input)
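# Teach-and-recall sketch, using the command format the function expects
# (hypothetical pair, shown for illustration only):
#   sujata_chat("শেখাও: বাংলাদেশের রাজধানী = ঢাকা")  # stores the pair in memory
#   sujata_chat("বাংলাদেশের রাজধানী")                # returns "ঢাকা"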
# ✅ Voice to text
def voice_to_text(audio):
    recognizer = sr.Recognizer()
    with sr.AudioFile(audio) as source:
        audio_data = recognizer.record(source)
        try:
            return recognizer.recognize_google(audio_data, language="bn-BD")
        except (sr.UnknownValueError, sr.RequestError):
            return "আমি বুঝতে পারিনি, আবার বলুন।"  # "I didn't understand, please say it again."
# ✅ UI
with gr.Blocks(css="body {background-color: #121212; color: white;}") as demo:
    gr.Markdown("<h2 style='text-align: center; color:white;'>🌙 সুজাতা AI (বাংলা চ্যাটবট)</h2>")  # "Sujata AI (Bengali chatbot)"
    chatbot = gr.Chatbot(label="সুজাতা", bubble_full_width=False)  # "Sujata"
    msg = gr.Textbox(label="✍️ প্রশ্ন করুন", placeholder="এখানে বাংলায় লিখুন...", lines=2)  # "Ask a question" / "Write here in Bengali..."
    with gr.Row():
        send = gr.Button("📤 পাঠাও")   # "Send"
        clear = gr.Button("🗑️ মুছুন")  # "Clear"
    with gr.Row():
        mic_input = gr.Audio(source="microphone", type="filepath", label="🎙️ কণ্ঠস্বর ইনপুট")  # "Voice input"
        mic_btn = gr.Button("🎧 পাঠাও")  # "Send"
    # Chat handler: append the (user, bot) turn and clear the textbox
    def respond(user_message, chat_history):
        output = sujata_chat(user_message)
        chat_history.append((user_message, output))
        return chat_history, ""

    # Voice handler: transcribe the recording, then answer it
    def respond_from_voice(audio):
        user_text = voice_to_text(audio)
        output = sujata_chat(user_text)
        return [(user_text, output)]

    send.click(respond, [msg, chatbot], [chatbot, msg])
    mic_btn.click(respond_from_voice, mic_input, chatbot)
    clear.click(lambda: [], None, chatbot)
demo.launch()