import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from deep_translator import GoogleTranslator
from langdetect import detect
import torch
import os
MODEL_NAME = "cardiffnlp/twitter-roberta-base-sentiment"
MODEL_DIR = "model"
# Download model if not present
if not os.path.exists(MODEL_DIR) or not os.listdir(MODEL_DIR):
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    tokenizer.save_pretrained(MODEL_DIR)
    model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)
    model.save_pretrained(MODEL_DIR)
else:
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
    model = AutoModelForSequenceClassification.from_pretrained(MODEL_DIR)
emotion_labels = {
0: "Negative π",
1: "Neutral π",
2: "Positive π"
}
translator = GoogleTranslator(source='auto', target='en')
def predict_emotion(text):
    # Detect the input language and translate to English if needed,
    # since the sentiment model was trained on English text
    detected_language = detect(text)
    if detected_language != 'en':
        translated_text = translator.translate(text)
    else:
        translated_text = text
    # Tokenize and classify the (possibly translated) text
    inputs = tokenizer(translated_text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
    predicted_class = torch.argmax(logits, dim=-1).item()
    emotion = emotion_labels.get(predicted_class, "Unknown")
    return emotion
iface = gr.Interface(
    fn=predict_emotion,
    inputs=gr.Textbox(lines=2, placeholder="Enter text here...", label="Input Text"),
    outputs=[
        gr.Textbox(label="Predicted Sentiment")
    ],
    title="Emotion Detection App",
    description="Enter text in any language. The app will detect the language, translate if needed, and predict the emotion."
)
if __name__ == "__main__":
    iface.launch(share=False)
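
# --- Optional: a minimal smoke test of the detect-translate-classify flow ---
# This is a sketch, not part of the original Space. It assumes the code above is
# saved as app.py in the same directory and that its dependencies (gradio,
# transformers, torch, deep-translator, langdetect) are installed. The example
# inputs and the labels in the comments are illustrative only. Importing app does
# not start the Gradio server, because launch() sits behind the __main__ guard.
#
# from app import predict_emotion
#
# if __name__ == "__main__":
#     # English input: langdetect returns 'en', so no translation step runs.
#     print(predict_emotion("I really enjoyed this movie!"))   # e.g. "Positive"
#     # Non-English input: it is translated to English before classification.
#     print(predict_emotion("C'est une journée terrible."))    # e.g. "Negative"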