# NOTE(review): removed Hugging Face Space page residue (status badges, file
# size, commit hashes, line-number gutter) that was captured with the source
# and is not part of the program.
import gradio as gr
from transformers import AutoTokenizer
import torch
from tiny_finbert import TinyFinBERTRegressor, preprocess_texts
import os
import nltk
nltk.download('stopwords')
# Directory containing the fine-tuned tokenizer files and regressor weights.
MODEL_DIR = "./saved_model"
# Run on GPU when available, otherwise fall back to CPU.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Tokenizer saved alongside the model; must match the checkpoint's vocabulary.
tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
model = TinyFinBERTRegressor().to(DEVICE)
# map_location keeps CUDA-saved weights loadable on CPU-only hosts.
model.load_state_dict(torch.load(os.path.join(MODEL_DIR, "regressor_model.pt"), map_location=DEVICE))
# Inference only: disable dropout/batch-norm training behavior.
model.eval()
def predict_sentiment(text):
    """Score a financial sentence with the TinyFinBERT regressor.

    Returns a tuple of (score rounded to 4 decimals, interpretation),
    where the interpretation is "positive" for scores above 0.3,
    "negative" below -0.3, and "neutral" in between.
    """
    print(f"[DEBUG] Input text: {text}")
    cleaned = preprocess_texts([text])[0]
    print(f"[DEBUG] Processed text: {cleaned}")
    encoded = tokenizer(
        cleaned,
        return_tensors="pt",
        truncation=True,
        padding='max_length',
        max_length=128,
    )
    # The regressor does not accept token_type_ids; drop them while moving
    # the remaining tensors onto the model's device.
    batch = {name: tensor.to(DEVICE) for name, tensor in encoded.items() if name != "token_type_ids"}
    with torch.no_grad():
        raw_score = model(**batch)["score"].item()
    print(f"[DEBUG] Score: {raw_score}")
    # Map the continuous score onto a three-way label using +/-0.3 thresholds.
    label = "positive" if raw_score > 0.3 else ("negative" if raw_score < -0.3 else "neutral")
    return round(raw_score, 4), label
# Gradio UI: one textbox in, (numeric score, text interpretation) out.
_output_components = [
    gr.Number(label="Sentiment Score"),
    gr.Textbox(label="Interpretation"),
]
iface = gr.Interface(
    fn=predict_sentiment,
    inputs=gr.Textbox(label="Enter financial sentence"),
    outputs=_output_components,
    title="TinyFinBERT Sentiment Analysis",
    #allow_api=True,
    api_name="predict",
)
# Start the Gradio server. The commented-out keyword arguments below were
# previous deployment options (public binding, share link, threading) kept
# for reference.
iface.launch(
    # server_name="0.0.0.0",
    # share=True,
    # #enable_queue=False
    # max_threads=40,
    # show_api=True
)
# Fix: removed a stray " |" gutter artifact after the closing parenthesis
# that made the file a syntax error.