"""Gradio demo: instant sentiment analysis with a tweet-tuned BERTweet model."""

import gradio as gr
import torch
from transformers import pipeline

# --- Configuration ---
MODEL_NAME = "finiteautomata/bertweet-base-sentiment-analysis"
TITLE = "Instant Sentiment Analysis"
DESCRIPTION = """
This demo showcases a sentiment analysis pipeline using the `bertweet-base-sentiment-analysis` model.
This model is fine-tuned on tweets, making it particularly effective at understanding informal
language, slang, and social media content. Enter any text below to see the model's sentiment
prediction (Positive, Negative, or Neutral).
"""


# --- Pipeline Initialization ---
def get_pipeline():
    """
    Initialize and return a sentiment-analysis pipeline.

    Uses the first CUDA device when available, otherwise CPU (device=-1).
    ``top_k=None`` makes the pipeline return a score for every label, which
    the UI needs to render the full Positive/Negative/Neutral breakdown.
    """
    return pipeline(
        "sentiment-analysis",
        model=MODEL_NAME,
        device=0 if torch.cuda.is_available() else -1,
        top_k=None,
    )


# Load the pipeline once at startup so individual requests don't pay the
# model-load cost.
sentiment_pipeline = get_pipeline()


def analyze_sentiment(text):
    """
    Run sentiment analysis on *text* and format the result for ``gr.Label``.

    Args:
        text: Raw user input from the textbox (may be None or empty).

    Returns:
        A dict mapping each sentiment label to its score. Returns an empty
        dict for empty or whitespace-only input so the UI shows nothing
        instead of a spurious prediction.
    """
    # Fix: the original only checked `not text`, so whitespace-only input
    # (e.g. "   ") was still sent through the model.
    if not text or not text.strip():
        return {}

    # With top_k=None the pipeline returns one list of {label, score} dicts
    # per input string; we pass a single string, so take the first entry.
    scores = sentiment_pipeline(text)[0]
    return {result["label"]: result["score"] for result in scores}


# --- Gradio Interface ---
iface = gr.Interface(
    fn=analyze_sentiment,
    inputs=gr.Textbox(
        lines=3,
        label="Input Text",
        placeholder="Type something here... (e.g., 'I love Hugging Face!')",
    ),
    outputs=gr.Label(num_top_classes=3, label="Sentiment Scores"),
    title=TITLE,
    description=DESCRIPTION,
    examples=[
        ["I am absolutely thrilled with the new Hugging Face courses!"],
        ["The documentation was a bit confusing and could be improved."],
        ["This is a neutral statement about AI development."],
    ],
    allow_flagging="never",
)

# --- Launch the App ---
if __name__ == "__main__":
    iface.launch()