from fastapi import FastAPI
from pydantic import BaseModel
from utils import fetch_news, analyze_sentiment, extract_topics, generate_tts
import random
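# Assumed contracts for the helpers in utils (inferred from how they are used below):
#   fetch_news(company)      -> list of dicts with "title", "summary", "link" and "pub_date" keys
#   analyze_sentiment(text)  -> one of "Positive", "Negative", "Neutral"
#   extract_topics(text)     -> list of topic strings
# generate_tts is imported but never called here; the "Audio" field in the response stays None.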

# Set up the FastAPI server with a name and description
app = FastAPI(title="News Sentiment API", description="Analyze news sentiment for companies")
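# FastAPI also serves interactive docs for this endpoint at /docs once the server is running.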

class CompanyInput(BaseModel):
    """Simple model to validate incoming company name."""
    company: str

@app.post("/analyze")
async def analyze_company(payload: CompanyInput):
    """Take a company name and return its news sentiment analysis."""
    company = payload.company
    articles_data = fetch_news(company)
    if not articles_data:
        return {"error": f"No articles found for {company}. Check logs for details."}

    articles = []  # Per-article records returned to the client
    sentiments = {"Positive": 0, "Negative": 0, "Neutral": 0}  # Running tally per label
    positive_articles = []  # Headline buckets, used below to pick concrete examples
    negative_articles = []
    neutral_articles = []

    for article in articles_data:
        # Prefer the feed summary; fall back to the headline with any trailing source name removed.
        summary = article["summary"].strip() or article["title"].split(" - ")[0].strip()
        # Titles formatted as "<headline> - <source>" yield a source we can strip from the summary.
        source = article["title"].split(" - ")[-1].strip() if " - " in article["title"] else ""
        if source and source in summary:
            summary = summary.replace(source, "").strip()
        # Re-append the source only when one was found, so summaries never end in a dangling " - ".
        summary = f"{summary.rstrip(' -')} - {source}" if source else summary.rstrip(" -")

        sentiment = analyze_sentiment(summary)
        topics = extract_topics(summary)
        sentiments[sentiment] += 1

        # Keep only the headline portion (drop the " - <source>" suffix) for the example buckets.
        title = article["title"].split(" - ")[0].strip()
        if sentiment == "Positive":
            positive_articles.append(title)
        elif sentiment == "Negative":
            negative_articles.append(title)
        else:
            neutral_articles.append(title)

        articles.append({
            "Title": article["title"],
            "Summary": summary,
            "Sentiment": sentiment,
            "Topics": topics,
            "Link": article["link"],
            "PubDate": article["pub_date"]
        })

    # One bullet per article, e.g. "- News 1 positively discusses <topics>".
    detailed_comparisons = [f"- News {i + 1} {article['Sentiment'].lower()}ly discusses {', '.join(article['Topics'])}"
                            for i, article in enumerate(articles)]
    # Ties are broken by the insertion order of the sentiments dict (Positive, then Negative, then Neutral).
    dominant_sentiment = max(sentiments, key=sentiments.get)
    trends = f"{company} News Trends: {dominant_sentiment}"

    total_articles = sum(sentiments.values())
    sentiment_count = f"{sentiments['Positive']} positive, {sentiments['Negative']} negative, {sentiments['Neutral']} neutral"

    # Several template variants per section keep repeated runs from reading identically; one is picked at random.
    intro_phrases = [
        f"Spanning {total_articles} recent reports, the narrative surrounding {company} tilts {dominant_sentiment.lower()}, with {sentiment_count}.",
        f"Across {total_articles} articles in recent coverage, {company}’s story emerges as predominantly {dominant_sentiment.lower()}, reflecting {sentiment_count}.",
        f"Drawing from {total_articles} latest publications, {company}’s news landscape leans {dominant_sentiment.lower()}, underscored by {sentiment_count}."
    ]
    positive_phrases = [
        f"With {len(positive_articles)} favorable accounts, {company} demonstrates notable progress, exemplified by '{random.choice(positive_articles) if positive_articles else 'no specific examples available'}'.",
        f"Boasting {len(positive_articles)} positive developments, {company} showcases strength, as evidenced in '{random.choice(positive_articles) if positive_articles else 'no notable instances'}'.",
        f"Highlighted by {len(positive_articles)} encouraging reports, {company} is forging ahead, with '{random.choice(positive_articles) if positive_articles else 'no standout reports'}' standing out."
    ]
    negative_phrases = [
        f"However, {len(negative_articles)} troubling narratives raise concerns, including '{random.choice(negative_articles) if negative_articles else 'no specific concerns noted'}'.",
        f"Yet, {len(negative_articles)} adverse reports signal challenges, such as '{random.choice(negative_articles) if negative_articles else 'no highlighted issues'}'.",
        f"Nevertheless, {len(negative_articles)} concerning stories cast a shadow, notably '{random.choice(negative_articles) if negative_articles else 'no notable setbacks'}'."
    ]
    neutral_phrases = [
        f"Additionally, {len(neutral_articles)} impartial updates provide context, such as '{random.choice(neutral_articles) if neutral_articles else 'no neutral updates available'}'.",
        f"Meanwhile, {len(neutral_articles)} balanced accounts offer insight, including '{random.choice(neutral_articles) if neutral_articles else 'no balanced reports'}'.",
        f"Furthermore, {len(neutral_articles)} objective pieces contribute details, like '{random.choice(neutral_articles) if neutral_articles else 'no objective details'}'."
    ]
    outlook_phrases_positive = [
        f"In summary, {company} appears poised for a favorable trajectory.",
        f"All told, {company} stands on the cusp of a promising future.",
        f"Ultimately, {company} is positioned for an optimistic course ahead."
    ]
    outlook_phrases_negative = [
        f"In conclusion, {company} confronts a challenging path forward.",
        f"Overall, {company} navigates a formidable road ahead.",
        f"To conclude, {company} faces a demanding horizon."
    ]
    outlook_phrases_mixed = [
        f"In the final analysis, {company} balances opportunity and uncertainty.",
        f"On balance, {company} presents a complex outlook moving forward.",
        f"Ultimately, {company} reflects a blend of prospects and hurdles."
    ]

    # Assemble the narrative: intro, then only the sections that actually have articles, then an outlook.
    final_text = random.choice(intro_phrases) + " "
    if positive_articles:
        final_text += random.choice(positive_phrases) + " "
    if negative_articles:
        final_text += random.choice(negative_phrases) + " "
    if neutral_articles:
        final_text += random.choice(neutral_phrases) + " "
    if sentiments["Positive"] > sentiments["Negative"]:
        final_text += random.choice(outlook_phrases_positive)
    elif sentiments["Negative"] > sentiments["Positive"]:
        final_text += random.choice(outlook_phrases_negative)
    else:
        final_text += random.choice(outlook_phrases_mixed)

    print(f"Generated dynamic final sentiment for {company}: {final_text}")

    return {
        "Company": company,
        "Articles": articles,
        "Comparative Sentiment Score": {
            "Sentiment Distribution": f"Positive: {sentiments['Positive']}, Negative: {sentiments['Negative']}, Neutral: {sentiments['Neutral']}",
            "Trends": trends,
            "Detailed Comparisons": "\n".join(detailed_comparisons)
        },
        "Final Sentiment Analysis": final_text.strip(),
        "Audio": None
    }

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)  # Start the API server on port 8000
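
# Example request (assuming the server is running locally on port 8000; the company name is illustrative):
#   curl -X POST http://localhost:8000/analyze \
#        -H "Content-Type: application/json" \
#        -d '{"company": "Tesla"}'
# The JSON response carries per-article sentiment and topics, a comparative summary, and the final narrative text.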