File size: 3,329 Bytes
09f258b
 
c3d0a8f
 
 
3d35e5e
c3d0a8f
3d35e5e
09f258b
 
504389b
09f258b
 
c3d0a8f
3d35e5e
c3d0a8f
 
 
 
09f258b
c3d0a8f
 
 
 
 
 
 
 
 
 
 
 
09f258b
c3d0a8f
 
 
 
 
 
 
 
 
 
 
09f258b
c3d0a8f
 
 
 
 
 
 
 
 
 
 
 
09f258b
c3d0a8f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
09f258b
 
c3d0a8f
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
# app.py β€” Financial News + Stock Analyzer with Nous-Hermes-2 (Mistral)

import time
from urllib.parse import quote_plus

import requests
import pandas as pd
from bs4 import BeautifulSoup
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# βœ… Public, compatible model
# NOTE: loading happens at import time — the first run downloads ~14 GB of
# weights from the Hugging Face Hub and holds the model in memory.
model_id = "NousResearch/Nous-Hermes-2-Mistral-7B-DPO"
# use_fast=False forces the slow (SentencePiece) tokenizer for this checkpoint.
tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Shared text-generation pipeline used by run_llm(); capped at 512 new tokens.
llm_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512)

def run_llm(prompt: str) -> str:
    """Run the shared text-generation pipeline and return only the completion.

    Args:
        prompt: Full prompt text fed to the pipeline.

    Returns:
        The generated continuation with the echoed prompt removed.
    """
    result = llm_pipeline(prompt)[0]["generated_text"]
    # The pipeline echoes the prompt as a prefix of generated_text. Strip it
    # positionally: str.replace(prompt, "") would also delete any later
    # occurrence of the prompt text inside the model's answer.
    if result.startswith(prompt):
        return result[len(prompt):]
    return result

# ----------- Financial News Fetcher -----------
def fetch_financial_news(query="markets", max_articles=3):
    """Scrape a Google News search for Reuters article links.

    Args:
        query: Search topic (e.g. "tech stocks"); may contain spaces.
        max_articles: Maximum number of article URLs to return.

    Returns:
        A list of up to ``max_articles`` Reuters URLs (possibly empty if
        the page layout yields no matches).
    """
    # quote_plus() percent-encodes the query so multi-word topics such as
    # "tech stocks" form a valid URL instead of breaking on the space.
    url = f"https://www.google.com/search?q={quote_plus(query)}+site:reuters.com&tbm=nws"
    headers = {"User-Agent": "Mozilla/5.0"}
    # An explicit timeout keeps the whole app from hanging on a stalled request.
    response = requests.get(url, headers=headers, timeout=10)
    soup = BeautifulSoup(response.text, "html.parser")
    links = []
    for anchor in soup.find_all('a', href=True):
        href = anchor['href']
        if "reuters.com" in href and len(links) < max_articles:
            # Google wraps targets as "/url?q=<target>&..."; unwrap to the real URL.
            links.append(href.split("&")[0].replace("/url?q=", ""))
    return links

# ----------- News Article Summarizer -----------
def summarize_news_article(url):
    """Fetch an article page and ask the LLM for an investor-oriented summary.

    Args:
        url: Article URL to download and summarize.

    Returns:
        The LLM summary, or a "Failed to summarize" message on any error
        (network failure, parse failure, model failure) — the caller
        concatenates this into the report, so it must never raise.
    """
    try:
        # Timeout prevents one slow article from stalling the whole report.
        r = requests.get(url, headers={"User-Agent": "Mozilla/5.0"}, timeout=10)
        soup = BeautifulSoup(r.text, "html.parser")
        paragraphs = soup.find_all('p')
        # First 10 paragraphs only, to keep the prompt within context limits.
        text = "\n".join(p.get_text() for p in paragraphs[:10])
        prompt = f"You are a financial analyst. Summarize the key points from this article:\n\n{text}\n\nReturn a concise summary suitable for investors."
        return run_llm(prompt)
    except Exception as e:
        return f"Failed to summarize article: {e}"

# ----------- Stock Data Analyzer -----------
def analyze_stock_data(symbol="AAPL", lookback_days=30):
    """Download recent daily prices for a symbol and ask the LLM for a trend summary.

    Args:
        symbol: Ticker symbol understood by Yahoo Finance.
        lookback_days: Size of the download window in days (default 30,
            matching the original fixed window's approximate span).

    Returns:
        The LLM's plain-English trend analysis, or a "Failed to fetch"
        message on any error — the caller string-concatenates the result,
        so it must never raise.
    """
    try:
        # Compute the window at call time so the data is genuinely recent.
        # The original hard-coded period1/period2 pinned a fixed May 2023
        # window, making the "recent closing prices" permanently stale.
        period2 = int(time.time())
        period1 = period2 - lookback_days * 24 * 60 * 60
        url = (
            f"https://query1.finance.yahoo.com/v7/finance/download/{symbol}"
            f"?period1={period1}&period2={period2}&interval=1d&events=history"
        )
        df = pd.read_csv(url)
        df["Date"] = pd.to_datetime(df["Date"])
        # Only the last 10 closes go into the prompt, to keep it short.
        closing_prices = df[["Date", "Close"]].tail(10)
        data_str = closing_prices.to_string(index=False)
        prompt = f"You're a financial analyst. Given the following recent closing prices of {symbol}, analyze the trend and summarize in plain English:\n\n{data_str}"
        return run_llm(prompt)
    except Exception as e:
        return f"Failed to fetch stock data: {e}"

# ----------- Gradio App Interface -----------
def analyze(query, stock_symbol):
    """Assemble the combined news + stock report shown in the UI textbox."""
    sections = ["πŸ“ˆ Fetching Financial News...\n"]
    for article_url in fetch_financial_news(query):
        sections.append(f"\nπŸ“° {article_url}\n")
        sections.append(summarize_news_article(article_url) + "\n")
    sections.append("\nπŸ“Š Analyzing Stock Trends...\n")
    sections.append(analyze_stock_data(stock_symbol))
    # join() builds the report in one pass instead of repeated string +=
    return "".join(sections)

# Wire the analyzer into a two-input, one-output Gradio UI and start serving.
demo = gr.Interface(
    fn=analyze,
    inputs=[
        gr.Textbox(label="Financial News Topic", value="tech stocks"),
        gr.Textbox(label="Stock Symbol", value="AAPL"),
    ],
    outputs=gr.Textbox(label="Financial Summary", lines=20),
    title="🧠 Fingraph β€” Financial Analyst Agent",
    description="Summarizes news + stock trends using Nous-Hermes-2 (Mistral) and LLM insights.",
)
demo.launch()