File size: 2,523 Bytes
a51d3d1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
# app.py (Fast + Enhanced)

import gradio as gr
from duckduckgo_search import DDGS
from transformers import pipeline
import requests
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor

# Load faster summarizer
# Shared summarization pipeline (BART fine-tuned on SAMSum), loaded once at
# import time so every request reuses the same in-memory model.
summarizer = pipeline("summarization", model="philschmid/bart-large-cnn-samsum")

# Web search
def search_web(query, num_results=3):
    """Return up to *num_results* DuckDuckGo text-search hits for *query*.

    Each hit is a dict as yielded by ``DDGS.text`` (presumably containing
    at least 'title', 'href', and 'body' — confirm against the library).
    """
    with DDGS() as ddgs:
        # list() instead of a pass-through comprehension (same result,
        # clearer intent; see ruff PERF402).
        results = list(ddgs.text(query, max_results=num_results))
    return results

# Scrape and summarize
def fetch_summary(url):
    """Download *url*, extract its paragraph text, and summarize it.

    Returns ``(summary_text, url)`` on success, or ``(None, None)`` when
    the page cannot be fetched, returns an HTTP error, has too little
    text (< 100 chars), or summarization fails.
    """
    try:
        res = requests.get(url, timeout=5)
        # Treat HTTP errors (404/500/...) as failures instead of
        # summarizing an error page.
        res.raise_for_status()
        soup = BeautifulSoup(res.text, 'html.parser')
        text = ' '.join(p.get_text() for p in soup.find_all('p'))
        text = text.strip().replace('\n', ' ')[:1500]  # cap model input size
        if len(text) < 100:
            # Too little content to produce a meaningful summary.
            return None, None
        summary = summarizer(text, max_length=100, min_length=30, do_sample=False)
        return summary[0]['summary_text'], url
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any scrape/summarize failure is simply skipped.
        return None, None

# Use-case generation
def generate_use_cases(insights, company, industry):
    """Condense *insights* into three practical AI use cases for
    *company* in the *industry* sector, using the shared summarizer."""
    prompt = (
        f"Based on these insights about {company} in the {industry} "
        f"sector, list 3 practical AI use cases:\n\n{insights}"
    )
    result = summarizer(prompt, max_length=120, min_length=40, do_sample=False)
    return result[0]['summary_text']

# Main pipeline
def process(company, industry):
    """Search the web for *company*/*industry* trends, summarize the top
    hits in parallel, and derive AI use cases from the summaries.

    Returns a 3-tuple of strings:
    (combined summaries, generated use cases, markdown reference links).
    """
    query = f"{company} {industry} 2025 trends"
    results = search_web(query, num_results=3)

    # Scrape/summarize each hit in parallel (network-bound work).
    # .get('href') instead of ['href']: a malformed result dict would
    # otherwise raise KeyError inside executor.map.
    with ThreadPoolExecutor(max_workers=3) as executor:
        summaries = list(executor.map(lambda r: fetch_summary(r.get('href')), results))

    valid = [(s, u) for s, u in summaries if s and u]
    if not valid:
        # Nothing usable was scraped: avoid feeding an empty prompt to the
        # summarizer (which would raise) and report the failure instead.
        msg = "No usable web content was found for this query."
        return msg, msg, ""

    all_summaries = '\n'.join(s for s, _ in valid)
    references = '\n'.join(f"- [{u}]({u})" for _, u in valid)

    use_cases = generate_use_cases(all_summaries, company, industry)

    return all_summaries.strip(), use_cases.strip(), references.strip()
    
# Gradio UI
# Two text inputs (company, industry) mapped to the three outputs of
# process(); the last output is rendered as Markdown so the reference
# links are clickable.
demo = gr.Interface(
    fn=process,
    inputs=[gr.Textbox(label="Company Name"), gr.Textbox(label="Industry")],
    outputs=[
        gr.Textbox(label="Summarized Insights"),
        gr.Textbox(label="AI Use Cases"),
        gr.Markdown(label="Reference Links")
    ],
    title="⚡ AI Insight Generator (Fast & Free)",
    description="Enter a company and industry to get real-time insights and AI business use cases—faster and without API keys!"
)

# Launch the web server only when run as a script, not when imported.
if __name__ == "__main__":
    demo.launch()