import os

import streamlit as st
from serpapi import GoogleSearch  # provided by the google-search-results package

# SerpAPI key: read from the environment rather than hardcoding the secret in
# source (the variable name SERPAPI_API_KEY is a conventional choice)
API_KEY = os.getenv("SERPAPI_API_KEY", "")
# Function to fetch news articles using SerpAPI
def fetch_news_serpapi():
    st.info("Fetching latest news on waste management using SerpAPI...")
    search = GoogleSearch({
        "q": "waste management India",
        "tbm": "nws",
        "api_key": API_KEY
    })
    results = search.get_dict()
    if "news_results" in results:
        news = [{"title": item["title"], "link": item["link"]} for item in results["news_results"][:5]]
        return news
    else:
        return []
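
# Streamlit reruns this script top to bottom on every interaction, so each rerun
# spends SerpAPI quota. A minimal caching sketch, assuming Streamlit >= 1.18
# (which ships st.cache_data); the one-hour TTL and the _cached name are
# illustrative choices, not part of the original app:
#
# @st.cache_data(ttl=3600)
# def fetch_news_serpapi_cached():
#     # st.info is omitted here: on a cache hit the function body never runs
#     search = GoogleSearch({"q": "waste management India", "tbm": "nws", "api_key": API_KEY})
#     results = search.get_dict()
#     return [{"title": item["title"], "link": item["link"]}
#             for item in results.get("news_results", [])[:5]]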
# Function to fetch hackathons and webinars using SerpAPI
def fetch_hackathons_serpapi():
    st.info("Fetching hackathons and webinars related to waste management using SerpAPI...")
    search = GoogleSearch({
        "q": "waste management hackathon OR webinar",
        "tbm": "nws",
        "api_key": API_KEY
    })
    results = search.get_dict()
    if "news_results" in results:
        hackathons = [{"title": item["title"], "link": item["link"]} for item in results["news_results"][:5]]
        return hackathons
    else:
        return []

# Function to fetch government initiatives using SerpAPI
def fetch_government_initiatives_serpapi():
    st.info("Fetching Indian government initiatives on waste management using SerpAPI...")
    search = GoogleSearch({
        "q": "Indian government waste management initiatives",
        "tbm": "nws",
        "api_key": API_KEY
    })
    results = search.get_dict()
    if "news_results" in results:
        initiatives = [{"title": item["title"], "link": item["link"]} for item in results["news_results"][:5]]
        return initiatives
    else:
        return []
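
# The three fetchers above differ only in their status message and query. A
# minimal consolidation sketch (fetch_serpapi is a hypothetical helper, not part
# of the original app) that also guards against request failures:
#
# def fetch_serpapi(message, query):
#     st.info(message)
#     try:
#         results = GoogleSearch({"q": query, "tbm": "nws", "api_key": API_KEY}).get_dict()
#     except Exception as e:
#         st.warning(f"SerpAPI request failed: {e}")
#         return []
#     return [{"title": item["title"], "link": item["link"]}
#             for item in results.get("news_results", [])[:5]]
#
# Example call, equivalent to fetch_news_serpapi():
# news = fetch_serpapi("Fetching latest news on waste management using SerpAPI...",
#                      "waste management India")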
# Streamlit app layout
def main():
    st.set_page_config(page_title="BinSight", layout="wide")
    st.title("🌍 BinSight - Waste Management & Education")
    st.markdown(
        """
        Welcome to **BinSight**! This platform provides real-time updates on:
        - 📰 Current news on waste management.
        - 💡 Hackathons and webinars related to waste management.
        - 🏛️ Indian government initiatives.
        """
    )
    # Tabs for better UI
    tab1, tab2, tab3 = st.tabs(["📰 News", "💡 Hackathons/Webinars", "🏛️ Govt Initiatives"])
    # Tab 1: News
    with tab1:
        news = fetch_news_serpapi()
        if news:
            for item in news:
                st.markdown(f"[{item['title']}]({item['link']})")
        else:
            st.warning("No news articles found.")

    # Tab 2: Hackathons/Webinars
    with tab2:
        hackathons = fetch_hackathons_serpapi()
        if hackathons:
            for event in hackathons:
                st.markdown(f"[{event['title']}]({event['link']})")
        else:
            st.warning("No hackathons or webinars found.")

    # Tab 3: Govt Initiatives
    with tab3:
        initiatives = fetch_government_initiatives_serpapi()
        if initiatives:
            for initiative in initiatives:
                st.markdown(f"[{initiative['title']}]({initiative['link']})")
        else:
            st.warning("No government initiatives found.")

    # Sidebar: about the app
    st.sidebar.title("About BinSight")
    st.sidebar.info(
        """
        BinSight is an initiative to educate people about waste management and to connect
        them with events, news, and government programs to make our planet sustainable.
        """
    )
    # Back button to redirect to the dashboard
    st.markdown("<br>", unsafe_allow_html=True)
    st.markdown(
        "<a href='https://binsight.onrender.com/dashboard.html' target='_self' style='text-decoration:none;'>"
        "<button style='padding: 10px 20px; font-size: 16px;'>⬅ Back to Dashboard</button></a>",
        unsafe_allow_html=True,
    )
if __name__ == "__main__":
    main()
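
# To run locally (assuming this file is saved as app.py):
#   pip install streamlit google-search-results
#   SERPAPI_API_KEY=<your key> streamlit run app.py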
# Earlier iteration: scrape Google News RSS and public pages directly, falling
# back to a SerpAPI search whenever scraping fails.
# import streamlit as st
# import requests
# from bs4 import BeautifulSoup
# from serpapi import GoogleSearch
#
# # SerpAPI key for the Google Search fallback (redacted; read from the environment)
# SERPAPI_API_KEY = os.getenv("SERPAPI_API_KEY", "")
#
# # Fetch the latest news on waste management
# def fetch_news():
#     st.info("Fetching the latest news on waste management...")
#     try:
#         url = "https://news.google.com/rss/search?q=waste+management"
#         response = requests.get(url, timeout=10)
#         response.raise_for_status()
#         soup = BeautifulSoup(response.content, "xml")
#         articles = [entry.title.text for entry in soup.find_all("item")]
#         if not articles:
#             raise ValueError("No news articles found.")
#         return articles
#     except Exception as e:
#         st.warning(f"Error fetching news: {e}")
#         st.info("Searching for waste management news...")
#         return fetch_from_google("waste management news")
#
# # Fetch upcoming webinars and hackathons
# def fetch_webinars_and_hackathons():
#     st.info("Fetching upcoming webinars and hackathons on waste management...")
#     try:
#         url = "https://www.google.com/search?q=waste+management+webinars+hackathons"
#         response = requests.get(url, timeout=10)
#         response.raise_for_status()
#         soup = BeautifulSoup(response.content, "html.parser")
#         webinars = [item.text for item in soup.find_all("h3", limit=5)]
#         if not webinars:
#             raise ValueError("No webinars or hackathons found.")
#         return webinars
#     except Exception as e:
#         st.warning(f"Error fetching webinars: {e}")
#         st.info("Searching for webinars and hackathons...")
#         return fetch_from_google("waste management webinars hackathons")
#
# # Fetch Indian government initiatives related to waste management
# def fetch_government_initiatives():
#     st.info("Fetching Indian government initiatives on waste management...")
#     try:
#         url = "https://swachhbharat.mygov.in/"
#         response = requests.get(url, timeout=10)
#         response.raise_for_status()
#         soup = BeautifulSoup(response.content, "html.parser")
#         initiatives = [item.text.strip() for item in soup.find_all("h2", limit=5)]
#         if not initiatives:
#             raise ValueError("No government initiatives found.")
#         return initiatives
#     except Exception as e:
#         st.warning(f"Error fetching government initiatives: {e}")
#         st.info("Searching for government initiatives on waste management...")
#         return fetch_from_google("Indian government initiatives waste management")
#
# # Fallback: search with SerpAPI for news, webinars, or government initiatives
# def fetch_from_google(query):
#     params = {
#         "q": query,
#         "api_key": SERPAPI_API_KEY,
#         "engine": "google",
#     }
#     search = GoogleSearch(params)
#     results = search.get_dict()
#     if "organic_results" not in results:
#         return [f"No results found for '{query}'"]
#     data = [result["title"] for result in results["organic_results"]]
#     return data if data else [f"No results found for '{query}'"]
#
# # Main function to organize everything
# def main():
#     st.title("BinSight - Waste Management News, Webinars, and Initiatives")
#     # Display News
#     st.subheader("Latest News on Waste Management")
#     news = fetch_news()
#     for item in news:
#         st.write(f"- {item}")
#     # Display Webinars & Hackathons
#     st.subheader("Upcoming Webinars & Hackathons")
#     webinars = fetch_webinars_and_hackathons()
#     for item in webinars:
#         st.write(f"- {item}")
#     # Display Government Initiatives
#     st.subheader("Indian Government Initiatives on Waste Management")
#     initiatives = fetch_government_initiatives()
#     for item in initiatives:
#         st.write(f"- {item}")
#
# # Run the Streamlit app
# if __name__ == "__main__":
#     main()
# Earliest iteration: scrape news, Eventbrite listings, and government sites
# directly with requests + BeautifulSoup (no SerpAPI).
# import streamlit as st
# import requests
# from bs4 import BeautifulSoup
#
# # Function to fetch news articles
# def fetch_news():
#     st.info("Fetching latest news on waste management...")
#     url = "https://news.google.com/rss/search?q=waste+management+india"
#     response = requests.get(url)
#     soup = BeautifulSoup(response.content, "lxml-xml")  # use the lxml-xml parser
#     articles = soup.find_all("item")[:5]
#     news = [{"title": item.title.text, "link": item.link.text} for item in articles]
#     return news
#
# # Function to fetch hackathons/webinars
# def fetch_hackathons():
#     st.info("Fetching hackathons and webinars related to waste management...")
#     url = "https://www.eventbrite.com/d/online/environment--conferences/"
#     response = requests.get(url)
#     soup = BeautifulSoup(response.text, "html.parser")
#     events = soup.find_all("div", {"class": "search-event-card-wrapper"})[:5]
#     hackathons = []
#     for event in events:
#         title = event.find("div", {"class": "eds-event-card__formatted-name--is-clamped"}).text
#         link = event.find("a")["href"]
#         hackathons.append({"title": title, "link": link})
#     return hackathons
#
# # Function to fetch government initiatives
# def fetch_government_initiatives():
#     st.info("Fetching Indian government initiatives on waste management...")
#     # Alternative sources for government initiatives
#     urls = [
#         "https://mohua.gov.in/",
#         "https://sbmurban.org/",
#     ]
#     initiatives = []
#     for url in urls:
#         try:
#             response = requests.get(url, timeout=10)
#             response.raise_for_status()
#             soup = BeautifulSoup(response.content, "html.parser")
#             # Adjust the parsing logic to each website's structure
#             initiatives.extend([item.text.strip() for item in soup.find_all("h2", limit=5)])
#         except requests.exceptions.RequestException as e:
#             st.warning(f"Could not fetch data from {url}: {e}")
#     if not initiatives:
#         st.error("No government initiatives found.")
#         return ["No data available."]
#     return initiatives
#
# # Streamlit app layout
# def main():
#     st.set_page_config(page_title="BinSight", layout="wide")
#     st.title("🌍 BinSight - Waste Management & Education")
#     st.markdown(
#         """
#         Welcome to **BinSight**! This platform provides real-time updates on:
#         - 📰 Current news on waste management.
#         - 💡 Hackathons and webinars related to waste management.
#         - 🏛️ Indian government initiatives.
#         """
#     )
#     # Tabs for better UI
#     tab1, tab2, tab3 = st.tabs(["📰 News", "💡 Hackathons/Webinars", "🏛️ Govt Initiatives"])
#     # Tab 1: News
#     with tab1:
#         news = fetch_news()
#         if news:
#             for item in news:
#                 st.markdown(f"[{item['title']}]({item['link']})")
#         else:
#             st.warning("No news articles found.")
#     # Tab 2: Hackathons/Webinars
#     with tab2:
#         hackathons = fetch_hackathons()
#         if hackathons:
#             for event in hackathons:
#                 st.markdown(f"[{event['title']}]({event['link']})")
#         else:
#             st.warning("No hackathons or webinars found.")
#     # Tab 3: Govt Initiatives
#     with tab3:
#         initiatives = fetch_government_initiatives()
#         if initiatives:
#             # fetch_government_initiatives returns plain strings, not
#             # {"title", "link"} dicts, so render them as bullet text
#             for initiative in initiatives:
#                 st.write(f"- {initiative}")
#         else:
#             st.warning("No government initiatives found.")
#     # Sidebar: about the app
#     st.sidebar.title("About BinSight")
#     st.sidebar.info(
#         """
#         BinSight is an initiative to educate people about waste management and to connect
#         them with events, news, and government programs to make our planet sustainable.
#         """
#     )
#
# if __name__ == "__main__":
#     main()