from flask import Flask, jsonify, render_template, request
from flask_cors import CORS
from sqlalchemy import create_engine, text
import kagglehub
from kagglehub import KaggleDatasetAdapter
import pandas as pd
import os

app = Flask(__name__)
CORS(app)
# Database configuration
DATABASE_URL = os.environ.get(
    "DATABASE_URL",
    "postgresql://postgres:postgres@db.ckrqyjfdifjbsuuofegd.supabase.co:5432/postgres",
)
engine = create_engine(DATABASE_URL, pool_pre_ping=True)
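
# Optional sketch (not called anywhere in this app): a quick connectivity check
# that confirms the DATABASE_URL above is reachable. It uses only the engine and
# text() already imported; the function name is illustrative, not part of the
# original code.
def check_database_connection():
    try:
        with engine.connect() as conn:
            conn.execute(text("SELECT 1"))
        return True
    except Exception as exc:
        print(f"Database connectivity check failed: {exc}")
        return False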

# Download the datasets
def download_datasets():
    try:
        print("Downloading datasets...")
        # Download news dataset
        news_path = kagglehub.dataset_download("therohk/global-news-week")
        print("Path to news dataset files:", news_path)
        # Download business leaders dataset
        business_path = kagglehub.dataset_download("beatafaron/wisdom-from-business-leaders-and-innovators")
        print("Path to business leaders dataset files:", business_path)
        return {"news": news_path, "business": business_path}
    except Exception as e:
        print(f"Error downloading datasets: {e}")
        return None

@app.route('/')  # route path assumed
def home():
    return render_template('business-leaders.html')

@app.route('/blog')  # route path assumed
def blog():
    return render_template('blog.html')

@app.route('/api/login', methods=['POST'])  # route path assumed
def login():
    try:
        payload = request.get_json(silent=True) or request.form
        email = (payload.get('email') or '').strip().lower()
        password = payload.get('password') or ''
        if not email or not password:
            return jsonify({"error": "Email and password are required"}), 400
        # Simple credentials check against the users table
        # Expected schema: users(id serial pk, email text unique, password text)
        with engine.connect() as conn:
            row = conn.execute(
                text("SELECT id, email, password FROM users WHERE email = :email"),
                {"email": email},
            ).mappings().first()
        if not row:
            return jsonify({"error": "Invalid credentials"}), 401
        # NOTE: For demo purposes this compares plaintext. In production, store
        # hashed passwords and verify with bcrypt.
        if password != row["password"]:
            return jsonify({"error": "Invalid credentials"}), 401
        return jsonify({"ok": True, "user": {"id": row["id"], "email": row["email"]}})
    except Exception as e:
        return jsonify({"error": str(e)}), 500
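
# A minimal sketch of the hashed-password handling suggested in the NOTE above,
# using werkzeug.security (bundled with Flask) in place of the bcrypt package it
# mentions. These helpers are illustrative and not wired into login()/signup().
def _hash_password(plain_password):
    from werkzeug.security import generate_password_hash
    # Store this hash in users.password instead of the plaintext value.
    return generate_password_hash(plain_password)

def _verify_password(plain_password, stored_hash):
    from werkzeug.security import check_password_hash
    # True when the submitted password matches the stored hash.
    return check_password_hash(stored_hash, plain_password)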

@app.route('/api/signup', methods=['POST'])  # route path assumed
def signup():
    try:
        payload = request.get_json(silent=True) or request.form
        full_name = (payload.get('fullName') or '').strip()
        email = (payload.get('email') or '').strip().lower()
        password = payload.get('password') or ''
        if not full_name or not email or not password:
            return jsonify({"error": "Full name, email and password are required"}), 400
        with engine.connect() as conn:
            # Check if the user already exists
            existing_user = conn.execute(
                text("SELECT id FROM users WHERE email = :email"),
                {"email": email},
            ).mappings().first()
            if existing_user:
                return jsonify({"error": "User with this email already exists"}), 409
            # Insert new user
            # Expected schema: users(id serial pk, full_name text, email text unique, password text)
            result = conn.execute(
                text("INSERT INTO users (full_name, email, password) VALUES (:full_name, :email, :password) RETURNING id"),
                {"full_name": full_name, "email": email, "password": password},
            )
            # Read the generated id before committing, then commit the transaction
            user_id = result.fetchone()[0]
            conn.commit()
        return jsonify({
            "message": "Account created successfully! Redirecting to login...",
            "user": {"id": user_id, "full_name": full_name, "email": email}
        }), 201
    except Exception as e:
        return jsonify({"error": str(e)}), 500
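
# A sketch of bootstrapping the users table described in the schema comments
# above. Column names come from the queries in login()/signup(); the exact types
# and constraints are assumptions, and this function is not called automatically.
def create_users_table():
    with engine.connect() as conn:
        conn.execute(text(
            """
            CREATE TABLE IF NOT EXISTS users (
                id SERIAL PRIMARY KEY,
                full_name TEXT,
                email TEXT UNIQUE NOT NULL,
                password TEXT NOT NULL
            )
            """
        ))
        conn.commit()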

@app.route('/api/news')  # route path assumed
def get_news():
    try:
        # Resolve the downloaded news dataset (cached by kagglehub) and find its CSV file
        news_path = kagglehub.dataset_download("therohk/global-news-week")
        dataset_path = None
        for root, dirs, files in os.walk(news_path):
            for file in files:
                if file.endswith('.csv'):
                    dataset_path = os.path.join(root, file)
                    break
            if dataset_path:
                break
        if dataset_path:
            # Read the CSV file
            df = pd.read_csv(dataset_path)
            # Convert the first 20 articles to JSON for the blog
            news_data = []
            for index, row in df.head(20).iterrows():
                news_item = {
                    'title': row.get('title', 'No title'),
                    'description': row.get('description', 'No description'),
                    'date': row.get('date', 'No date'),
                    'source': row.get('source', 'Unknown source'),
                    'category': row.get('category', 'General')
                }
                news_data.append(news_item)
            return jsonify(news_data)
        else:
            return jsonify({'error': 'News dataset file not found'}), 404
    except Exception as e:
        return jsonify({'error': str(e)}), 500
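
# Optional sketch: pandas leaves missing CSV fields as NaN, which is not valid
# JSON. A helper like this (illustrative, not used above) maps NaN to None
# before rows are serialized with jsonify().
def _json_safe_records(df, limit=20):
    import math
    records = df.head(limit).to_dict(orient="records")
    return [
        {key: (None if isinstance(value, float) and math.isnan(value) else value)
         for key, value in row.items()}
        for row in records
    ]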

@app.route('/api/business-leaders')  # route path assumed
def get_business_leaders():
    try:
        # Load the business leaders dataset using kagglehub
        df = kagglehub.load_dataset(
            KaggleDatasetAdapter.PANDAS,
            "beatafaron/wisdom-from-business-leaders-and-innovators",
            "",  # Empty string for default file
        )
        # Convert the first 20 records to JSON
        business_data = []
        for index, row in df.head(20).iterrows():
            business_item = {
                'id': index + 1,
                'quote': row.get('quote', 'No quote available'),
                'author': row.get('author', 'Unknown author'),
                'category': row.get('category', 'General'),
                'source': row.get('source', 'Unknown source'),
                'year': row.get('year', 'Unknown year')
            }
            business_data.append(business_item)
        return jsonify(business_data)
    except Exception as e:
        return jsonify({'error': str(e)}), 500

@app.route('/api/business-leaders/<int:record_id>')  # route path assumed
def get_business_leader(record_id):
    try:
        # Load the business leaders dataset
        df = kagglehub.load_dataset(
            KaggleDatasetAdapter.PANDAS,
            "beatafaron/wisdom-from-business-leaders-and-innovators",
            "",
        )
        if record_id < len(df):
            row = df.iloc[record_id]
            business_item = {
                'id': record_id + 1,
                'quote': row.get('quote', 'No quote available'),
                'author': row.get('author', 'Unknown author'),
                'category': row.get('category', 'General'),
                'source': row.get('source', 'Unknown source'),
                'year': row.get('year', 'Unknown year')
            }
            return jsonify(business_item)
        else:
            return jsonify({'error': 'Record not found'}), 404
    except Exception as e:
        return jsonify({'error': str(e)}), 500

@app.route('/api/dataset-info')  # route path assumed
def dataset_info():
    try:
        # Get info about both datasets
        news_path = kagglehub.dataset_download("therohk/global-news-week")
        business_path = kagglehub.dataset_download("beatafaron/wisdom-from-business-leaders-and-innovators")
        return jsonify({
            'status': 'success',
            'datasets': {
                'news': {
                    'name': 'Global News Week',
                    'path': str(news_path),
                    'description': 'Latest news articles from various sources'
                },
                'business_leaders': {
                    'name': 'Wisdom from Business Leaders and Innovators',
                    'path': str(business_path),
                    'description': 'Inspirational quotes and wisdom from business leaders'
                }
            },
            'message': 'All datasets downloaded successfully'
        })
    except Exception as e:
        return jsonify({
            'status': 'error',
            'error': str(e)
        }), 500
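
# Example of exercising the endpoints with Flask's built-in test client. The
# routes are the assumed paths from the decorators above; run this manually,
# it is not invoked by the app.
def _demo_requests():
    client = app.test_client()
    signup_resp = client.post('/api/signup', json={
        "fullName": "Ada Lovelace", "email": "ada@example.com", "password": "secret"
    })
    print(signup_resp.status_code, signup_resp.get_json())
    login_resp = client.post('/api/login', json={
        "email": "ada@example.com", "password": "secret"
    })
    print(login_resp.status_code, login_resp.get_json())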

if __name__ == '__main__':
    app.run(debug=True, port=5000)