File size: 8,648 Bytes
0a96199
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
import hmac
import os

from flask import Flask, jsonify, render_template, request
from flask_cors import CORS
from sqlalchemy import create_engine, text
import kagglehub
from kagglehub import KaggleDatasetAdapter
import pandas as pd

app = Flask(__name__)
# Enable cross-origin requests for all routes (default flask-cors settings:
# any origin), so the front-end can call the JSON APIs from another host/port.
CORS(app)

# Database configuration
# Connection string is taken from the environment when set.
# NOTE(review): the fallback below embeds real-looking Supabase credentials in
# source — these should be rotated and removed; confirm and make the env var
# mandatory instead.
DATABASE_URL = os.environ.get(
    "DATABASE_URL",
    "postgresql://postgres:postgres@db.ckrqyjfdifjbsuuofegd.supabase.co:5432/postgres",
)
# pool_pre_ping validates pooled connections before handing them out, avoiding
# "server closed the connection" errors from stale connections.
engine = create_engine(DATABASE_URL, pool_pre_ping=True)

# Download the Kaggle datasets once, before the first request is handled.
# NOTE: @app.before_first_request was removed in Flask 2.3; a run-once
# @app.before_request hook is the forward-compatible equivalent.
@app.before_request
def download_datasets():
    """Ensure both Kaggle datasets are downloaded (idempotent, runs once).

    Resolved paths are stored on the function object as
    ``download_datasets.paths`` ({"news": ..., "business": ...}), or ``None``
    when a download failed. Always returns ``None`` so the request proceeds
    normally (a non-None return from a before_request hook would become the
    response).
    """
    if getattr(download_datasets, "_ran", False):
        return None
    download_datasets._ran = True
    try:
        print("Downloading datasets...")

        # kagglehub caches downloads, so repeated calls are cheap.
        news_path = kagglehub.dataset_download("therohk/global-news-week")
        print("Path to news dataset files:", news_path)

        business_path = kagglehub.dataset_download("beatafaron/wisdom-from-business-leaders-and-innovators")
        print("Path to business leaders dataset files:", business_path)

        download_datasets.paths = {"news": news_path, "business": business_path}
    except Exception as e:
        # Best-effort: log and continue; the API routes re-resolve paths
        # themselves, so a failed warm-up is not fatal.
        print(f"Error downloading datasets: {e}")
        download_datasets.paths = None
    return None

@app.route('/')
def home():
    """Serve the landing page (business-leaders view)."""
    page = render_template('business-leaders.html')
    return page

@app.route('/blog')
def blog():
    """Serve the blog page."""
    page = render_template('blog.html')
    return page

@app.route('/login', methods=['POST'])
def login():
    """Authenticate a user against the ``users`` table.

    Accepts JSON or form data with ``email`` and ``password``.
    Returns 200 with the user's id/email on success, 400 when a field is
    missing, 401 on bad credentials, 500 on unexpected errors.
    """
    try:
        payload = request.get_json(silent=True) or request.form
        email = (payload.get('email') or '').strip().lower()
        password = payload.get('password') or ''
        if not email or not password:
            return jsonify({"error": "Email and password are required"}), 400

        # Simple credentials check against users table
        # Expected schema: users(id serial pk, email text unique, password text)
        with engine.connect() as conn:
            row = conn.execute(
                text("SELECT id, email, password FROM users WHERE email = :email"),
                {"email": email},
            ).mappings().first()

        if not row:
            return jsonify({"error": "Invalid credentials"}), 401

        # NOTE: For demo purposes comparing plaintext. In production, store
        # hashed passwords and verify with bcrypt.
        # compare_digest makes the comparison constant-time so response timing
        # does not leak how much of the password matched.
        stored = (row["password"] or '')
        if not hmac.compare_digest(password.encode("utf-8"), stored.encode("utf-8")):
            return jsonify({"error": "Invalid credentials"}), 401

        return jsonify({"ok": True, "user": {"id": row["id"], "email": row["email"]}})
    except Exception as e:
        return jsonify({"error": str(e)}), 500

@app.route('/signup', methods=['POST'])
def signup():
    """Create a new user account.

    Accepts JSON or form data with ``fullName``, ``email`` and ``password``.
    Returns 201 with the new user on success, 400 when a field is missing,
    409 when the email is already registered, 500 on unexpected errors.
    """
    try:
        payload = request.get_json(silent=True) or request.form
        full_name = (payload.get('fullName') or '').strip()
        email = (payload.get('email') or '').strip().lower()
        password = payload.get('password') or ''

        if not full_name or not email or not password:
            return jsonify({"error": "Full name, email and password are required"}), 400

        # Check if user already exists
        with engine.connect() as conn:
            existing_user = conn.execute(
                text("SELECT id FROM users WHERE email = :email"),
                {"email": email},
            ).mappings().first()

            if existing_user:
                return jsonify({"error": "User with this email already exists"}), 409

            # Insert new user
            # Expected schema: users(id serial pk, full_name text, email text unique, password text)
            # NOTE(review): password is stored in plaintext — should be hashed
            # (bcrypt) before this goes anywhere near production.
            result = conn.execute(
                text("INSERT INTO users (full_name, email, password) VALUES (:full_name, :email, :password) RETURNING id"),
                {"full_name": full_name, "email": email, "password": password},
            )

            # Fetch the generated id BEFORE committing: several drivers close
            # the result's cursor on commit, making a later fetchone() fail.
            user_id = result.fetchone()[0]

            # Commit the transaction
            conn.commit()

            return jsonify({
                "message": "Account created successfully! Redirecting to login...",
                "user": {"id": user_id, "full_name": full_name, "email": email}
            }), 201

    except Exception as e:
        return jsonify({"error": str(e)}), 500

@app.route('/api/news')
def get_news():
    """Return the first 20 articles of the Global News Week dataset as JSON.

    Returns 200 with a list of article dicts, 404 when no CSV is found in the
    downloaded dataset, 500 on unexpected errors.
    """
    try:
        # Resolve the dataset directory via kagglehub (cached after the first
        # download). The previous implementation scanned the current working
        # directory, which is not where kagglehub stores its downloads.
        dataset_dir = kagglehub.dataset_download("therohk/global-news-week")

        # Find the first CSV file in the downloaded dataset
        dataset_path = None
        for root, dirs, files in os.walk(dataset_dir):
            for file in files:
                if file.endswith('.csv'):
                    dataset_path = os.path.join(root, file)
                    break
            if dataset_path:
                break

        if not dataset_path:
            return jsonify({'error': 'News dataset file not found'}), 404

        df = pd.read_csv(dataset_path)

        # Convert the first 20 articles to JSON-friendly dicts; Series.get
        # supplies a fallback when a column is absent.
        news_data = [
            {
                'title': row.get('title', 'No title'),
                'description': row.get('description', 'No description'),
                'date': row.get('date', 'No date'),
                'source': row.get('source', 'Unknown source'),
                'category': row.get('category', 'General'),
            }
            for _, row in df.head(20).iterrows()
        ]
        return jsonify(news_data)

    except Exception as e:
        return jsonify({'error': str(e)}), 500

@app.route('/api/business-leaders')
def get_business_leaders():
    """Return the first 20 business-leader quotes as JSON.

    Returns 200 with a list of quote dicts, 500 (with an ``error`` key) on
    failure — previously errors were returned with a 200 status.
    """
    try:
        # Load the business leaders dataset using kagglehub
        df = kagglehub.load_dataset(
            KaggleDatasetAdapter.PANDAS,
            "beatafaron/wisdom-from-business-leaders-and-innovators",
            "",  # Empty string for default file
        )

        # Convert the first 20 rows to JSON-friendly dicts; Series.get
        # supplies a fallback when a column is absent.
        business_data = [
            {
                'id': index + 1,
                'quote': row.get('quote', 'No quote available'),
                'author': row.get('author', 'Unknown author'),
                'category': row.get('category', 'General'),
                'source': row.get('source', 'Unknown source'),
                'year': row.get('year', 'Unknown year'),
            }
            for index, row in df.head(20).iterrows()
        ]
        return jsonify(business_data)

    except Exception as e:
        return jsonify({'error': str(e)}), 500

@app.route('/api/business-leaders/<int:record_id>')
def get_business_leader(record_id):
    """Return a single business-leader quote by positional record id.

    ``record_id`` is a 0-based row index into the dataset (the returned
    ``id`` field is 1-based, matching /api/business-leaders). Returns 200
    with the record, 404 when the index is out of range (previously returned
    with a 200 status), 500 on unexpected errors.
    """
    try:
        # Load the business leaders dataset
        df = kagglehub.load_dataset(
            KaggleDatasetAdapter.PANDAS,
            "beatafaron/wisdom-from-business-leaders-and-innovators",
            "",
        )

        # The <int:> converter guarantees record_id >= 0.
        if record_id >= len(df):
            return jsonify({'error': 'Record not found'}), 404

        row = df.iloc[record_id]
        business_item = {
            'id': record_id + 1,
            'quote': row.get('quote', 'No quote available'),
            'author': row.get('author', 'Unknown author'),
            'category': row.get('category', 'General'),
            'source': row.get('source', 'Unknown source'),
            'year': row.get('year', 'Unknown year'),
        }
        return jsonify(business_item)

    except Exception as e:
        return jsonify({'error': str(e)}), 500

@app.route('/api/dataset-info')
def dataset_info():
    """Report the local paths of both Kaggle datasets.

    kagglehub caches downloads, so these calls are cheap after the first
    request. Returns 200 with dataset metadata, 500 (with ``status: error``)
    on failure — previously errors were returned with a 200 status.
    """
    try:
        # Get info about both datasets
        news_path = kagglehub.dataset_download("therohk/global-news-week")
        business_path = kagglehub.dataset_download("beatafaron/wisdom-from-business-leaders-and-innovators")

        return jsonify({
            'status': 'success',
            'datasets': {
                'news': {
                    'name': 'Global News Week',
                    'path': str(news_path),
                    'description': 'Latest news articles from various sources'
                },
                'business_leaders': {
                    'name': 'Wisdom from Business Leaders and Innovators',
                    'path': str(business_path),
                    'description': 'Inspirational quotes and wisdom from business leaders'
                }
            },
            'message': 'All datasets downloaded successfully'
        })
    except Exception as e:
        return jsonify({
            'status': 'error',
            'error': str(e)
        }), 500

if __name__ == '__main__':
    # Development entry point.
    # NOTE(review): debug=True enables the Werkzeug interactive debugger
    # (arbitrary code execution if exposed) — confirm this is only ever run
    # locally, never in production.
    app.run(debug=True, port=5000)