import os
import datetime
import requests
import re
import pandas as pd
import gradio as gr
import threading
import uuid
import queue
import time
from transformers import AutoTokenizer
from mistralai import Mistral
from huggingface_hub import InferenceClient
import smtplib
import ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
import base64
from google.oauth2.credentials import Credentials
from google.auth.transport.requests import Request
import openai  # legacy (<1.0) OpenAI SDK, used via openai.ChatCompletion below
from openai.error import RateLimitError  # raised when the API rate-limits a request

# ------------------------------
# Helper functions and globals
# ------------------------------
sheet_data = None
file_name = None
sheet = None
slider_max_tokens = None

def debug_print(message: str):
    print(f"[{datetime.datetime.now().isoformat()}] {message}", flush=True)

def initialize_tokenizer():
    try:
        return AutoTokenizer.from_pretrained("gpt2")
    except Exception as e:
        debug_print("Failed to initialize tokenizer: " + str(e))
        return None

global_tokenizer = initialize_tokenizer()

def count_tokens(text: str) -> int:
    if global_tokenizer:
        try:
            return len(global_tokenizer.encode(text))
        except Exception:
            return len(text.split())
    return len(text.split())
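
# Illustrative behavior (hypothetical input): with the GPT-2 tokenizer loaded,
# count_tokens("Scouting report for player 23") returns its BPE token count;
# if the tokenizer failed to load, it falls back to len(text.split()) == 5.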

def get_model_pricing(model_name: str):
    """Return pricing information for models."""
    model_pricing = {
        "GPT-3.5": {"USD": {"input": 0.0000005, "output": 0.0000015}, "RON": {"input": 0.0000023, "output": 0.0000069}},
        "GPT-4o": {"USD": {"input": 0.0000025, "output": 0.00001}, "RON": {"input": 0.0000115, "output": 0.000046}},
        "GPT-4o mini": {"USD": {"input": 0.00000015, "output": 0.0000006}, "RON": {"input": 0.0000007, "output": 0.0000028}},
        "o1-mini": {"USD": {"input": 0.0000011, "output": 0.0000044}, "RON": {"input": 0.0000051, "output": 0.0000204}},
        "o3-mini": {"USD": {"input": 0.0000011, "output": 0.0000044}, "RON": {"input": 0.0000051, "output": 0.0000204}},
        "Meta-Llama-3": {"USD": {"input": 0.00, "output": 0.00}, "RON": {"input": 0.00, "output": 0.00}},
        "Mistral-API": {"USD": {"input": 0.00, "output": 0.00}, "RON": {"input": 0.00, "output": 0.00}}
    }
    return model_pricing.get(model_name, {"USD": {"input": 0.00, "output": 0.00}, "RON": {"input": 0.00, "output": 0.00}})
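
# Worked example from the table above (prices are per token): a GPT-4o mini
# call with 1,000 input and 500 output tokens costs roughly
# 1000 * 0.00000015 + 500 * 0.0000006 = $0.00045 (about 0.0021 RON).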

def get_model_max_tokens(model_name: str) -> int:
    """Return the max context length for the selected model."""
    model_token_limits = {
        "GPT-3.5": 16385,
        "GPT-4o": 128000,
        "GPT-4o mini": 128000,
        "Meta-Llama-3": 4096,
        "Mistral-API": 128000,
        "o1-mini": 128000,
        "o3-mini": 128000
    }
    for key in model_token_limits:
        if key in model_name:
            return model_token_limits[key]
    return 4096  # Default safety limit
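
# Example: the substring match above means get_model_max_tokens("🇺🇸 GPT-4o mini")
# returns 128000 despite the flag prefix; unrecognized names fall back to 4096.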


def generate_response(prompt: str, model_name: str, sheet_data: str = "") -> str:
    global slider_max_tokens
    
    full_prompt = f"{prompt}\n\nSheet Data:\n{sheet_data}" if sheet_data else prompt
    max_context_tokens = get_model_max_tokens(model_name)
    max_tokens = min(slider_max_tokens, max_context_tokens)
    # Strip the leading flag emoji to get the base model name for pricing;
    # maxsplit=1 keeps multi-word names such as "GPT-4o mini" intact.
    base_model_name = model_name.split(maxsplit=1)[1] if len(model_name.split()) > 1 else model_name

    try:
        if "Mistral" in model_name:
            mistral_api_key = os.getenv("MISTRAL_API_KEY")
            if not mistral_api_key:
                raise ValueError("MISTRAL_API_KEY environment variable not set.")
            mistral_client = Mistral(api_key=mistral_api_key)
            response = mistral_client.chat.complete(
                model="mistral-small-latest",
                messages=[{"role": "user", "content": full_prompt}],
                temperature=0.7,
                top_p=0.95,
                max_tokens=max_tokens  # honor the UI token cap for Mistral as well
            )
            return f"[Model: {model_name}]" + response.choices[0].message.content
        
        elif "Meta-Llama" in model_name:
            hf_api_token = os.getenv("HF_API_TOKEN")
            if not hf_api_token:
                raise ValueError("HF_API_TOKEN environment variable not set.")
            client = InferenceClient(token=hf_api_token)
            response = client.text_generation(
                full_prompt,
                model="meta-llama/Meta-Llama-3-8B-Instruct",
                temperature=0.7,
                top_p=0.95,
                max_new_tokens=max_tokens
            )
            return f"[Model: {model_name}]" + response
        
        elif any(model in model_name for model in ["GPT-3.5", "GPT-4o", "o1-mini", "o3-mini"]):
            # "GPT-4o mini" must come before "GPT-4o": the substring lookup
            # below takes the first key that matches, and "GPT-4o" would
            # otherwise shadow "GPT-4o mini".
            model_map = {
                "GPT-4o mini": "gpt-4o-mini",
                "GPT-3.5": "gpt-3.5-turbo",
                "GPT-4o": "gpt-4o",
                "o1-mini": "gpt-4o-mini",  # served via gpt-4o-mini in this app
                "o3-mini": "gpt-4o-mini"   # served via gpt-4o-mini in this app
            }
            model = next((model_map[key] for key in model_map if key in model_name), None)
            
            if not model:
                raise ValueError(f"Unsupported OpenAI model: {model_name}")
            
            openai.api_key = os.getenv("OPEN_API_KEY")
            
            response = openai.ChatCompletion.create(
                model=model,
                messages=[{"role": "user", "content": full_prompt}],
                temperature=0.7,
                max_tokens=max_tokens
            )
            
            # Count input tokens to estimate input cost
            input_tokens = count_tokens(full_prompt)

            # Count output tokens
            output_tokens = count_tokens(response["choices"][0]["message"]["content"])
            
            # Get pricing information
            pricing = get_model_pricing(base_model_name)
            
            # Estimate this call's cost from the per-token prices
            cost_summary = (
                f" (${input_tokens * pricing['USD']['input']:.3f}/in, "
                f"${output_tokens * pricing['USD']['output']:.3f}/out | "
                f"{input_tokens * pricing['RON']['input']:.3f} RON/in, "
                f"{output_tokens * pricing['RON']['output']:.3f} RON/out)"
            )

            return f"[Model: {model_name}{cost_summary}]" + response["choices"][0]["message"]["content"]
        else:
            # Fail loudly instead of silently returning None for unknown models
            raise ValueError(f"Unsupported model: {model_name}")
    except Exception as e:
        debug_print(f"❌ Error generating response: {str(e)}")
        return f"[Model: {model_name}][Error] {str(e)}"
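

# RateLimitError is imported above but never used; the helper below is an
# illustrative sketch (not wired into generate_response) of how it could
# guard an OpenAI call with exponential backoff.
def call_with_backoff(fn, retries=3, base_delay=2.0):
    """Retry a zero-argument callable on OpenAI rate limits (sketch)."""
    for attempt in range(retries):
        try:
            return fn()
        except RateLimitError:
            if attempt == retries - 1:
                raise  # out of retries; let the caller handle it
            time.sleep(base_delay * (2 ** attempt))  # back off 2s, 4s, 8s, ...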

def process_query(prompt: str, model_name: str):
    global sheet_data
    
    # Handle the case where sheet_data might be None
    if sheet_data is None:
        sheet_data = get_sheet_data()
        
    full_prompt = f"{prompt}\n\nSheet Data:\n{sheet_data}"  # for logging only; generate_response rebuilds it
    debug_print(f"Processing query with model {model_name}: {full_prompt}")

    # Generate the response using the specified model and sheet data
    response = generate_response(prompt, model_name, sheet_data)
    
    # Count the number of tokens for input and output
    input_tokens = count_tokens(prompt + "\n\n" + sheet_data)  # Include sheet data in the input token count
    output_tokens = count_tokens(response)
    
    # Return the response along with token counts
    return response, f"Input tokens: {input_tokens}", f"Output tokens: {output_tokens}"

# ------------------------------
# Global variables for background jobs
# ------------------------------
jobs = {}
results_queue = queue.Queue()
last_job_id = None

# ------------------------------
# Job management functions
# ------------------------------

def get_job_list():
    job_list_md = "### 📊 Submitted Jobs\n\n"
    
    if not jobs:
        return "No jobs found. Submit a query or load files to create jobs."
    
    # Sort jobs by start time (newest first)
    sorted_jobs = sorted(
        jobs.items(),
        key=lambda x: x[1].get("start_time", 0),
        reverse=True
    )
    
    for job_id, job_info in sorted_jobs:
        status = job_info.get("status", "unknown")
        job_type = job_info.get("type", "unknown")
        query = job_info.get("query", "")
        start_time = job_info.get("start_time", 0)
        time_str = datetime.datetime.fromtimestamp(start_time).strftime("%Y-%m-%d %H:%M:%S")
        
        # Create a shortened query preview
        query_preview = query[:30] + "..." if query and len(query) > 30 else query or "N/A"
        
        # Color-code the status display
        if status == "processing":
            status_formatted = f"<span style='color: red'>⏳ {status}</span>"
        elif status == "completed":
            status_formatted = f"<span style='color: green'>✅ {status}</span>"
        else:
            status_formatted = f"<span style='color: orange'>❓ {status}</span>"
        
        if job_type == "query":
            job_list_md += f"- [{job_id}](javascript:void) - {time_str} - {status_formatted} - Query: {query_preview}\n"
        else:
            job_list_md += f"- [{job_id}](javascript:void) - {time_str} - {status_formatted} - File Load Job\n"
    
    return job_list_md

def get_sheet_data():
    global sheet_data
    global file_name
    global sheet
    file = file_name
    sheet_name = sheet
    debug_print(f"file name: {file}, sheet name: {sheet_name}")

    if sheet_data is None:
        if file is None or not sheet_name:
            return "No sheet loaded. Upload a file and load a sheet first."
        try:
            df = pd.read_excel(file.name, sheet_name=sheet_name)
            sheet_data = df.to_string(index=False)  # Convert sheet data to string format
            return sheet_data  # Display sheet data in UI
        except Exception as e:
            return f"Error reading sheet: {str(e)}"
    else: 
        return sheet_data

# Background worker: runs a function on a thread and queues its result

def process_in_background(job_id, func, args):
    """Run a function in the background and store its result in the shared queue."""
    try:
        result = func(*args)
    except Exception as e:
        # Queue the failure too, so the job doesn't stay "processing" forever
        result = (f"[Error] {str(e)}", "Input tokens: n/a", "Output tokens: n/a")
    results_queue.put((job_id, result))
    debug_print(f"Job {job_id} finished processing in background.")
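
# Typical flow (sketch): submit_query_async() starts a thread running
# process_query via process_in_background; the UI then polls
# check_job_status(job_id), which drains results_queue and marks the job
# "completed" once the worker has pushed its (response, tokens) result.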
    

def submit_query_async(query, model_choice, max_tokens_slider):
    """Asynchronous version of submit_query_updated to prevent timeouts."""
    global last_job_id
    global sheet_data 
    global slider_max_tokens 
    slider_max_tokens = max_tokens_slider
    
    if not query:
        return ("Please enter a non-empty query", "", "Input tokens: 0", "Output tokens: 0", "", "", get_job_list())
    
    job_id = str(uuid.uuid4())
    debug_print(f"Starting async job {job_id} for query: {query}")
    
    # Handle the case where sheet_data might be None
    if sheet_data is None:
        sheet_data = get_sheet_data()

    # Do not append the sheet data here: process_query/generate_response
    # already appends it, and appending twice duplicated the sheet in the prompt.
    
    # Start background thread to process the query
    threading.Thread(
        target=process_in_background,
        args=(job_id, process_query, [query, model_choice or "Mistral-API"])
    ).start()
    
    jobs[job_id] = {
        "status": "processing", 
        "type": "query",
        "start_time": time.time(),
        "query": query,
        "model": model_choice or "Mistral-API"
    }
    
    last_job_id = job_id
    
    return (
        f"πŸš€ Query submitted and processing in the background (Job ID: {job_id}).\n\n"
        f"Use the 'Check Job Status' section to view results.",
        f"Job ID: {job_id}",
        f"Input tokens: {count_tokens(query)}",
        "Output tokens: pending",
        job_id,  # For UI job id update
        query,  # For UI query display update
        get_job_list()  # Updated job list
    )

def job_selected(job_id):
    if job_id in jobs:
        return job_id, jobs[job_id].get("query", "No query for this job")
    return job_id, "Job not found"

def refresh_job_list():
    return get_job_list()

def sync_model_dropdown(value):
    return value    

def check_job_status(job_id):
    if not job_id:
        html_response = "<div style='font-family: monospace;'><p>Please enter a job ID.</p></div>"
        return html_response, "", "", "", ""
    
    # Process any completed jobs in the results queue
    try:
        while not results_queue.empty():
            completed_id, result = results_queue.get_nowait()
            if completed_id in jobs:
                jobs[completed_id]["status"] = "completed"
                jobs[completed_id]["result"] = result
                jobs[completed_id]["end_time"] = time.time()
                debug_print(f"Job {completed_id} completed and stored in jobs dictionary")
    except queue.Empty:
        pass
    
    if job_id not in jobs:
        html_response = "<div style='font-family: monospace;'><p>Job not found. Please check the ID and try again.</p></div>"
        return html_response, "", "", "", ""
    
    job = jobs[job_id]
    job_query = job.get("query", "No query available for this job")
    
    if job["status"] == "processing":
        elapsed_time = time.time() - job["start_time"]
        html_response = (
            f"<div style='font-family: monospace;'>"
            f"<p><strong>⏳ Query is still being processed</strong> (elapsed: {elapsed_time:.1f}s). Please check again shortly.</p>"
            f"</div>"
        )
        return (
            html_response,
            f"Job ID: {job_id}",
            f"Input tokens: {count_tokens(job.get('query', ''))}",
            "Output tokens: pending",
            job_query
        )
    
    if job["status"] == "completed":
        result = job["result"]
        processing_time = job["end_time"] - job["start_time"]
        html_response = (
            f"<div style='font-family: monospace;'>"
            f"<p><strong>βœ… Response:</strong> {result[0]}</p>"
            f"<p>Processing time: {processing_time:.1f}s</p>"
            f"</div>"
        )
        return (
            html_response,
            f"Job ID: {job_id}",
            result[1],
            result[2],
            job_query
        )
    
    html_response = f"<div style='font-family: monospace;'><p>Job status: {job['status']}</p></div>"
    return html_response, "", "", "", job_query

def cleanup_old_jobs():
    current_time = time.time()
    to_delete = []
    
    for job_id, job in jobs.items():
        # Completed jobs older than 24 hours and processing jobs older than 48 hours will be removed.
        if job["status"] == "completed" and (current_time - job.get("end_time", 0)) > 86400:
            to_delete.append(job_id)
        elif job["status"] == "processing" and (current_time - job.get("start_time", 0)) > 172800:
            to_delete.append(job_id)
    
    for job_id in to_delete:
        del jobs[job_id]
    
    debug_print(f"Cleaned up {len(to_delete)} old jobs. {len(jobs)} jobs remaining.")
    return f"Cleaned up {len(to_delete)} old jobs", "", ""

# Function to run query (dummy function)
def run_query(max_value):
    # Simulate a data retrieval or processing function
    return [[i, i**2] for i in range(1, max_value + 1)]
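
# Example: run_query(3) returns [[1, 1], [2, 4], [3, 9]].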

# Function to call both refresh_job_list and check_job_status using the last job ID
def periodic_update(is_checked):
    interval = 3 if is_checked else None
    debug_print(f"Auto-refresh checkbox is {'checked' if is_checked else 'unchecked'}, every={interval}")
    if is_checked:
        global last_job_id
        job_list_md = refresh_job_list()
        job_status = check_job_status(last_job_id) if last_job_id else ("No job ID available", "", "", "", "")
        
        # Extract plain text from HTML for status_text
        from bs4 import BeautifulSoup
        html_content = job_status[0]
        plain_text = ""
        if html_content:
            soup = BeautifulSoup(html_content, "html.parser")
            plain_text = soup.get_text()
        
        # Return all expected outputs, including status_text
        return job_list_md, job_status[0], plain_text, job_status[1], job_status[2], job_status[3], job_status[4]
    else:
        # Return empty values to stop updates - make sure to match the number of expected outputs
        return "", "", "", "", "", "", ""
        
# OAuth 2.0 Scopes
SCOPES = ["https://www.googleapis.com/auth/gmail.send"]

# Flow (web-app OAuth) is needed below; the remaining Google modules are
# already imported at the top of the file.
from google_auth_oauthlib.flow import Flow


def get_gmail_credentials():
    global oauth_flow
    
    creds = None
    
    # Fetch client secrets from environment variables
    client_id = os.environ.get("HF_GOOGLE_CLIENT_ID")
    client_secret = os.environ.get("HF_GOOGLE_CLIENT_SECRET")
    
    if not client_id or not client_secret:
        raise ValueError("Missing Gmail OAuth credentials in environment variables.")
    
    # Define the redirect URI for your Hugging Face space
    redirect_uri = "https://huggingface.co/spaces/alx-d/scout/oauth2callback"
    
    # Load cached credentials from token.json if available
    token_path = "token.json"  # assumed cache location, per the comment above
    if os.path.exists(token_path):
        creds = Credentials.from_authorized_user_file(token_path)
    
    # If no valid credentials, log in via OAuth
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            client_config = {
                "web": {
                    "client_id": client_id,
                    "project_id": "your_project_id",
                    "auth_uri": "https://accounts.google.com/o/oauth2/auth",
                    "token_uri": "https://oauth2.googleapis.com/token",
                    "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
                    "client_secret": client_secret,
                    "redirect_uris": [redirect_uri]
                }
            }
            
            oauth_flow = Flow.from_client_config(client_config, SCOPES, redirect_uri=redirect_uri)
            auth_url, _ = oauth_flow.authorization_url(
                prompt='consent',
                access_type='offline',
                include_granted_scopes='true'
            )
            
            return None, auth_url
    
    return creds, None
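
# Usage sketch (illustrative): this returns (creds, None) when cached
# credentials are valid, and (None, auth_url) when the user must first visit
# auth_url to grant consent, so callers should unpack and check both values.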

# Add email sending function  
def send_email(email_address, content, is_formatted=True):
    if not email_address or "@" not in email_address:
        return "Please enter a valid email address"

    try:
        creds, auth_url = get_gmail_credentials()  # returns (creds, None) or (None, auth_url)
        if creds is None:
            return f"Authorization required. Visit: {auth_url}"
        service = build("gmail", "v1", credentials=creds)

        # Create email message with appropriate MIME type
        msg = MIMEMultipart()
        msg["to"] = email_address
        msg["subject"] = "Scouting AI Report"
        msg.attach(MIMEText(content, "html" if is_formatted else "plain"))

        # Encode email message in base64
        encoded_msg = base64.urlsafe_b64encode(msg.as_bytes()).decode()
        send_message = {"raw": encoded_msg}

        # Send email using Gmail API
        service.users().messages().send(userId="me", body=send_message).execute()
        return "Email sent successfully via Gmail API!"
    
    except Exception as e:
        return f"Failed to send email: {str(e)}"

# Copy content to the clipboard. Note: pyperclip writes to the clipboard of the
# machine running this script, not the visitor's browser, so this only works locally.
def copy_to_clipboard(content):
    import pyperclip
    pyperclip.copy(content)
    return "Copied to clipboard!"


# Function to convert HTML to plain text using BeautifulSoup
def copy_plain_text(html_content):
    try:
        from bs4 import BeautifulSoup
    except ImportError:
        return "Error: BeautifulSoup is required to convert HTML to plain text. Please install it."
    soup = BeautifulSoup(html_content, "html.parser")
    plain_text = soup.get_text()
    import pyperclip
    pyperclip.copy(plain_text)
    
    return "Copied to clipboard!"


# Default prompt template
default_prompt = """
You are a scout who has played against this player, and you are analyzing the following statistics.
Create a scouting report for the head coach, detailing:

1) The player's strengths, along with a strategy to counter those strengths.
2) The player's weaknesses, and how we can exploit those weaknesses based on the stats.

Present the report in a way that is easy to read, combining each strength with its corresponding counter-strategy, and each weakness with an exploitation plan.

At the end of the report, include a "Key Points to Emphasize" section.

Use HTML formatting for the output, and apply a dark color palette (e.g., dark green, dark red, dark gray) for different sections to enhance visual readability.
"""

# ------------------------------
# Gradio UI Layout: Scouting AI App
# ------------------------------

with gr.Blocks() as app:
    # App Title and Description
    gr.Markdown("## πŸ€ Scouting AI App")
    gr.Markdown("Welcome to the Scouting AI App! Upload your files, submit queries, and check job statuses easily. Game on! 😊")
    
    # Two-column layout for top section (File Load and Job Information)
    with gr.Row():
        # Left Column: File Load Section (50% width)
        with gr.Column(scale=1):
            gr.Markdown("### πŸ“ Load File Section")
            gr.Markdown("Upload your **.xlsm** file below, specify the sheet name, and click *Load Sheet* to process your file.")
            file_input = gr.File(label="Upload .xlsm File")
            sheet_input_file = gr.Textbox(label="Sheet Name")
            load_button_file = gr.Button("Load Sheet")
            sheet_output_file = gr.Textbox(label="Sheet Info", interactive=False)
        
        # Right Column: Job Information Section (50% width)
        with gr.Column(scale=1):
            gr.Markdown("### πŸ“Š Job Information")
            gr.Markdown("View all submitted jobs, refresh the list, and check the status of individual jobs.")
            
            # Fixed-height job list with scrollbar
            job_list_display = gr.Markdown(
                get_job_list(),
                elem_id="job-list-display",
                elem_classes=["scrollable-job-list"]
            )
            
            # Add CSS for scrollable job list
            gr.HTML("""
            <style>
            .scrollable-job-list {
                height: 220px;
                overflow-y: auto;
                border: 1px solid #ccc;
                padding: 10px;
                margin-bottom: 10px;
            }
            </style>
            """)
            
            refresh_button = gr.Button("Refresh Job List")
                       
            gr.Markdown("#### πŸ” Check Job Status")
            job_id_input = gr.Textbox(label="Enter Job ID")
            check_status_button = gr.Button("Check Job Status")
            
    # Submit Query Section (left column, below File Load)
    with gr.Row():
        # Left Column: Submit Query Section
        with gr.Column(scale=1):
            gr.Markdown("### πŸš€ Submit Query")
            gr.Markdown("Enter your prompt below and choose a model. Your query will be processed in the background.")
            # Model selection dropdown (flag prefix marks the provider's region)
            model_dropdown = gr.Dropdown(
                choices=[
                    "🇺🇸 GPT-3.5",
                    "🇺🇸 GPT-4o",
                    "🇺🇸 GPT-4o mini",
                    "🇺🇸 o1-mini",
                    "🇺🇸 o3-mini",
                    "🇺🇸 Remote Meta-Llama-3",
                    "🇪🇺 Mistral-API",
                ],
                value="🇺🇸 GPT-4o mini",  # Default model
                label="Select Model"
            )
            max_tokens_slider = gr.Slider(minimum=200, maximum=4096, value=1200, label="🔒 Max Tokens", step=50)
                
            prompt_input = gr.Textbox(label="Enter your prompt", value=default_prompt, lines=6)
            with gr.Row():
                auto_refresh_checkbox = gr.Checkbox(
                    label="Enable Auto Refresh",
                    value=False  # Default to unchecked
                )                        
                submit_button = gr.Button("Submit Query")
                # Use a Checkbox to control the periodic updates
                
            # Add a textarea to store the plain text version for copying
            status_text = gr.Textbox(label="Response Text", visible=True)
            
            response_output = gr.Textbox(label="Response", interactive=False)
            token_info = gr.Textbox(label="Token Info", interactive=False)
            # Add buttons for copying and sending email
#            with gr.Row():
#                copy_btn = gr.Button("📋 Copy Text")

           
            # Add buttons for copying and sending email
#            with gr.Row():
#                copy_plain_button = gr.Button("📋 Copy Plain Text")
#                copy_formatted_button = gr.Button("📋 Copy Formatted")
                
#            with gr.Row():
#                email_input = gr.Textbox(label="Email Address")
#                send_email_button = gr.Button("📧 Send Report")
#                email_status = gr.Textbox(label="Status", interactive=False)
            
        # Job Status Output in right column
        with gr.Column(scale=1):
            # Change Job Status output to an HTML component for proper formatting
            status_output = gr.HTML(label="Job Status", interactive=False)
                
            job_id_display = gr.Textbox(label="Job ID", interactive=False)
            input_tokens_display = gr.Textbox(label="Input Tokens", interactive=False)
            output_tokens_display = gr.Textbox(label="Output Tokens", interactive=False)
            job_query_display = gr.Textbox(label="Job Query", interactive=False)
    
    # ------------------------------
    # Set up interactions
    # ------------------------------
    
    # Load file interaction (dummy function for now)
    def load_file(file, sheet_name):
        global sheet_data
        global file_name
        global sheet  # without this, the assignment below made a local and get_sheet_data never saw the sheet name
        file_name = file
        sheet = sheet_name
        
        if file is None or sheet_name.strip() == "":
            return "Please upload a file and enter a valid sheet name."
        
        try:
            df = pd.read_excel(file.name, sheet_name=sheet_name)
            sheet_data = df.to_string(index=False)  # Convert sheet data to string format
            return sheet_data  # Display sheet data in UI
        except Exception as e:
            return f"Error reading sheet: {str(e)}"
    
    load_button_file.click(
        fn=load_file,
        inputs=[file_input, sheet_input_file],
        outputs=sheet_output_file
    )
    
    # When submitting a query asynchronously
    submit_button.click(
        fn=submit_query_async,
        inputs=[prompt_input, model_dropdown, max_tokens_slider],
        outputs=[
            response_output, token_info, 
            input_tokens_display, output_tokens_display,
            job_id_input, job_query_display, job_list_display
        ]
    )
    
    # Check job status interaction
    check_status_button.click(
        fn=check_job_status,
        inputs=[job_id_input],
        outputs=[status_output, job_id_display, input_tokens_display,
                 output_tokens_display, job_query_display]
    )
    
    # Refresh the job list
    refresh_button.click(
        fn=refresh_job_list,
        inputs=[],
        outputs=job_list_display
    )
    
    # Use the Checkbox to control the periodic updates
    auto_refresh_checkbox.change(
        fn=periodic_update,
        inputs=[auto_refresh_checkbox],
        outputs=[job_list_display, status_output, status_text, job_id_display, input_tokens_display, output_tokens_display, job_query_display],
        every=3
    )
    

    # Connect the copy button to show the text in the textbox and make it visible temporarily
    def show_copy_text(text):
        # Simply return the text value and make the component visible
        return gr.update(value=text, visible=True)

    # Set up the event handlers
#    copy_btn.click(fn=show_copy_text, inputs=status_text, outputs=status_text)

    
    # Copy and email buttons
#    copy_plain_button.click(fn=copy_plain_text, inputs=[status_output], outputs=[email_status])
#    copy_formatted_button.click(fn=copy_to_clipboard, inputs=[status_output], outputs=[email_status])
#    send_email_button.click(fn=send_email, inputs=[email_input, status_output], outputs=[email_status])


if __name__ == "__main__":
    debug_print("Launching Gradio UI...")
    app.queue().launch(share=False)