"""
Dashboard Narrator - Powered by OpenRouter.ai
A tool to analyze dashboard PDFs and images and generate comprehensive reports.
"""

# Import required libraries
import os
import time
import threading
import io
import base64
import json
import requests
from PyPDF2 import PdfReader
from PIL import Image
import markdown
from weasyprint import HTML, CSS
from weasyprint.text.fonts import FontConfiguration
from pdf2image import convert_from_bytes
import gradio as gr
import tempfile
import shutil

# Create a global progress tracker
class ProgressTracker:
    def __init__(self):
        self.progress = 0
        self.message = "Ready"
        self.is_processing = False
        self.lock = threading.Lock()
        self.gradio_progress = None  # Store Gradio progress object
        self.progress_bar = None  # Store Gradio progress bar component
    
    def update(self, progress, message="Processing..."):
        with self.lock:
            self.progress = progress
            self.message = message
            # Update Gradio progress bar if available
            if self.gradio_progress is not None:
                try:
                    self.gradio_progress(progress / 100, desc=message)
                except Exception:
                    pass  # Ignore errors if progress object is not valid
            
            # Update visible progress bar component if available
            if self.progress_bar is not None:
                try:
                    # Create HTML progress bar
                    progress_html = f"""
                    <div style="background-color: #f0f0f0; border-radius: 10px; padding: 5px; margin: 10px 0; border: 1px solid #ddd;">
                        <div style="background: linear-gradient(90deg, #4CAF50 0%, #45a049 100%); width: {progress}%; height: 25px; border-radius: 8px; transition: width 0.3s ease;"></div>
                        <div style="text-align: center; margin-top: 5px; font-weight: bold; color: #333;">
                            {message} - {progress:.1f}%
                        </div>
                    </div>
                    """
                    self.progress_bar.update(value=progress_html, visible=True)
                except Exception as e:
                    print(f"Error updating progress bar: {e}")
                    pass
    
    def get_status(self):
        with self.lock:
            return f"{self.message} ({self.progress:.1f}%)"
    
    def start_processing(self, gradio_progress=None, progress_bar=None):
        with self.lock:
            self.is_processing = True
            self.progress = 0
            self.message = "Starting..."
            # Keep any previously registered Gradio hooks when none are passed in,
            # so a nested start_processing() call does not disable UI progress updates.
            if gradio_progress is not None:
                self.gradio_progress = gradio_progress
            if progress_bar is not None:
                self.progress_bar = progress_bar
            # Show progress bar
            if self.progress_bar is not None:
                try:
                    start_html = """
                    <div style="background-color: #f0f0f0; border-radius: 10px; padding: 5px; margin: 10px 0; border: 1px solid #ddd;">
                        <div style="background: linear-gradient(90deg, #4CAF50 0%, #45a049 100%); width: 0%; height: 25px; border-radius: 8px; transition: width 0.3s ease;"></div>
                        <div style="text-align: center; margin-top: 5px; font-weight: bold; color: #333;">
                            Starting... - 0.0%
                        </div>
                    </div>
                    """
                    self.progress_bar.update(value=start_html, visible=True)
                except Exception as e:
                    print(f"Error starting progress bar: {e}")
                    pass
    
    def end_processing(self):
        with self.lock:
            self.is_processing = False
            self.progress = 100
            self.message = "Complete"
            # Show completion progress bar
            if self.progress_bar is not None:
                try:
                    complete_html = """
                    <div style="background-color: #f0f0f0; border-radius: 10px; padding: 5px; margin: 10px 0; border: 1px solid #ddd;">
                        <div style="background: linear-gradient(90deg, #4CAF50 0%, #45a049 100%); width: 100%; height: 25px; border-radius: 8px; transition: width 0.3s ease;"></div>
                        <div style="text-align: center; margin-top: 5px; font-weight: bold; color: #333;">
                            ✅ Analysis Complete - 100.0%
                        </div>
                    </div>
                    """
                    self.progress_bar.update(value=complete_html, visible=True)
                except Exception as e:
                    print(f"Error ending progress bar: {e}")
                    pass
            self.gradio_progress = None
            # Don't reset progress_bar here so it shows the completion state

# Create a global instance
progress_tracker = ProgressTracker()
output_status = None

# Function to update the Gradio interface with progress
def update_progress():
    global output_status
    while progress_tracker.is_processing:
        status = progress_tracker.get_status()
        if output_status is not None:
            output_status.update(value=status)
        time.sleep(0.5)
    return

# OpenRouter Client for making API calls
class OpenRouterClient:
    def __init__(self, api_key):
        self.api_key = api_key
        self.base_url = "https://openrouter.ai/api/v1"
    
    def messages_create(self, model, messages, system=None, temperature=0.7, max_tokens=None):
        """Send messages to the OpenRouter API and return the response"""
        url = f"{self.base_url}/chat/completions"
        
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }
        
        payload = {
            "model": model,
            "messages": messages,
            "temperature": temperature,
        }
        
        # Add system message if provided
        if system:
            payload["messages"].insert(0, {"role": "system", "content": system})
        
        # Add max_tokens if provided
        if max_tokens:
            payload["max_tokens"] = max_tokens
        
        try:
            response = requests.post(url, headers=headers, json=payload)
            response.raise_for_status()  # Raise an exception for HTTP errors
            
            result = response.json()
            
            # Format the response to match the expected structure
            formatted_response = type('obj', (object,), {
                'content': [
                    type('obj', (object,), {
                        'text': result['choices'][0]['message']['content']
                    })
                ]
            })
            
            return formatted_response
            
        except requests.exceptions.RequestException as e:
            print(f"API request error: {str(e)}")
            # Note: requests' Response is falsy for error statuses (bool(resp) == resp.ok),
            # so check for None explicitly or the error body would never be printed.
            if getattr(e, 'response', None) is not None:
                print(f"Response: {e.response.text}")
            raise
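
# Illustrative usage sketch (commented out, not executed): `messages_create`
# mirrors the OpenAI-style chat payload that OpenRouter expects, so a minimal
# text-only call would look like the following. `my_key` and the model name
# are placeholders.
#
#   client = OpenRouterClient(api_key=my_key)
#   reply = client.messages_create(
#       model="anthropic/claude-sonnet-4",
#       messages=[{"role": "user", "content": "Summarize: churn fell from 4.2% to 3.1%."}],
#       system="You are a concise business analyst.",
#       max_tokens=200,
#   )
#   print(reply.content[0].text)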

# Supported languages configuration
SUPPORTED_LANGUAGES = {
    "italiano": {
        "code": "it",
        "name": "Italiano",
        "report_title": "Analisi Dashboard",
        "report_subtitle": "Report Dettagliato",
        "date_label": "Data",
        "system_prompt": "Sei un esperto analista di business intelligence specializzato nell'interpretazione di dashboard e dati visualizzati. Fornisci analisi in italiano approfondite e insight actionable basati sui dati forniti.",
        "section_title": "ANALISI SEZIONE",
        "multi_doc_title": "ANALISI DASHBOARD {index}"
    },
    "english": {
        "code": "en",
        "name": "English",
        "report_title": "Dashboard Analysis",
        "report_subtitle": "Detailed Report",
        "date_label": "Date",
        "system_prompt": "You are an expert business intelligence analyst specialized in interpreting dashboards and data visualizations. Provide in-depth analysis and actionable insights based on the data provided.",
        "section_title": "SECTION ANALYSIS",
        "multi_doc_title": "DASHBOARD {index} ANALYSIS"
    },
    "français": {
        "code": "fr",
        "name": "Français",
        "report_title": "Analyse de Tableau de Bord",
        "report_subtitle": "Rapport Détaillé",
        "date_label": "Date",
        "system_prompt": "Vous êtes un analyste expert en business intelligence spécialisé dans l'interprétation des tableaux de bord et des visualisations de données. Fournissez en français une analyse approfondie et des insights actionnables basés sur les données fournies.",
        "section_title": "ANALYSE DE SECTION",
        "multi_doc_title": "ANALYSE DU TABLEAU DE BORD {index}"
    },
    "español": {
        "code": "es",
        "name": "Español",
        "report_title": "Análisis de Dashboard",
        "report_subtitle": "Informe Detallado",
        "date_label": "Fecha",
        "system_prompt": "Eres un analista experto en inteligencia empresarial especializado en interpretar dashboards y visualizaciones de datos. Proporciona en español un análisis en profundidad e insights accionables basados en los datos proporcionados.",
        "section_title": "ANÁLISIS DE SECCIÓN",
        "multi_doc_title": "ANÁLISIS DEL DASHBOARD {index}"
    },
    "deutsch": {
        "code": "de",
        "name": "Deutsch",
        "report_title": "Dashboard-Analyse",
        "report_subtitle": "Detaillierter Bericht",
        "date_label": "Datum",
        "system_prompt": "Sie sind ein Experte für Business Intelligence-Analyse, der auf die Interpretation von Dashboards und Datenvisualisierungen spezialisiert ist. Bieten Sie auf Deutsch eine eingehende Analyse und umsetzbare Erkenntnisse auf Grundlage der bereitgestellten Daten.",
        "section_title": "ABSCHNITTSANALYSE",
        "multi_doc_title": "DASHBOARD-ANALYSE {index}"
    }
}
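
# Illustrative sketch (commented out): downstream functions select a language
# entry by its ISO code rather than by dictionary key, e.g.:
#
#   lang = next(v for v in SUPPORTED_LANGUAGES.values() if v["code"] == "en")
#   lang["report_title"]  # -> "Dashboard Analysis"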

# OpenRouter models - Updated with new models
DEFAULT_MODEL = "anthropic/claude-sonnet-4"
OPENROUTER_MODELS = [
    "anthropic/claude-sonnet-4",
    "anthropic/claude-3.7-sonnet",
    "openai/gpt-4.1",
    "openai/o4-mini-high",
    "openai/gpt-4.1-mini",
    "google/gemini-2.5-flash-preview-05-20",
    "google/gemini-2.5-pro-preview-03-25",
    "moonshotai/kimi-vl-a3b-thinking:free",
    "microsoft/phi-4-multimodal-instruct",
    "qwen/qwen2.5-vl-72b-instruct:free",
    "openrouter/optimus-alpha"
]

# Utility Functions
def extract_text_from_pdf(pdf_bytes):
    """Extract text from a PDF file."""
    try:
        pdf_reader = PdfReader(io.BytesIO(pdf_bytes))
        text = ""
        for page_num in range(len(pdf_reader.pages)):
            extracted = pdf_reader.pages[page_num].extract_text()
            if extracted:
                text += extracted + "\n"
        return text
    except Exception as e:
        print(f"Error extracting text from PDF: {str(e)}")
        return ""

def get_file_type(file_path):
    """Determine the file type based on file extension."""
    if file_path.lower().endswith('.pdf'):
        return 'pdf'
    elif file_path.lower().endswith(('.png', '.jpg', '.jpeg')):
        return 'image'
    else:
        return 'unknown'

def load_image_from_file(file_path):
    """Load an image from file path."""
    try:
        image = Image.open(file_path)
        # Convert to RGB if necessary
        if image.mode != 'RGB':
            image = image.convert('RGB')
        return image
    except Exception as e:
        print(f"Error loading image from {file_path}: {str(e)}")
        return None

def load_image_from_bytes(image_bytes):
    """Load an image from bytes."""
    try:
        image = Image.open(io.BytesIO(image_bytes))
        # Convert to RGB if necessary
        if image.mode != 'RGB':
            image = image.convert('RGB')
        return image
    except Exception as e:
        print(f"Error loading image from bytes: {str(e)}")
        return None

def divide_image_vertically(image, num_sections):
    """Divide an image vertically into sections."""
    width, height = image.size
    section_height = height // num_sections
    sections = []
    for i in range(num_sections):
        top = i * section_height
        bottom = height if i == num_sections - 1 else (i + 1) * section_height
        section = image.crop((0, top, width, bottom))
        sections.append(section)
        print(f"Section {i+1}: size {section.width}x{section.height} pixels")
    return sections

def encode_image_with_resize(image, max_size_mb=4.5):
    """Encode an image in base64, resizing if necessary."""
    max_bytes = max_size_mb * 1024 * 1024
    img_byte_arr = io.BytesIO()
    image.save(img_byte_arr, format='PNG')
    current_size = len(img_byte_arr.getvalue())
    if current_size > max_bytes:
        scale_factor = (max_bytes / current_size) ** 0.5
        new_width = int(image.width * scale_factor)
        new_height = int(image.height * scale_factor)
        resized_image = image.resize((new_width, new_height), Image.LANCZOS)
        img_byte_arr = io.BytesIO()
        resized_image.save(img_byte_arr, format='PNG', optimize=True)
        print(f"Image resized from {current_size/1024/1024:.2f}MB to {len(img_byte_arr.getvalue())/1024/1024:.2f}MB")
        image = resized_image
    else:
        print(f"Image size acceptable: {current_size/1024/1024:.2f}MB")
    buffer = io.BytesIO()
    image.save(buffer, format="PNG", optimize=True)
    return base64.b64encode(buffer.getvalue()).decode("utf-8")
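
# Illustrative sketch (commented out): slicing a tall dashboard screenshot and
# preparing each slice for a vision-model request. "dashboard.png" is a placeholder.
#
#   img = load_image_from_file("dashboard.png")
#   for idx, section in enumerate(divide_image_vertically(img, num_sections=4), start=1):
#       b64 = encode_image_with_resize(section)
#       data_url = f"data:image/png;base64,{b64}"  # usable as an image_url message part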

# Core Analysis Functions
def analyze_dashboard_section(client, model, section_number, total_sections, image_section, full_text, language, goal_description=None):
    """Analyze a vertical section of the dashboard in the specified language."""
    print(f"Analyzing section {section_number}/{total_sections} in {language['name']} using {model}...")
    try:
        encoded_image = encode_image_with_resize(image_section)
    except Exception as e:
        print(f"Error encoding section {section_number}: {str(e)}")
        return f"Error analyzing section {section_number}: {str(e)}"
    
    section_prompt = f"""
    Act as a senior data analyst examining this dashboard section for Customer Experience purposes.\n
    Your analysis will be shared with top executives to inform decisions about Customer Experience improvements and customer satisfaction levels.\n
    # Dashboard Analysis - Section {section_number} of {total_sections}\n
    You are analyzing section {section_number} of {total_sections} of a long vertical dashboard. This is part of a broader analysis.\n
    {f"The analysis objective is: {goal_description}" if goal_description else ""}\n\n
    For this specific section:\n
    1. Describe what these visualizations show, including their type (e.g., bar chart, line graph) and the data they represent\n
    2. Quantitatively analyze the data, noting specific values, percentages, and numeric trends\n
    3. Identify significant patterns, anomalies, or outliers visible in the data\n
    4. Provide 2-3 actionable insights based on this analysis, explaining their business implications\n
    5. Suggest possible reasons for any notable trends or unexpected findings\n
    Focus exclusively on the visible section. Don't reference or speculate about unseen dashboard elements.\n
    Answer completely in {language['name']}.\n\n
    # Text extracted from the complete dashboard:\n
    {full_text[:10000] if full_text else "No text available for this image."}
    
    # Image of this dashboard section:
    [BASE64 IMAGE: {encoded_image[:20]}...] 
    This is a dashboard visualization showing various metrics and charts. Please analyze the content visible in this image.
    """
    
    # Create message with image for vision models
    message_content = [
        {
            "type": "text",
            "text": section_prompt
        },
        {
            "type": "image_url",
            "image_url": {
                "url": f"data:image/png;base64,{encoded_image}"
            }
        }
    ]
    
    try:
        response = client.messages_create(
            model=model,
            messages=[{"role": "user", "content": message_content}],
            system=language['system_prompt'],
            temperature=0.1,
            max_tokens=10000
        )
        return response.content[0].text
    except Exception as e:
        print(f"Error analyzing section {section_number}: {str(e)}")
        return f"Error analyzing section {section_number}: {str(e)}"

def create_comprehensive_report(client, model, section_analyses, full_text, language, goal_description=None):
    """Create a unified comprehensive report based on individual section analyses."""
    print(f"Generating final comprehensive report in {language['name']} using {model}...")
    comprehensive_prompt = f"""
    # Comprehensive Dashboard Analysis Request
    You have analyzed a long vertical dashboard in multiple sections. Now you need to create a unified and coherent report based on all the partial analyses.\n
    {f"The analysis objective is: {goal_description}" if goal_description else ""}\n\n
    Here are the analyses of the individual dashboard sections:\n
    {section_analyses}\n\n
    Based on these partial analyses, generate a professional, structured, and coherent report that includes:\n
    1. Executive Summary - Include key metrics, major findings, and critical recommendations (limit to 1 page equivalent)\n
    2. Dashboard Performance Overview - Add a section that evaluates the overall health metrics before diving into categories\n
    3. Detailed Analysis by Category - Keep this, it's essential\n
    4. Trend Analysis - Broaden from just temporal to include cross-category patterns\n
    5. Critical Issues and Opportunities - Combine anomalies with positive outliers to provide balanced insights\n
    6. Strategic Implications and Recommendations - Consolidate your insights and recommendations into a single, stronger section\n
    7. Implementation Roadmap - Convert your conclusions into a prioritized action plan with timeframes\n
    8. Appendix: Monitoring Improvements - Move the monitoring suggestions to an appendix unless they're a primary focus\n\n
    Integrate information from all sections to create a coherent and complete report.\n\n
    # Text extracted from the complete dashboard:\n
    {full_text[:10000] if full_text else "No text available for this image."}
    """
    try:
        response = client.messages_create(
            model=model,
            messages=[{"role": "user", "content": comprehensive_prompt}],
            system=language['system_prompt'],
            temperature=0.1,
            max_tokens=10000
        )
        return response.content[0].text
    except Exception as e:
        print(f"Error creating comprehensive report: {str(e)}")
        return f"Error creating comprehensive report: {str(e)}"

def create_multi_dashboard_comparative_report(client, model, individual_reports, language, goal_description=None):
    """Create a comparative report analyzing multiple dashboards together."""
    print(f"Generating comparative report for multiple dashboards in {language['name']} using {model}...")
    comparative_prompt = f"""
    # Multi-Dashboard Comparative Analysis Request
    You have analyzed multiple dashboards individually. Now you need to create a comparative analysis report that identifies patterns, similarities, differences, and insights across all dashboards.
    {f"The analysis objective is: {goal_description}" if goal_description else ""}
    Here are the analyses of the individual dashboards:
    {individual_reports}
    Based on these individual analyses, generate a professional, structured comparative report that includes:
    1. Executive Overview of All Dashboards
    2. Comparative Analysis of Key Metrics
    3. Cross-Dashboard Patterns and Trends
    4. Notable Differences Between Dashboards
    5. Integrated Insights from All Sources
    6. Comprehensive Strategic Recommendations
    7. Suggestions for Cross-Dashboard Monitoring Improvements
    8. Conclusions and Integrated Next Steps
    Integrate information from all dashboards to create a coherent comparative report.
    """
    try:
        response = client.messages_create(
            model=model,
            messages=[{"role": "user", "content": comparative_prompt}],
            system=language['system_prompt'],
            temperature=0.1,
            max_tokens=12000
        )
        return response.content[0].text
    except Exception as e:
        print(f"Error creating comparative report: {str(e)}")
        return f"Error creating comparative report: {str(e)}"

def markdown_to_pdf(markdown_content, output_filename, language):
    """Convert Markdown content to a well-formatted PDF."""
    print(f"Converting Markdown report to PDF in {language['name']}...")
    css = CSS(string='''
        @page { margin: 1.5cm; }
        body { font-family: Arial, sans-serif; line-height: 1.5; font-size: 11pt; }
        h1 { color: #2c3e50; font-size: 22pt; margin-top: 1cm; margin-bottom: 0.5cm; page-break-after: avoid; }
        h2 { color: #3498db; font-size: 16pt; margin-top: 0.8cm; margin-bottom: 0.3cm; page-break-after: avoid; }
        p { margin-bottom: 0.3cm; text-align: justify; }
    ''')
    today = time.strftime("%d/%m/%Y")
    cover_page = f"""
    <div style="text-align: center; height: 100vh; display: flex; flex-direction: column; justify-content: center; align-items: center;">
        <h1 style="font-size: 26pt; color: #2c3e50;">{language['report_title']}</h1>
        <h2 style="font-size: 14pt; color: #7f8c8d;">{language['report_subtitle']}</h2>
        <p style="font-size: 12pt; color: #7f8c8d;">{language['date_label']}: {today}</p>
    </div>
    <div style="page-break-after: always;"></div>
    """
    html_content = markdown.markdown(markdown_content, extensions=['tables', 'fenced_code'])
    full_html = f"""
    <!DOCTYPE html>
    <html lang="{language['code']}">
    <head><meta charset="UTF-8"><title>{language['report_title']}</title></head>
    <body>{cover_page}{html_content}</body>
    </html>
    """
    font_config = FontConfiguration()
    HTML(string=full_html).write_pdf(output_filename, stylesheets=[css], font_config=font_config)
    print(f"PDF created successfully: {output_filename}")
    return output_filename
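
# Illustrative sketch (commented out): rendering a Markdown report with the
# built-in English settings. The output filename is a placeholder.
#
#   markdown_to_pdf("# Sample Report\n\nHello.", "report_en.pdf", SUPPORTED_LANGUAGES["english"])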

def analyze_vertical_dashboard(client, model, file_data, file_type, language, goal_description=None, num_sections=4, dashboard_index=None):
    """Analyze a vertical dashboard by dividing it into sections. Supports both PDF and image files."""
    dashboard_marker = f" {dashboard_index}" if dashboard_index is not None else ""
    total_dashboards = progress_tracker.total_dashboards if hasattr(progress_tracker, 'total_dashboards') else 1
    dashboard_progress_base = ((dashboard_index - 1) / total_dashboards * 100) if dashboard_index is not None else 0
    dashboard_progress_step = (100 / total_dashboards) if total_dashboards > 0 else 100
    
    progress_tracker.update(dashboard_progress_base, f"🖼️ Analyzing dashboard{dashboard_marker}...")
    print(f"🖼️ Analyzing dashboard{dashboard_marker}...")
    
    # Extract text if it's a PDF
    full_text = ""
    if file_type == 'pdf':
        progress_tracker.update(dashboard_progress_base + dashboard_progress_step * 0.1, f"📄 Extracting text from dashboard{dashboard_marker}...")
        print(f"📄 Extracting full text from PDF...")
        full_text = extract_text_from_pdf(file_data)
        if not full_text or len(full_text.strip()) < 100:
            print("⚠️ Limited text extracted from PDF. Analysis will rely primarily on images.")
        else:
            print(f"✅ Extracted {len(full_text)} characters of text from PDF.")
    else:
        print("📄 Image file detected - no text extraction needed.")
    
    # Convert to image(s)
    progress_tracker.update(dashboard_progress_base + dashboard_progress_step * 0.2, f"🖼️ Converting dashboard{dashboard_marker} to images...")
    print("🖼️ Processing image...")
    
    try:
        if file_type == 'pdf':
            # Convert PDF to images
            pdf_images = convert_from_bytes(file_data, dpi=150)
            if not pdf_images:
                print("❌ Unable to convert PDF to images.")
                return None, "Error: Unable to convert PDF to images."
            print(f"✅ PDF converted to {len(pdf_images)} image pages.")
            # Only the first page is used as the dashboard image; additional pages are ignored.
            main_image = pdf_images[0]
        else:
            # Load image directly
            main_image = load_image_from_bytes(file_data)
            if main_image is None:
                print("❌ Unable to load image.")
                return None, "Error: Unable to load image."
            print(f"✅ Image loaded successfully.")
        
        print(f"Main image size: {main_image.width}x{main_image.height} pixels")
        
        progress_tracker.update(dashboard_progress_base + dashboard_progress_step * 0.3, f"Dividing dashboard{dashboard_marker} into {num_sections} sections...")
        print(f"Dividing image into {num_sections} vertical sections...")
        image_sections = divide_image_vertically(main_image, num_sections)
        print(f"✅ Image divided into {len(image_sections)} sections.")
    except Exception as e:
        print(f"❌ Error processing image: {str(e)}")
        return None, f"Error: {str(e)}"
    
    section_analyses = []
    section_progress_step = dashboard_progress_step * 0.4 / len(image_sections)
    
    for i, section in enumerate(image_sections):
        section_progress = dashboard_progress_base + dashboard_progress_step * 0.3 + section_progress_step * i
        progress_tracker.update(section_progress, f"Analyzing section {i+1}/{len(image_sections)} of dashboard{dashboard_marker}...")
        
        print(f"\n{'='*50}")
        print(f"Processing section {i+1}/{len(image_sections)}...")
        section_result = analyze_dashboard_section(
            client, 
            model,
            i+1, 
            len(image_sections), 
            section, 
            full_text, 
            language,
            goal_description
        )
        if section_result:
            section_analyses.append(f"\n## {language['section_title']} {i+1}\n{section_result}")
            print(f"✅ Analysis of section {i+1} completed.")
        else:
            section_analyses.append(f"\n## {language['section_title']} {i+1}\nAnalysis not available for this section.")
            print(f"⚠️ Analysis of section {i+1} not available.")
    
    progress_tracker.update(dashboard_progress_base + dashboard_progress_step * 0.7, f"Generating final report for dashboard{dashboard_marker}...")
    print("\n" + "="*50)
    print(f"All section analyses completed. Generating report...")
    combined_sections = "\n".join(section_analyses)
    
    # If dashboard index is provided, add a header for the dashboard
    if dashboard_index is not None:
        dashboard_header = f"# {language['multi_doc_title'].format(index=dashboard_index)}\n\n"
        combined_sections = dashboard_header + combined_sections
    
    final_report = create_comprehensive_report(client, model, combined_sections, full_text, language, goal_description)
    
    # If dashboard index is provided, prepend it to the report
    if dashboard_index is not None and dashboard_index > 1:
        # Only add header if it doesn't already exist (might have been added by Claude)
        if not final_report.startswith(f"# {language['multi_doc_title'].format(index=dashboard_index)}"):
            final_report = f"# {language['multi_doc_title'].format(index=dashboard_index)}\n\n{final_report}"
    
    progress_tracker.update(dashboard_progress_base + dashboard_progress_step * 0.9, f"Finalizing dashboard{dashboard_marker} analysis...")
    return final_report, combined_sections
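
# Illustrative sketch (commented out): running a single-dashboard analysis end to
# end outside the Gradio UI. File name, key, and section count are placeholders.
#
#   with open("dashboard.pdf", "rb") as f:
#       report, sections = analyze_vertical_dashboard(
#           client=OpenRouterClient(api_key=my_key),
#           model=DEFAULT_MODEL,
#           file_data=f.read(),
#           file_type="pdf",
#           language=SUPPORTED_LANGUAGES["english"],
#           num_sections=4,
#       )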


def get_available_models(api_key):
    """Get available models from OpenRouter API."""
    try:
        headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json"
        }
        response = requests.get("https://openrouter.ai/api/v1/models", headers=headers)
        
        if response.status_code == 200:
            models_data = response.json()
            available_models = [model["id"] for model in models_data.get("data", [])]
            
            # First add our preferred models at the top if they're available
            sorted_models = [model for model in OPENROUTER_MODELS if model in available_models]
            
            # Then add any additional models not in our predefined list
            additional_models = [model for model in available_models if model not in OPENROUTER_MODELS]
            additional_models.sort()
            
            all_models = ["custom"] + sorted_models + additional_models
            return all_models
        else:
            print(f"Error fetching models: {response.status_code}")
            return ["custom"] + OPENROUTER_MODELS
    except Exception as e:
        print(f"Error fetching models: {str(e)}")
        return ["custom"] + OPENROUTER_MODELS

# FIXED: Improved file handling for Gradio compatibility
def create_output_files(individual_reports, comparative_report, language, timestamp):
    """Create output files with proper Gradio compatibility."""
    output_files = []
    
    try:
        # Create a temporary directory that Gradio can access
        temp_dir = tempfile.mkdtemp()
        print(f"Created temporary directory: {temp_dir}")
        
        # Create individual report files
        for i, report in enumerate(individual_reports):
            if report and report.strip():  # Only create files for valid reports
                # Create markdown file
                md_filename = os.path.join(temp_dir, f"dashboard_{i+1}_{language['code']}_{timestamp}.md")
                try:
                    with open(md_filename, 'w', encoding='utf-8') as f:
                        f.write(report)
                    if os.path.exists(md_filename) and os.path.getsize(md_filename) > 0:
                        output_files.append(md_filename)
                        print(f"✅ Created markdown file: {md_filename}")
                    else:
                        print(f"⚠️ Failed to create valid markdown file for dashboard {i+1}")
                except Exception as e:
                    print(f"❌ Error creating markdown file for dashboard {i+1}: {str(e)}")
                
                # Create PDF file
                pdf_filename = os.path.join(temp_dir, f"dashboard_{i+1}_{language['code']}_{timestamp}.pdf")
                try:
                    pdf_path = markdown_to_pdf(report, pdf_filename, language)
                    if os.path.exists(pdf_filename) and os.path.getsize(pdf_filename) > 0:
                        output_files.append(pdf_filename)
                        print(f"✅ Created PDF file: {pdf_filename}")
                    else:
                        print(f"⚠️ Failed to create valid PDF file for dashboard {i+1}")
                except Exception as e:
                    print(f"❌ Error creating PDF file for dashboard {i+1}: {str(e)}")
        
        # Create comparative report if available
        if comparative_report and comparative_report.strip():
            # Create comparative markdown file
            comparative_md = os.path.join(temp_dir, f"comparative_analysis_{language['code']}_{timestamp}.md")
            try:
                with open(comparative_md, 'w', encoding='utf-8') as f:
                    f.write(comparative_report)
                if os.path.exists(comparative_md) and os.path.getsize(comparative_md) > 0:
                    output_files.append(comparative_md)
                    print(f"✅ Created comparative markdown file: {comparative_md}")
                else:
                    print(f"⚠️ Failed to create valid comparative markdown file")
            except Exception as e:
                print(f"❌ Error creating comparative markdown file: {str(e)}")
            
            # Create comparative PDF file
            comparative_pdf = os.path.join(temp_dir, f"comparative_analysis_{language['code']}_{timestamp}.pdf")
            try:
                pdf_path = markdown_to_pdf(comparative_report, comparative_pdf, language)
                if os.path.exists(comparative_pdf) and os.path.getsize(comparative_pdf) > 0:
                    output_files.append(comparative_pdf)
                    print(f"✅ Created comparative PDF file: {comparative_pdf}")
                else:
                    print(f"⚠️ Failed to create valid comparative PDF file")
            except Exception as e:
                print(f"❌ Error creating comparative PDF file: {str(e)}")
        
        print(f"Total output files created: {len(output_files)}")
        return output_files
        
    except Exception as e:
        print(f"❌ Error in create_output_files: {str(e)}")
        return []

def process_multiple_dashboards(api_key, files, language_code="it", goal_description=None, num_sections=4, model_name=DEFAULT_MODEL, custom_model=None):
    """Process multiple dashboard files (PDF/images) and create individual and comparative reports."""
    # Start progress tracking
    progress_tracker.start_processing()
    progress_tracker.total_dashboards = len(files)
    
    # Step 1: Initialize language settings and API client
    progress_tracker.update(1, "Initializing analysis...")
    language = None
    for lang_key, lang_data in SUPPORTED_LANGUAGES.items():
        if lang_data['code'] == language_code:
            language = lang_data
            break
    if not language:
        print(f"⚠️ Language '{language_code}' not supported. Using Italian as fallback.")
        language = SUPPORTED_LANGUAGES['italiano']
    print(f"🌐 Selected language: {language['name']}")
    
    if not api_key:
        progress_tracker.update(100, "⚠️ Error: API key not provided.")
        progress_tracker.end_processing()
        print("⚠️ Error: API key not provided.")
        return None, None, "Error: API key not provided."
    
    try:
        client = OpenRouterClient(api_key=api_key)
        print("✅ OpenRouter client initialized successfully.")
    except Exception as e:
        progress_tracker.update(100, f"❌ Error initializing client: {str(e)}")
        progress_tracker.end_processing()
        print(f"❌ Error initializing client: {str(e)}")
        return None, None, f"Error: {str(e)}"
    
    # Determine which model to use
    model = custom_model if model_name == "custom" and custom_model else model_name
    print(f"🤖 Using model: {model}")
    
    # Step 2: Process each dashboard individually
    individual_reports = []
    individual_analyses = []
    
    for i, (file_data, file_type) in enumerate(files):
        dashboard_progress_base = (i / len(files) * 80)  # 80% of progress for dashboard analysis
        progress_tracker.update(dashboard_progress_base, f"Processing dashboard {i+1}/{len(files)}...")
        print(f"\n{'#'*60}")
        print(f"Processing dashboard {i+1}/{len(files)} (Type: {file_type})...")
        
        report, analysis = analyze_vertical_dashboard(
            client=client,
            model=model,
            file_data=file_data,
            file_type=file_type,
            language=language,
            goal_description=goal_description,
            num_sections=num_sections,
            dashboard_index=i+1
        )
        
        if report:
            individual_reports.append(report)
            individual_analyses.append(analysis)
            print(f"✅ Analysis of dashboard {i+1} completed.")
        else:
            print(f"❌ Analysis of dashboard {i+1} failed.")
    
    # Step 3: Generate comparative report if multiple dashboards
    comparative_report = None
    if len(individual_reports) > 1:
        progress_tracker.update(80, "Creating comparative analysis...")
        print("\n" + "#"*60)
        print("Creating comparative analysis of all dashboards...")
        
        # Combined report content
        all_reports_content = "\n\n".join(individual_reports)
        
        # Generate comparative analysis
        comparative_report = create_multi_dashboard_comparative_report(
            client=client,
            model=model,
            individual_reports=all_reports_content,
            language=language,
            goal_description=goal_description
        )
    
    # Step 4: Create output files with improved handling
    progress_tracker.update(90, "Creating output files...")
    timestamp = time.strftime("%Y%m%d_%H%M%S")
    
    try:
        output_files = create_output_files(individual_reports, comparative_report, language, timestamp)
        
        if not output_files:
            error_msg = "No output files were created successfully."
            progress_tracker.update(100, f"⚠️ {error_msg}")
            progress_tracker.end_processing()
            return None, None, error_msg
        
    except Exception as e:
        error_msg = f"Error creating output files: {str(e)}"
        print(f"❌ {error_msg}")
        progress_tracker.update(100, f"❌ {error_msg}")
        progress_tracker.end_processing()
        return None, None, error_msg
    
    # Complete progress tracking
    progress_tracker.update(100, "✅ Analysis completed successfully!")
    progress_tracker.end_processing()
    
    # Return the combined report content and all output files
    combined_content = "\n\n---\n\n".join(individual_reports)
    if len(individual_reports) > 1 and comparative_report:
        combined_content += f"\n\n{'='*80}\n\n# COMPARATIVE ANALYSIS\n\n{comparative_report}"
    
    return combined_content, output_files, "✅ Analysis completed successfully!"

# FIXED: Improved wrapper function for Gradio interface with progress bar integration
def process_dashboard(api_key, files, language_name, goal_description=None, num_sections=4, model_name=DEFAULT_MODEL, custom_model=None, progress=gr.Progress()):
    """Process dashboard files (PDF/images) and generate reports (wrapper function for Gradio interface)."""
    
    # Get reference to the progress bar component
    progress_bar = None
    try:
        # We'll pass this via the global progress_tracker
        progress_bar = progress_tracker.progress_bar
    except Exception:
        pass
    
    # Start progress tracking with Gradio progress integration
    progress_tracker.start_processing(progress, progress_bar)
    
    # Start a thread to update text-based progress
    progress_thread = threading.Thread(target=update_progress)
    progress_thread.daemon = True
    progress_thread.start()
    
    # Convert language name to language code
    language_code = "en"  # Default to English
    for lang_key, lang_data in SUPPORTED_LANGUAGES.items():
        if lang_data['name'].lower() == language_name.lower():
            language_code = lang_data['code']
            break
    
    # Validate inputs
    if not api_key or not api_key.strip():
        error_message = "API key is required."
        progress_tracker.update(100, f"❌ {error_message}")
        progress_tracker.end_processing()
        error_html = """
        <div style="background-color: #ffebee; border-radius: 10px; padding: 5px; margin: 10px 0; border: 1px solid #f44336;">
            <div style="background: linear-gradient(90deg, #f44336 0%, #d32f2f 100%); width: 100%; height: 25px; border-radius: 8px;"></div>
            <div style="text-align: center; margin-top: 5px; font-weight: bold; color: #d32f2f;">
                ❌ Error: API key is required
            </div>
        </div>
        """
        return None, None, error_message, error_html
    
    if not files or len(files) == 0:
        error_message = "No files uploaded."
        progress_tracker.update(100, f"❌ {error_message}")
        progress_tracker.end_processing()
        error_html = """
        <div style="background-color: #ffebee; border-radius: 10px; padding: 5px; margin: 10px 0; border: 1px solid #f44336;">
            <div style="background: linear-gradient(90deg, #f44336 0%, #d32f2f 100%); width: 100%; height: 25px; border-radius: 8px;"></div>
            <div style="text-align: center; margin-top: 5px; font-weight: bold; color: #d32f2f;">
                ❌ Error: No files uploaded
            </div>
        </div>
        """
        return None, None, error_message, error_html
    
    # Process the uploaded files with improved handling
    processed_files = []
    if files is not None:
        for i, file in enumerate(files):
            try:
                # Handle different Gradio file formats more robustly
                file_path = None
                
                if isinstance(file, dict):
                    # Handle new Gradio File component format
                    if 'name' in file:
                        file_path = file['name']
                    elif 'path' in file:
                        file_path = file['path']
                elif isinstance(file, str):
                    # Handle string file paths
                    file_path = file
                else:
                    # Try to get the path from the file object
                    for attr in ['name', 'path', 'file_path']:
                        if hasattr(file, attr):
                            file_path = getattr(file, attr)
                            break
                
                if not file_path:
                    print(f"⚠️ Could not determine file path for uploaded file {i+1}")
                    continue
                
                if not os.path.exists(file_path):
                    print(f"⚠️ File does not exist: {file_path}")
                    continue
                
                # Determine file type
                file_type = get_file_type(file_path)
                
                if file_type == 'unknown':
                    print(f"⚠️ Unsupported file type for {file_path}")
                    continue
                
                # Read file data
                try:
                    with open(file_path, 'rb') as f:
                        file_data = f.read()
                    
                    if len(file_data) == 0:
                        print(f"⚠️ Empty file: {file_path}")
                        continue
                    
                    processed_files.append((file_data, file_type))
                    print(f"✅ Processed {file_path} as {file_type} ({len(file_data)} bytes)")
                    
                except Exception as e:
                    print(f"❌ Error reading file {file_path}: {str(e)}")
                    continue
                    
            except Exception as e:
                print(f"❌ Error processing uploaded file {i+1}: {str(e)}")
                continue
    
    if not processed_files:
        error_message = "No valid files were uploaded or processed."
        progress_tracker.update(100, f"❌ {error_message}")
        progress_tracker.end_processing()
        error_html = """
        <div style="background-color: #ffebee; border-radius: 10px; padding: 5px; margin: 10px 0; border: 1px solid #f44336;">
            <div style="background: linear-gradient(90deg, #f44336 0%, #d32f2f 100%); width: 100%; height: 25px; border-radius: 8px;"></div>
            <div style="text-align: center; margin-top: 5px; font-weight: bold; color: #d32f2f;">
                ❌ Error: No valid files processed
            </div>
        </div>
        """
        return None, None, error_message, error_html
    
    print(f"Successfully processed {len(processed_files)} files for analysis")
    
    # Call the actual processing function
    try:
        combined_content, output_files, status = process_multiple_dashboards(
            api_key=api_key,
            files=processed_files,
            language_code=language_code,
            goal_description=goal_description,
            num_sections=num_sections,
            model_name=model_name,
            custom_model=custom_model
        )
        
        # Validate output files exist and are accessible
        if output_files:
            valid_files = []
            for file_path in output_files:
                if os.path.exists(file_path) and os.path.getsize(file_path) > 0:
                    valid_files.append(file_path)
                else:
                    print(f"⚠️ Output file not found or empty: {file_path}")
            
            if valid_files:
                print(f"✅ Returning {len(valid_files)} valid output files")
                success_html = """
                <div style="background-color: #f0f0f0; border-radius: 10px; padding: 5px; margin: 10px 0; border: 1px solid #ddd;">
                    <div style="background: linear-gradient(90deg, #4CAF50 0%, #45a049 100%); width: 100%; height: 25px; border-radius: 8px; transition: width 0.3s ease;"></div>
                    <div style="text-align: center; margin-top: 5px; font-weight: bold; color: #333;">
                        ✅ Analysis Complete - 100.0%
                    </div>
                </div>
                """
                return combined_content, valid_files, status, success_html
            else:
                success_html = """
                <div style="background-color: #fff3cd; border-radius: 10px; padding: 5px; margin: 10px 0; border: 1px solid #ffc107;">
                    <div style="background: linear-gradient(90deg, #ffc107 0%, #ffb300 100%); width: 100%; height: 25px; border-radius: 8px;"></div>
                    <div style="text-align: center; margin-top: 5px; font-weight: bold; color: #856404;">
                        ⚠️ Analysis completed but no files created
                    </div>
                </div>
                """
                return combined_content, None, "Analysis completed but no downloadable files were created.", success_html
        
        success_html = """
        <div style="background-color: #f0f0f0; border-radius: 10px; padding: 5px; margin: 10px 0; border: 1px solid #ddd;">
            <div style="background: linear-gradient(90deg, #4CAF50 0%, #45a049 100%); width: 100%; height: 25px; border-radius: 8px; transition: width 0.3s ease;"></div>
            <div style="text-align: center; margin-top: 5px; font-weight: bold; color: #333;">
                ✅ Analysis Complete - 100.0%
            </div>
        </div>
        """
        return combined_content, output_files, status, success_html
        
    except Exception as e:
        error_message = f"Error processing dashboards: {str(e)}"
        print(f"❌ {error_message}")
        progress_tracker.update(100, f"❌ {error_message}")
        progress_tracker.end_processing()
        error_html = """
        <div style="background-color: #ffebee; border-radius: 10px; padding: 5px; margin: 10px 0; border: 1px solid #f44336;">
            <div style="background: linear-gradient(90deg, #f44336 0%, #d32f2f 100%); width: 100%; height: 25px; border-radius: 8px;"></div>
            <div style="text-align: center; margin-top: 5px; font-weight: bold; color: #d32f2f;">
                ❌ Error processing dashboards
            </div>
        </div>
        """
        return None, None, error_message, error_html

# Gradio Interface Functions
def toggle_custom_model(choice):
    """Toggle visibility of the custom model textbox based on the dropdown selection."""
    # A plain dict is not a valid component update; gr.update() produces one.
    return gr.update(visible=(choice == "custom"))

def refresh_models(api_key):
    """Refresh the list of available models based on the API key."""
    if not api_key:
        return gr.Dropdown(choices=["custom"] + OPENROUTER_MODELS, value=DEFAULT_MODEL)
    
    try:
        available_models = get_available_models(api_key)
        return gr.Dropdown(choices=available_models, value=DEFAULT_MODEL)
    except Exception as e:
        print(f"Error refreshing models: {str(e)}")
        return gr.Dropdown(choices=["custom"] + OPENROUTER_MODELS, value=DEFAULT_MODEL)

# Define the Gradio interface with improved error handling and progress bar
with gr.Blocks(title="Dashboard Narrator - Powered by OpenRouter.ai", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # 📊 Dashboard Narrator - Powered by OpenRouter.ai
    Unlock the hidden stories in your dashboards!<br>
    Dashboard Narrator leverages advanced AI models through OpenRouter.ai to dissect your PDF reports and images,<br> 
    analyze each segment with expert precision, and craft comprehensive insights in your preferred language.<br><br>
    Turn complex data visualizations into clear, strategic recommendations and uncover trends you might have missed.<br>
    From executive summaries to detailed breakdowns, get the full narrative behind your numbers in just a few clicks.<br><br>
    **✨ New Features:**
    - Support for PNG and JPG image analysis
    - Enhanced with Claude Sonnet 4 and Gemini 2.5 Flash models
    - Multi-format dashboard analysis capabilities
    - Improved file download functionality
    - **Real-time progress tracking with visual progress bar**<br><br>
    **Instructions:**
    1. Enter your OpenRouter API key (get one at OpenRouter.ai)
    2. Choose an AI model for analysis
    3. Select your preferred report language
    4. Upload one or more dashboard files (PDF, PNG, JPG)
    5. Optionally specify analysis goals
    6. Click "Analyze Dashboards" to begin
    """)
    
    # Add a visible progress bar component
    with gr.Row():
        with gr.Column():
            progress_bar = gr.HTML(
                value="",
                visible=False,
                label="Analysis Progress"
            )
    
    with gr.Row():
        with gr.Column(scale=1):
            api_key = gr.Textbox(
                label="OpenRouter API Key (Required)", 
                placeholder="Enter your OpenRouter API key...", 
                type="password"
            )
            refresh_btn = gr.Button("🔄 Refresh Available Models", size="sm")
            
            model_choice = gr.Dropdown(
                choices=["custom"] + OPENROUTER_MODELS,
                value=DEFAULT_MODEL,
                label="Select AI Model"
            )
            
            custom_model = gr.Textbox(
                label="Custom Model ID", 
                placeholder="Enter custom model ID (e.g., anthropic/claude-3-opus:latest)...",
                visible=False
            )
            
            language = gr.Dropdown(
                choices=["Italiano", "English", "Français", "Español", "Deutsch"],
                value="English", 
                label="Report Language"
            )
            
            num_sections = gr.Slider(
                minimum=2, 
                maximum=10, 
                value=4, 
                step=1, 
                label="Vertical Sections per Dashboard"
            )
            
            goal = gr.Textbox(
                label="Analysis Goal (Optional)", 
                placeholder="E.g., Analyze Q1 2024 sales KPIs..."
            )
            
            files = gr.File(
                label="Upload Dashboards (PDF, PNG, JPG)", 
                file_types=[".pdf", ".png", ".jpg", ".jpeg"], 
                file_count="multiple"
            )
            
            analyze_btn = gr.Button("🔍 Analyze Dashboards", variant="primary", size="lg")
            
        with gr.Column(scale=2):
            with gr.Tab("Report"):
                output_md = gr.Markdown(label="Analysis Report", value="Upload dashboards and click Analyze to get started...")
            with gr.Tab("Download Files"):
                output_files = gr.File(
                    label="Download Generated Reports", 
                    file_count="multiple"
                )
            output_status = gr.Textbox(
                label="Status & Progress", 
                placeholder="Upload dashboards and press Analyze to begin...", 
                interactive=False
            )
    
    # Store progress bar reference in global tracker
    progress_tracker.progress_bar = progress_bar
    
    # Handle model dropdown change
    model_choice.change(
        fn=toggle_custom_model,
        inputs=model_choice,
        outputs=custom_model,
    )
    
    # Handle refresh models button
    refresh_btn.click(
        fn=refresh_models,
        inputs=api_key,
        outputs=model_choice,
    )
    
    # Handle analyze button with improved error handling and progress bar
    analyze_btn.click(
        fn=process_dashboard,
        inputs=[api_key, files, language, goal, num_sections, model_choice, custom_model],
        outputs=[output_md, output_files, output_status, progress_bar],
        show_progress=True
    )

# Launch the app
if __name__ == "__main__":
    demo.launch(share=True, show_error=True)