Update app.py
app.py
CHANGED
@@ -1,6 +1,6 @@
 """
 Dashboard Narrator - Powered by OpenRouter.ai
-A tool to analyze dashboard PDFs and generate comprehensive reports.
+A tool to analyze dashboard PDFs and images and generate comprehensive reports.
 """
 
 # Import required libraries
@@ -168,15 +168,17 @@ SUPPORTED_LANGUAGES = {
     }
 }
 
-# OpenRouter models
-DEFAULT_MODEL = "anthropic/claude-
+# OpenRouter models - Updated with new models
+DEFAULT_MODEL = "anthropic/claude-sonnet-4"
 OPENROUTER_MODELS = [
+    "anthropic/claude-sonnet-4",
     "anthropic/claude-3.7-sonnet",
     "openai/gpt-4.1",
     "openai/o4-mini-high",
     "openai/gpt-4.1-mini",
-    "
+    "google/gemini-2.5-flash-preview-05-20",
     "google/gemini-2.5-pro-preview-03-25",
+    "moonshotai/kimi-vl-a3b-thinking:free",
     "microsoft/phi-4-multimodal-instruct",
     "qwen/qwen2.5-vl-72b-instruct:free",
     "openrouter/optimus-alpha"
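The default now points at Claude Sonnet 4, with Gemini 2.5 Flash and Kimi-VL added as further vision-capable options. Model ids on OpenRouter change fairly often, so it can be worth checking a configured id against the public model list before starting a run. A minimal sketch, not part of this commit (the helper name is illustrative, and it assumes OpenRouter's documented GET /api/v1/models response shape):

    import requests

    def model_is_available(model_id, api_key=None):
        """Return True if model_id appears in OpenRouter's public model list."""
        headers = {"Authorization": f"Bearer {api_key}"} if api_key else {}
        resp = requests.get("https://openrouter.ai/api/v1/models", headers=headers, timeout=30)
        resp.raise_for_status()
        return model_id in {entry["id"] for entry in resp.json().get("data", [])}

    # e.g. fall back to another OPENROUTER_MODELS entry if not model_is_available(DEFAULT_MODEL)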
@@ -197,6 +199,39 @@ def extract_text_from_pdf(pdf_bytes):
         print(f"Error extracting text from PDF: {str(e)}")
         return ""
 
+def get_file_type(file_path):
+    """Determine the file type based on file extension."""
+    if file_path.lower().endswith('.pdf'):
+        return 'pdf'
+    elif file_path.lower().endswith(('.png', '.jpg', '.jpeg')):
+        return 'image'
+    else:
+        return 'unknown'
+
+def load_image_from_file(file_path):
+    """Load an image from file path."""
+    try:
+        image = Image.open(file_path)
+        # Convert to RGB if necessary
+        if image.mode != 'RGB':
+            image = image.convert('RGB')
+        return image
+    except Exception as e:
+        print(f"Error loading image from {file_path}: {str(e)}")
+        return None
+
+def load_image_from_bytes(image_bytes):
+    """Load an image from bytes."""
+    try:
+        image = Image.open(io.BytesIO(image_bytes))
+        # Convert to RGB if necessary
+        if image.mode != 'RGB':
+            image = image.convert('RGB')
+        return image
+    except Exception as e:
+        print(f"Error loading image from bytes: {str(e)}")
+        return None
+
 def divide_image_vertically(image, num_sections):
     """Divide an image vertically into sections."""
     width, height = image.size
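These helpers give the rest of the pipeline a uniform entry point for both upload types: extension sniffing decides the branch, and every image ends up as an RGB PIL.Image. A minimal sketch of how they compose (illustrative only; the path is hypothetical):

    path = "dashboard.png"                 # hypothetical upload
    kind = get_file_type(path)             # 'pdf', 'image', or 'unknown'
    if kind == 'image':
        img = load_image_from_file(path)   # RGB PIL.Image, or None on failure
    elif kind == 'pdf':
        with open(path, 'rb') as f:
            pdf_bytes = f.read()           # raw bytes; rasterized to page images later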
@@ -256,17 +291,31 @@ def analyze_dashboard_section(client, model, section_number, total_sections, ima
     Focus exclusively on the visible section. Don't reference or speculate about unseen dashboard elements.\n
     Answer completely in {language['name']}.\n\n
     # Text extracted from the complete dashboard:\n
-    {full_text[:10000]}
+    {full_text[:10000] if full_text else "No text available for this image."}
 
     # Image of this dashboard section:
     [BASE64 IMAGE: {encoded_image[:20]}...]
     This is a dashboard visualization showing various metrics and charts. Please analyze the content visible in this image.
     """
 
+    # Create message with image for vision models
+    message_content = [
+        {
+            "type": "text",
+            "text": section_prompt
+        },
+        {
+            "type": "image_url",
+            "image_url": {
+                "url": f"data:image/png;base64,{encoded_image}"
+            }
+        }
+    ]
+
     try:
         response = client.messages_create(
             model=model,
-            messages=[{"role": "user", "content":
+            messages=[{"role": "user", "content": message_content}],
             system=language['system_prompt'],
             temperature=0.1,
             max_tokens=10000
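The section prompt is now paired with the screenshot itself: message content becomes a two-part list, one text part plus one image_url part carrying the PNG as a base64 data URL, which is the OpenAI-style multimodal format OpenRouter accepts. client.messages_create is this app's own wrapper; for reference, the same payload can be posted directly to OpenRouter's chat completions endpoint. A hedged sketch (function name and parameters are illustrative, not the app's wrapper):

    import base64, io, requests

    def send_section_to_openrouter(api_key, model, section_prompt, image):
        """Sketch: post one text part and one base64 data-URL image part to OpenRouter."""
        buf = io.BytesIO()
        image.save(buf, format="PNG")
        encoded_image = base64.b64encode(buf.getvalue()).decode("utf-8")
        payload = {
            "model": model,
            "messages": [{
                "role": "user",
                "content": [
                    {"type": "text", "text": section_prompt},
                    {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{encoded_image}"}},
                ],
            }],
            "temperature": 0.1,
            "max_tokens": 10000,
        }
        resp = requests.post(
            "https://openrouter.ai/api/v1/chat/completions",
            headers={"Authorization": f"Bearer {api_key}"},
            json=payload,
            timeout=120,
        )
        resp.raise_for_status()
        return resp.json()["choices"][0]["message"]["content"]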
@@ -296,7 +345,7 @@ def create_comprehensive_report(client, model, section_analyses, full_text, lang
     8 Appendix: Monitoring Improvements - Move the monitoring suggestions to an appendix unless they're a primary focus\n\n
     Integrate information from all sections to create a coherent and complete report.\n\n
     # Text extracted from the complete dashboard:\n
-    {full_text[:10000]}
+    {full_text[:10000] if full_text else "No text available for this image."}
     """
     try:
         response = client.messages_create(
@@ -376,8 +425,8 @@ def markdown_to_pdf(markdown_content, output_filename, language):
     print(f"PDF created successfully: {output_filename}")
     return output_filename
 
-def analyze_vertical_dashboard(client, model,
-    """Analyze a vertical dashboard by dividing it into sections."""
+def analyze_vertical_dashboard(client, model, file_data, file_type, language, goal_description=None, num_sections=4, dashboard_index=None):
+    """Analyze a vertical dashboard by dividing it into sections. Supports both PDF and image files."""
     dashboard_marker = f" {dashboard_index}" if dashboard_index is not None else ""
     total_dashboards = progress_tracker.total_dashboards if hasattr(progress_tracker, 'total_dashboards') else 1
     dashboard_progress_base = ((dashboard_index - 1) / total_dashboards * 100) if dashboard_index is not None else 0
@@ -386,23 +435,40 @@ def analyze_vertical_dashboard(client, model, pdf_bytes, language, goal_descript
     progress_tracker.update(dashboard_progress_base, f"🖼️ Analyzing dashboard{dashboard_marker}...")
     print(f"🖼️ Analyzing dashboard{dashboard_marker}...")
 
-
-
-
-
-        print("
+    # Extract text if it's a PDF
+    full_text = ""
+    if file_type == 'pdf':
+        progress_tracker.update(dashboard_progress_base + dashboard_progress_step * 0.1, f"📄 Extracting text from dashboard{dashboard_marker}...")
+        print(f"📄 Extracting full text from PDF...")
+        full_text = extract_text_from_pdf(file_data)
+        if not full_text or len(full_text.strip()) < 100:
+            print("⚠️ Limited text extracted from PDF. Analysis will rely primarily on images.")
+        else:
+            print(f"✅ Extracted {len(full_text)} characters of text from PDF.")
     else:
-        print(
+        print("📄 Image file detected - no text extraction needed.")
 
+    # Convert to image(s)
     progress_tracker.update(dashboard_progress_base + dashboard_progress_step * 0.2, f"🖼️ Converting dashboard{dashboard_marker} to images...")
-    print("🖼️
+    print("🖼️ Processing image...")
+
     try:
-
-
-
-
-
-
+        if file_type == 'pdf':
+            # Convert PDF to images
+            pdf_images = convert_from_bytes(file_data, dpi=150)
+            if not pdf_images:
+                print("❌ Unable to convert PDF to images.")
+                return None, "Error: Unable to convert PDF to images."
+            print(f"✅ PDF converted to {len(pdf_images)} image pages.")
+            main_image = pdf_images[0]
+        else:
+            # Load image directly
+            main_image = load_image_from_bytes(file_data)
+            if main_image is None:
+                print("❌ Unable to load image.")
+                return None, "Error: Unable to load image."
+            print(f"✅ Image loaded successfully.")
+
         print(f"Main image size: {main_image.width}x{main_image.height} pixels")
 
         progress_tracker.update(dashboard_progress_base + dashboard_progress_step * 0.3, f"Dividing dashboard{dashboard_marker} into {num_sections} sections...")
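For PDFs the first page is rasterized and analyzed as an image; direct image uploads skip that step. Note that convert_from_bytes comes from pdf2image and shells out to the poppler utilities, so they must be installed on the host (for example poppler-utils on Debian/Ubuntu). A small usage sketch (the input path is hypothetical):

    from pdf2image import convert_from_bytes

    with open("dashboard.pdf", "rb") as f:              # hypothetical input
        pages = convert_from_bytes(f.read(), dpi=150)   # one PIL.Image per page; requires poppler
    main_image = pages[0]                               # only the first page is analyzed here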
@@ -410,7 +476,7 @@ def analyze_vertical_dashboard(client, model, pdf_bytes, language, goal_descript
         image_sections = divide_image_vertically(main_image, num_sections)
         print(f"✅ Image divided into {len(image_sections)} sections.")
     except Exception as e:
-        print(f"❌ Error
+        print(f"❌ Error processing image: {str(e)}")
         return None, f"Error: {str(e)}"
 
     section_analyses = []
@@ -490,11 +556,11 @@ def get_available_models(api_key):
         print(f"Error fetching models: {str(e)}")
         return ["custom"] + OPENROUTER_MODELS
 
-def process_multiple_dashboards(api_key,
-    """Process multiple dashboard
+def process_multiple_dashboards(api_key, files, language_code="it", goal_description=None, num_sections=4, model_name=DEFAULT_MODEL, custom_model=None):
+    """Process multiple dashboard files (PDF/images) and create individual and comparative reports."""
     # Start progress tracking
     progress_tracker.start_processing()
-    progress_tracker.total_dashboards = len(
+    progress_tracker.total_dashboards = len(files)
 
     # Step 1: Initialize language settings and API client
     progress_tracker.update(1, "Initializing analysis...")
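The batch entry point no longer receives raw PDF uploads; it now expects files as a list of (file_data, file_type) tuples, with file_type being 'pdf' or 'image'. A sketch of a direct call outside Gradio (paths and the API key are placeholders):

    inputs = []
    for path in ["q1_dashboard.pdf", "q2_dashboard.png"]:    # hypothetical local files
        kind = get_file_type(path)
        if kind == 'unknown':
            continue
        with open(path, 'rb') as f:
            inputs.append((f.read(), kind))                  # (bytes, 'pdf' | 'image')

    combined_report, output_files, status = process_multiple_dashboards(
        api_key="sk-or-...",        # OpenRouter API key placeholder
        files=inputs,
        language_code="en",
        num_sections=4,
    )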
@@ -531,16 +597,17 @@ def process_multiple_dashboards(api_key, pdf_files, language_code="it", goal_des
     individual_reports = []
     individual_analyses = []
 
-    for i,
-        dashboard_progress_base = (i / len(
-        progress_tracker.update(dashboard_progress_base, f"Processing dashboard {i+1}/{len(
+    for i, (file_data, file_type) in enumerate(files):
+        dashboard_progress_base = (i / len(files) * 80)  # 80% of progress for dashboard analysis
+        progress_tracker.update(dashboard_progress_base, f"Processing dashboard {i+1}/{len(files)}...")
         print(f"\n{'#'*60}")
-        print(f"Processing dashboard {i+1}/{len(
+        print(f"Processing dashboard {i+1}/{len(files)} (Type: {file_type})...")
 
         report, analysis = analyze_vertical_dashboard(
             client=client,
             model=model,
-
+            file_data=file_data,
+            file_type=file_type,
             language=language,
             goal_description=goal_description,
             num_sections=num_sections,
@@ -628,8 +695,8 @@ def process_multiple_dashboards(api_key, pdf_files, language_code="it", goal_des
     return combined_content, output_files, "✅ Analysis completed successfully!"
 
 # Wrapper function for Gradio interface
-def process_dashboard(api_key,
-    """Process dashboard
+def process_dashboard(api_key, files, language_name, goal_description=None, num_sections=4, model_name=DEFAULT_MODEL, custom_model=None):
+    """Process dashboard files (PDF/images) and generate reports (wrapper function for Gradio interface)."""
     # Start a thread to update progress
     progress_thread = threading.Thread(target=update_progress)
     progress_thread.daemon = True
@@ -642,31 +709,58 @@ def process_dashboard(api_key, pdf_files, language_name, goal_description=None,
             language_code = lang_data['code']
             break
 
-    #
-
-    if
-        for
-
-            # Handle
-
-
-
-
-
-
-
-
-
-                if hasattr(
-
-
-
+    # Process the uploaded files
+    processed_files = []
+    if files is not None:
+        for file in files:
+            try:
+                # Handle different Gradio file formats
+                file_path = None
+                if isinstance(file, dict) and 'name' in file:
+                    # Newer Gradio File component format
+                    file_path = file['name']
+                elif isinstance(file, str):
+                    # Older Gradio File component format
+                    file_path = file
+                else:
+                    # Try to get the path from the file object
+                    if hasattr(file, 'name'):
+                        file_path = file.name
+                    elif hasattr(file, 'path'):
+                        file_path = file.path
+
+                if file_path:
+                    # Determine file type
+                    file_type = get_file_type(file_path)
+
+                    if file_type == 'unknown':
+                        print(f"⚠️ Unsupported file type for {file_path}")
+                        continue
+
+                    # Read file data
+                    with open(file_path, 'rb') as f:
+                        file_data = f.read()
+
+                    processed_files.append((file_data, file_type))
+                    print(f"✅ Processed {file_path} as {file_type}")
+                else:
+                    print(f"⚠️ Could not determine file path for uploaded file")
+
+            except Exception as e:
+                print(f"❌ Error processing uploaded file: {str(e)}")
+                continue
+
+    if not processed_files:
+        error_message = "No valid files were uploaded or processed."
+        progress_tracker.update(100, error_message)
+        progress_tracker.end_processing()
+        return None, None, error_message
 
     # Call the actual processing function
     try:
         combined_content, output_files, status = process_multiple_dashboards(
             api_key=api_key,
-
+            files=processed_files,
             language_code=language_code,
             goal_description=goal_description,
             num_sections=num_sections,
@@ -703,10 +797,14 @@ with gr.Blocks(title="Dashboard Narrator - Powered by OpenRouter.ai", theme=gr.t
     gr.Markdown("""
    # 📊 Dashboard Narrator - Powered by OpenRouter.ai
    Unlock the hidden stories in your dashboards!<br>
-   Dashboard Narrator leverages advanced AI models through OpenRouter.ai to dissect your PDF reports,<br>
+   Dashboard Narrator leverages advanced AI models through OpenRouter.ai to dissect your PDF reports and images,<br>
    analyze each segment with expert precision, and craft comprehensive insights in your preferred language.<br><br>
    Turn complex data visualizations into clear, strategic recommendations and uncover trends you might have missed.<br>
    From executive summaries to detailed breakdowns, get the full narrative behind your numbers in just a few clicks.<br><br>
+   **✨ New Features:**
+   - Support for PNG and JPG image analysis
+   - Enhanced with Claude Sonnet 4 and Gemini 2.5 Flash models
+   - Multi-format dashboard analysis capabilities
     """)
     with gr.Row():
         with gr.Column(scale=1):
@@ -744,9 +842,9 @@ with gr.Blocks(title="Dashboard Narrator - Powered by OpenRouter.ai", theme=gr.t
                 placeholder="E.g., Analyze Q1 2024 sales KPIs..."
             )
 
-
-                label="Upload Dashboards (PDF)",
-                file_types=[".pdf"],
+            files = gr.File(
+                label="Upload Dashboards (PDF, PNG, JPG)",
+                file_types=[".pdf", ".png", ".jpg", ".jpeg"],
                 file_count="multiple"
             )
 
@@ -778,7 +876,7 @@ with gr.Blocks(title="Dashboard Narrator - Powered by OpenRouter.ai", theme=gr.t
     # Handle analyze button
     analyze_btn.click(
         fn=process_dashboard,
-        inputs=[api_key,
+        inputs=[api_key, files, language, goal, num_sections, model_choice, custom_model],
         outputs=[output_md, output_files, output_status]
    )
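On the Gradio side, process_dashboard now has to cope with the different shapes gr.File may hand back depending on version and configuration: a dict carrying a 'name' key, a plain temp-file path string, or an object exposing .name or .path. The inline handling in the hunk above can be read as the following compact helper (illustrative only; the commit keeps this logic inline):

    def gradio_file_to_path(file):
        """Best-effort extraction of a filesystem path from a Gradio File value."""
        if isinstance(file, dict) and 'name' in file:   # dict-style payload
            return file['name']
        if isinstance(file, str):                        # plain temp-file path
            return file
        return getattr(file, 'name', None) or getattr(file, 'path', None)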