Avijit Ghosh committed
Commit f0e2fd8 · 1 Parent(s): a1a0756

better exception handling

Files changed (1): app.py (+118 -125)
app.py CHANGED
@@ -1,9 +1,11 @@
+# --- START OF FILE app.py ---
+
 import json
 import gradio as gr
 import pandas as pd
 import plotly.express as px
 import os
-import numpy as np # Make sure NumPy is imported
+import numpy as np
 import duckdb
 from tqdm.auto import tqdm # Standard tqdm for console, gr.Progress will track it
 import time
@@ -15,7 +17,7 @@ MODEL_SIZE_RANGES = {
     "X-Large (20-50GB)": (20, 50), "XX-Large (>50GB)": (50, float('inf'))
 }
 PROCESSED_PARQUET_FILE_PATH = "models_processed.parquet"
-HF_PARQUET_URL = 'https://huggingface.co/datasets/cfahlgren1/hub-stats/resolve/main/models.parquet'
+HF_PARQUET_URL = 'https://huggingface.co/datasets/cfahlgren1/hub-stats/resolve/main/models.parquet' # Added for completeness within app.py context
 
 TAG_FILTER_CHOICES = [
     "Audio & Speech", "Time series", "Robotics", "Music", "Video", "Images",
@@ -36,8 +38,7 @@ PIPELINE_TAGS = [
     'table-question-answering',
 ]
 
-# --- Utility Functions ---
-def extract_model_size(safetensors_data): # Renamed for consistency if used, preprocessor uses extract_model_file_size_gb
+def extract_model_size(safetensors_data):
     try:
         if pd.isna(safetensors_data): return 0.0
         data_to_parse = safetensors_data
@@ -62,59 +63,60 @@ def extract_org_from_id(model_id):
     model_id_str = str(model_id)
     return model_id_str.split("/")[0] if "/" in model_id_str else "unaffiliated"
 
-# --- THIS IS THE CORRECTED process_tags_for_series from preprocess.py ---
-def process_tags_for_series(series_of_tags_values, tqdm_cls=None): # Added tqdm_cls for Gradio progress
+def process_tags_for_series(series_of_tags_values):
     processed_tags_accumulator = []
-
-    # Determine the iterable (use tqdm if tqdm_cls is provided, else direct iteration)
-    iterable = series_of_tags_values
-    if tqdm_cls and tqdm_cls != tqdm : # Check if it's Gradio's progress tracker
-        iterable = tqdm_cls(series_of_tags_values, desc="Standardizing Tags (App)", unit="row")
-    elif tqdm_cls == tqdm: # For direct console tqdm if passed
-        iterable = tqdm(series_of_tags_values, desc="Standardizing Tags (App)", unit="row", leave=False)
-
 
-    for i, tags_value_from_series in enumerate(iterable):
+    for i, tags_value_from_series in enumerate(tqdm(series_of_tags_values, desc="Standardizing Tags", leave=False, unit="row")):
         temp_processed_list_for_row = []
-        current_value_for_error_msg = str(tags_value_from_series)[:200]
+        current_value_for_error_msg = str(tags_value_from_series)[:200] # Truncate for long error messages
 
         try:
+            # Order of checks is important!
+            # 1. Handle explicit Python lists first
             if isinstance(tags_value_from_series, list):
                 current_tags_in_list = []
-                for tag_item in tags_value_from_series:
+                for idx_tag, tag_item in enumerate(tags_value_from_series):
                     try:
-                        if pd.isna(tag_item): continue
+                        # Ensure item is not NaN before string conversion if it might be a float NaN in a list
+                        if pd.isna(tag_item): continue
                         str_tag = str(tag_item)
                         stripped_tag = str_tag.strip()
                         if stripped_tag:
                             current_tags_in_list.append(stripped_tag)
                     except Exception as e_inner_list_proc:
-                        print(f"APP ERROR processing item '{tag_item}' (type: {type(tag_item)}) within a list for row {i}. Error: {e_inner_list_proc}. Original: {current_value_for_error_msg}")
+                        print(f"ERROR processing item '{tag_item}' (type: {type(tag_item)}) within a list for row {i}. Error: {e_inner_list_proc}. Original list: {current_value_for_error_msg}")
                 temp_processed_list_for_row = current_tags_in_list
 
+            # 2. Handle NumPy arrays
             elif isinstance(tags_value_from_series, np.ndarray):
+                # Convert to list, then process elements, handling potential NaNs within the array
                 current_tags_in_list = []
-                for tag_item in tags_value_from_series.tolist():
+                for idx_tag, tag_item in enumerate(tags_value_from_series.tolist()): # .tolist() is crucial
                     try:
-                        if pd.isna(tag_item): continue
+                        if pd.isna(tag_item): continue # Check for NaN after converting to Python type
                         str_tag = str(tag_item)
                         stripped_tag = str_tag.strip()
                         if stripped_tag:
                             current_tags_in_list.append(stripped_tag)
                     except Exception as e_inner_array_proc:
-                        print(f"APP ERROR processing item '{tag_item}' (type: {type(tag_item)}) within a NumPy array for row {i}. Error: {e_inner_array_proc}. Original: {current_value_for_error_msg}")
+                        print(f"ERROR processing item '{tag_item}' (type: {type(tag_item)}) within a NumPy array for row {i}. Error: {e_inner_array_proc}. Original array: {current_value_for_error_msg}")
                 temp_processed_list_for_row = current_tags_in_list
 
-            elif tags_value_from_series is None or pd.isna(tags_value_from_series):
+            # 3. Handle simple None or pd.NA after lists and arrays (which might contain pd.NA elements handled above)
+            elif tags_value_from_series is None or pd.isna(tags_value_from_series): # Now pd.isna is safe for scalars
                 temp_processed_list_for_row = []
 
+            # 4. Handle strings (could be JSON-like, list-like, or comma-separated)
             elif isinstance(tags_value_from_series, str):
                 processed_str_tags = []
+                # Attempt ast.literal_eval for strings that look like lists/tuples
                 if (tags_value_from_series.startswith('[') and tags_value_from_series.endswith(']')) or \
                    (tags_value_from_series.startswith('(') and tags_value_from_series.endswith(')')):
                     try:
                         evaluated_tags = ast.literal_eval(tags_value_from_series)
-                        if isinstance(evaluated_tags, (list, tuple)):
+                        if isinstance(evaluated_tags, (list, tuple)): # Check if eval result is a list/tuple
+                            # Recursively process this evaluated list/tuple, as its elements could be complex
+                            # For simplicity here, assume elements are simple strings after eval
                             current_eval_list = []
                             for tag_item in evaluated_tags:
                                 if pd.isna(tag_item): continue
@@ -122,12 +124,14 @@ def process_tags_for_series(series_of_tags_values, tqdm_cls=None): # Added tqdm_
                                 if str_tag: current_eval_list.append(str_tag)
                             processed_str_tags = current_eval_list
                     except (ValueError, SyntaxError):
-                        pass
+                        pass # If ast.literal_eval fails, let it fall to JSON or comma split
 
+                # If ast.literal_eval didn't populate, try JSON
                 if not processed_str_tags:
                     try:
                         json_tags = json.loads(tags_value_from_series)
                         if isinstance(json_tags, list):
+                            # Similar to above, assume elements are simple strings after JSON parsing
                             current_json_list = []
                             for tag_item in json_tags:
                                 if pd.isna(tag_item): continue
@@ -135,15 +139,19 @@ def process_tags_for_series(series_of_tags_values, tqdm_cls=None): # Added tqdm_
                                 if str_tag: current_json_list.append(str_tag)
                             processed_str_tags = current_json_list
                     except json.JSONDecodeError:
+                        # If not a valid JSON list, fall back to comma splitting as the final string strategy
                         processed_str_tags = [tag.strip() for tag in tags_value_from_series.split(',') if tag.strip()]
                     except Exception as e_json_other:
-                        print(f"APP ERROR during JSON processing for string '{current_value_for_error_msg}' for row {i}. Error: {e_json_other}")
-                        processed_str_tags = [tag.strip() for tag in tags_value_from_series.split(',') if tag.strip()]
+                        print(f"ERROR during JSON processing for string '{current_value_for_error_msg}' for row {i}. Error: {e_json_other}")
+                        processed_str_tags = [tag.strip() for tag in tags_value_from_series.split(',') if tag.strip()] # Fallback
 
                 temp_processed_list_for_row = processed_str_tags
 
-            else:
-                if pd.isna(tags_value_from_series):
+            # 5. Fallback for other scalar types (e.g., int, float that are not NaN)
+            else:
+                # This path is for non-list, non-ndarray, non-None/NaN, non-string types.
+                # Or for NaNs that slipped through if they are not None or pd.NA (e.g. float('nan'))
+                if pd.isna(tags_value_from_series): # Catch any remaining NaNs like float('nan')
                     temp_processed_list_for_row = []
                 else:
                     str_val = str(tags_value_from_series).strip()
@@ -152,15 +160,13 @@ def process_tags_for_series(series_of_tags_values, tqdm_cls=None): # Added tqdm_
             processed_tags_accumulator.append(temp_processed_list_for_row)
 
         except Exception as e_outer_tag_proc:
-            print(f"APP CRITICAL UNHANDLED ERROR processing row {i}: value '{current_value_for_error_msg}' (type: {type(tags_value_from_series)}). Error: {e_outer_tag_proc}. Appending [].")
+            print(f"CRITICAL UNHANDLED ERROR processing row {i}: value '{current_value_for_error_msg}' (type: {type(tags_value_from_series)}). Error: {e_outer_tag_proc}. Appending [].")
             processed_tags_accumulator.append([])
 
     return processed_tags_accumulator
-# --- END OF CORRECTED process_tags_for_series ---
-
 
-def load_models_data(force_refresh=False, tqdm_cls=None): # tqdm_cls for Gradio progress
-    if tqdm_cls is None: tqdm_cls = tqdm # Default to standard tqdm if None
+def load_models_data(force_refresh=False, tqdm_cls=None):
+    if tqdm_cls is None: tqdm_cls = tqdm
     overall_start_time = time.time()
     print(f"Gradio load_models_data called with force_refresh={force_refresh}")
 
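The rewritten process_tags_for_series dispatches on the cell's type (list, NumPy array, scalar NA, string) and, for strings, tries ast.literal_eval, then json.loads, then a comma split. Condensed into a standalone helper, the same cascade looks roughly like this (an illustrative sketch; normalize_tags is an invented name, not code from this commit):

```python
import ast
import json

import numpy as np
import pandas as pd

def normalize_tags(value):
    """Return a clean list of tag strings from one messy 'tags' cell."""
    if isinstance(value, np.ndarray):    # arrays before pd.isna: isna on an array is elementwise
        value = value.tolist()
    if isinstance(value, list):
        return [str(t).strip() for t in value if pd.notna(t) and str(t).strip()]
    if value is None or pd.isna(value):  # safe now that value is a scalar
        return []
    text = str(value)
    for parse in (ast.literal_eval, json.loads):  # JSONDecodeError subclasses ValueError
        try:
            parsed = parse(text)
            if isinstance(parsed, (list, tuple)):
                return [str(t).strip() for t in parsed if pd.notna(t) and str(t).strip()]
        except (ValueError, SyntaxError):
            pass
    return [t.strip() for t in text.split(',') if t.strip()]

assert normalize_tags('["audio", "robotics"]') == ["audio", "robotics"]
assert normalize_tags("audio, robotics") == ["audio", "robotics"]
assert normalize_tags(np.array(["a", None], dtype=object)) == ["a"]
assert normalize_tags(None) == []
```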
@@ -181,11 +187,15 @@ def load_models_data(force_refresh=False, tqdm_cls=None): # tqdm_cls for Gradio
             if missing_cols:
                 raise ValueError(f"Pre-processed Parquet is missing columns: {missing_cols}. Please run preprocessor or refresh data in app.")
 
+            # --- Diagnostic for 'has_robot' after loading parquet ---
             if 'has_robot' in df.columns:
                 robot_count_parquet = df['has_robot'].sum()
                 print(f"DIAGNOSTIC (App - Parquet Load): 'has_robot' column found. Number of True values: {robot_count_parquet}")
+                if 0 < robot_count_parquet < 10:
+                    print(f"Sample 'has_robot' models (from parquet): {df[df['has_robot']]['id'].head().tolist()}")
             else:
                 print("DIAGNOSTIC (App - Parquet Load): 'has_robot' column NOT FOUND.")
+            # --- End Diagnostic ---
 
             msg = f"Successfully loaded pre-processed data in {elapsed:.2f}s. Shape: {df.shape}"
             print(msg)
@@ -204,7 +214,7 @@ def load_models_data(force_refresh=False, tqdm_cls=None): # tqdm_cls for Gradio
     print("force_refresh=True (Gradio). Fetching fresh data...")
     fetch_start = time.time()
     try:
-        query = f"SELECT * FROM read_parquet('{HF_PARQUET_URL}')"
+        query = f"SELECT * FROM read_parquet('{HF_PARQUET_URL}')" # Ensure HF_PARQUET_URL is defined
         df_raw = duckdb.sql(query).df()
         if df_raw is None or df_raw.empty: raise ValueError("Fetched data is empty or None.")
         raw_data_source_msg = f"Fetched by Gradio in {time.time() - fetch_start:.2f}s. Rows: {len(df_raw)}"
@@ -217,22 +227,21 @@ def load_models_data(force_refresh=False, tqdm_cls=None): # tqdm_cls for Gradio
         return pd.DataFrame(), False, err_msg
 
     print(f"Initiating processing for data newly fetched by Gradio. {raw_data_source_msg}")
-    df = pd.DataFrame() # This will be our processed DataFrame
+    df = pd.DataFrame()
     proc_start = time.time()
 
     core_cols = {'id': str, 'downloads': float, 'downloadsAllTime': float, 'likes': float,
                  'pipeline_tag': str, 'tags': object, 'safetensors': object}
     for col, dtype in core_cols.items():
         if col in df_raw.columns:
-            df[col] = df_raw[col] # Assign raw data first
+            df[col] = df_raw[col]
             if dtype == float: df[col] = pd.to_numeric(df[col], errors='coerce').fillna(0.0)
             elif dtype == str: df[col] = df[col].astype(str).fillna('')
-            # For 'tags' and 'safetensors' (object type), no specific conversion here, done later
-        else: # If column is missing in raw data
+        else:
             if col in ['downloads', 'downloadsAllTime', 'likes']: df[col] = 0.0
             elif col == 'pipeline_tag': df[col] = ''
-            elif col == 'tags': df[col] = pd.Series([[] for _ in range(len(df_raw))]) # Default to empty lists
-            elif col == 'safetensors': df[col] = None # Default to None
+            elif col == 'tags': df[col] = pd.Series([[] for _ in range(len(df_raw))])
+            elif col == 'safetensors': df[col] = None
             elif col == 'id': return pd.DataFrame(), False, "Critical: 'id' column missing."
 
     output_filesize_col_name = 'params'
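The per-column coercion loop above is the standard pandas defensive pattern: copy the raw column, then coerce with errors='coerce' so unparseable values become NaN and can be zero-filled. A toy illustration of the same step (not the commit's code):

```python
import pandas as pd

raw = pd.DataFrame({"downloads": ["12", None, "oops"]})
df = pd.DataFrame()
df["downloads"] = pd.to_numeric(raw["downloads"], errors="coerce").fillna(0.0)
print(df["downloads"].tolist())  # [12.0, 0.0, 0.0] -- junk and missing both become 0.0
```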
@@ -240,11 +249,8 @@ def load_models_data(force_refresh=False, tqdm_cls=None): # tqdm_cls for Gradio
         df[output_filesize_col_name] = pd.to_numeric(df_raw[output_filesize_col_name], errors='coerce').fillna(0.0)
     elif 'safetensors' in df.columns:
         safetensors_iter = df['safetensors']
-        if tqdm_cls and tqdm_cls != tqdm:
-            safetensors_iter = tqdm_cls(df['safetensors'], desc="Extracting model sizes (GB)", unit="row")
-        elif tqdm_cls == tqdm:
-            safetensors_iter = tqdm(df['safetensors'], desc="Extracting model sizes (GB)", unit="row", leave=False)
-
+        if tqdm_cls != tqdm:
+            safetensors_iter = tqdm_cls(df['safetensors'], desc="Extracting model sizes (GB)")
         df[output_filesize_col_name] = [extract_model_size(s) for s in safetensors_iter]
         df[output_filesize_col_name] = pd.to_numeric(df[output_filesize_col_name], errors='coerce').fillna(0.0)
     else:
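The tqdm_cls wrapping above relies on duck typing: the loader accepts any tqdm-shaped callable, so the console tqdm and the tqdm attribute of a gr.Progress(track_tqdm=True) tracker can stand in for each other without changing the loop. A minimal sketch of the pattern (iter_with_progress is an invented name):

```python
from tqdm.auto import tqdm

def iter_with_progress(items, tqdm_cls=tqdm, desc=""):
    # Any callable accepting (iterable, desc=...) and yielding the items works here,
    # which is what lets a Gradio progress tracker substitute for console tqdm.
    return tqdm_cls(items, desc=desc)

total = sum(iter_with_progress(range(3), desc="demo"))  # renders a 3-step bar
```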
@@ -259,17 +265,16 @@ def load_models_data(force_refresh=False, tqdm_cls=None): # tqdm_cls for Gradio
         elif 5 <= numeric_size_gb < 20: return "Large (5-20GB)"
         elif 20 <= numeric_size_gb < 50: return "X-Large (20-50GB)"
         elif numeric_size_gb >= 50: return "XX-Large (>50GB)"
-        else: return "Small (<1GB)" # Default
+        else: return "Small (<1GB)"
     df['size_category'] = df[output_filesize_col_name].apply(get_size_category_gradio)
 
-    df['tags'] = process_tags_for_series(df['tags'], tqdm_cls=tqdm_cls)
-
+    df['tags'] = process_tags_for_series(df['tags'])
     df['temp_tags_joined'] = df['tags'].apply(
-        lambda tl: '~~~'.join(str(t).lower().strip() for t in tl if pd.notna(t) and str(t).strip()) if isinstance(tl, list) else ''
+        lambda tl: '~~~'.join(str(t).lower() for t in tl if pd.notna(t) and str(t).strip()) if isinstance(tl, list) else ''
     )
     tag_map = {
         'has_audio': ['audio'], 'has_speech': ['speech'], 'has_music': ['music'],
-        'has_robot': ['robot', 'robotics'],
+        'has_robot': ['robot', 'robotics'],
         'has_bio': ['bio'], 'has_med': ['medic', 'medical'],
         'has_series': ['series', 'time-series', 'timeseries'],
         'has_video': ['video'], 'has_image': ['image', 'vision'],
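The diff never shows how tag_map is consumed, but given the '~~~'-joined, lowercased tag string built just above, a plausible reconstruction of the surrounding context is a substring match per flag column, along these lines (a guess for illustration, not code from this commit):

```python
import pandas as pd

df = pd.DataFrame({"temp_tags_joined": ["robotics~~~vision", "audio", ""]})
tag_map = {"has_robot": ["robot", "robotics"], "has_audio": ["audio"]}

for flag_col, terms in tag_map.items():
    pattern = "|".join(terms)  # True if any term appears anywhere in the joined string
    df[flag_col] = df["temp_tags_joined"].str.contains(pattern, regex=True)

print(df.to_string())  # has_robot: True/False/False, has_audio: False/True/False
```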
@@ -292,9 +297,13 @@ def load_models_data(force_refresh=False, tqdm_cls=None): # tqdm_cls for Gradio
        not (output_filesize_col_name in df_raw.columns and pd.api.types.is_numeric_dtype(df_raw[output_filesize_col_name])):
         df = df.drop(columns=['safetensors'], errors='ignore')
 
+    # --- Diagnostic for 'has_robot' after app-side processing (force_refresh path) ---
     if force_refresh and 'has_robot' in df.columns:
         robot_count_app_proc = df['has_robot'].sum()
         print(f"DIAGNOSTIC (App - Force Refresh Processing): 'has_robot' column processed. Number of True values: {robot_count_app_proc}")
+        if 0 < robot_count_app_proc < 10:
+            print(f"Sample 'has_robot' models (App processed): {df[df['has_robot']]['id'].head().tolist()}")
+    # --- End Diagnostic ---
 
     print(f"Data processing by Gradio completed in {time.time() - proc_start:.2f}s.")
 
@@ -311,12 +320,25 @@ def make_treemap_data(df, count_by, top_k=25, tag_filter=None, pipeline_filter=N
                "Biomedical": "is_biomed", "Time series": "has_series", "Sciences": "has_science",
                "Video": "has_video", "Images": "has_image", "Text": "has_text"}
 
+    # --- Diagnostic within make_treemap_data ---
     if 'has_robot' in filtered_df.columns:
         initial_robot_count = filtered_df['has_robot'].sum()
+        print(f"DIAGNOSTIC (make_treemap_data entry): Input df has {initial_robot_count} 'has_robot' models.")
+    else:
+        print("DIAGNOSTIC (make_treemap_data entry): 'has_robot' column NOT in input df.")
+    # --- End Diagnostic ---
+
     if tag_filter and tag_filter in col_map:
         target_col = col_map[tag_filter]
         if target_col in filtered_df.columns:
+            # --- Diagnostic for specific 'Robotics' filter application ---
+            if tag_filter == "Robotics":
+                count_before_robot_filter = filtered_df[target_col].sum()
+                print(f"DIAGNOSTIC (make_treemap_data): Applying 'Robotics' filter. Models with '{target_col}'=True before this filter step: {count_before_robot_filter}")
+            # --- End Diagnostic ---
             filtered_df = filtered_df[filtered_df[target_col]]
+            if tag_filter == "Robotics":
+                print(f"DIAGNOSTIC (make_treemap_data): After 'Robotics' filter ({target_col}), df rows: {len(filtered_df)}")
         else:
             print(f"Warning: Tag filter column '{col_map[tag_filter]}' not found in DataFrame.")
     if pipeline_filter:
@@ -337,17 +359,16 @@ def make_treemap_data(df, count_by, top_k=25, tag_filter=None, pipeline_filter=N
     if filtered_df.empty: return pd.DataFrame()
     if count_by not in filtered_df.columns or not pd.api.types.is_numeric_dtype(filtered_df[count_by]):
         filtered_df[count_by] = pd.to_numeric(filtered_df.get(count_by), errors="coerce").fillna(0.0)
-
-    org_totals = filtered_df.groupby("organization")[count_by].sum().nlargest(top_k, keep='first')
+    org_totals = filtered_df.groupby("organization")[count_by].sum().nlargest(top_k, keep='first')
     top_orgs_list = org_totals.index.tolist()
     treemap_data = filtered_df[filtered_df["organization"].isin(top_orgs_list)][["id", "organization", count_by]].copy()
-    treemap_data["root"] = "models"
+    treemap_data["root"] = "models"
     treemap_data[count_by] = pd.to_numeric(treemap_data[count_by], errors="coerce").fillna(0.0)
     return treemap_data
 
 def create_treemap(treemap_data, count_by, title=None):
     if treemap_data.empty:
-        fig = px.treemap(names=["No data matches filters"], parents=[""], values=[1])
+        fig = px.treemap(names=["No data matches filters"], parents=[""], values=[1])
         fig.update_layout(title="No data matches the selected filters", margin=dict(t=50, l=25, r=25, b=25))
         return fig
     fig = px.treemap(
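The top-k selection in make_treemap_data is a plain groupby-sum followed by nlargest and an isin filter; a self-contained toy run of exactly that step:

```python
import pandas as pd

df = pd.DataFrame({
    "organization": ["a", "a", "b", "c"],
    "id": ["a/m1", "a/m2", "b/m1", "c/m1"],
    "downloads": [10.0, 5.0, 8.0, 1.0],
})
org_totals = df.groupby("organization")["downloads"].sum().nlargest(2, keep="first")
treemap_rows = df[df["organization"].isin(org_totals.index)]
print(org_totals.index.tolist())  # ['a', 'b'] -- totals 15.0 and 8.0 beat c's 1.0
```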
@@ -361,27 +382,24 @@ def create_treemap(treemap_data, count_by, title=None):
 
 with gr.Blocks(title="HuggingFace Model Explorer") as demo:
     models_data_state = gr.State(pd.DataFrame())
-    loading_complete_state = gr.State(False)
+    loading_complete_state = gr.State(False)
 
-    with gr.Row():
-        gr.Markdown("# HuggingFace Models TreeMap Visualization")
+    with gr.Row(): gr.Markdown("# HuggingFace Models TreeMap Visualization")
     with gr.Row():
-        with gr.Column(scale=1):
+        with gr.Column(scale=1):
             count_by_dropdown = gr.Dropdown(label="Metric", choices=[("Downloads (last 30 days)", "downloads"), ("Downloads (All Time)", "downloadsAllTime"), ("Likes", "likes")], value="downloads")
             filter_choice_radio = gr.Radio(label="Filter Type", choices=["None", "Tag Filter", "Pipeline Filter"], value="None")
             tag_filter_dropdown = gr.Dropdown(label="Select Tag", choices=TAG_FILTER_CHOICES, value=None, visible=False)
             pipeline_filter_dropdown = gr.Dropdown(label="Select Pipeline Tag", choices=PIPELINE_TAGS, value=None, visible=False)
             size_filter_dropdown = gr.Dropdown(label="Model Size Filter", choices=["None"] + list(MODEL_SIZE_RANGES.keys()), value="None")
             top_k_slider = gr.Slider(label="Number of Top Organizations", minimum=5, maximum=50, value=25, step=5)
-            skip_orgs_textbox = gr.Textbox(label="Organizations to Skip (comma-separated)", value="TheBloke,MaziyarPanahi,unsloth,modularai,Gensyn,bartowski")
-
-            generate_plot_button = gr.Button(value="Generate Plot", variant="primary", interactive=False)
+            skip_orgs_textbox = gr.Textbox(label="Organizations to Skip (comma-separated)", value="TheBloke,MaziyarPanahi,unsloth,modularai,Gensyn,bartowski")
+            generate_plot_button = gr.Button(value="Generate Plot", variant="primary", interactive=False)
             refresh_data_button = gr.Button(value="Refresh Data from Hugging Face", variant="secondary")
-        with gr.Column(scale=3):
+        with gr.Column(scale=3):
             plot_output = gr.Plot()
-            status_message_md = gr.Markdown("Initializing...")
-            data_info_md = gr.Markdown("")
+            status_message_md = gr.Markdown("Initializing...")
+            data_info_md = gr.Markdown("")
 
     def _update_button_interactivity(is_loaded_flag):
         return gr.update(interactive=is_loaded_flag)
@@ -391,56 +409,28 @@ with gr.Blocks(title="HuggingFace Model Explorer") as demo:
         return gr.update(visible=choice == "Tag Filter"), gr.update(visible=choice == "Pipeline Filter")
     filter_choice_radio.change(fn=_toggle_filters_visibility, inputs=filter_choice_radio, outputs=[tag_filter_dropdown, pipeline_filter_dropdown])
 
-
-    def ui_load_data_controller(force_refresh_ui_trigger=False, progress=gr.Progress(track_tqdm=True)):
+    def ui_load_data_controller(force_refresh_ui_trigger=False, progress=gr.Progress(track_tqdm=True)):
         print(f"ui_load_data_controller called with force_refresh_ui_trigger={force_refresh_ui_trigger}")
         status_msg_ui = "Loading data..."
         data_info_text = ""
         current_df = pd.DataFrame()
         load_success_flag = False
-        # data_as_of_date_display = "N/A" # Will be set inside the logic
-
+        data_as_of_date_display = "N/A"
         try:
             current_df, load_success_flag, status_msg_from_load = load_models_data(
-                force_refresh=force_refresh_ui_trigger, tqdm_cls=progress.tqdm if progress else tqdm
+                force_refresh=force_refresh_ui_trigger, tqdm_cls=progress.tqdm
             )
-
             if load_success_flag:
-                # Default value for data_as_of_date_display
-                data_as_of_date_display = "Pre-processed (date unavailable or invalid)"
-
-                if force_refresh_ui_trigger: # Data was just fetched by Gradio
+                if force_refresh_ui_trigger:
                     data_as_of_date_display = pd.Timestamp.now(tz='UTC').strftime('%B %d, %Y, %H:%M:%S %Z')
-                # If loaded from pre-processed parquet, check for its timestamp column
-                elif 'data_download_timestamp' in current_df.columns and not current_df.empty:
-                    try:
-                        # Step 1: Safely get the value from the DataFrame's first row for the timestamp column
-                        raw_val_from_df = current_df['data_download_timestamp'].iloc[0]
-
-                        # Step 2: Process if raw_val_from_df is a list/array
-                        scalar_timestamp_val = None
-                        if isinstance(raw_val_from_df, (list, tuple, np.ndarray)):
-                            if len(raw_val_from_df) > 0:
-                                scalar_timestamp_val = raw_val_from_df[0]
-                        else:
-                            scalar_timestamp_val = raw_val_from_df
-
-                        # Step 3: Check for NA and convert the scalar value to datetime
-                        if pd.notna(scalar_timestamp_val):
-                            dt_obj = pd.to_datetime(scalar_timestamp_val)
-                            if pd.notna(dt_obj):
-                                if dt_obj.tzinfo is None:
-                                    dt_obj = dt_obj.tz_localize('UTC')
-                                data_as_of_date_display = dt_obj.strftime('%B %d, %Y, %H:%M:%S %Z')
-
-                    except IndexError:
-                        print(f"DEBUG: IndexError encountered while processing 'data_download_timestamp'. DF empty: {current_df.empty}")
-                        if 'data_download_timestamp' in current_df.columns and not current_df.empty:
-                            print(f"DEBUG: Head of 'data_download_timestamp': {str(current_df['data_download_timestamp'].head(1))}") # Ensure string conversion for print
-                    except Exception as e_ts_proc:
-                        print(f"Error processing 'data_download_timestamp' from parquet: {e_ts_proc}")
+                elif 'data_download_timestamp' in current_df.columns and not current_df.empty and pd.notna(current_df['data_download_timestamp'].iloc[0]):
+                    timestamp_from_parquet = pd.to_datetime(current_df['data_download_timestamp'].iloc[0])
+                    if timestamp_from_parquet.tzinfo is None:
+                        timestamp_from_parquet = timestamp_from_parquet.tz_localize('UTC')
+                    data_as_of_date_display = timestamp_from_parquet.strftime('%B %d, %Y, %H:%M:%S %Z')
+                else:
+                    data_as_of_date_display = "Pre-processed (date unavailable)"
 
-                # Build data info string
                 size_dist_lines = []
                 if 'size_category' in current_df.columns:
                     for cat in MODEL_SIZE_RANGES.keys():
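The timestamp handling that replaces the long try/except ladder above reduces to: read one scalar, parse it, localize naive values to UTC, and format for display. A minimal sketch of that normalization (format_as_of is an invented name):

```python
import pandas as pd

def format_as_of(raw):
    ts = pd.to_datetime(raw)
    if ts.tzinfo is None:        # naive timestamps are treated as UTC, as in the diff
        ts = ts.tz_localize("UTC")
    return ts.strftime('%B %d, %Y, %H:%M:%S %Z')

print(format_as_of("2024-05-01 12:00:00"))   # May 01, 2024, 12:00:00 UTC
print(format_as_of("2024-05-01T12:00:00Z"))  # already tz-aware, formatted as-is
```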
@@ -448,33 +438,33 @@ with gr.Blocks(title="HuggingFace Model Explorer") as demo:
                         size_dist_lines.append(f" - {cat}: {count:,} models")
                 else: size_dist_lines.append(" - Size category information not available.")
                 size_dist = "\n".join(size_dist_lines)
-
+
                 data_info_text = (f"### Data Information\n"
                                   f"- Overall Status: {status_msg_from_load}\n"
                                   f"- Total models loaded: {len(current_df):,}\n"
                                   f"- Data as of: {data_as_of_date_display}\n"
                                   f"- Size categories:\n{size_dist}")
 
+                # --- MODIFICATION: Add 'has_robot' count to UI data_info_text ---
                 if not current_df.empty and 'has_robot' in current_df.columns:
                     robot_true_count = current_df['has_robot'].sum()
                     data_info_text += f"\n- **Models flagged 'has_robot'**: {robot_true_count}"
-                    if 0 < robot_true_count <= 10:
+                    if 0 < robot_true_count <= 10: # If a few are found, list some IDs
                         sample_robot_ids = current_df[current_df['has_robot']]['id'].head(5).tolist()
                         data_info_text += f"\n  - Sample 'has_robot' model IDs: `{', '.join(sample_robot_ids)}`"
                 elif not current_df.empty:
-                    data_info_text += "\n- **Models flagged 'has_robot'**: 'has_robot' column not found."
+                    data_info_text += "\n- **Models flagged 'has_robot'**: 'has_robot' column not found in loaded data."
+                # --- END MODIFICATION ---
 
                 status_msg_ui = "Data loaded successfully. Ready to generate plot."
-            else: # load_success_flag is False
+            else:
                 data_info_text = f"### Data Load Failed\n- {status_msg_from_load}"
                 status_msg_ui = status_msg_from_load
-
         except Exception as e:
             status_msg_ui = f"An unexpected error occurred in ui_load_data_controller: {str(e)}"
             data_info_text = f"### Critical Error\n- {status_msg_ui}"
-            print(f"Critical error in ui_load_data_controller: {e}") # This is the original error print
+            print(f"Critical error in ui_load_data_controller: {e}")
             load_success_flag = False
-
         return current_df, load_success_flag, data_info_text, status_msg_ui
 
     def ui_generate_plot_controller(metric_choice, filter_type, tag_choice, pipeline_choice,
@@ -482,52 +472,55 @@ with gr.Blocks(title="HuggingFace Model Explorer") as demo:
         if df_current_models is None or df_current_models.empty:
             empty_fig = create_treemap(pd.DataFrame(), metric_choice, "Error: Model Data Not Loaded")
             error_msg = "Model data is not loaded or is empty. Please load or refresh data first."
-            gr.Warning(error_msg)
+            gr.Warning(error_msg)
             return empty_fig, error_msg
-
         tag_to_use = tag_choice if filter_type == "Tag Filter" else None
         pipeline_to_use = pipeline_choice if filter_type == "Pipeline Filter" else None
-        size_to_use = size_choice if size_choice != "None" else None
+        size_to_use = size_choice if size_choice != "None" else None
         orgs_to_skip = [org.strip() for org in skip_orgs_input.split(',') if org.strip()] if skip_orgs_input else []
+
+        # --- Diagnostic before calling make_treemap_data ---
+        if 'has_robot' in df_current_models.columns:
+            robot_count_before_treemap = df_current_models['has_robot'].sum()
+            print(f"DIAGNOSTIC (ui_generate_plot_controller): df_current_models entering make_treemap_data has {robot_count_before_treemap} 'has_robot' models.")
+        # --- End Diagnostic ---
 
         treemap_df = make_treemap_data(df_current_models, metric_choice, k_orgs, tag_to_use, pipeline_to_use, size_to_use, orgs_to_skip)
 
         title_labels = {"downloads": "Downloads (last 30 days)", "downloadsAllTime": "Downloads (All Time)", "likes": "Likes"}
         chart_title = f"HuggingFace Models - {title_labels.get(metric_choice, metric_choice)} by Organization"
         plotly_fig = create_treemap(treemap_df, metric_choice, chart_title)
-
         if treemap_df.empty:
             plot_stats_md = "No data matches the selected filters. Try adjusting your filters."
         else:
-            total_items_in_plot = len(treemap_df['id'].unique())
-            total_value_in_plot = treemap_df[metric_choice].sum()
+            total_items_in_plot = len(treemap_df['id'].unique())
+            total_value_in_plot = treemap_df[metric_choice].sum()
             plot_stats_md = (f"## Plot Statistics\n- **Models shown**: {total_items_in_plot:,}\n- **Total {metric_choice}**: {int(total_value_in_plot):,}")
-
         return plotly_fig, plot_stats_md
 
     demo.load(
         fn=lambda progress=gr.Progress(track_tqdm=True): ui_load_data_controller(force_refresh_ui_trigger=False, progress=progress),
-        inputs=[],
+        inputs=[],
         outputs=[models_data_state, loading_complete_state, data_info_md, status_message_md]
     )
-
     refresh_data_button.click(
         fn=lambda progress=gr.Progress(track_tqdm=True): ui_load_data_controller(force_refresh_ui_trigger=True, progress=progress),
         inputs=[],
         outputs=[models_data_state, loading_complete_state, data_info_md, status_message_md]
     )
-
     generate_plot_button.click(
         fn=ui_generate_plot_controller,
         inputs=[count_by_dropdown, filter_choice_radio, tag_filter_dropdown, pipeline_filter_dropdown,
                 size_filter_dropdown, top_k_slider, skip_orgs_textbox, models_data_state],
-        outputs=[plot_output, status_message_md]
+        outputs=[plot_output, status_message_md]
     )
 
 if __name__ == "__main__":
     if not os.path.exists(PROCESSED_PARQUET_FILE_PATH):
         print(f"WARNING: Pre-processed data file '{PROCESSED_PARQUET_FILE_PATH}' not found.")
-        print("It is highly recommended to run the preprocessing script (preprocess.py) first.")
+        print("It is highly recommended to run the preprocessing script (e.g., preprocess.py) first.") # Corrected script name
     else:
         print(f"Found pre-processed data file: '{PROCESSED_PARQUET_FILE_PATH}'.")
-    demo.launch()
+    demo.launch()
+
+# --- END OF FILE app.py ---