Avijit Ghosh committed on
Commit
fd50825
·
1 Parent(s): ce43639

fixed bugs

Browse files
Files changed (3) hide show
  1. README.md +1 -1
  2. app.py +102 -61
  3. requirements.txt +1 -1
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🚀
4
  colorFrom: blue
5
  colorTo: green
6
  sdk: gradio
7
- sdk_version: 5.40.0
8
  app_file: app.py
9
  pinned: false
10
  license: apache-2.0
 
4
  colorFrom: blue
5
  colorTo: green
6
  sdk: gradio
7
+ sdk_version: 6.1.0
8
  app_file: app.py
9
  pinned: false
10
  license: apache-2.0
app.py CHANGED
@@ -2,7 +2,8 @@ import gradio as gr
2
  import pandas as pd
3
  import plotly.express as px
4
  import time
5
- from datasets import load_dataset
 
6
  # Using the stable, community-built RangeSlider component
7
  from gradio_rangeslider import RangeSlider
8
  import datetime # Import the datetime module
@@ -19,25 +20,22 @@ PIPELINE_TAGS = [ 'text-generation', 'text-to-image', 'text-classification', 'te
19
 
20
  def load_models_data():
21
  overall_start_time = time.time()
22
- print(f"Attempting to load dataset from Hugging Face Hub: {HF_DATASET_ID}")
23
  try:
24
- dataset_dict = load_dataset(HF_DATASET_ID)
25
- df = dataset_dict[list(dataset_dict.keys())[0]].to_pandas()
26
- if 'params' in df.columns:
27
- df['params'] = pd.to_numeric(df['params'], errors='coerce').fillna(-1)
28
- else:
29
- df['params'] = -1
30
 
31
- if 'createdAt' in df.columns:
32
- df['createdAt'] = pd.to_datetime(df['createdAt'], errors='coerce')
33
-
34
- msg = f"Successfully loaded dataset in {time.time() - overall_start_time:.2f}s."
35
  print(msg)
36
- return df, True, msg
37
  except Exception as e:
38
- err_msg = f"Failed to load dataset. Error: {e}"
39
  print(err_msg)
40
- return pd.DataFrame(), False, err_msg
41
 
42
  def get_param_range_values(param_range_labels):
43
  min_label, max_label = param_range_labels
@@ -45,44 +43,76 @@ def get_param_range_values(param_range_labels):
45
  max_val = float('inf') if '>' in max_label else float(max_label.replace('B', ''))
46
  return min_val, max_val
47
 
48
- def make_treemap_data(df, count_by, top_k=25, tag_filter=None, pipeline_filter=None, param_range=None, skip_orgs=None, include_unknown_param_size=True, created_after_date: float = None):
49
- if df is None or df.empty: return pd.DataFrame()
50
- filtered_df = df.copy()
51
-
52
- if not include_unknown_param_size and 'params' in filtered_df.columns:
53
- filtered_df = filtered_df[filtered_df['params'] != -1]
54
-
 
 
 
 
 
 
 
55
  col_map = { "Audio & Speech": "is_audio_speech", "Music": "has_music", "Robotics": "has_robot", "Biomedical": "is_biomed", "Time series": "has_series", "Sciences": "has_science", "Video": "has_video", "Images": "has_image", "Text": "has_text" }
56
- if tag_filter and tag_filter in col_map and col_map[tag_filter] in filtered_df.columns:
57
- filtered_df = filtered_df[filtered_df[col_map[tag_filter]]]
58
- if pipeline_filter and "pipeline_tag" in filtered_df.columns:
59
- filtered_df = filtered_df[filtered_df["pipeline_tag"].astype(str) == pipeline_filter]
 
 
 
60
  if param_range:
61
  min_params, max_params = get_param_range_values(param_range)
62
  is_default_range = (param_range[0] == PARAM_CHOICES[0] and param_range[1] == PARAM_CHOICES[-1])
63
- if not is_default_range and 'params' in filtered_df.columns:
64
- if min_params is not None: filtered_df = filtered_df[filtered_df['params'] >= min_params]
65
- if max_params is not None and max_params != float('inf'): filtered_df = filtered_df[filtered_df['params'] < max_params]
66
-
67
- # --- CORRECTED DATE FILTER LOGIC FOR FLOAT TIMESTAMP ---
68
- if created_after_date is not None and 'createdAt' in filtered_df.columns:
69
- # Drop rows where 'createdAt' could not be parsed to avoid errors
70
- filtered_df = filtered_df.dropna(subset=['createdAt'])
 
 
 
 
 
 
 
71
 
72
- # Convert the Unix timestamp (float) from the UI into a Python date object
73
- filter_date = datetime.datetime.fromtimestamp(created_after_date).date()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
 
75
- # Compare its date part with the date part of the 'createdAt' column.
76
- filtered_df = filtered_df[filtered_df['createdAt'].dt.date > filter_date]
77
-
78
- if skip_orgs and len(skip_orgs) > 0 and "organization" in filtered_df.columns:
79
- filtered_df = filtered_df[~filtered_df["organization"].isin(skip_orgs)]
80
- if filtered_df.empty: return pd.DataFrame()
81
- if count_by not in filtered_df.columns: filtered_df[count_by] = 0.0
82
- filtered_df[count_by] = pd.to_numeric(filtered_df[count_by], errors='coerce').fillna(0.0)
83
- org_totals = filtered_df.groupby("organization")[count_by].sum().nlargest(top_k, keep='first')
84
- top_orgs_list = org_totals.index.tolist()
85
- treemap_data = filtered_df[filtered_df["organization"].isin(top_orgs_list)][["id", "organization", count_by]].copy()
86
  treemap_data["root"] = "models"
87
  return treemap_data
88
 
@@ -109,7 +139,7 @@ custom_css = """
109
  """
110
 
111
  with gr.Blocks(title="🤗 ModelVerse Explorer", fill_width=True, css=custom_css) as demo:
112
- models_data_state = gr.State(pd.DataFrame())
113
  loading_complete_state = gr.State(False)
114
 
115
  with gr.Row():
@@ -166,19 +196,30 @@ with gr.Blocks(title="🤗 ModelVerse Explorer", fill_width=True, css=custom_css
166
  filter_choice_radio, [tag_filter_dropdown, pipeline_filter_dropdown])
167
 
168
  def load_and_generate_initial_plot(progress=gr.Progress()):
169
- progress(0, desc=f"Loading dataset '{HF_DATASET_ID}'...")
170
- current_df, load_success_flag, status_msg_from_load = pd.DataFrame(), False, ""
171
  try:
172
- current_df, load_success_flag, status_msg_from_load = load_models_data()
173
  if load_success_flag:
174
- progress(0.5, desc="Processing data...")
175
- ts = pd.to_datetime(current_df['data_download_timestamp'].iloc[0], utc=True) if 'data_download_timestamp' in current_df.columns and pd.notna(current_df['data_download_timestamp'].iloc[0]) else None
 
 
 
 
 
 
 
 
 
 
 
 
176
  date_display = ts.strftime('%B %d, %Y, %H:%M:%S %Z') if ts else "Pre-processed (date unavailable)"
177
 
178
- param_count = (current_df['params'] != -1).sum()
179
  data_info_text = (f"### Data Information\n- Source: `{HF_DATASET_ID}`\n- Status: {status_msg_from_load}\n"
180
- f"- Total models loaded: {len(current_df):,}\n- Models with known parameter counts: {param_count:,}\n"
181
- f"- Models with unknown parameter counts: {len(current_df) - param_count:,}\n- Data as of: {date_display}\n")
182
  else:
183
  data_info_text = f"### Data Load Failed\n- {status_msg_from_load}"
184
  except Exception as e:
@@ -189,21 +230,21 @@ with gr.Blocks(title="🤗 ModelVerse Explorer", fill_width=True, css=custom_css
189
  progress(0.6, desc="Generating initial plot...")
190
  initial_plot, initial_status = ui_generate_plot_controller(
191
  "downloads", "None", None, None, PARAM_CHOICES_DEFAULT_INDICES, 25,
192
- "TheBloke,MaziyarPanahi,unsloth,modularai,Gensyn,bartowski", True, None, current_df, progress
193
  )
194
- return current_df, load_success_flag, data_info_text, initial_status, initial_plot
195
 
196
  def ui_generate_plot_controller(metric_choice, filter_type, tag_choice, pipeline_choice,
197
  param_range_indices, k_orgs, skip_orgs_input, include_unknown_param_size_flag,
198
- created_after_date, df_current_models, progress=gr.Progress()):
199
- if df_current_models.empty:
200
  return create_treemap(pd.DataFrame(), metric_choice, "Error: Model Data Not Loaded"), "Model data is not loaded."
201
 
202
  progress(0.1, desc="Preparing data...")
203
  param_labels = [PARAM_CHOICES[int(param_range_indices[0])], PARAM_CHOICES[int(param_range_indices[1])]]
204
 
205
  treemap_df = make_treemap_data(
206
- df_current_models, metric_choice, k_orgs,
207
  tag_choice if filter_type == "Tag Filter" else None,
208
  pipeline_choice if filter_type == "Pipeline Filter" else None,
209
  param_labels, [org.strip() for org in skip_orgs_input.split(',') if org.strip()],
 
2
  import pandas as pd
3
  import plotly.express as px
4
  import time
5
+ import duckdb
6
+ from huggingface_hub import list_repo_files
7
  # Using the stable, community-built RangeSlider component
8
  from gradio_rangeslider import RangeSlider
9
  import datetime # Import the datetime module
 
20
 
21
def load_models_data():
    """Discover the parquet files that back HF_DATASET_ID on the Hub.

    Returns:
        tuple: (urls, success, message) where `urls` is a list of fully
        qualified `resolve/main` URLs for every parquet file in the dataset
        repository, `success` is a bool, and `message` is a human-readable
        status string shown in the UI.
    """
    overall_start_time = time.time()
    print(f"Attempting to load dataset metadata from Hugging Face Hub: {HF_DATASET_ID}")
    try:
        repo_files = list_repo_files(HF_DATASET_ID, repo_type="dataset")
        parquet_paths = [p for p in repo_files if p.endswith('.parquet')]
        if not parquet_paths:
            return [], False, "No parquet files found in dataset."

        # Build direct-download URLs that DuckDB's httpfs can read remotely.
        urls = [
            f"https://huggingface.co/datasets/{HF_DATASET_ID}/resolve/main/{f}"
            for f in parquet_paths
        ]

        msg = f"Successfully identified {len(urls)} parquet files in {time.time() - overall_start_time:.2f}s."
        print(msg)
        return urls, True, msg
    except Exception as e:
        # Best-effort boundary: surface the failure to the UI instead of crashing.
        err_msg = f"Failed to load dataset metadata. Error: {e}"
        print(err_msg)
        return [], False, err_msg
39
 
40
  def get_param_range_values(param_range_labels):
41
  min_label, max_label = param_range_labels
 
43
  max_val = float('inf') if '>' in max_label else float(max_label.replace('B', ''))
44
  return min_val, max_val
45
 
46
+ def make_treemap_data(parquet_urls, count_by, top_k=25, tag_filter=None, pipeline_filter=None, param_range=None, skip_orgs=None, include_unknown_param_size=True, created_after_date: float = None):
47
+ if not parquet_urls: return pd.DataFrame()
48
+
49
+ con = duckdb.connect()
50
+ con.execute("INSTALL httpfs; LOAD httpfs;")
51
+
52
+ urls_str = ", ".join([f"'{u}'" for u in parquet_urls])
53
+ con.execute(f"CREATE VIEW models AS SELECT * FROM read_parquet([{urls_str}])")
54
+
55
+ where_clauses = []
56
+
57
+ if not include_unknown_param_size:
58
+ where_clauses.append("params IS NOT NULL AND params != -1")
59
+
60
  col_map = { "Audio & Speech": "is_audio_speech", "Music": "has_music", "Robotics": "has_robot", "Biomedical": "is_biomed", "Time series": "has_series", "Sciences": "has_science", "Video": "has_video", "Images": "has_image", "Text": "has_text" }
61
+
62
+ if tag_filter and tag_filter in col_map:
63
+ where_clauses.append(f"{col_map[tag_filter]} = true")
64
+
65
+ if pipeline_filter:
66
+ where_clauses.append(f"pipeline_tag = '{pipeline_filter}'")
67
+
68
  if param_range:
69
  min_params, max_params = get_param_range_values(param_range)
70
  is_default_range = (param_range[0] == PARAM_CHOICES[0] and param_range[1] == PARAM_CHOICES[-1])
71
+ if not is_default_range:
72
+ conditions = []
73
+ if min_params is not None:
74
+ conditions.append(f"params >= {min_params}")
75
+ if max_params is not None and max_params != float('inf'):
76
+ conditions.append(f"params < {max_params}")
77
+ if conditions:
78
+ where_clauses.append("(" + " AND ".join(conditions) + ")")
79
+
80
+ if created_after_date is not None:
81
+ where_clauses.append(f"CAST(createdAt AS TIMESTAMPTZ) > to_timestamp({created_after_date})")
82
+
83
+ if skip_orgs and len(skip_orgs) > 0:
84
+ orgs_str = ", ".join([f"'{o}'" for o in skip_orgs])
85
+ where_clauses.append(f"organization NOT IN ({orgs_str})")
86
 
87
+ where_sql = " WHERE " + " AND ".join(where_clauses) if where_clauses else ""
88
+
89
+ metric = f"COALESCE({count_by}, 0)"
90
+
91
+ query = f"""
92
+ SELECT organization, SUM({metric}) as total_metric
93
+ FROM models
94
+ {where_sql}
95
+ GROUP BY organization
96
+ ORDER BY total_metric DESC
97
+ LIMIT {top_k}
98
+ """
99
+
100
+ top_orgs_df = con.execute(query).df()
101
+
102
+ if top_orgs_df.empty:
103
+ return pd.DataFrame()
104
 
105
+ top_orgs_list = top_orgs_df['organization'].tolist()
106
+ orgs_filter = ", ".join([f"'{o}'" for o in top_orgs_list])
107
+
108
+ detail_query = f"""
109
+ SELECT id, organization, {metric} as {count_by}
110
+ FROM models
111
+ {where_sql}
112
+ AND organization IN ({orgs_filter})
113
+ """
114
+
115
+ treemap_data = con.execute(detail_query).df()
116
  treemap_data["root"] = "models"
117
  return treemap_data
118
 
 
139
  """
140
 
141
  with gr.Blocks(title="🤗 ModelVerse Explorer", fill_width=True, css=custom_css) as demo:
142
+ models_data_state = gr.State([])
143
  loading_complete_state = gr.State(False)
144
 
145
  with gr.Row():
 
196
  filter_choice_radio, [tag_filter_dropdown, pipeline_filter_dropdown])
197
 
198
  def load_and_generate_initial_plot(progress=gr.Progress()):
199
+ progress(0, desc=f"Loading dataset metadata '{HF_DATASET_ID}'...")
200
+ parquet_urls, load_success_flag, status_msg_from_load = [], False, ""
201
  try:
202
+ parquet_urls, load_success_flag, status_msg_from_load = load_models_data()
203
  if load_success_flag:
204
+ progress(0.5, desc="Processing metadata...")
205
+
206
+ # Quick query to get stats
207
+ con = duckdb.connect()
208
+ con.execute("INSTALL httpfs; LOAD httpfs;")
209
+ urls_str = ", ".join([f"'{u}'" for u in parquet_urls])
210
+ con.execute(f"CREATE VIEW models AS SELECT * FROM read_parquet([{urls_str}])")
211
+
212
+ # Get total count and timestamp
213
+ stats = con.execute("SELECT count(*), max(data_download_timestamp), count(params) FROM models").fetchone()
214
+ total_count = stats[0]
215
+ ts = stats[1] # Timestamp object
216
+ param_count = stats[2]
217
+
218
  date_display = ts.strftime('%B %d, %Y, %H:%M:%S %Z') if ts else "Pre-processed (date unavailable)"
219
 
 
220
  data_info_text = (f"### Data Information\n- Source: `{HF_DATASET_ID}`\n- Status: {status_msg_from_load}\n"
221
+ f"- Total models loaded: {total_count:,}\n- Models with known parameter counts: {param_count:,}\n"
222
+ f"- Models with unknown parameter counts: {total_count - param_count:,}\n- Data as of: {date_display}\n")
223
  else:
224
  data_info_text = f"### Data Load Failed\n- {status_msg_from_load}"
225
  except Exception as e:
 
230
  progress(0.6, desc="Generating initial plot...")
231
  initial_plot, initial_status = ui_generate_plot_controller(
232
  "downloads", "None", None, None, PARAM_CHOICES_DEFAULT_INDICES, 25,
233
+ "TheBloke,MaziyarPanahi,unsloth,modularai,Gensyn,bartowski", True, None, parquet_urls, progress
234
  )
235
+ return parquet_urls, load_success_flag, data_info_text, initial_status, initial_plot
236
 
237
  def ui_generate_plot_controller(metric_choice, filter_type, tag_choice, pipeline_choice,
238
  param_range_indices, k_orgs, skip_orgs_input, include_unknown_param_size_flag,
239
+ created_after_date, parquet_urls, progress=gr.Progress()):
240
+ if not parquet_urls:
241
  return create_treemap(pd.DataFrame(), metric_choice, "Error: Model Data Not Loaded"), "Model data is not loaded."
242
 
243
  progress(0.1, desc="Preparing data...")
244
  param_labels = [PARAM_CHOICES[int(param_range_indices[0])], PARAM_CHOICES[int(param_range_indices[1])]]
245
 
246
  treemap_df = make_treemap_data(
247
+ parquet_urls, metric_choice, k_orgs,
248
  tag_choice if filter_type == "Tag Filter" else None,
249
  pipeline_choice if filter_type == "Pipeline Filter" else None,
250
  param_labels, [org.strip() for org in skip_orgs_input.split(',') if org.strip()],
requirements.txt CHANGED
@@ -1,3 +1,3 @@
1
  plotly
2
  duckdb
3
- gradio-rangeslider
 
1
  plotly
2
  duckdb
3
+ gradio-rangeslider