gavinxing committed on
Commit
242a726
·
verified ·
1 Parent(s): 8ac3bdc

Update src/display/utils.py

Browse files
Files changed (1) hide show
  1. src/display/utils.py +6 -1
src/display/utils.py CHANGED
@@ -8,6 +8,7 @@ from src.about import Tasks
8
def fields(raw_class):
    """Return the values of every class attribute whose name is not dunder-style."""
    return [
        value
        for name, value in raw_class.__dict__.items()
        if not (name.startswith("__") or name.endswith("__"))
    ]
10
 
 
11
 
12
  # These classes are for user facing column names,
13
  # to avoid having to change them all around the code
@@ -27,6 +28,10 @@ auto_eval_column_dict = []
27
# Model identifier column — always shown, rendered as a markdown link.
auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
# Score columns
auto_eval_column_dict.append(["overall", ColumnContent, ColumnContent("Overall", "number", True)])
 
 
 
 
30
  # for task in Tasks:
31
  # auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
32
  # Model information
@@ -39,7 +44,7 @@ auto_eval_column_dict.append(["model_size", ColumnContent, ColumnContent("Model
39
  # auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
40
  # auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
41
  # auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
42
- # auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
43
 
44
# Build the frozen AutoEvalColumn dataclass at runtime from the column specs
# accumulated in auto_eval_column_dict above.
AutoEvalColumn = make_dataclass(
    "AutoEvalColumn",
    auto_eval_column_dict,
    frozen=True,
)
 
8
def fields(raw_class):
    """Collect the values of all non-dunder attributes defined directly on *raw_class*."""
    collected = []
    for attr_name, attr_value in vars(raw_class).items():
        # Skip names that start or end with a double underscore (dunders).
        if attr_name[:2] == "__" or attr_name[-2:] == "__":
            continue
        collected.append(attr_value)
    return collected
10
 
11
# Display names for the per-category score columns shown on the leaderboard.
TASK_CATEGORIES = [
    "Column Relationship",
    "Column Transform",
    "Data Cleaning",
    "KB mapping",
    "NL-2-code",
    "Table Join",
    "Table Matching",
    "Table QA",
    "Table Transform",
    "Table Understanding",
]
12
 
13
  # These classes are for user facing column names,
14
  # to avoid having to change them all around the code
 
28
# Model identifier column — always shown, rendered as a markdown link.
auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
# Score columns
auto_eval_column_dict.append(["overall", ColumnContent, ColumnContent("Overall", "number", True)])

for task_cat in TASK_CATEGORIES:
    # Derive a snake_case attribute name from the display name,
    # e.g. "NL-2-code" -> "nl_2_code", "Table QA" -> "table_qa".
    attr_name = "_".join(task_cat.replace("-", " ").lower().split())
    auto_eval_column_dict.append([attr_name, ColumnContent, ColumnContent(task_cat, "number", True)])
35
  # for task in Tasks:
36
  # auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
37
  # Model information
 
44
  # auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
45
  # auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
46
  # auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
47
+ # auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
48
 
49
# Build the frozen AutoEvalColumn dataclass at runtime from the column specs
# accumulated in auto_eval_column_dict above.
AutoEvalColumn = make_dataclass(
    "AutoEvalColumn",
    auto_eval_column_dict,
    frozen=True,
)