crfm-helm 0.5.2-py3-none-any.whl → 0.5.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
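The same comparison can be reproduced locally from the published wheels; a minimal sketch, assuming `pip`, `unzip`, and `diff` are available, and run under an interpreter both versions accept (e.g. Python 3.9 or 3.10, since 0.5.2 declares `Requires-Python: <3.11`). The directory names are illustrative.

```sh
# Download both wheels without installing them or their dependencies.
pip download crfm-helm==0.5.2 --no-deps -d old
pip download crfm-helm==0.5.4 --no-deps -d new

# A wheel is a zip archive: unpack both and diff the trees.
unzip -q old/crfm_helm-0.5.2-py3-none-any.whl -d old/unpacked
unzip -q new/crfm_helm-0.5.4-py3-none-any.whl -d new/unpacked
diff -ru old/unpacked new/unpacked
```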

Potentially problematic release: this version of crfm-helm might be problematic.

Files changed (209)
  1. {crfm_helm-0.5.2.dist-info → crfm_helm-0.5.4.dist-info}/METADATA +81 -112
  2. {crfm_helm-0.5.2.dist-info → crfm_helm-0.5.4.dist-info}/RECORD +165 -155
  3. {crfm_helm-0.5.2.dist-info → crfm_helm-0.5.4.dist-info}/WHEEL +1 -1
  4. helm/benchmark/adaptation/adapters/multiple_choice_joint_adapter.py +12 -5
  5. helm/benchmark/adaptation/adapters/test_generation_adapter.py +12 -12
  6. helm/benchmark/adaptation/adapters/test_language_modeling_adapter.py +8 -8
  7. helm/benchmark/adaptation/adapters/test_multiple_choice_joint_adapter.py +77 -9
  8. helm/benchmark/adaptation/common_adapter_specs.py +2 -0
  9. helm/benchmark/annotation/anthropic_red_team_annotator.py +57 -0
  10. helm/benchmark/annotation/call_center_annotator.py +258 -0
  11. helm/benchmark/annotation/financebench_annotator.py +79 -0
  12. helm/benchmark/annotation/harm_bench_annotator.py +55 -0
  13. helm/benchmark/annotation/{image2structure → image2struct}/latex_compiler_annotator.py +2 -2
  14. helm/benchmark/annotation/{image2structure → image2struct}/lilypond_compiler_annotator.py +5 -3
  15. helm/benchmark/annotation/{image2structure → image2struct}/webpage_compiler_annotator.py +5 -5
  16. helm/benchmark/annotation/live_qa_annotator.py +37 -45
  17. helm/benchmark/annotation/medication_qa_annotator.py +36 -44
  18. helm/benchmark/annotation/model_as_judge.py +96 -0
  19. helm/benchmark/annotation/simple_safety_tests_annotator.py +50 -0
  20. helm/benchmark/annotation/xstest_annotator.py +100 -0
  21. helm/benchmark/metrics/annotation_metrics.py +108 -0
  22. helm/benchmark/metrics/bhasa_metrics.py +188 -0
  23. helm/benchmark/metrics/bhasa_metrics_specs.py +10 -0
  24. helm/benchmark/metrics/code_metrics_helper.py +11 -1
  25. helm/benchmark/metrics/safety_metrics.py +79 -0
  26. helm/benchmark/metrics/summac/model_summac.py +3 -3
  27. helm/benchmark/metrics/tokens/test_ai21_token_cost_estimator.py +2 -2
  28. helm/benchmark/metrics/tokens/test_openai_token_cost_estimator.py +4 -4
  29. helm/benchmark/metrics/unitxt_metrics.py +17 -3
  30. helm/benchmark/metrics/vision_language/image_metrics.py +7 -3
  31. helm/benchmark/metrics/vision_language/image_utils.py +1 -1
  32. helm/benchmark/model_metadata_registry.py +3 -3
  33. helm/benchmark/presentation/create_plots.py +1 -1
  34. helm/benchmark/presentation/schema.py +3 -0
  35. helm/benchmark/presentation/summarize.py +106 -256
  36. helm/benchmark/presentation/test_run_entry.py +1 -0
  37. helm/benchmark/presentation/test_summarize.py +145 -3
  38. helm/benchmark/run.py +15 -0
  39. helm/benchmark/run_expander.py +83 -30
  40. helm/benchmark/run_specs/bhasa_run_specs.py +652 -0
  41. helm/benchmark/run_specs/call_center_run_specs.py +152 -0
  42. helm/benchmark/run_specs/decodingtrust_run_specs.py +8 -8
  43. helm/benchmark/run_specs/experimental_run_specs.py +52 -0
  44. helm/benchmark/run_specs/finance_run_specs.py +82 -1
  45. helm/benchmark/run_specs/safety_run_specs.py +154 -0
  46. helm/benchmark/run_specs/vlm_run_specs.py +100 -24
  47. helm/benchmark/scenarios/anthropic_red_team_scenario.py +71 -0
  48. helm/benchmark/scenarios/banking77_scenario.py +51 -0
  49. helm/benchmark/scenarios/bhasa_scenario.py +1942 -0
  50. helm/benchmark/scenarios/call_center_scenario.py +84 -0
  51. helm/benchmark/scenarios/decodingtrust_stereotype_bias_scenario.py +2 -1
  52. helm/benchmark/scenarios/ewok_scenario.py +116 -0
  53. helm/benchmark/scenarios/fin_qa_scenario.py +2 -0
  54. helm/benchmark/scenarios/financebench_scenario.py +53 -0
  55. helm/benchmark/scenarios/harm_bench_scenario.py +59 -0
  56. helm/benchmark/scenarios/raft_scenario.py +1 -1
  57. helm/benchmark/scenarios/scenario.py +1 -1
  58. helm/benchmark/scenarios/simple_safety_tests_scenario.py +33 -0
  59. helm/benchmark/scenarios/test_commonsense_scenario.py +21 -0
  60. helm/benchmark/scenarios/test_ewok_scenario.py +25 -0
  61. helm/benchmark/scenarios/test_financebench_scenario.py +26 -0
  62. helm/benchmark/scenarios/test_gsm_scenario.py +31 -0
  63. helm/benchmark/scenarios/test_legalbench_scenario.py +30 -0
  64. helm/benchmark/scenarios/test_math_scenario.py +2 -8
  65. helm/benchmark/scenarios/test_med_qa_scenario.py +30 -0
  66. helm/benchmark/scenarios/test_mmlu_scenario.py +33 -0
  67. helm/benchmark/scenarios/test_narrativeqa_scenario.py +73 -0
  68. helm/benchmark/scenarios/thai_exam_scenario.py +4 -4
  69. helm/benchmark/scenarios/vision_language/a_okvqa_scenario.py +1 -1
  70. helm/benchmark/scenarios/vision_language/bingo_scenario.py +2 -2
  71. helm/benchmark/scenarios/vision_language/crossmodal_3600_scenario.py +2 -1
  72. helm/benchmark/scenarios/vision_language/exams_v_scenario.py +104 -0
  73. helm/benchmark/scenarios/vision_language/fair_face_scenario.py +136 -0
  74. helm/benchmark/scenarios/vision_language/flickr30k_scenario.py +1 -1
  75. helm/benchmark/scenarios/vision_language/gqa_scenario.py +2 -2
  76. helm/benchmark/scenarios/vision_language/hateful_memes_scenario.py +1 -1
  77. helm/benchmark/scenarios/vision_language/{image2structure → image2struct}/chart2csv_scenario.py +1 -1
  78. helm/benchmark/scenarios/vision_language/{image2structure → image2struct}/latex_scenario.py +3 -3
  79. helm/benchmark/scenarios/vision_language/{image2structure → image2struct}/musicsheet_scenario.py +1 -1
  80. helm/benchmark/scenarios/vision_language/{image2structure → image2struct}/utils_latex.py +31 -39
  81. helm/benchmark/scenarios/vision_language/{image2structure → image2struct}/webpage/driver.py +1 -1
  82. helm/benchmark/scenarios/vision_language/{image2structure → image2struct}/webpage/utils.py +1 -1
  83. helm/benchmark/scenarios/vision_language/{image2structure → image2struct}/webpage_scenario.py +41 -12
  84. helm/benchmark/scenarios/vision_language/math_vista_scenario.py +1 -1
  85. helm/benchmark/scenarios/vision_language/mementos_scenario.py +3 -3
  86. helm/benchmark/scenarios/vision_language/mm_safety_bench_scenario.py +2 -2
  87. helm/benchmark/scenarios/vision_language/mme_scenario.py +21 -18
  88. helm/benchmark/scenarios/vision_language/mmmu_scenario.py +1 -1
  89. helm/benchmark/scenarios/vision_language/pairs_scenario.py +1 -1
  90. helm/benchmark/scenarios/vision_language/pope_scenario.py +2 -1
  91. helm/benchmark/scenarios/vision_language/real_world_qa_scenario.py +57 -0
  92. helm/benchmark/scenarios/vision_language/seed_bench_scenario.py +7 -5
  93. helm/benchmark/scenarios/vision_language/unicorn_scenario.py +2 -2
  94. helm/benchmark/scenarios/vision_language/vibe_eval_scenario.py +6 -3
  95. helm/benchmark/scenarios/vision_language/viz_wiz_scenario.py +1 -1
  96. helm/benchmark/scenarios/vision_language/vqa_scenario.py +3 -1
  97. helm/benchmark/scenarios/xstest_scenario.py +35 -0
  98. helm/benchmark/server.py +1 -6
  99. helm/benchmark/static/schema_air_bench.yaml +750 -750
  100. helm/benchmark/static/schema_bhasa.yaml +709 -0
  101. helm/benchmark/static/schema_call_center.yaml +232 -0
  102. helm/benchmark/static/schema_cleva.yaml +768 -0
  103. helm/benchmark/static/schema_decodingtrust.yaml +444 -0
  104. helm/benchmark/static/schema_ewok.yaml +367 -0
  105. helm/benchmark/static/schema_finance.yaml +55 -9
  106. helm/benchmark/static/{schema_image2structure.yaml → schema_image2struct.yaml} +231 -90
  107. helm/benchmark/static/schema_legal.yaml +566 -0
  108. helm/benchmark/static/schema_safety.yaml +266 -0
  109. helm/benchmark/static/schema_tables.yaml +149 -8
  110. helm/benchmark/static/schema_thai.yaml +21 -0
  111. helm/benchmark/static/schema_vhelm.yaml +137 -101
  112. helm/benchmark/static_build/assets/accenture-6f97eeda.png +0 -0
  113. helm/benchmark/static_build/assets/aisingapore-6dfc9acf.png +0 -0
  114. helm/benchmark/static_build/assets/cresta-9e22b983.png +0 -0
  115. helm/benchmark/static_build/assets/cuhk-8c5631e9.png +0 -0
  116. helm/benchmark/static_build/assets/index-05c76bb1.css +1 -0
  117. helm/benchmark/static_build/assets/index-3ee38b3d.js +10 -0
  118. helm/benchmark/static_build/assets/scb10x-204bd786.png +0 -0
  119. helm/benchmark/static_build/assets/vhelm-aspects-1437d673.png +0 -0
  120. helm/benchmark/static_build/assets/vhelm-framework-a1ca3f3f.png +0 -0
  121. helm/benchmark/static_build/assets/vhelm-model-8afb7616.png +0 -0
  122. helm/benchmark/static_build/assets/wellsfargo-a86a6c4a.png +0 -0
  123. helm/benchmark/static_build/index.html +2 -2
  124. helm/benchmark/window_services/test_openai_window_service.py +8 -8
  125. helm/benchmark/window_services/tokenizer_service.py +0 -5
  126. helm/clients/ai21_client.py +71 -1
  127. helm/clients/anthropic_client.py +7 -19
  128. helm/clients/huggingface_client.py +38 -37
  129. helm/clients/nvidia_nim_client.py +35 -0
  130. helm/clients/openai_client.py +18 -4
  131. helm/clients/palmyra_client.py +24 -0
  132. helm/clients/perspective_api_client.py +11 -6
  133. helm/clients/test_client.py +4 -6
  134. helm/clients/together_client.py +22 -0
  135. helm/clients/vision_language/open_flamingo_client.py +1 -2
  136. helm/clients/vision_language/palmyra_vision_client.py +28 -13
  137. helm/common/cache.py +8 -30
  138. helm/common/images_utils.py +6 -0
  139. helm/common/key_value_store.py +9 -9
  140. helm/common/mongo_key_value_store.py +5 -4
  141. helm/common/request.py +16 -0
  142. helm/common/test_cache.py +1 -48
  143. helm/common/tokenization_request.py +0 -9
  144. helm/config/model_deployments.yaml +444 -329
  145. helm/config/model_metadata.yaml +513 -111
  146. helm/config/tokenizer_configs.yaml +140 -11
  147. helm/proxy/example_queries.py +14 -21
  148. helm/proxy/server.py +0 -9
  149. helm/proxy/services/remote_service.py +0 -6
  150. helm/proxy/services/server_service.py +6 -20
  151. helm/proxy/services/service.py +0 -6
  152. helm/proxy/token_counters/test_auto_token_counter.py +2 -2
  153. helm/tokenizers/ai21_tokenizer.py +51 -59
  154. helm/tokenizers/cohere_tokenizer.py +0 -75
  155. helm/tokenizers/huggingface_tokenizer.py +0 -1
  156. helm/tokenizers/test_ai21_tokenizer.py +48 -0
  157. helm/benchmark/data_overlap/data_overlap_spec.py +0 -86
  158. helm/benchmark/data_overlap/export_scenario_text.py +0 -119
  159. helm/benchmark/data_overlap/light_scenario.py +0 -60
  160. helm/benchmark/scenarios/vision_language/image2structure/webpage/__init__.py +0 -0
  161. helm/benchmark/static/benchmarking.css +0 -156
  162. helm/benchmark/static/benchmarking.js +0 -1705
  163. helm/benchmark/static/config.js +0 -3
  164. helm/benchmark/static/general.js +0 -122
  165. helm/benchmark/static/images/crfm-logo.png +0 -0
  166. helm/benchmark/static/images/helm-logo-simple.png +0 -0
  167. helm/benchmark/static/images/helm-logo.png +0 -0
  168. helm/benchmark/static/images/language-model-helm.png +0 -0
  169. helm/benchmark/static/images/organizations/ai21.png +0 -0
  170. helm/benchmark/static/images/organizations/anthropic.png +0 -0
  171. helm/benchmark/static/images/organizations/bigscience.png +0 -0
  172. helm/benchmark/static/images/organizations/cohere.png +0 -0
  173. helm/benchmark/static/images/organizations/eleutherai.png +0 -0
  174. helm/benchmark/static/images/organizations/google.png +0 -0
  175. helm/benchmark/static/images/organizations/meta.png +0 -0
  176. helm/benchmark/static/images/organizations/microsoft.png +0 -0
  177. helm/benchmark/static/images/organizations/nvidia.png +0 -0
  178. helm/benchmark/static/images/organizations/openai.png +0 -0
  179. helm/benchmark/static/images/organizations/together.png +0 -0
  180. helm/benchmark/static/images/organizations/tsinghua-keg.png +0 -0
  181. helm/benchmark/static/images/organizations/yandex.png +0 -0
  182. helm/benchmark/static/images/scenarios-by-metrics.png +0 -0
  183. helm/benchmark/static/images/taxonomy-scenarios.png +0 -0
  184. helm/benchmark/static/index.html +0 -68
  185. helm/benchmark/static/info-icon.png +0 -0
  186. helm/benchmark/static/json-urls.js +0 -69
  187. helm/benchmark/static/plot-captions.js +0 -27
  188. helm/benchmark/static/utils.js +0 -285
  189. helm/benchmark/static_build/assets/index-30dbceba.js +0 -10
  190. helm/benchmark/static_build/assets/index-66b02d40.css +0 -1
  191. helm/benchmark/static_build/assets/vhelm-framework-cde7618a.png +0 -0
  192. helm/benchmark/static_build/assets/vhelm-model-6d812526.png +0 -0
  193. helm/benchmark/window_services/ai21_window_service.py +0 -247
  194. helm/benchmark/window_services/cohere_window_service.py +0 -101
  195. helm/benchmark/window_services/test_ai21_window_service.py +0 -163
  196. helm/benchmark/window_services/test_cohere_window_service.py +0 -75
  197. helm/benchmark/window_services/test_cohere_window_service_utils.py +0 -8328
  198. helm/benchmark/window_services/test_ice_window_service.py +0 -327
  199. helm/tokenizers/ice_tokenizer.py +0 -30
  200. helm/tokenizers/test_ice_tokenizer.py +0 -57
  201. {crfm_helm-0.5.2.dist-info → crfm_helm-0.5.4.dist-info}/LICENSE +0 -0
  202. {crfm_helm-0.5.2.dist-info → crfm_helm-0.5.4.dist-info}/entry_points.txt +0 -0
  203. {crfm_helm-0.5.2.dist-info → crfm_helm-0.5.4.dist-info}/top_level.txt +0 -0
  204. /helm/benchmark/annotation/{image2structure → image2struct}/__init__.py +0 -0
  205. /helm/benchmark/annotation/{image2structure → image2struct}/image_compiler_annotator.py +0 -0
  206. /helm/benchmark/{data_overlap → scenarios/vision_language/image2struct}/__init__.py +0 -0
  207. /helm/benchmark/scenarios/vision_language/{image2structure/image2structure_scenario.py → image2struct/image2struct_scenario.py} +0 -0
  208. /helm/benchmark/scenarios/vision_language/{image2structure → image2struct/webpage}/__init__.py +0 -0
  209. /helm/benchmark/scenarios/vision_language/{image2structure → image2struct}/webpage/jekyll_server.py +0 -0
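Many of the moves above relocate modules from `image2structure` to `image2struct` packages, so downstream code importing the old paths will break on upgrade. A hedged sketch for finding such imports, assuming GNU grep and an illustrative `src/` search root:

```sh
# Find code that still imports the renamed image2structure module paths.
grep -rn --include='*.py' 'image2structure' src/
```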
@@ -1,16 +1,19 @@
 Metadata-Version: 2.1
 Name: crfm-helm
-Version: 0.5.2
+Version: 0.5.4
 Summary: Benchmark for language models
 Home-page: https://github.com/stanford-crfm/helm
 Author: Stanford CRFM
 Author-email: contact-crfm@stanford.edu
 License: Apache License 2.0
 Keywords: language models benchmarking
+Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3 :: Only
-Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
 Classifier: License :: OSI Approved :: Apache Software License
-Requires-Python: <3.11,>=3.8
+Requires-Python: <3.12,>=3.9
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: cattrs ~=22.2
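The hunk above shifts the supported Python range from 3.8–3.10 to 3.9–3.11 (`Requires-Python: <3.12,>=3.9`). A minimal sketch of setting up a compatible environment, assuming a `python3.11` interpreter is installed (the venv name is illustrative):

```sh
# 0.5.4 accepts Python 3.9-3.11; 0.5.2 stopped at 3.10,
# so Python 3.11 users need this upgrade.
python3.11 -m venv helm-env
. helm-env/bin/activate
pip install crfm-helm==0.5.4
```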
@@ -28,8 +31,7 @@ Requires-Dist: bottle ~=0.12.23
 Requires-Dist: datasets ~=2.17
 Requires-Dist: pyarrow >=11.0.0
 Requires-Dist: pyarrow-hotfix ~=0.6
-Requires-Dist: nltk ~=3.7
-Requires-Dist: pyext ~=0.7
+Requires-Dist: nltk <3.8.2,~=3.7
 Requires-Dist: rouge-score ~=0.1.2
 Requires-Dist: scipy ~=1.10
 Requires-Dist: uncertainty-calibration ~=0.1.4
@@ -37,7 +39,8 @@ Requires-Dist: scikit-learn ~=1.1
 Requires-Dist: transformers ~=4.40
 Requires-Dist: torch <3.0.0,>=1.13.1
 Requires-Dist: torchvision <3.0.0,>=0.14.1
-Requires-Dist: google-api-python-client ~=2.64
+Provides-Extra: accelerate
+Requires-Dist: accelerate ~=0.25 ; extra == 'accelerate'
 Provides-Extra: aleph-alpha
 Requires-Dist: aleph-alpha-client ~=2.14.0 ; extra == 'aleph-alpha'
 Requires-Dist: tokenizers >=0.13.3 ; extra == 'aleph-alpha'
@@ -55,6 +58,7 @@ Requires-Dist: crfm-helm[models] ; extra == 'all'
 Requires-Dist: crfm-helm[mongo] ; extra == 'all'
 Requires-Dist: crfm-helm[heim] ; extra == 'all'
 Requires-Dist: crfm-helm[vlm] ; extra == 'all'
+Requires-Dist: crfm-helm[bhasa] ; extra == 'all'
 Provides-Extra: allenai
 Requires-Dist: ai2-olmo ~=0.2 ; extra == 'allenai'
 Provides-Extra: amazon
@@ -64,6 +68,10 @@ Requires-Dist: botocore ~=1.31.57 ; extra == 'amazon'
 Provides-Extra: anthropic
 Requires-Dist: anthropic ~=0.17 ; extra == 'anthropic'
 Requires-Dist: websocket-client ~=1.3.2 ; extra == 'anthropic'
+Provides-Extra: bhasa
+Requires-Dist: pythainlp ==5.0.0 ; extra == 'bhasa'
+Requires-Dist: pyonmttok ==1.37.0 ; extra == 'bhasa'
+Requires-Dist: sacrebleu ~=2.2.1 ; extra == 'bhasa'
 Provides-Extra: cleva
 Requires-Dist: unidecode ==1.3.6 ; extra == 'cleva'
 Requires-Dist: pypinyin ==0.49.0 ; extra == 'cleva'
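The new `bhasa` extra above (pythainlp, pyonmttok, sacrebleu) can be installed directly, or picked up transitively through the `all` extra, which now includes it. A minimal sketch, assuming a standard pip install from PyPI:

```sh
# Install the new 'bhasa' extra directly...
pip install "crfm-helm[bhasa]==0.5.4"

# ...or get it transitively via the 'all' extra, which now includes it.
pip install "crfm-helm[all]==0.5.4"
```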
@@ -83,8 +91,9 @@ Requires-Dist: flake8 ==5.0.4 ; extra == 'dev'
 Provides-Extra: google
 Requires-Dist: google-cloud-aiplatform ~=1.48 ; extra == 'google'
 Provides-Extra: heim
-Requires-Dist: gdown ~=4.4.0 ; extra == 'heim'
+Requires-Dist: gdown ~=5.1 ; extra == 'heim'
 Requires-Dist: diffusers ~=0.24.0 ; extra == 'heim'
+Requires-Dist: icetk ~=0.0.4 ; extra == 'heim'
 Requires-Dist: jax ~=0.4.13 ; extra == 'heim'
 Requires-Dist: jaxlib ~=0.4.13 ; extra == 'heim'
 Requires-Dist: crfm-helm[openai] ; extra == 'heim'
@@ -98,39 +107,43 @@ Requires-Dist: wandb ~=0.13.11 ; extra == 'heim'
 Requires-Dist: google-cloud-translate ~=3.11.2 ; extra == 'heim'
 Requires-Dist: autokeras ~=1.0.20 ; extra == 'heim'
 Requires-Dist: clip-anytorch ~=2.5.0 ; extra == 'heim'
-Requires-Dist: google-cloud-storage ~=2.9.0 ; extra == 'heim'
+Requires-Dist: google-cloud-storage ~=2.9 ; extra == 'heim'
 Requires-Dist: lpips ~=0.1.4 ; extra == 'heim'
 Requires-Dist: multilingual-clip ~=1.0.10 ; extra == 'heim'
 Requires-Dist: NudeNet ~=2.0.9 ; extra == 'heim'
 Requires-Dist: opencv-python ~=4.7.0.68 ; extra == 'heim'
 Requires-Dist: pytorch-fid ~=0.3.0 ; extra == 'heim'
-Requires-Dist: tensorflow ~=2.11.1 ; extra == 'heim'
+Requires-Dist: tensorflow ~=2.11 ; extra == 'heim'
 Requires-Dist: timm ~=0.6.12 ; extra == 'heim'
 Requires-Dist: torch-fidelity ~=0.3.0 ; extra == 'heim'
 Requires-Dist: torchmetrics ~=0.11.1 ; extra == 'heim'
+Requires-Dist: scikit-image !=0.23.*,==0.*,>=0.22 ; extra == 'heim'
 Requires-Dist: crfm-helm[images] ; extra == 'heim'
 Provides-Extra: human-evaluation
 Requires-Dist: scaleapi ~=2.13.0 ; extra == 'human-evaluation'
 Requires-Dist: surge-api ~=1.1.0 ; extra == 'human-evaluation'
-Provides-Extra: image2structure
-Requires-Dist: crfm-helm[images] ; extra == 'image2structure'
-Requires-Dist: latex ~=0.7.0 ; extra == 'image2structure'
-Requires-Dist: pdf2image ~=1.16.3 ; extra == 'image2structure'
-Requires-Dist: selenium ~=4.17.2 ; extra == 'image2structure'
-Requires-Dist: html2text ~=2024.2.26 ; extra == 'image2structure'
-Requires-Dist: opencv-python ~=4.7.0.68 ; extra == 'image2structure'
-Requires-Dist: lpips ~=0.1.4 ; extra == 'image2structure'
-Requires-Dist: imagehash ~=4.3.1 ; extra == 'image2structure'
+Provides-Extra: image2struct
+Requires-Dist: crfm-helm[images] ; extra == 'image2struct'
+Requires-Dist: latex ~=0.7.0 ; extra == 'image2struct'
+Requires-Dist: pdf2image ~=1.16.3 ; extra == 'image2struct'
+Requires-Dist: selenium ~=4.17.2 ; extra == 'image2struct'
+Requires-Dist: html2text ~=2024.2.26 ; extra == 'image2struct'
+Requires-Dist: opencv-python ~=4.7.0.68 ; extra == 'image2struct'
+Requires-Dist: lpips ~=0.1.4 ; extra == 'image2struct'
+Requires-Dist: imagehash ~=4.3.1 ; extra == 'image2struct'
 Provides-Extra: images
-Requires-Dist: accelerate ~=0.25.0 ; extra == 'images'
+Requires-Dist: crfm-helm[accelerate] ; extra == 'images'
 Requires-Dist: pillow ~=10.2 ; extra == 'images'
 Provides-Extra: metrics
-Requires-Dist: numba ~=0.56.4 ; extra == 'metrics'
+Requires-Dist: google-api-python-client ~=2.64 ; extra == 'metrics'
+Requires-Dist: numba ~=0.56 ; extra == 'metrics'
 Requires-Dist: pytrec-eval ==0.5 ; extra == 'metrics'
 Requires-Dist: sacrebleu ~=2.2.1 ; extra == 'metrics'
 Provides-Extra: mistral
 Requires-Dist: mistralai ~=0.0.11 ; extra == 'mistral'
 Provides-Extra: models
+Requires-Dist: crfm-helm[ai21] ; extra == 'models'
+Requires-Dist: crfm-helm[accelerate] ; extra == 'models'
 Requires-Dist: crfm-helm[aleph-alpha] ; extra == 'models'
 Requires-Dist: crfm-helm[allenai] ; extra == 'models'
 Requires-Dist: crfm-helm[amazon] ; extra == 'models'
@@ -141,7 +154,6 @@ Requires-Dist: crfm-helm[mistral] ; extra == 'models'
 Requires-Dist: crfm-helm[openai] ; extra == 'models'
 Requires-Dist: crfm-helm[reka] ; extra == 'models'
 Requires-Dist: crfm-helm[together] ; extra == 'models'
-Requires-Dist: crfm-helm[tsinghua] ; extra == 'models'
 Requires-Dist: crfm-helm[yandex] ; extra == 'models'
 Requires-Dist: crfm-helm[openvino] ; extra == 'models'
 Provides-Extra: mongo
@@ -157,11 +169,11 @@ Requires-Dist: colorcet ~=3.0.1 ; extra == 'plots'
 Requires-Dist: matplotlib ~=3.6.0 ; extra == 'plots'
 Requires-Dist: seaborn ~=0.11.0 ; extra == 'plots'
 Provides-Extra: proxy-server
-Requires-Dist: gunicorn ~=20.1.0 ; extra == 'proxy-server'
+Requires-Dist: gunicorn >=20.1 ; extra == 'proxy-server'
 Provides-Extra: reka
 Requires-Dist: reka-api ~=2.0.0 ; extra == 'reka'
 Provides-Extra: scenarios
-Requires-Dist: gdown ~=4.4.0 ; extra == 'scenarios'
+Requires-Dist: gdown ~=5.1 ; extra == 'scenarios'
 Requires-Dist: sympy ~=1.11.1 ; extra == 'scenarios'
 Requires-Dist: xlrd ~=2.0.1 ; extra == 'scenarios'
 Provides-Extra: slurm
@@ -170,22 +182,20 @@ Provides-Extra: summarization
 Requires-Dist: summ-eval ~=0.892 ; extra == 'summarization'
 Provides-Extra: together
 Requires-Dist: together ~=1.1 ; extra == 'together'
-Provides-Extra: tsinghua
-Requires-Dist: icetk ~=0.0.4 ; extra == 'tsinghua'
 Provides-Extra: unitxt
 Requires-Dist: evaluate ~=0.4.1 ; extra == 'unitxt'
 Provides-Extra: vlm
 Requires-Dist: crfm-helm[openai] ; extra == 'vlm'
 Requires-Dist: einops ~=0.7.0 ; extra == 'vlm'
 Requires-Dist: einops-exts ~=0.0.4 ; extra == 'vlm'
-Requires-Dist: open-clip-torch ~=2.24.0 ; extra == 'vlm'
-Requires-Dist: torch ~=2.1.2 ; extra == 'vlm'
+Requires-Dist: open-clip-torch ~=2.24 ; extra == 'vlm'
+Requires-Dist: torch ~=2.1 ; extra == 'vlm'
 Requires-Dist: transformers-stream-generator ~=0.0.4 ; extra == 'vlm'
 Requires-Dist: scipy ~=1.10 ; extra == 'vlm'
 Requires-Dist: torchvision <3.0.0,>=0.14.1 ; extra == 'vlm'
 Requires-Dist: crfm-helm[reka] ; extra == 'vlm'
 Requires-Dist: crfm-helm[images] ; extra == 'vlm'
-Requires-Dist: crfm-helm[image2structure] ; extra == 'vlm'
+Requires-Dist: crfm-helm[image2struct] ; extra == 'vlm'
 Requires-Dist: pycocoevalcap ~=1.2 ; extra == 'vlm'
 Provides-Extra: yandex
 Requires-Dist: sentencepiece ~=0.1.97 ; extra == 'yandex'
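The hunks above also rename the `image2structure` extra to `image2struct`, so installs that referenced the old extra name should switch to the new one; a minimal sketch, assuming a pip install from PyPI:

```sh
# Old extra name (0.5.2):
#   pip install "crfm-helm[image2structure]"
# New extra name (0.5.4):
pip install "crfm-helm[image2struct]==0.5.4"
```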
@@ -209,40 +219,16 @@ Welcome! The **`crfm-helm`** Python package contains code used in the **Holistic
 
 To get started, refer to [the documentation on Read the Docs](https://crfm-helm.readthedocs.io/) for how to install and run the package.
 
-## Directory Structure
+## Papers
 
-The directory structure for this repo is as follows
+This repository contains code used to produce results for the following papers:
 
-```
-├── docs # MD used to generate readthedocs
-
-├── scripts # Python utility scripts for HELM
-│ ├── cache
-│ ├── data_overlap # Calculate train test overlap
-│ │ ├── common
-│ │ ├── scenarios
-│ │ └── test
-│ ├── efficiency
-│ ├── fact_completion
-│ ├── offline_eval
-│ └── scale
-└── src
-├── helm # Benchmarking Scripts for HELM
-│ │
-│ ├── benchmark # Main Python code for running HELM
-│ │ │
-│ │ └── static # Current JS (Jquery) code for rendering front-end
-│ │ │
-│ │ └── ...
-│ │
-│ ├── common # Additional Python code for running HELM
-│ │
-│ └── proxy # Python code for external web requests
-
-└── helm-frontend # New React Front-end
-```
+- Holistic Evaluation of Vision-Language Models (VHELM) - paper (TBD), [leaderboard](https://crfm.stanford.edu/helm/vhelm/latest/), [documentation](https://crfm-helm.readthedocs.io/en/latest/vhelm/)
+- Holistic Evaluation of Text-To-Image Models (HEIM) - [paper](https://arxiv.org/abs/2311.04287), [leaderboard](https://crfm.stanford.edu/helm/heim/latest/), [documentation](https://crfm-helm.readthedocs.io/en/latest/heim/)
+
+The HELM Python package can be used to reproduce the published model evaluation results from these paper. To get started, refer to the documentation links above for the corresponding paper, or the [main Reproducing Leaderboards documentation](https://crfm-helm.readthedocs.io/en/latest/reproducing_leaderboards/).
 
-# Holistic Evaluation of Text-To-Image Models
+## Holistic Evaluation of Text-To-Image Models
 
 <img src="https://github.com/stanford-crfm/helm/raw/heim/src/helm/benchmark/static/heim/images/heim-logo.png" alt="" width="800"/>
 
@@ -275,6 +261,22 @@ demonstrating strengths in different aspects.
 This repository contains the code used to produce the [results on the website](https://crfm.stanford.edu/heim/latest/)
 and [paper](https://arxiv.org/abs/2311.04287).
 
+## Citation
+
+If you use this software in your research, please cite the [Holistic Evaluation of Language Models paper](https://openreview.net/forum?id=iO4LZibEqW) as below.
+
+```bibtex
+@article{
+liang2023holistic,
+title={Holistic Evaluation of Language Models},
+author={Percy Liang and Rishi Bommasani and Tony Lee and Dimitris Tsipras and Dilara Soylu and Michihiro Yasunaga and Yian Zhang and Deepak Narayanan and Yuhuai Wu and Ananya Kumar and Benjamin Newman and Binhang Yuan and Bobby Yan and Ce Zhang and Christian Alexander Cosgrove and Christopher D Manning and Christopher Re and Diana Acosta-Navas and Drew Arad Hudson and Eric Zelikman and Esin Durmus and Faisal Ladhak and Frieda Rong and Hongyu Ren and Huaxiu Yao and Jue WANG and Keshav Santhanam and Laurel Orr and Lucia Zheng and Mert Yuksekgonul and Mirac Suzgun and Nathan Kim and Neel Guha and Niladri S. Chatterji and Omar Khattab and Peter Henderson and Qian Huang and Ryan Andrew Chi and Sang Michael Xie and Shibani Santurkar and Surya Ganguli and Tatsunori Hashimoto and Thomas Icard and Tianyi Zhang and Vishrav Chaudhary and William Wang and Xuechen Li and Yifan Mai and Yuhui Zhang and Yuta Koreeda},
+journal={Transactions on Machine Learning Research},
+issn={2835-8856},
+year={2023},
+url={https://openreview.net/forum?id=iO4LZibEqW},
+note={Featured Certification, Expert Certification}
+}
+```
 # Tutorial
 
 This tutorial will explain how to use the HELM command line tools to run benchmarks, aggregate statistics, and visualize results.
@@ -285,34 +287,26 @@ We will run two runs using the `mmlu` scenario on the `openai/gpt2` model. The `
 
 `helm-run` is a command line tool for running benchmarks.
 
-To run this benchmark using the HELM command-line tools, we need to specify **run spec descriptions** that describes the desired runs. For this example, the run spec descriptions are `mmlu:subject=anatomy,model=openai/gpt2` (for anatomy) and `mmlu:subject=philosophy,model=openai/gpt2` (for philosophy).
-
-Next, we need to create a **run spec configuration file** containing these run spec descriptions. A run spec configuration file is a text file containing `RunEntries` serialized to JSON, where each entry in `RunEntries` contains a run spec description. The `description` field of each entry should be a **run spec description**. Create a text file named `run_entries.conf` with the following contents:
-
-```
-entries: [
-{description: "mmlu:subject=anatomy,model=openai/gpt2", priority: 1},
-{description: "mmlu:subject=philosophy,model=openai/gpt2", priority: 1},
-]
-```
+To run this benchmark using the HELM command-line tools, we need to specify **run entries** that describes the desired runs. For this example, the run entries are `mmlu:subject=anatomy,model=openai/gpt2` (for anatomy) and `mmlu:subject=philosophy,model=openai/gpt2` (for philosophy).
 
-We will now use `helm-run` to execute the runs that have been specified in this run spec configuration file. Run this command:
+We will now use `helm-run` to execute the runs. Run this command:
 
-```
-helm-run --conf-paths run_entries.conf --suite v1 --max-eval-instances 10
+```sh
+helm-run --run-entries mmlu:subject=anatomy,model=openai/gpt2 mmlu:subject=philosophy,model=openai/gpt2 --suite my-suite --max-eval-instances 10
 ```
 
-The meaning of the additional arguments are as follows:
+The meaning of the arguments are as follows:
 
+- `--run-entries` specifies the run entries from the desired runs.
 - `--suite` specifies a subdirectory under the output directory in which all the output will be placed.
-- `--max-eval-instances` limits evaluation to only the first *N* inputs (i.e. instances) from the benchmark.
+- `--max-eval-instances` limits evaluation to only *N* instances (i.e. items) from the benchmark, using a randomly shuffled order of instances.
 
 `helm-run` creates an environment directory environment and an output directory by default.
 
 - The environment directory is `prod_env/` by default and can be set using `--local-path`. Credentials for making API calls should be added to a `credentials.conf` file in this directory.
 - The output directory is `benchmark_output/` by default and can be set using `--output-path`.
 
-After running this command, navigate to the `benchmark_output/runs/v1/` directory. This should contain a two sub-directories named `mmlu:subject=anatomy,model=openai_gpt2` and `mmlu:subject=philosophy,model=openai_gpt2`. Note that the names of these sub-directories is based on the run spec descriptions we used earlier, but with `/` replaced with `_`.
+After running this command, navigate to the `benchmark_output/runs/my-suite/` directory. This should contain a two sub-directories named `mmlu:subject=anatomy,model=openai_gpt2` and `mmlu:subject=philosophy,model=openai_gpt2`. Note that the names of these sub-directories is based on the run entries we used earlier, but with `/` replaced with `_`.
 
 Each output sub-directory will contain several JSON files that were generated during the corresponding run:
 
@@ -322,60 +316,35 @@ Each output sub-directory will contain several JSON files that were generated du
 - `per_instance_stats.json` contains a serialized list of `PerInstanceStats`, which contains the statistics produced for the metrics for each instance (i.e. input).
 - `stats.json` contains a serialized list of `PerInstanceStats`, which contains the statistics produced for the metrics, aggregated across all instances (i.e. inputs).
 
-`helm-run` provides additional arguments that can be used to filter out `--models-to-run`, `--groups-to-run` and `--priority`. It can be convenient to create a large `run_entries.conf` file containing every run spec description of interest, and then use these flags to filter down the RunSpecs to actually run. As an example, the main `run_specs.conf` file used for the HELM benchmarking paper can be found [here](https://github.com/stanford-crfm/helm/blob/main/src/helm/benchmark/presentation/run_specs.conf).
-
-**Using model or model_deployment:** Some models have several deployments (for exmaple `eleutherai/gpt-j-6b` is deployed under `huggingface/gpt-j-6b`, `gooseai/gpt-j-6b` and `together/gpt-j-6b`). Since the results can differ depending on the deployment, we provide a way to specify the deployment instead of the model. Instead of using `model=eleutherai/gpt-g-6b`, use `model_deployment=huggingface/gpt-j-6b`. If you do not, a deployment will be arbitrarily chosen. This can still be used for models that have a single deployment and is a good practice to follow to avoid any ambiguity.
-
 ## Using `helm-summarize`
 
 The `helm-summarize` reads the output files of `helm-run` and computes aggregate statistics across runs. Run the following:
 
-```
-helm-summarize --suite v1
+```sh
+helm-summarize --suite my-suite
 ```
 
-This reads the pre-existing files in `benchmark_output/runs/v1/` that were written by `helm-run` previously, and writes the following new files back to `benchmark_output/runs/v1/`:
+This reads the pre-existing files in `benchmark_output/runs/my-suite/` that were written by `helm-run` previously, and writes the following new files back to `benchmark_output/runs/my-suite/`:
 
 - `summary.json` contains a serialized `ExecutiveSummary` with a date and suite name.
-- `run_specs.json` contains the run spec descriptions for all the runs.
+- `run_specs.json` contains the run entries for all the runs.
 - `runs.json` contains serialized list of `Run`, which contains the run path, run spec and adapter spec and statistics for each run.
- - `groups.json` contains a serialized list of `Table`, each containing information about groups in a group category.
 - `groups_metadata.json` contains a list of all the groups along with a human-readable description and a taxonomy.
 
-Additionally, for each group and group-relavent metric, it will output a pair of files: `benchmark_output/runs/v1/groups/latex/<group_name>_<metric_name>.tex` and `benchmark_output/runs/v1/groups/json/<group_name>_<metric_name>.json`. These files contain the statistics for that metric from each run within the group.
-
-<!--
-# TODO(#1441): Enable plots
-
-## Using `helm-create-plots`
-
-The `helm-create-plots` reads the `groups` directory created by `helm-summarize` and creates plots, equivalent to those use in the HELM paper. Run the following:
-
-```
-helm-create-plots --suite v1
-```
-
-This reads the pre-existing files in `benchmark_output/runs/v1/groups` that were written by `helm-summarize` previously,
-and creates plots (`.png` or `.pdf`) at `benchmark_output/runs/v1/plots`.
-
--->
+Additionally, for each group and group-relavent metric, it will output a pair of files: `benchmark_output/runs/my-suite/groups/latex/<group_name>_<metric_name>.tex` and `benchmark_output/runs/my-suite/groups/json/<group_name>_<metric_name>.json`. These files contain the statistics for that metric from each run within the group.
 
 ## Using `helm-server`
 
 Finally, the `helm-server` command launches a web server to visualize the output files of `helm-run` and `helm-benchmark`. Run:
 
+```sh
+helm-server --suite my-suite
 ```
-helm-server
-```
-
-Open a browser and go to http://localhost:8000/ to view the visualization. You should see a similar view as [live website for the paper](https://crfm.stanford.edu/helm/v1.0/), but for the data from your benchmark runs. The website has three main sections:
-
-- **Models** contains a list of available models.
-- **Scenarios** contains a list of available scenarios.
-- **Results** contains results from the runs, organized into groups and categories of groups.
-- **Raw Runs** contains a searchable list of runs.
 
-## Other Tips
+Open a browser and go to http://localhost:8000/ to view the visualization. You should see a similar view as [live website for the paper](https://crfm.stanford.edu/helm/classic/latest/), but for the data from your benchmark runs. The website has the following sections accessible from the top menu bar:
 
-- The suite name can be used as a versioning mechanism to separate runs using different versions of scenarios or models.
-- Tools such as [`jq`](https://stedolan.github.io/jq/) are useful for examining the JSON output files on the command line.
+- **Leaderboards** contains the leaderboards with aggregate metrics.
+- **Models** contains a list of models and their descriptions
+- **Scenarios** contains a list of scenarios and their descriptions.
+- **Predictions** contains a searchable list of runs.
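Taken together, the updated README describes a three-step workflow. The sketch below simply chains the commands exactly as shown in the diff above (`my-suite` is the tutorial's suite name):

```sh
# 1. Run the two MMLU benchmarks from the tutorial.
helm-run \
  --run-entries mmlu:subject=anatomy,model=openai/gpt2 mmlu:subject=philosophy,model=openai/gpt2 \
  --suite my-suite --max-eval-instances 10

# 2. Aggregate statistics across the runs.
helm-summarize --suite my-suite

# 3. Serve the results; then open http://localhost:8000/ in a browser.
helm-server --suite my-suite
```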