crfm-helm 0.5.4__py3-none-any.whl → 0.5.5__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

This version of crfm-helm has been flagged as potentially problematic.

Files changed (580)
  1. crfm_helm-0.5.5.dist-info/METADATA +413 -0
  2. crfm_helm-0.5.5.dist-info/RECORD +894 -0
  3. {crfm_helm-0.5.4.dist-info → crfm_helm-0.5.5.dist-info}/WHEEL +1 -1
  4. helm/benchmark/adaptation/adapter_spec.py +13 -1
  5. helm/benchmark/adaptation/adapters/adapter_factory.py +15 -1
  6. helm/benchmark/adaptation/adapters/binary_ranking_adapter.py +1 -1
  7. helm/benchmark/adaptation/adapters/chat_adapter.py +49 -0
  8. helm/benchmark/adaptation/adapters/ehr_instruction_adapter.py +108 -0
  9. helm/benchmark/adaptation/adapters/generation_adapter.py +1 -1
  10. helm/benchmark/adaptation/adapters/in_context_learning_adapter.py +1 -1
  11. helm/benchmark/adaptation/adapters/language_modeling_adapter.py +1 -1
  12. helm/benchmark/adaptation/adapters/multimodal/generation_multimodal_adapter.py +4 -2
  13. helm/benchmark/adaptation/adapters/multimodal/in_context_learning_multimodal_adapter.py +1 -1
  14. helm/benchmark/adaptation/adapters/multimodal/multiple_choice_joint_multimodal_adapter.py +1 -1
  15. helm/benchmark/adaptation/adapters/multimodal/test_in_context_learning_multimodal_adapter.py +4 -2
  16. helm/benchmark/adaptation/adapters/multimodal/test_multimodal_prompt.py +1 -1
  17. helm/benchmark/adaptation/adapters/multiple_choice_calibrated_adapter.py +1 -1
  18. helm/benchmark/adaptation/adapters/multiple_choice_joint_adapter.py +2 -2
  19. helm/benchmark/adaptation/adapters/multiple_choice_joint_chain_of_thought_adapter.py +87 -0
  20. helm/benchmark/adaptation/adapters/multiple_choice_separate_adapter.py +1 -1
  21. helm/benchmark/adaptation/adapters/test_generation_adapter.py +3 -3
  22. helm/benchmark/adaptation/adapters/test_language_modeling_adapter.py +2 -2
  23. helm/benchmark/adaptation/adapters/test_multiple_choice_joint_adapter.py +2 -2
  24. helm/benchmark/adaptation/common_adapter_specs.py +69 -4
  25. helm/benchmark/adaptation/prompt.py +1 -1
  26. helm/benchmark/annotation/aci_bench_annotator.py +95 -0
  27. helm/benchmark/annotation/air_bench_annotator.py +20 -5
  28. helm/benchmark/annotation/annotator.py +5 -0
  29. helm/benchmark/annotation/annotator_factory.py +3 -20
  30. helm/benchmark/annotation/autobencher_capabilities_annotator.py +107 -0
  31. helm/benchmark/annotation/autobencher_safety_annotator.py +98 -0
  32. helm/benchmark/annotation/bigcodebench_annotator.py +108 -0
  33. helm/benchmark/annotation/bird_sql_annotator.py +58 -0
  34. helm/benchmark/annotation/chw_care_plan_annotator.py +98 -0
  35. helm/benchmark/annotation/czech_bank_qa_annotator.py +78 -0
  36. helm/benchmark/annotation/dischargeme_annotator.py +107 -0
  37. helm/benchmark/annotation/ehr_sql_annotator.py +87 -0
  38. helm/benchmark/annotation/helpdesk_call_summarization_annotator.py +131 -0
  39. helm/benchmark/annotation/image2struct/image_compiler_annotator.py +6 -1
  40. helm/benchmark/annotation/live_qa_annotator.py +1 -1
  41. helm/benchmark/annotation/med_dialog_annotator.py +99 -0
  42. helm/benchmark/annotation/medalign_annotator.py +100 -0
  43. helm/benchmark/annotation/medi_qa_annotator.py +98 -0
  44. helm/benchmark/annotation/medication_qa_annotator.py +87 -63
  45. helm/benchmark/annotation/mental_health_annotator.py +98 -0
  46. helm/benchmark/annotation/mimic_rrs_annotator.py +100 -0
  47. helm/benchmark/annotation/model_as_judge.py +218 -6
  48. helm/benchmark/annotation/mtsamples_procedures_annotator.py +98 -0
  49. helm/benchmark/annotation/mtsamples_replicate_annotator.py +101 -0
  50. helm/benchmark/annotation/omni_math/gpt_evaluation_template.txt +152 -0
  51. helm/benchmark/annotation/omni_math/gpt_evaluation_zero_shot_template.txt +36 -0
  52. helm/benchmark/annotation/omni_math_annotator.py +132 -0
  53. helm/benchmark/annotation/spider_annotator.py +18 -0
  54. helm/benchmark/annotation/starr_patient_instructions_annotator.py +98 -0
  55. helm/benchmark/annotation/wildbench/eval_template.pairwise.v2.md +75 -0
  56. helm/benchmark/annotation/wildbench/eval_template.score.v2.md +66 -0
  57. helm/benchmark/annotation/wildbench_annotator.py +119 -0
  58. helm/benchmark/annotation_executor.py +35 -15
  59. helm/benchmark/augmentations/cleva_perturbation.py +9 -8
  60. helm/benchmark/augmentations/contraction_expansion_perturbation.py +2 -2
  61. helm/benchmark/augmentations/contrast_sets_perturbation.py +2 -2
  62. helm/benchmark/augmentations/dialect_perturbation.py +4 -5
  63. helm/benchmark/augmentations/extra_space_perturbation.py +2 -2
  64. helm/benchmark/augmentations/filler_words_perturbation.py +2 -2
  65. helm/benchmark/augmentations/gender_perturbation.py +2 -2
  66. helm/benchmark/augmentations/lowercase_perturbation.py +2 -2
  67. helm/benchmark/augmentations/mild_mix_perturbation.py +6 -6
  68. helm/benchmark/augmentations/misspelling_perturbation.py +2 -2
  69. helm/benchmark/augmentations/person_name_perturbation.py +4 -5
  70. helm/benchmark/augmentations/perturbation.py +1 -1
  71. helm/benchmark/augmentations/space_perturbation.py +2 -2
  72. helm/benchmark/augmentations/suffix_perturbation.py +2 -2
  73. helm/benchmark/augmentations/synonym_perturbation.py +4 -3
  74. helm/benchmark/augmentations/test_perturbation.py +16 -13
  75. helm/benchmark/augmentations/translate_perturbation.py +2 -2
  76. helm/benchmark/augmentations/typos_perturbation.py +2 -2
  77. helm/benchmark/data_preprocessor.py +2 -2
  78. helm/benchmark/huggingface_registration.py +2 -7
  79. helm/benchmark/metrics/aci_bench_metrics.py +34 -0
  80. helm/benchmark/metrics/basic_metrics.py +6 -6
  81. helm/benchmark/metrics/bbq_metrics.py +2 -2
  82. helm/benchmark/metrics/bias_metrics.py +12 -3
  83. helm/benchmark/metrics/bigcodebench_metrics.py +25 -0
  84. helm/benchmark/metrics/bird_sql_metrics.py +28 -0
  85. helm/benchmark/metrics/chw_care_plan_metrics.py +34 -0
  86. helm/benchmark/metrics/classification_metrics.py +76 -12
  87. helm/benchmark/metrics/cleva_harms_metrics.py +8 -7
  88. helm/benchmark/metrics/code_metrics.py +5 -5
  89. helm/benchmark/metrics/comet_metric.py +125 -0
  90. helm/benchmark/metrics/common_metric_specs.py +9 -2
  91. helm/benchmark/metrics/conv_fin_qa_calc_metrics.py +72 -0
  92. helm/benchmark/metrics/copyright_metrics.py +4 -4
  93. helm/benchmark/metrics/czech_bank_qa_metrics.py +29 -0
  94. helm/benchmark/metrics/decodingtrust_fairness_metrics.py +2 -2
  95. helm/benchmark/metrics/decodingtrust_privacy_metrics.py +2 -2
  96. helm/benchmark/metrics/decodingtrust_stereotype_bias_metrics.py +2 -2
  97. helm/benchmark/metrics/dischargeme_metrics.py +34 -0
  98. helm/benchmark/metrics/disinformation_metrics.py +4 -4
  99. helm/benchmark/metrics/dry_run_metrics.py +5 -5
  100. helm/benchmark/metrics/efficiency_metrics.py +3 -3
  101. helm/benchmark/metrics/ehr_sql_metrics.py +103 -0
  102. helm/benchmark/metrics/evaluate_instances_metric.py +3 -3
  103. helm/benchmark/metrics/evaluate_reference_metrics.py +144 -16
  104. helm/benchmark/metrics/gpqa_chain_of_thought_metric.py +103 -0
  105. helm/benchmark/metrics/gpt4_audio_critique_metrics.py +167 -0
  106. helm/benchmark/metrics/helpdesk_call_summarization_metrics.py +36 -0
  107. helm/benchmark/metrics/ifeval/__init__.py +0 -0
  108. helm/benchmark/metrics/ifeval/instructions.py +1574 -0
  109. helm/benchmark/metrics/ifeval/instructions_registry.py +182 -0
  110. helm/benchmark/metrics/ifeval/instructions_registry.pyi +3 -0
  111. helm/benchmark/metrics/ifeval/instructions_util.py +153 -0
  112. helm/benchmark/metrics/ifeval_metrics.py +55 -0
  113. helm/benchmark/metrics/image_generation/aesthetics_metrics.py +1 -1
  114. helm/benchmark/metrics/image_generation/detection_metrics.py +1 -1
  115. helm/benchmark/metrics/image_generation/detectors/vitdet.py +1 -1
  116. helm/benchmark/metrics/image_generation/fractal_dimension/test_fractal_dimension_util.py +1 -1
  117. helm/benchmark/metrics/image_generation/fractal_dimension_metric.py +1 -1
  118. helm/benchmark/metrics/image_generation/nsfw_metrics.py +1 -1
  119. helm/benchmark/metrics/image_generation/q16/test_q16.py +3 -1
  120. helm/benchmark/metrics/image_generation/q16_toxicity_metrics.py +1 -1
  121. helm/benchmark/metrics/image_generation/skin_tone_metrics.py +2 -2
  122. helm/benchmark/metrics/image_generation/watermark/test_watermark_detector.py +1 -1
  123. helm/benchmark/metrics/image_generation/watermark_metrics.py +1 -1
  124. helm/benchmark/metrics/instruction_following_critique_metrics.py +4 -4
  125. helm/benchmark/metrics/language_modeling_metrics.py +4 -4
  126. helm/benchmark/metrics/machine_translation_metrics.py +2 -2
  127. helm/benchmark/metrics/med_dialog_metrics.py +34 -0
  128. helm/benchmark/metrics/medalign_metrics.py +34 -0
  129. helm/benchmark/metrics/medcalc_bench_metrics.py +124 -0
  130. helm/benchmark/metrics/medec_metrics.py +101 -0
  131. helm/benchmark/metrics/medi_qa_metrics.py +34 -0
  132. helm/benchmark/metrics/medication_qa_metrics.py +15 -4
  133. helm/benchmark/metrics/mental_health_metrics.py +34 -0
  134. helm/benchmark/metrics/metric.py +3 -3
  135. helm/benchmark/metrics/mimic_rrs_metrics.py +34 -0
  136. helm/benchmark/metrics/mimiciv_billing_code_metrics.py +96 -0
  137. helm/benchmark/metrics/mtsamples_procedures_metrics.py +34 -0
  138. helm/benchmark/metrics/mtsamples_replicate_metrics.py +34 -0
  139. helm/benchmark/metrics/nltk_helper.py +32 -0
  140. helm/benchmark/metrics/numeracy_metrics.py +4 -4
  141. helm/benchmark/metrics/omni_math_metrics.py +32 -0
  142. helm/benchmark/metrics/output_processing_metric.py +60 -0
  143. helm/benchmark/metrics/output_processors.py +15 -0
  144. helm/benchmark/metrics/paraphrase_generation_metrics.py +2 -2
  145. helm/benchmark/metrics/ranking_metrics.py +3 -3
  146. helm/benchmark/metrics/reference_metric.py +3 -3
  147. helm/benchmark/metrics/{bhasa_metrics.py → seahelm_metrics.py} +3 -3
  148. helm/benchmark/metrics/seahelm_metrics_specs.py +10 -0
  149. helm/benchmark/metrics/spider_metrics.py +7 -0
  150. helm/benchmark/metrics/starr_patient_instructions_metrics.py +34 -0
  151. helm/benchmark/metrics/statistic.py +1 -1
  152. helm/benchmark/metrics/summac/model_summac.py +1 -1
  153. helm/benchmark/metrics/summarization_critique_metrics.py +4 -4
  154. helm/benchmark/metrics/summarization_metrics.py +19 -9
  155. helm/benchmark/metrics/test_bias_metrics.py +5 -1
  156. helm/benchmark/metrics/test_classification_metrics.py +140 -68
  157. helm/benchmark/metrics/test_evaluate_reference_metrics.py +15 -0
  158. helm/benchmark/metrics/test_metric.py +1 -1
  159. helm/benchmark/metrics/test_statistic.py +2 -2
  160. helm/benchmark/metrics/tokens/ai21_token_cost_estimator.py +1 -1
  161. helm/benchmark/metrics/tokens/auto_token_cost_estimator.py +6 -6
  162. helm/benchmark/metrics/tokens/cohere_token_cost_estimator.py +1 -1
  163. helm/benchmark/metrics/tokens/free_token_cost_estimator.py +1 -1
  164. helm/benchmark/metrics/tokens/gooseai_token_cost_estimator.py +1 -1
  165. helm/benchmark/metrics/tokens/openai_token_cost_estimator.py +1 -1
  166. helm/benchmark/metrics/tokens/test_ai21_token_cost_estimator.py +1 -1
  167. helm/benchmark/metrics/tokens/test_openai_token_cost_estimator.py +1 -1
  168. helm/benchmark/metrics/toxicity_metrics.py +4 -4
  169. helm/benchmark/metrics/unitxt_metrics.py +4 -1
  170. helm/benchmark/metrics/vision_language/image_metrics.py +1 -1
  171. helm/benchmark/metrics/wildbench_metrics.py +34 -0
  172. helm/benchmark/model_metadata_registry.py +16 -0
  173. helm/benchmark/presentation/summarize.py +23 -10
  174. helm/benchmark/presentation/torr_robustness_summarizer.py +178 -0
  175. helm/benchmark/reeval_run.py +203 -0
  176. helm/benchmark/reeval_runner.py +355 -0
  177. helm/benchmark/run.py +8 -17
  178. helm/benchmark/run_expander.py +78 -8
  179. helm/benchmark/run_spec_factory.py +12 -0
  180. helm/benchmark/run_specs/air_bench_run_specs.py +21 -3
  181. helm/benchmark/run_specs/audio_run_specs.py +613 -0
  182. helm/benchmark/run_specs/call_center_run_specs.py +49 -0
  183. helm/benchmark/run_specs/capabilities_run_specs.py +308 -0
  184. helm/benchmark/run_specs/classic_run_specs.py +1 -69
  185. helm/benchmark/run_specs/enem_challenge_specs.py +31 -0
  186. helm/benchmark/run_specs/enterprise_run_specs.py +260 -0
  187. helm/benchmark/run_specs/experimental_run_specs.py +112 -3
  188. helm/benchmark/run_specs/imdb_ptbr_run_specs.py +30 -0
  189. helm/benchmark/run_specs/lite_run_specs.py +2 -2
  190. helm/benchmark/run_specs/long_context_run_specs.py +89 -0
  191. helm/benchmark/run_specs/medhelm_run_specs.py +1155 -0
  192. helm/benchmark/run_specs/mmlu_clinical_afr_run_specs.py +49 -0
  193. helm/benchmark/run_specs/oab_exams_specs.py +32 -0
  194. helm/benchmark/run_specs/safety_run_specs.py +37 -0
  195. helm/benchmark/run_specs/{bhasa_run_specs.py → seahelm_run_specs.py} +44 -44
  196. helm/benchmark/run_specs/sql_run_specs.py +54 -0
  197. helm/benchmark/run_specs/tweetsentbr_run_specs.py +32 -0
  198. helm/benchmark/run_specs/unitxt_run_specs.py +14 -5
  199. helm/benchmark/run_specs/vlm_run_specs.py +75 -2
  200. helm/benchmark/run_specs/winogrande_afr_run_specs.py +47 -0
  201. helm/benchmark/scenarios/aci_bench_scenario.py +120 -0
  202. helm/benchmark/scenarios/air_bench_scenario.py +6 -1
  203. helm/benchmark/scenarios/anthropic_hh_rlhf_scenario.py +5 -3
  204. helm/benchmark/scenarios/anthropic_red_team_scenario.py +1 -1
  205. helm/benchmark/scenarios/audio_language/__init__.py +0 -0
  206. helm/benchmark/scenarios/audio_language/air_bench_chat_scenario.py +128 -0
  207. helm/benchmark/scenarios/audio_language/air_bench_foundation_scenario.py +154 -0
  208. helm/benchmark/scenarios/audio_language/ami_scenario.py +96 -0
  209. helm/benchmark/scenarios/audio_language/audio_mnist_scenario.py +62 -0
  210. helm/benchmark/scenarios/audio_language/audio_pairs_scenario.py +62 -0
  211. helm/benchmark/scenarios/audio_language/audiocaps_scenario.py +59 -0
  212. helm/benchmark/scenarios/audio_language/casual_conversations2_scenario.py +152 -0
  213. helm/benchmark/scenarios/audio_language/common_voice_15_scenario.py +99 -0
  214. helm/benchmark/scenarios/audio_language/covost2_scenario.py +163 -0
  215. helm/benchmark/scenarios/audio_language/fleurs_fairness_scenario.py +83 -0
  216. helm/benchmark/scenarios/audio_language/fleurs_scenario.py +312 -0
  217. helm/benchmark/scenarios/audio_language/iemocap_audio_scenario.py +83 -0
  218. helm/benchmark/scenarios/audio_language/librispeech_fairness_scenario.py +96 -0
  219. helm/benchmark/scenarios/audio_language/librispeech_scenario.py +80 -0
  220. helm/benchmark/scenarios/audio_language/meld_audio_scenario.py +113 -0
  221. helm/benchmark/scenarios/audio_language/multilingual_librispeech_scenario.py +80 -0
  222. helm/benchmark/scenarios/audio_language/mustard_scenario.py +142 -0
  223. helm/benchmark/scenarios/audio_language/mutox_scenario.py +254 -0
  224. helm/benchmark/scenarios/audio_language/parade_scenario.py +97 -0
  225. helm/benchmark/scenarios/audio_language/speech_robust_bench_scenario.py +124 -0
  226. helm/benchmark/scenarios/audio_language/vocal_sound_scenario.py +69 -0
  227. helm/benchmark/scenarios/audio_language/voice_jailbreak_attacks_scenario.py +87 -0
  228. helm/benchmark/scenarios/audio_language/voxceleb2_scenario.py +106 -0
  229. helm/benchmark/scenarios/autobencher_capabilities_scenario.py +68 -0
  230. helm/benchmark/scenarios/autobencher_safety_scenario.py +51 -0
  231. helm/benchmark/scenarios/babi_qa_scenario.py +1 -1
  232. helm/benchmark/scenarios/banking77_scenario.py +6 -1
  233. helm/benchmark/scenarios/bbq_scenario.py +1 -1
  234. helm/benchmark/scenarios/big_bench_scenario.py +11 -1
  235. helm/benchmark/scenarios/bigcodebench_scenario.py +58 -0
  236. helm/benchmark/scenarios/bird_sql_scenario.py +94 -0
  237. helm/benchmark/scenarios/bird_sql_scenario_helper.py +118 -0
  238. helm/benchmark/scenarios/blimp_scenario.py +1 -1
  239. helm/benchmark/scenarios/bold_scenario.py +1 -1
  240. helm/benchmark/scenarios/boolq_scenario.py +1 -1
  241. helm/benchmark/scenarios/casehold_scenario.py +79 -0
  242. helm/benchmark/scenarios/chw_care_plan_scenario.py +105 -0
  243. helm/benchmark/scenarios/civil_comments_scenario.py +1 -1
  244. helm/benchmark/scenarios/clear_scenario.py +153 -0
  245. helm/benchmark/scenarios/cleva_scenario.py +2 -2
  246. helm/benchmark/scenarios/code_scenario.py +17 -4
  247. helm/benchmark/scenarios/commonsense_scenario.py +1 -1
  248. helm/benchmark/scenarios/conv_fin_qa_calc_scenario.py +97 -0
  249. helm/benchmark/scenarios/copyright_scenario.py +1 -1
  250. helm/benchmark/scenarios/covid_dialog_scenario.py +10 -1
  251. helm/benchmark/scenarios/cti_to_mitre_scenario.py +240 -0
  252. helm/benchmark/scenarios/custom_mcqa_scenario.py +1 -1
  253. helm/benchmark/scenarios/czech_bank_qa_scenario.py +130 -0
  254. helm/benchmark/scenarios/decodingtrust_adv_demonstration_scenario.py +1 -1
  255. helm/benchmark/scenarios/decodingtrust_privacy_scenario.py +1 -1
  256. helm/benchmark/scenarios/decodingtrust_stereotype_bias_scenario.py +1 -1
  257. helm/benchmark/scenarios/decodingtrust_toxicity_prompts_scenario.py +1 -1
  258. helm/benchmark/scenarios/dialogue_scenarios.py +13 -2
  259. helm/benchmark/scenarios/dischargeme_scenario.py +157 -0
  260. helm/benchmark/scenarios/disinformation_scenario.py +10 -1
  261. helm/benchmark/scenarios/dyck_language_scenario.py +10 -1
  262. helm/benchmark/scenarios/echr_judgment_classification_scenario.py +113 -0
  263. helm/benchmark/scenarios/ehr_sql_scenario.py +131 -0
  264. helm/benchmark/scenarios/ehrshot_scenario.py +1546 -0
  265. helm/benchmark/scenarios/enem_challenge_scenario.py +58 -0
  266. helm/benchmark/scenarios/entity_data_imputation_scenario.py +11 -1
  267. helm/benchmark/scenarios/entity_matching_scenario.py +12 -2
  268. helm/benchmark/scenarios/financial_phrasebank_scenario.py +94 -0
  269. helm/benchmark/scenarios/gold_commodity_news_scenario.py +124 -0
  270. helm/benchmark/scenarios/gpqa_scenario.py +80 -0
  271. helm/benchmark/scenarios/grammar_scenario.py +2 -2
  272. helm/benchmark/scenarios/gsm_scenario.py +10 -1
  273. helm/benchmark/scenarios/harm_bench_gcg_transfer_scenario.py +50 -0
  274. helm/benchmark/scenarios/harm_bench_scenario.py +1 -1
  275. helm/benchmark/scenarios/headqa_scenario.py +131 -0
  276. helm/benchmark/scenarios/helpdesk_call_summarization_scenario.py +37 -0
  277. helm/benchmark/scenarios/ice_scenario.py +8 -4
  278. helm/benchmark/scenarios/ifeval_scenario.py +53 -0
  279. helm/benchmark/scenarios/imdb_ptbr_scenario.py +60 -0
  280. helm/benchmark/scenarios/imdb_scenario.py +11 -2
  281. helm/benchmark/scenarios/infinite_bench_sum_scenario.py +82 -0
  282. helm/benchmark/scenarios/interactive_qa_mmlu_scenario.py +2 -2
  283. helm/benchmark/scenarios/koala_scenario.py +1 -1
  284. helm/benchmark/scenarios/legal_contract_summarization_scenario.py +129 -0
  285. helm/benchmark/scenarios/legal_opinion_sentiment_classification_scenario.py +77 -0
  286. helm/benchmark/scenarios/legal_summarization_scenario.py +11 -1
  287. helm/benchmark/scenarios/legal_support_scenario.py +11 -1
  288. helm/benchmark/scenarios/legalbench_scenario.py +22 -3
  289. helm/benchmark/scenarios/lex_glue_scenario.py +12 -2
  290. helm/benchmark/scenarios/lextreme_scenario.py +11 -1
  291. helm/benchmark/scenarios/live_qa_scenario.py +1 -1
  292. helm/benchmark/scenarios/lm_entry_scenario.py +1 -1
  293. helm/benchmark/scenarios/lsat_qa_scenario.py +1 -1
  294. helm/benchmark/scenarios/math_scenario.py +9 -1
  295. helm/benchmark/scenarios/me_q_sum_scenario.py +10 -1
  296. helm/benchmark/scenarios/med_dialog_scenario.py +22 -24
  297. helm/benchmark/scenarios/med_mcqa_scenario.py +10 -1
  298. helm/benchmark/scenarios/med_paragraph_simplification_scenario.py +10 -1
  299. helm/benchmark/scenarios/med_qa_scenario.py +10 -1
  300. helm/benchmark/scenarios/medalign_scenario.py +88 -0
  301. helm/benchmark/scenarios/medalign_scenario_helper.py +429 -0
  302. helm/benchmark/scenarios/medbullets_scenario.py +140 -0
  303. helm/benchmark/scenarios/medcalc_bench_scenario.py +125 -0
  304. helm/benchmark/scenarios/medec_scenario.py +120 -0
  305. helm/benchmark/scenarios/medhallu_scenario.py +66 -0
  306. helm/benchmark/scenarios/medi_qa_scenario.py +105 -0
  307. helm/benchmark/scenarios/medication_qa_scenario.py +2 -2
  308. helm/benchmark/scenarios/mental_health_scenario.py +112 -0
  309. helm/benchmark/scenarios/mimic_bhc_scenario.py +98 -0
  310. helm/benchmark/scenarios/mimic_rrs_scenario.py +89 -0
  311. helm/benchmark/scenarios/mimiciv_billing_code_scenario.py +71 -0
  312. helm/benchmark/scenarios/mmlu_clinical_afr_scenario.py +74 -0
  313. helm/benchmark/scenarios/mmlu_pro_scenario.py +95 -0
  314. helm/benchmark/scenarios/mmlu_scenario.py +11 -1
  315. helm/benchmark/scenarios/msmarco_scenario.py +1 -1
  316. helm/benchmark/scenarios/mtsamples_procedures_scenario.py +141 -0
  317. helm/benchmark/scenarios/mtsamples_replicate_scenario.py +141 -0
  318. helm/benchmark/scenarios/n2c2_ct_matching_scenario.py +271 -0
  319. helm/benchmark/scenarios/narrativeqa_scenario.py +1 -1
  320. helm/benchmark/scenarios/natural_qa_scenario.py +1 -1
  321. helm/benchmark/scenarios/newsqa_scenario.py +1 -1
  322. helm/benchmark/scenarios/numeracy_scenario.py +10 -1
  323. helm/benchmark/scenarios/oab_exams_scenario.py +57 -0
  324. helm/benchmark/scenarios/omni_math_scenario.py +53 -0
  325. helm/benchmark/scenarios/open_assistant_scenario.py +11 -2
  326. helm/benchmark/scenarios/opinions_qa_scenario.py +1 -1
  327. helm/benchmark/scenarios/pubmed_qa_scenario.py +54 -43
  328. helm/benchmark/scenarios/quac_scenario.py +10 -1
  329. helm/benchmark/scenarios/race_based_med_scenario.py +142 -0
  330. helm/benchmark/scenarios/raft_scenario.py +17 -2
  331. helm/benchmark/scenarios/real_toxicity_prompts_scenario.py +1 -1
  332. helm/benchmark/scenarios/ruler_qa_scenario_helper.py +171 -0
  333. helm/benchmark/scenarios/ruler_qa_scenarios.py +88 -0
  334. helm/benchmark/scenarios/scenario.py +9 -1
  335. helm/benchmark/scenarios/{bhasa_scenario.py → seahelm_scenario.py} +7 -2
  336. helm/benchmark/scenarios/self_instruct_scenario.py +1 -1
  337. helm/benchmark/scenarios/shc_bmt_scenario.py +69 -0
  338. helm/benchmark/scenarios/shc_cdi_scenario.py +70 -0
  339. helm/benchmark/scenarios/shc_conf_scenario.py +70 -0
  340. helm/benchmark/scenarios/shc_ent_scenario.py +72 -0
  341. helm/benchmark/scenarios/shc_gip_scenario.py +66 -0
  342. helm/benchmark/scenarios/shc_ptbm_scenario.py +76 -0
  343. helm/benchmark/scenarios/shc_sei_scenario.py +89 -0
  344. helm/benchmark/scenarios/shc_sequoia_scenario.py +69 -0
  345. helm/benchmark/scenarios/simple_safety_tests_scenario.py +1 -1
  346. helm/benchmark/scenarios/spider_scenario.py +91 -0
  347. helm/benchmark/scenarios/starr_patient_instructions_scenario.py +90 -0
  348. helm/benchmark/scenarios/summarization_scenario.py +11 -1
  349. helm/benchmark/scenarios/sumosum_scenario.py +157 -0
  350. helm/benchmark/scenarios/synthetic_efficiency_scenario.py +1 -1
  351. helm/benchmark/scenarios/synthetic_reasoning_natural_scenario.py +11 -1
  352. helm/benchmark/scenarios/synthetic_reasoning_scenario.py +11 -1
  353. helm/benchmark/scenarios/test_bigcodebench_scenario.py +26 -0
  354. helm/benchmark/scenarios/test_czech_bank_qa_scenario.py +18 -0
  355. helm/benchmark/scenarios/test_enem_challenge_scenario.py +53 -0
  356. helm/benchmark/scenarios/test_ewok_scenario.py +6 -2
  357. helm/benchmark/scenarios/test_gold_commodity_news_scenario.py +18 -0
  358. helm/benchmark/scenarios/test_gpqa_scenario.py +44 -0
  359. helm/benchmark/scenarios/test_ifeval_scenario.py +36 -0
  360. helm/benchmark/scenarios/test_imdb_ptbr_scenario.py +27 -0
  361. helm/benchmark/scenarios/test_infinite_bench_sum_scenario.py +46 -0
  362. helm/benchmark/scenarios/test_math_scenario.py +1 -0
  363. helm/benchmark/scenarios/test_mmlu_clinical_afr_scenario.py +21 -0
  364. helm/benchmark/scenarios/test_mmlu_pro_scenario.py +53 -0
  365. helm/benchmark/scenarios/test_oab_exams_scenario.py +51 -0
  366. helm/benchmark/scenarios/test_omni_math_scenario.py +27 -0
  367. helm/benchmark/scenarios/test_tweetsentbr_scenario.py +24 -0
  368. helm/benchmark/scenarios/test_wildbench_scenario.py +15 -0
  369. helm/benchmark/scenarios/test_winogrande_afr_scenario.py +19 -0
  370. helm/benchmark/scenarios/thai_exam_scenario.py +10 -1
  371. helm/benchmark/scenarios/the_pile_scenario.py +1 -1
  372. helm/benchmark/scenarios/truthful_qa_scenario.py +10 -1
  373. helm/benchmark/scenarios/tweetsentbr_scenario.py +66 -0
  374. helm/benchmark/scenarios/twitter_aae_scenario.py +1 -1
  375. helm/benchmark/scenarios/unitxt_scenario.py +8 -2
  376. helm/benchmark/scenarios/verifiability_judgment_scenario.py +1 -1
  377. helm/benchmark/scenarios/vicuna_scenario.py +1 -1
  378. helm/benchmark/scenarios/vision_language/blink_scenario.py +140 -0
  379. helm/benchmark/scenarios/vision_language/mm_star_scenario.py +95 -0
  380. helm/benchmark/scenarios/vision_language/vqa_rad_scenario.py +88 -0
  381. helm/benchmark/scenarios/wikifact_scenario.py +11 -1
  382. helm/benchmark/scenarios/wikitext_103_scenario.py +1 -1
  383. helm/benchmark/scenarios/wildbench_scenario.py +83 -0
  384. helm/benchmark/scenarios/winogrande_afr_scenario.py +78 -0
  385. helm/benchmark/scenarios/wmt_14_scenario.py +14 -2
  386. helm/benchmark/scenarios/xstest_scenario.py +1 -1
  387. helm/benchmark/server.py +11 -0
  388. helm/benchmark/slurm_runner.py +1 -1
  389. helm/benchmark/static/schema_audio.yaml +752 -0
  390. helm/benchmark/static/schema_autobencher.yaml +150 -0
  391. helm/benchmark/static/schema_call_center.yaml +97 -60
  392. helm/benchmark/static/schema_capabilities.yaml +254 -0
  393. helm/benchmark/static/schema_czech_bank.yaml +148 -0
  394. helm/benchmark/static/schema_enem_challenge.yaml +146 -0
  395. helm/benchmark/static/schema_enterprise.yaml +298 -0
  396. helm/benchmark/static/schema_finance.yaml +14 -12
  397. helm/benchmark/static/schema_heim.yaml +1389 -0
  398. helm/benchmark/static/{schema_medical.yaml → schema_long_context.yaml} +67 -82
  399. helm/benchmark/static/schema_medhelm.yaml +1081 -0
  400. helm/benchmark/static/schema_mmlu_winogrande_afr.yaml +1045 -0
  401. helm/benchmark/static/schema_safety.yaml +18 -1
  402. helm/benchmark/static/{schema_bhasa.yaml → schema_seahelm.yaml} +30 -16
  403. helm/benchmark/static/schema_social_audio.yaml +224 -0
  404. helm/benchmark/static/schema_sql.yaml +171 -0
  405. helm/benchmark/static/{schema_tables.yaml → schema_torr.yaml} +169 -36
  406. helm/benchmark/static/schema_tweetsentbr.yaml +146 -0
  407. helm/benchmark/static/schema_vhelm.yaml +109 -36
  408. helm/benchmark/static_build/assets/helm-safety-2907a7b6.png +0 -0
  409. helm/benchmark/static_build/assets/index-262903c1.js +10 -0
  410. helm/benchmark/static_build/assets/index-42060d71.css +1 -0
  411. helm/benchmark/static_build/assets/medhelm-overview-3ddfcd65.png +0 -0
  412. helm/benchmark/static_build/assets/{react-d4a0b69b.js → react-f82877fd.js} +1 -1
  413. helm/benchmark/static_build/assets/{recharts-6d337683.js → recharts-4037aff0.js} +1 -1
  414. helm/benchmark/static_build/assets/{tremor-54a99cc4.js → tremor-9cefc3c5.js} +1 -1
  415. helm/benchmark/static_build/config.js +1 -1
  416. helm/benchmark/static_build/index.html +5 -5
  417. helm/benchmark/window_services/default_window_service.py +1 -1
  418. helm/benchmark/window_services/encoder_decoder_window_service.py +1 -1
  419. helm/benchmark/window_services/ice_window_service.py +1 -1
  420. helm/benchmark/window_services/image_generation/lexica_search_window_service.py +1 -1
  421. helm/benchmark/window_services/image_generation/openai_dalle_window_service.py +1 -1
  422. helm/benchmark/window_services/local_window_service.py +2 -2
  423. helm/benchmark/window_services/test_anthropic_window_service.py +3 -3
  424. helm/benchmark/window_services/test_bloom_window_service.py +3 -3
  425. helm/benchmark/window_services/test_gpt2_window_service.py +7 -2
  426. helm/benchmark/window_services/test_gpt4_window_service.py +8 -3
  427. helm/benchmark/window_services/test_gptj_window_service.py +8 -3
  428. helm/benchmark/window_services/test_gptneox_window_service.py +3 -3
  429. helm/benchmark/window_services/test_openai_window_service.py +8 -3
  430. helm/benchmark/window_services/test_opt_window_service.py +3 -3
  431. helm/benchmark/window_services/test_palmyra_window_service.py +3 -3
  432. helm/benchmark/window_services/test_t0pp_window_service.py +3 -3
  433. helm/benchmark/window_services/test_t511b_window_service.py +3 -3
  434. helm/benchmark/window_services/test_ul2_window_service.py +3 -3
  435. helm/benchmark/window_services/test_utils.py +1 -1
  436. helm/benchmark/window_services/test_yalm_window_service.py +3 -3
  437. helm/benchmark/window_services/yalm_window_service.py +1 -1
  438. helm/clients/ai21_client.py +3 -3
  439. helm/clients/aleph_alpha_client.py +1 -1
  440. helm/clients/audio_language/__init__.py +0 -0
  441. helm/clients/audio_language/diva_llama_client.py +118 -0
  442. helm/clients/audio_language/llama_omni_client.py +198 -0
  443. helm/clients/audio_language/qwen2_audiolm_client.py +188 -0
  444. helm/clients/audio_language/qwen_audiolm_client.py +150 -0
  445. helm/clients/auto_client.py +4 -2
  446. helm/clients/azure_openai_client.py +55 -0
  447. helm/clients/bedrock_client.py +201 -7
  448. helm/clients/bedrock_utils.py +33 -0
  449. helm/clients/clip_scorers/clip_scorer.py +1 -1
  450. helm/clients/clip_scorers/multilingual_clip_scorer.py +1 -1
  451. helm/clients/cohere_client.py +3 -3
  452. helm/clients/google_client.py +1 -1
  453. helm/clients/http_model_client.py +1 -1
  454. helm/clients/huggingface_client.py +10 -18
  455. helm/clients/ibm_client.py +267 -0
  456. helm/clients/image_generation/adobe_vision_client.py +1 -1
  457. helm/clients/image_generation/aleph_alpha_image_generation_client.py +1 -1
  458. helm/clients/image_generation/cogview2/sr_pipeline/__init__.py +3 -3
  459. helm/clients/image_generation/cogview2/sr_pipeline/direct_sr.py +5 -2
  460. helm/clients/image_generation/cogview2/sr_pipeline/iterative_sr.py +5 -2
  461. helm/clients/image_generation/cogview2/sr_pipeline/sr_group.py +2 -2
  462. helm/clients/image_generation/cogview2_client.py +1 -1
  463. helm/clients/image_generation/dalle2_client.py +1 -1
  464. helm/clients/image_generation/dalle3_client.py +2 -2
  465. helm/clients/image_generation/dalle_mini/__init__.py +1 -1
  466. helm/clients/image_generation/dalle_mini/data.py +1 -1
  467. helm/clients/image_generation/dalle_mini/model/__init__.py +5 -5
  468. helm/clients/image_generation/dalle_mini/model/configuration.py +1 -1
  469. helm/clients/image_generation/dalle_mini/model/modeling.py +2 -2
  470. helm/clients/image_generation/dalle_mini/model/processor.py +4 -4
  471. helm/clients/image_generation/dalle_mini/model/tokenizer.py +1 -1
  472. helm/clients/image_generation/dalle_mini/vqgan_jax/__init__.py +1 -1
  473. helm/clients/image_generation/dalle_mini/vqgan_jax/convert_pt_model_to_jax.py +2 -2
  474. helm/clients/image_generation/dalle_mini/vqgan_jax/modeling_flax_vqgan.py +1 -1
  475. helm/clients/image_generation/dalle_mini_client.py +1 -1
  476. helm/clients/image_generation/deep_floyd_client.py +1 -1
  477. helm/clients/image_generation/huggingface_diffusers_client.py +1 -1
  478. helm/clients/image_generation/lexica_client.py +1 -1
  479. helm/clients/image_generation/mindalle/models/__init__.py +6 -6
  480. helm/clients/image_generation/mindalle/models/stage1/vqgan.py +1 -1
  481. helm/clients/image_generation/mindalle/models/stage2/transformer.py +1 -1
  482. helm/clients/image_generation/mindalle/utils/__init__.py +3 -3
  483. helm/clients/image_generation/mindalle_client.py +1 -1
  484. helm/clients/image_generation/together_image_generation_client.py +1 -1
  485. helm/clients/lit_gpt_client.py +2 -2
  486. helm/clients/mistral_client.py +62 -18
  487. helm/clients/nvidia_nim_client.py +0 -3
  488. helm/clients/openai_client.py +241 -22
  489. helm/clients/palmyra_client.py +1 -4
  490. helm/clients/reka_client.py +1 -1
  491. helm/clients/stanfordhealthcare_azure_openai_client.py +58 -0
  492. helm/clients/stanfordhealthcare_claude_client.py +31 -0
  493. helm/clients/stanfordhealthcare_google_client.py +43 -0
  494. helm/clients/stanfordhealthcare_http_model_client.py +93 -0
  495. helm/clients/stanfordhealthcare_openai_client.py +62 -0
  496. helm/clients/stanfordhealthcare_shc_openai_client.py +42 -0
  497. helm/clients/test_client.py +1 -1
  498. helm/clients/test_together_client.py +6 -1
  499. helm/clients/together_client.py +47 -7
  500. helm/clients/upstage_client.py +23 -0
  501. helm/clients/vertexai_client.py +39 -13
  502. helm/clients/vision_language/open_flamingo/__init__.py +2 -2
  503. helm/clients/vision_language/open_flamingo/src/factory.py +3 -3
  504. helm/clients/vision_language/open_flamingo/src/flamingo.py +2 -2
  505. helm/clients/vision_language/open_flamingo/src/flamingo_lm.py +2 -2
  506. helm/clients/vision_language/qwen2_vlm_client.py +175 -0
  507. helm/clients/vllm_client.py +4 -6
  508. helm/clients/yi_client.py +0 -3
  509. helm/common/audio_utils.py +111 -0
  510. helm/common/file_caches/local_file_cache.py +1 -1
  511. helm/common/file_caches/test_local_file_cache.py +1 -1
  512. helm/common/images_utils.py +2 -2
  513. helm/common/media_object.py +2 -2
  514. helm/common/multimodal_request_utils.py +26 -0
  515. helm/common/reeval_parameters.py +12 -0
  516. helm/common/request.py +6 -2
  517. helm/common/response_format.py +18 -0
  518. helm/common/test_media_object.py +1 -1
  519. helm/config/model_deployments.yaml +1112 -19
  520. helm/config/model_metadata.yaml +985 -44
  521. helm/config/tokenizer_configs.yaml +379 -3
  522. helm/proxy/cli.py +2 -2
  523. helm/proxy/example_queries.py +1 -1
  524. helm/proxy/server.py +11 -4
  525. helm/proxy/services/remote_service.py +1 -1
  526. helm/proxy/services/server_service.py +1 -1
  527. helm/proxy/services/test_remote_service.py +2 -2
  528. helm/proxy/services/test_service.py +1 -1
  529. helm/proxy/static/general.js +122 -0
  530. helm/proxy/static/help.html +99 -0
  531. helm/proxy/static/index.css +57 -0
  532. helm/proxy/static/index.html +40 -0
  533. helm/proxy/static/index.js +456 -0
  534. helm/proxy/static/info-icon.png +0 -0
  535. helm/proxy/test_retry.py +1 -1
  536. helm/proxy/token_counters/auto_token_counter.py +1 -1
  537. helm/tokenizers/aleph_alpha_tokenizer.py +1 -1
  538. helm/tokenizers/caching_tokenizer.py +2 -30
  539. helm/tokenizers/http_model_tokenizer.py +1 -1
  540. helm/tokenizers/huggingface_tokenizer.py +2 -2
  541. helm/tokenizers/lit_gpt_tokenizer.py +1 -1
  542. helm/tokenizers/test_anthropic_tokenizer.py +6 -2
  543. helm/tokenizers/test_huggingface_tokenizer.py +1 -1
  544. helm/tokenizers/test_yalm_tokenizer.py +1 -1
  545. helm/tokenizers/tiktoken_tokenizer.py +1 -1
  546. helm/tokenizers/tokenizer.py +3 -1
  547. helm/tokenizers/yalm_tokenizer.py +3 -3
  548. helm/tokenizers/yalm_tokenizer_data/test_yalm_tokenizer.py +1 -1
  549. crfm_helm-0.5.4.dist-info/METADATA +0 -350
  550. crfm_helm-0.5.4.dist-info/RECORD +0 -697
  551. helm/benchmark/metrics/bhasa_metrics_specs.py +0 -10
  552. helm/benchmark/static_build/assets/01-694cb9b7.png +0 -0
  553. helm/benchmark/static_build/assets/accenture-6f97eeda.png +0 -0
  554. helm/benchmark/static_build/assets/ai21-0eb91ec3.png +0 -0
  555. helm/benchmark/static_build/assets/aisingapore-6dfc9acf.png +0 -0
  556. helm/benchmark/static_build/assets/aleph-alpha-7ce10034.png +0 -0
  557. helm/benchmark/static_build/assets/anthropic-70d8bc39.png +0 -0
  558. helm/benchmark/static_build/assets/bigscience-7f0400c0.png +0 -0
  559. helm/benchmark/static_build/assets/cohere-3550c6cb.png +0 -0
  560. helm/benchmark/static_build/assets/cresta-9e22b983.png +0 -0
  561. helm/benchmark/static_build/assets/cuhk-8c5631e9.png +0 -0
  562. helm/benchmark/static_build/assets/eleutherai-b9451114.png +0 -0
  563. helm/benchmark/static_build/assets/google-06d997ad.png +0 -0
  564. helm/benchmark/static_build/assets/index-05c76bb1.css +0 -1
  565. helm/benchmark/static_build/assets/index-3ee38b3d.js +0 -10
  566. helm/benchmark/static_build/assets/meta-5580e9f1.png +0 -0
  567. helm/benchmark/static_build/assets/microsoft-f5ee5016.png +0 -0
  568. helm/benchmark/static_build/assets/mistral-18e1be23.png +0 -0
  569. helm/benchmark/static_build/assets/nvidia-86fa75c1.png +0 -0
  570. helm/benchmark/static_build/assets/openai-3f8653e4.png +0 -0
  571. helm/benchmark/static_build/assets/scb10x-204bd786.png +0 -0
  572. helm/benchmark/static_build/assets/tii-24de195c.png +0 -0
  573. helm/benchmark/static_build/assets/together-a665a35b.png +0 -0
  574. helm/benchmark/static_build/assets/tsinghua-keg-97d4b395.png +0 -0
  575. helm/benchmark/static_build/assets/wellsfargo-a86a6c4a.png +0 -0
  576. helm/benchmark/static_build/assets/yandex-38e09d70.png +0 -0
  577. helm/tokenizers/anthropic_tokenizer.py +0 -52
  578. {crfm_helm-0.5.4.dist-info → crfm_helm-0.5.5.dist-info}/entry_points.txt +0 -0
  579. {crfm_helm-0.5.4.dist-info → crfm_helm-0.5.5.dist-info/licenses}/LICENSE +0 -0
  580. {crfm_helm-0.5.4.dist-info → crfm_helm-0.5.5.dist-info}/top_level.txt +0 -0
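
The hunks below cover the `helm/tokenizers/*` modules and the removed 0.5.4 METADATA. Besides removing the dedicated Anthropic tokenizer and the duplicate `cleanup_str`/`cleanup_tokens` definitions in `caching_tokenizer.py`, the recurring change is that package-relative imports are rewritten as absolute `helm.tokenizers.*` imports. A minimal sketch of that pattern, assuming crfm-helm 0.5.5 is installed (the `print` is only illustrative):

```python
# Before (0.5.4) the tokenizer modules used package-relative imports, e.g.:
#     from .caching_tokenizer import CachingTokenizer
# In 0.5.5 the same dependency is imported by its absolute module path, which
# resolves identically no matter how the module is loaded:
from helm.tokenizers.caching_tokenizer import CachingTokenizer

print(CachingTokenizer.__module__)  # helm.tokenizers.caching_tokenizer
```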

helm/tokenizers/aleph_alpha_tokenizer.py
@@ -10,7 +10,7 @@ from helm.common.tokenization_request import (
     DecodeRequest,
     TokenizationToken,
 )
-from .caching_tokenizer import CachingTokenizer
+from helm.tokenizers.caching_tokenizer import CachingTokenizer
 
 try:
     from aleph_alpha_client import Client as AlephAlphaPythonClient

helm/tokenizers/caching_tokenizer.py
@@ -1,6 +1,6 @@
 from abc import abstractmethod
 from dataclasses import asdict
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List
 
 from helm.common.cache import Cache, CacheConfig
 from helm.common.request import wrap_request_time
@@ -11,7 +11,7 @@ from helm.common.tokenization_request import (
     DecodeRequestResult,
     TokenizationToken,
 )
-from .tokenizer import Tokenizer
+from helm.tokenizers.tokenizer import Tokenizer
 
 
 class CachingTokenizer(Tokenizer):
@@ -153,31 +153,3 @@ class CachingTokenizer(Tokenizer):
             )
         except Exception as error:
             raise ValueError(f"Failed to decode tokens with {self.__class__.__name__} tokenizer: {error}") from error
-
-
-def cleanup_str(token: str, tokenizer_name: Optional[str] = None) -> str:
-    """
-    Certain tokenizers introduce special characters to represent spaces, such as
-    "Ġ" or "▁". This function removes those characters.
-    """
-    if tokenizer_name in [
-        "TsinghuaKEG/ice",
-        "bigscience/T0pp",
-        "google/t5-11b",
-        "google/flan-t5-xxl",
-        "google/ul2",
-        "Yandex/yalm",
-        "ai21/j1",
-        "together",
-    ]:
-        return token.replace("▁", " ")
-    elif tokenizer_name is not None and tokenizer_name.startswith("huggingface"):
-        return token.replace("Ġ", " ")
-    return token
-
-
-def cleanup_tokens(tokens: List[str], tokenizer_name: Optional[str] = None) -> List[str]:
-    """
-    Applies `cleanup_str` to each token in `tokens`.
-    """
-    return [cleanup_str(token, tokenizer_name) for token in tokens]

helm/tokenizers/http_model_tokenizer.py
@@ -11,7 +11,7 @@ from helm.common.tokenization_request import (
     TokenizationRequestResult,
     TokenizationToken,
 )
-from .tokenizer import Tokenizer
+from helm.tokenizers.tokenizer import Tokenizer
 
 import requests
 

helm/tokenizers/huggingface_tokenizer.py
@@ -7,8 +7,8 @@ from helm.common.concurrency import ThreadSafeWrapper
 from transformers import AutoTokenizer, PreTrainedTokenizerBase
 
 from helm.common.hierarchical_logger import htrack_block, hlog
-from .caching_tokenizer import CachingTokenizer
-from .tokenizer import cleanup_tokens
+from helm.tokenizers.caching_tokenizer import CachingTokenizer
+from helm.tokenizers.tokenizer import cleanup_tokens
 
 
 WrappedPreTrainedTokenizer = ThreadSafeWrapper[PreTrainedTokenizerBase]

helm/tokenizers/lit_gpt_tokenizer.py
@@ -5,7 +5,7 @@ import torch
 
 from helm.common.cache import CacheConfig
 from helm.common.optional_dependencies import OptionalDependencyNotInstalled
-from .caching_tokenizer import CachingTokenizer
+from helm.tokenizers.caching_tokenizer import CachingTokenizer
 
 try:
     from lit_gpt import Tokenizer as InternalTokenizer

helm/tokenizers/test_anthropic_tokenizer.py
@@ -10,7 +10,7 @@ from helm.common.tokenization_request import (
     TokenizationRequest,
     TokenizationRequestResult,
 )
-from helm.tokenizers.anthropic_tokenizer import AnthropicTokenizer
+from helm.tokenizers.huggingface_tokenizer import HuggingFaceTokenizer
 
 
 class TestAnthropicTokenizer:
@@ -21,7 +21,11 @@ class TestAnthropicTokenizer:
     def setup_method(self, method):
         cache_file = tempfile.NamedTemporaryFile(delete=False)
         self.cache_path: str = cache_file.name
-        self.tokenizer = AnthropicTokenizer(SqliteCacheConfig(self.cache_path))
+        self.tokenizer = HuggingFaceTokenizer(
+            SqliteCacheConfig(self.cache_path),
+            tokenizer_name="anthropic/claude",
+            pretrained_model_name_or_path="Xenova/claude-tokenizer",
+        )
 
     def teardown_method(self, method):
         os.remove(self.cache_path)
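
The dedicated `AnthropicTokenizer` is removed in this release (`helm/tokenizers/anthropic_tokenizer.py` in the file list above), and this test now exercises the generic `HuggingFaceTokenizer` backed by the community `Xenova/claude-tokenizer` files. A minimal sketch of constructing the same configuration outside the test, assuming the import locations shown in the hunk and the `TokenizationRequest(tokenizer=..., text=...)` shape used by HELM's tokenizer API:

```python
import tempfile

from helm.common.cache import SqliteCacheConfig  # assumed location, as in the test above
from helm.common.tokenization_request import TokenizationRequest
from helm.tokenizers.huggingface_tokenizer import HuggingFaceTokenizer

# Same configuration as the updated test: a Hugging Face tokenizer registered
# under the "anthropic/claude" name, loading the Xenova/claude-tokenizer files.
cache_file = tempfile.NamedTemporaryFile(delete=False)
tokenizer = HuggingFaceTokenizer(
    SqliteCacheConfig(cache_file.name),
    tokenizer_name="anthropic/claude",
    pretrained_model_name_or_path="Xenova/claude-tokenizer",
)

result = tokenizer.tokenize(TokenizationRequest(tokenizer="anthropic/claude", text="Hello, world!"))
print([token.value for token in result.tokens])
```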

helm/tokenizers/test_huggingface_tokenizer.py
@@ -10,7 +10,7 @@ from helm.common.tokenization_request import (
     TokenizationRequest,
     TokenizationRequestResult,
 )
-from .huggingface_tokenizer import HuggingFaceTokenizer
+from helm.tokenizers.huggingface_tokenizer import HuggingFaceTokenizer
 
 
 class TestHuggingFaceGPT2Tokenizer:

helm/tokenizers/test_yalm_tokenizer.py
@@ -10,7 +10,7 @@ from helm.common.tokenization_request import (
     TokenizationRequest,
     TokenizationRequestResult,
 )
-from .yalm_tokenizer import YaLMTokenizer
+from helm.tokenizers.yalm_tokenizer import YaLMTokenizer
 
 
 class TestYaLMTokenizer:

helm/tokenizers/tiktoken_tokenizer.py
@@ -1,7 +1,7 @@
 from typing import Any, Dict
 
 from helm.common.optional_dependencies import handle_module_not_found_error
-from .caching_tokenizer import CachingTokenizer
+from helm.tokenizers.caching_tokenizer import CachingTokenizer
 
 try:
     import tiktoken

helm/tokenizers/tokenizer.py
@@ -41,7 +41,9 @@ def cleanup_str(token: str, tokenizer_name: Optional[str] = None) -> str:
         "together",
     ]:
         return token.replace("▁", " ")
-    elif tokenizer_name is not None and tokenizer_name.startswith("huggingface"):
+    elif tokenizer_name is not None and (
+        tokenizer_name.startswith("huggingface") or tokenizer_name == "anthropic/claude"
+    ):
         return token.replace("Ġ", " ")
     return token
 
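
For context, the canonical `cleanup_str`/`cleanup_tokens` helpers live in `helm/tokenizers/tokenizer.py` (the duplicate copies in `caching_tokenizer.py` are deleted above), and this hunk extends the byte-level-BPE branch to the `anthropic/claude` tokenizer name. A rough self-contained illustration of the behavior, with the tokenizer-name list abridged from the source:

```python
from typing import List, Optional


def cleanup_str(token: str, tokenizer_name: Optional[str] = None) -> str:
    """Strip the space markers some tokenizers prepend: "▁" (SentencePiece) or "Ġ" (byte-level BPE)."""
    if tokenizer_name in ["Yandex/yalm", "ai21/j1", "together"]:  # abridged; see the full list above
        return token.replace("▁", " ")
    elif tokenizer_name is not None and (
        tokenizer_name.startswith("huggingface") or tokenizer_name == "anthropic/claude"
    ):
        return token.replace("Ġ", " ")
    return token


def cleanup_tokens(tokens: List[str], tokenizer_name: Optional[str] = None) -> List[str]:
    """Apply `cleanup_str` to each token."""
    return [cleanup_str(token, tokenizer_name) for token in tokens]


# "Ġ" marks a leading space in GPT-2-style byte-level BPE vocabularies.
print(cleanup_tokens(["Hello", "Ġworld", "Ġ!"], "huggingface/gpt2"))  # ['Hello', ' world', ' !']
```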

helm/tokenizers/yalm_tokenizer.py
@@ -1,9 +1,9 @@
 from typing import Any, Dict
 
 from helm.common.cache import CacheConfig
-from .caching_tokenizer import CachingTokenizer
-from .tokenizer import cleanup_tokens
-from .yalm_tokenizer_data.yalm_tokenizer import YaLMTokenizer as YaLMTokenizerInternal
+from helm.tokenizers.caching_tokenizer import CachingTokenizer
+from helm.tokenizers.tokenizer import cleanup_tokens
+from helm.tokenizers.yalm_tokenizer_data.yalm_tokenizer import YaLMTokenizer as YaLMTokenizerInternal
 
 
 class YaLMTokenizer(CachingTokenizer):

helm/tokenizers/yalm_tokenizer_data/test_yalm_tokenizer.py
@@ -1,7 +1,7 @@
 from typing import List
 
 from helm.common.general import singleton
-from .yalm_tokenizer import YaLMTokenizer
+from helm.tokenizers.yalm_tokenizer_data.yalm_tokenizer import YaLMTokenizer
 
 
 class TestYaLMTokenizer:
@@ -1,350 +0,0 @@
1
- Metadata-Version: 2.1
2
- Name: crfm-helm
3
- Version: 0.5.4
4
- Summary: Benchmark for language models
5
- Home-page: https://github.com/stanford-crfm/helm
6
- Author: Stanford CRFM
7
- Author-email: contact-crfm@stanford.edu
8
- License: Apache License 2.0
9
- Keywords: language models benchmarking
10
- Classifier: Programming Language :: Python :: 3
11
- Classifier: Programming Language :: Python :: 3 :: Only
12
- Classifier: Programming Language :: Python :: 3.9
13
- Classifier: Programming Language :: Python :: 3.10
14
- Classifier: Programming Language :: Python :: 3.11
15
- Classifier: License :: OSI Approved :: Apache Software License
16
- Requires-Python: <3.12,>=3.9
17
- Description-Content-Type: text/markdown
18
- License-File: LICENSE
19
- Requires-Dist: cattrs ~=22.2
20
- Requires-Dist: dacite ~=1.6
21
- Requires-Dist: importlib-resources ~=5.10
22
- Requires-Dist: Mako ~=1.2
23
- Requires-Dist: numpy ~=1.23
24
- Requires-Dist: pyhocon ~=0.3.59
25
- Requires-Dist: retrying ~=1.3
26
- Requires-Dist: spacy ~=3.5
27
- Requires-Dist: tqdm ~=4.64
28
- Requires-Dist: zstandard ~=0.18.0
29
- Requires-Dist: sqlitedict ~=1.7
30
- Requires-Dist: bottle ~=0.12.23
31
- Requires-Dist: datasets ~=2.17
32
- Requires-Dist: pyarrow >=11.0.0
33
- Requires-Dist: pyarrow-hotfix ~=0.6
34
- Requires-Dist: nltk <3.8.2,~=3.7
35
- Requires-Dist: rouge-score ~=0.1.2
36
- Requires-Dist: scipy ~=1.10
37
- Requires-Dist: uncertainty-calibration ~=0.1.4
38
- Requires-Dist: scikit-learn ~=1.1
39
- Requires-Dist: transformers ~=4.40
40
- Requires-Dist: torch <3.0.0,>=1.13.1
41
- Requires-Dist: torchvision <3.0.0,>=0.14.1
42
- Provides-Extra: accelerate
43
- Requires-Dist: accelerate ~=0.25 ; extra == 'accelerate'
44
- Provides-Extra: aleph-alpha
45
- Requires-Dist: aleph-alpha-client ~=2.14.0 ; extra == 'aleph-alpha'
46
- Requires-Dist: tokenizers >=0.13.3 ; extra == 'aleph-alpha'
47
- Provides-Extra: all
48
- Requires-Dist: crfm-helm[proxy-server] ; extra == 'all'
49
- Requires-Dist: crfm-helm[human-evaluation] ; extra == 'all'
50
- Requires-Dist: crfm-helm[scenarios] ; extra == 'all'
51
- Requires-Dist: crfm-helm[metrics] ; extra == 'all'
52
- Requires-Dist: crfm-helm[plots] ; extra == 'all'
53
- Requires-Dist: crfm-helm[decodingtrust] ; extra == 'all'
54
- Requires-Dist: crfm-helm[slurm] ; extra == 'all'
55
- Requires-Dist: crfm-helm[cleva] ; extra == 'all'
56
- Requires-Dist: crfm-helm[images] ; extra == 'all'
57
- Requires-Dist: crfm-helm[models] ; extra == 'all'
58
- Requires-Dist: crfm-helm[mongo] ; extra == 'all'
59
- Requires-Dist: crfm-helm[heim] ; extra == 'all'
60
- Requires-Dist: crfm-helm[vlm] ; extra == 'all'
61
- Requires-Dist: crfm-helm[bhasa] ; extra == 'all'
62
- Provides-Extra: allenai
63
- Requires-Dist: ai2-olmo ~=0.2 ; extra == 'allenai'
64
- Provides-Extra: amazon
65
- Requires-Dist: boto3 ~=1.28.57 ; extra == 'amazon'
66
- Requires-Dist: awscli ~=1.29.57 ; extra == 'amazon'
67
- Requires-Dist: botocore ~=1.31.57 ; extra == 'amazon'
68
- Provides-Extra: anthropic
69
- Requires-Dist: anthropic ~=0.17 ; extra == 'anthropic'
70
- Requires-Dist: websocket-client ~=1.3.2 ; extra == 'anthropic'
71
- Provides-Extra: bhasa
72
- Requires-Dist: pythainlp ==5.0.0 ; extra == 'bhasa'
73
- Requires-Dist: pyonmttok ==1.37.0 ; extra == 'bhasa'
74
- Requires-Dist: sacrebleu ~=2.2.1 ; extra == 'bhasa'
75
- Provides-Extra: cleva
76
- Requires-Dist: unidecode ==1.3.6 ; extra == 'cleva'
77
- Requires-Dist: pypinyin ==0.49.0 ; extra == 'cleva'
78
- Requires-Dist: jieba ==0.42.1 ; extra == 'cleva'
79
- Requires-Dist: opencc ==1.1.6 ; extra == 'cleva'
80
- Requires-Dist: langdetect ==1.0.9 ; extra == 'cleva'
81
- Provides-Extra: cohere
82
- Requires-Dist: cohere ~=5.3 ; extra == 'cohere'
83
- Provides-Extra: decodingtrust
84
- Requires-Dist: fairlearn ~=0.9.0 ; extra == 'decodingtrust'
85
- Provides-Extra: dev
86
- Requires-Dist: pytest ~=7.2.0 ; extra == 'dev'
87
- Requires-Dist: pre-commit ~=2.20.0 ; extra == 'dev'
88
- Requires-Dist: black ==24.3.0 ; extra == 'dev'
89
- Requires-Dist: mypy ==1.5.1 ; extra == 'dev'
90
- Requires-Dist: flake8 ==5.0.4 ; extra == 'dev'
91
- Provides-Extra: google
92
- Requires-Dist: google-cloud-aiplatform ~=1.48 ; extra == 'google'
93
- Provides-Extra: heim
94
- Requires-Dist: gdown ~=5.1 ; extra == 'heim'
95
- Requires-Dist: diffusers ~=0.24.0 ; extra == 'heim'
96
- Requires-Dist: icetk ~=0.0.4 ; extra == 'heim'
97
- Requires-Dist: jax ~=0.4.13 ; extra == 'heim'
98
- Requires-Dist: jaxlib ~=0.4.13 ; extra == 'heim'
99
- Requires-Dist: crfm-helm[openai] ; extra == 'heim'
100
- Requires-Dist: einops ~=0.7.0 ; extra == 'heim'
101
- Requires-Dist: omegaconf ~=2.3.0 ; extra == 'heim'
102
- Requires-Dist: pytorch-lightning ~=2.0.5 ; extra == 'heim'
103
- Requires-Dist: flax ~=0.6.11 ; extra == 'heim'
104
- Requires-Dist: ftfy ~=6.1.1 ; extra == 'heim'
105
- Requires-Dist: Unidecode ~=1.3.6 ; extra == 'heim'
106
- Requires-Dist: wandb ~=0.13.11 ; extra == 'heim'
107
- Requires-Dist: google-cloud-translate ~=3.11.2 ; extra == 'heim'
108
- Requires-Dist: autokeras ~=1.0.20 ; extra == 'heim'
109
- Requires-Dist: clip-anytorch ~=2.5.0 ; extra == 'heim'
110
- Requires-Dist: google-cloud-storage ~=2.9 ; extra == 'heim'
111
- Requires-Dist: lpips ~=0.1.4 ; extra == 'heim'
112
- Requires-Dist: multilingual-clip ~=1.0.10 ; extra == 'heim'
113
- Requires-Dist: NudeNet ~=2.0.9 ; extra == 'heim'
114
- Requires-Dist: opencv-python ~=4.7.0.68 ; extra == 'heim'
115
- Requires-Dist: pytorch-fid ~=0.3.0 ; extra == 'heim'
116
- Requires-Dist: tensorflow ~=2.11 ; extra == 'heim'
117
- Requires-Dist: timm ~=0.6.12 ; extra == 'heim'
118
- Requires-Dist: torch-fidelity ~=0.3.0 ; extra == 'heim'
119
- Requires-Dist: torchmetrics ~=0.11.1 ; extra == 'heim'
120
- Requires-Dist: scikit-image !=0.23.*,==0.*,>=0.22 ; extra == 'heim'
121
- Requires-Dist: crfm-helm[images] ; extra == 'heim'
122
- Provides-Extra: human-evaluation
123
- Requires-Dist: scaleapi ~=2.13.0 ; extra == 'human-evaluation'
124
- Requires-Dist: surge-api ~=1.1.0 ; extra == 'human-evaluation'
125
- Provides-Extra: image2struct
126
- Requires-Dist: crfm-helm[images] ; extra == 'image2struct'
127
- Requires-Dist: latex ~=0.7.0 ; extra == 'image2struct'
128
- Requires-Dist: pdf2image ~=1.16.3 ; extra == 'image2struct'
129
- Requires-Dist: selenium ~=4.17.2 ; extra == 'image2struct'
130
- Requires-Dist: html2text ~=2024.2.26 ; extra == 'image2struct'
131
- Requires-Dist: opencv-python ~=4.7.0.68 ; extra == 'image2struct'
132
- Requires-Dist: lpips ~=0.1.4 ; extra == 'image2struct'
133
- Requires-Dist: imagehash ~=4.3.1 ; extra == 'image2struct'
134
- Provides-Extra: images
135
- Requires-Dist: crfm-helm[accelerate] ; extra == 'images'
136
- Requires-Dist: pillow ~=10.2 ; extra == 'images'
137
- Provides-Extra: metrics
138
- Requires-Dist: google-api-python-client ~=2.64 ; extra == 'metrics'
139
- Requires-Dist: numba ~=0.56 ; extra == 'metrics'
140
- Requires-Dist: pytrec-eval ==0.5 ; extra == 'metrics'
141
- Requires-Dist: sacrebleu ~=2.2.1 ; extra == 'metrics'
142
- Provides-Extra: mistral
143
- Requires-Dist: mistralai ~=0.0.11 ; extra == 'mistral'
144
- Provides-Extra: models
145
- Requires-Dist: crfm-helm[ai21] ; extra == 'models'
146
- Requires-Dist: crfm-helm[accelerate] ; extra == 'models'
147
- Requires-Dist: crfm-helm[aleph-alpha] ; extra == 'models'
148
- Requires-Dist: crfm-helm[allenai] ; extra == 'models'
149
- Requires-Dist: crfm-helm[amazon] ; extra == 'models'
150
- Requires-Dist: crfm-helm[anthropic] ; extra == 'models'
151
- Requires-Dist: crfm-helm[cohere] ; extra == 'models'
152
- Requires-Dist: crfm-helm[google] ; extra == 'models'
153
- Requires-Dist: crfm-helm[mistral] ; extra == 'models'
154
- Requires-Dist: crfm-helm[openai] ; extra == 'models'
155
- Requires-Dist: crfm-helm[reka] ; extra == 'models'
156
- Requires-Dist: crfm-helm[together] ; extra == 'models'
157
- Requires-Dist: crfm-helm[yandex] ; extra == 'models'
158
- Requires-Dist: crfm-helm[openvino] ; extra == 'models'
159
- Provides-Extra: mongo
160
- Requires-Dist: pymongo ~=4.2 ; extra == 'mongo'
161
- Provides-Extra: openai
162
- Requires-Dist: openai ~=1.0 ; extra == 'openai'
163
- Requires-Dist: tiktoken ~=0.7 ; extra == 'openai'
164
- Requires-Dist: pydantic ~=2.0 ; extra == 'openai'
165
- Provides-Extra: openvino
166
- Requires-Dist: optimum[openvino] ~=1.19 ; extra == 'openvino'
167
- Provides-Extra: plots
168
- Requires-Dist: colorcet ~=3.0.1 ; extra == 'plots'
169
- Requires-Dist: matplotlib ~=3.6.0 ; extra == 'plots'
170
- Requires-Dist: seaborn ~=0.11.0 ; extra == 'plots'
171
- Provides-Extra: proxy-server
172
- Requires-Dist: gunicorn >=20.1 ; extra == 'proxy-server'
173
- Provides-Extra: reka
174
- Requires-Dist: reka-api ~=2.0.0 ; extra == 'reka'
175
- Provides-Extra: scenarios
176
- Requires-Dist: gdown ~=5.1 ; extra == 'scenarios'
177
- Requires-Dist: sympy ~=1.11.1 ; extra == 'scenarios'
178
- Requires-Dist: xlrd ~=2.0.1 ; extra == 'scenarios'
179
- Provides-Extra: slurm
180
- Requires-Dist: simple-slurm ~=0.2.6 ; extra == 'slurm'
181
- Provides-Extra: summarization
182
- Requires-Dist: summ-eval ~=0.892 ; extra == 'summarization'
183
- Provides-Extra: together
184
- Requires-Dist: together ~=1.1 ; extra == 'together'
185
- Provides-Extra: unitxt
186
- Requires-Dist: evaluate ~=0.4.1 ; extra == 'unitxt'
187
- Provides-Extra: vlm
188
- Requires-Dist: crfm-helm[openai] ; extra == 'vlm'
189
- Requires-Dist: einops ~=0.7.0 ; extra == 'vlm'
190
- Requires-Dist: einops-exts ~=0.0.4 ; extra == 'vlm'
191
- Requires-Dist: open-clip-torch ~=2.24 ; extra == 'vlm'
192
- Requires-Dist: torch ~=2.1 ; extra == 'vlm'
193
- Requires-Dist: transformers-stream-generator ~=0.0.4 ; extra == 'vlm'
194
- Requires-Dist: scipy ~=1.10 ; extra == 'vlm'
195
- Requires-Dist: torchvision <3.0.0,>=0.14.1 ; extra == 'vlm'
196
- Requires-Dist: crfm-helm[reka] ; extra == 'vlm'
197
- Requires-Dist: crfm-helm[images] ; extra == 'vlm'
198
- Requires-Dist: crfm-helm[image2struct] ; extra == 'vlm'
199
- Requires-Dist: pycocoevalcap ~=1.2 ; extra == 'vlm'
200
- Provides-Extra: yandex
201
- Requires-Dist: sentencepiece ~=0.1.97 ; extra == 'yandex'
202
-
203
- <!--intro-start-->
204
-
205
- # Holistic Evaluation of Language Models
206
-
207
- [comment]: <> (When using the img tag, which allows us to specify size, src has to be a URL.)
208
- <img src="https://github.com/stanford-crfm/helm/raw/main/src/helm/benchmark/static/images/helm-logo.png" alt="" width="800"/>
209
-
210
- Welcome! The **`crfm-helm`** Python package contains code used in the **Holistic Evaluation of Language Models** project ([paper](https://arxiv.org/abs/2211.09110), [website](https://crfm.stanford.edu/helm/latest/)) by [Stanford CRFM](https://crfm.stanford.edu/). This package includes the following features:
211
-
212
- - Collection of datasets in a standard format (e.g., NaturalQuestions)
213
- - Collection of models accessible via a unified API (e.g., GPT-3, MT-NLG, OPT, BLOOM)
214
- - Collection of metrics beyond accuracy (efficiency, bias, toxicity, etc.)
215
- - Collection of perturbations for evaluating robustness and fairness (e.g., typos, dialect)
216
- - Modular framework for constructing prompts from datasets
217
- - Proxy server for managing accounts and providing unified interface to access models
218
- <!--intro-end-->
219
-
220
- To get started, refer to [the documentation on Read the Docs](https://crfm-helm.readthedocs.io/) for how to install and run the package.
221
-
222
- ## Papers
223
-
224
- This repository contains code used to produce results for the following papers:
225
-
226
- - Holistic Evaluation of Vision-Language Models (VHELM) - paper (TBD), [leaderboard](https://crfm.stanford.edu/helm/vhelm/latest/), [documentation](https://crfm-helm.readthedocs.io/en/latest/vhelm/)
227
- - Holistic Evaluation of Text-To-Image Models (HEIM) - [paper](https://arxiv.org/abs/2311.04287), [leaderboard](https://crfm.stanford.edu/helm/heim/latest/), [documentation](https://crfm-helm.readthedocs.io/en/latest/heim/)
228
-
229
- The HELM Python package can be used to reproduce the published model evaluation results from these paper. To get started, refer to the documentation links above for the corresponding paper, or the [main Reproducing Leaderboards documentation](https://crfm-helm.readthedocs.io/en/latest/reproducing_leaderboards/).
230
-
231
- ## Holistic Evaluation of Text-To-Image Models
232
-
233
- <img src="https://github.com/stanford-crfm/helm/raw/heim/src/helm/benchmark/static/heim/images/heim-logo.png" alt="" width="800"/>
234
-
235
- Significant effort has recently been made in developing text-to-image generation models, which take textual prompts as
236
- input and generate images. As these models are widely used in real-world applications, there is an urgent need to
237
- comprehensively understand their capabilities and risks. However, existing evaluations primarily focus on image-text
238
- alignment and image quality. To address this limitation, we introduce a new benchmark,
239
- **Holistic Evaluation of Text-To-Image Models (HEIM)**.
240
-
241
- We identify 12 different aspects that are important in real-world model deployment, including:
242
-
243
- - image-text alignment
244
- - image quality
245
- - aesthetics
246
- - originality
247
- - reasoning
248
- - knowledge
249
- - bias
250
- - toxicity
251
- - fairness
252
- - robustness
253
- - multilinguality
254
- - efficiency
255
-
256
- By curating scenarios encompassing these aspects, we evaluate state-of-the-art text-to-image models using this benchmark.
257
- Unlike previous evaluations that focused on alignment and quality, HEIM significantly improves coverage by evaluating all
258
- models across all aspects. Our results reveal that no single model excels in all aspects, with different models
259
- demonstrating strengths in different aspects.
260
-
261
- This repository contains the code used to produce the [results on the website](https://crfm.stanford.edu/heim/latest/)
262
- and [paper](https://arxiv.org/abs/2311.04287).
263
-
264
- ## Citation
265
-
266
- If you use this software in your research, please cite the [Holistic Evaluation of Language Models paper](https://openreview.net/forum?id=iO4LZibEqW) as below.
267
-
268
- ```bibtex
269
- @article{
270
- liang2023holistic,
271
- title={Holistic Evaluation of Language Models},
272
- author={Percy Liang and Rishi Bommasani and Tony Lee and Dimitris Tsipras and Dilara Soylu and Michihiro Yasunaga and Yian Zhang and Deepak Narayanan and Yuhuai Wu and Ananya Kumar and Benjamin Newman and Binhang Yuan and Bobby Yan and Ce Zhang and Christian Alexander Cosgrove and Christopher D Manning and Christopher Re and Diana Acosta-Navas and Drew Arad Hudson and Eric Zelikman and Esin Durmus and Faisal Ladhak and Frieda Rong and Hongyu Ren and Huaxiu Yao and Jue WANG and Keshav Santhanam and Laurel Orr and Lucia Zheng and Mert Yuksekgonul and Mirac Suzgun and Nathan Kim and Neel Guha and Niladri S. Chatterji and Omar Khattab and Peter Henderson and Qian Huang and Ryan Andrew Chi and Sang Michael Xie and Shibani Santurkar and Surya Ganguli and Tatsunori Hashimoto and Thomas Icard and Tianyi Zhang and Vishrav Chaudhary and William Wang and Xuechen Li and Yifan Mai and Yuhui Zhang and Yuta Koreeda},
273
- journal={Transactions on Machine Learning Research},
274
- issn={2835-8856},
275
- year={2023},
276
- url={https://openreview.net/forum?id=iO4LZibEqW},
277
- note={Featured Certification, Expert Certification}
278
- }
279
- ```
280
- # Tutorial
281
-
282
- This tutorial will explain how to use the HELM command line tools to run benchmarks, aggregate statistics, and visualize results.
283
-
284
- We will perform two runs using the `mmlu` scenario on the `openai/gpt2` model. The `mmlu` scenario implements the **Massive Multitask Language Understanding (MMLU)** benchmark from [this paper](https://arxiv.org/pdf/2009.03300.pdf) and consists of a question answering (QA) task over a dataset with questions from 57 subjects such as elementary mathematics, US history, computer science, and law. Note that GPT-2 performs poorly on MMLU, so this is just a proof of concept. The first run will use questions about anatomy, and the second will use questions about philosophy.
285
-
286
- ## Using `helm-run`
287
-
288
- `helm-run` is a command line tool for running benchmarks.
289
-
290
- To run this benchmark using the HELM command-line tools, we need to specify **run entries** that describe the desired runs. For this example, the run entries are `mmlu:subject=anatomy,model=openai/gpt2` (for anatomy) and `mmlu:subject=philosophy,model=openai/gpt2` (for philosophy).
291
-
292
- We will now use `helm-run` to execute the runs. Run this command:
293
-
294
- ```sh
295
- helm-run --run-entries mmlu:subject=anatomy,model=openai/gpt2 mmlu:subject=philosophy,model=openai/gpt2 --suite my-suite --max-eval-instances 10
296
- ```
297
-
298
- The meanings of the arguments are as follows:
299
-
300
- `--run-entries` specifies the run entries for the desired runs.
301
- - `--suite` specifies a subdirectory under the output directory in which all the output will be placed.
302
- - `--max-eval-instances` limits evaluation to only *N* instances (i.e. items) from the benchmark, using a randomly shuffled order of instances.
303
-
304
- `helm-run` creates an environment directory and an output directory by default.
305
-
306
- The environment directory is `prod_env/` by default and can be set using `--local-path`. Credentials for making API calls should be added to a `credentials.conf` file in this directory (see the example after this list).
307
- - The output directory is `benchmark_output/` by default and can be set using `--output-path`.
308
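For reference, a minimal `credentials.conf` is just a list of provider API keys. The `openai/gpt2` runs in this tutorial are typically executed locally and should not require any credentials; the snippet below is only relevant if you later evaluate API-hosted models, and the exact key names depend on which providers you use (the `openaiApiKey` entry shown here is one common example):

```
openaiApiKey: your-openai-api-key
```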
-
309
- After running this command, navigate to the `benchmark_output/runs/my-suite/` directory. It should contain two sub-directories named `mmlu:subject=anatomy,model=openai_gpt2` and `mmlu:subject=philosophy,model=openai_gpt2`. Note that the names of these sub-directories are based on the run entries we used earlier, but with `/` replaced with `_`.
310
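As a quick sanity check, you can list these run sub-directories from Python. This is a minimal sketch that assumes the default output path and the `my-suite` suite name used above:

```python
from pathlib import Path

# List the run sub-directories created by the helm-run command above.
# Assumes the default --output-path (benchmark_output) and --suite my-suite.
suite_dir = Path("benchmark_output/runs/my-suite")
for run_dir in sorted(path for path in suite_dir.iterdir() if path.is_dir()):
    print(run_dir.name)
```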
-
311
- Each output sub-directory will contain several JSON files that were generated during the corresponding run (a short sketch for inspecting them follows this list):
312
-
313
- - `run_spec.json` contains the `RunSpec`, which specifies the scenario, adapter and metrics for the run.
314
- - `scenario.json` contains a serialized `Scenario`, which contains the scenario for the run and specifies the instances (i.e. inputs) used.
315
- - `scenario_state.json` contains a serialized `ScenarioState`, which contains every request to and response from the model.
316
- - `per_instance_stats.json` contains a serialized list of `PerInstanceStats`, which contains the statistics produced for the metrics for each instance (i.e. input).
317
- `stats.json` contains a serialized list of `Stat`, which contains the statistics produced for the metrics, aggregated across all instances (i.e. inputs).
318
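These files are plain serialized JSON, so they can be inspected with a few lines of Python. The following is a minimal sketch, not part of the HELM API: it assumes the anatomy run directory produced above, and the field names (`name`, `mean`) reflect how the aggregated statistics are typically serialized and may vary across versions:

```python
import json
from pathlib import Path

# Minimal sketch: print the aggregated statistics for one run.
run_dir = Path("benchmark_output/runs/my-suite/mmlu:subject=anatomy,model=openai_gpt2")
stats = json.loads((run_dir / "stats.json").read_text())

for stat in stats:
    # Each entry is a serialized statistic; its "name" field is usually a
    # nested object whose own "name" field is the metric name.
    name = stat.get("name")
    label = name.get("name") if isinstance(name, dict) else name
    print(label, "->", stat.get("mean"))
```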
-
319
- ## Using `helm-summarize`
320
-
321
- The `helm-summarize` command reads the output files of `helm-run` and computes aggregate statistics across runs. Run the following:
322
-
323
- ```sh
324
- helm-summarize --suite my-suite
325
- ```
326
-
327
- This reads the files that `helm-run` previously wrote to `benchmark_output/runs/my-suite/` and writes the following new files back to the same directory:
328
-
329
- - `summary.json` contains a serialized `ExecutiveSummary` with a date and suite name.
330
- - `run_specs.json` contains the run entries for all the runs.
331
- `runs.json` contains a serialized list of `Run`, which contains the run path, run spec, adapter spec, and statistics for each run.
332
- - `groups.json` contains a serialized list of `Table`, each containing information about groups in a group category.
333
- - `groups_metadata.json` contains a list of all the groups along with a human-readable description and a taxonomy.
334
-
335
- Additionally, for each group and group-relevant metric, it will output a pair of files: `benchmark_output/runs/my-suite/groups/latex/<group_name>_<metric_name>.tex` and `benchmark_output/runs/my-suite/groups/json/<group_name>_<metric_name>.json`. These files contain the statistics for that metric from each run within the group.
336
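Because these outputs are ordinary JSON, you can inspect the per-group tables directly. This is a minimal sketch assuming the default output path and the `my-suite` suite name; it makes no assumptions about the internal structure of each table beyond it being valid JSON:

```python
import json
from pathlib import Path

# Minimal sketch: list the per-group JSON tables written by helm-summarize
# and show the top-level structure of each one.
groups_json_dir = Path("benchmark_output/runs/my-suite/groups/json")
for path in sorted(groups_json_dir.glob("*.json")):
    table = json.loads(path.read_text())
    summary = list(table.keys()) if isinstance(table, dict) else f"list of {len(table)} items"
    print(f"{path.name}: {summary}")
```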
-
337
- ## Using `helm-server`
338
-
339
- Finally, the `helm-server` command launches a web server to visualize the output files of `helm-run` and `helm-summarize`. Run:
340
-
341
- ```sh
342
- helm-server --suite my-suite
343
- ```
344
-
345
- Open a browser and go to http://localhost:8000/ to view the visualization. You should see a view similar to the [live website for the paper](https://crfm.stanford.edu/helm/classic/latest/), but with the data from your benchmark runs. The website has the following sections accessible from the top menu bar:
346
-
347
- - **Leaderboards** contains the leaderboards with aggregate metrics.
348
- **Models** contains a list of models and their descriptions.
349
- - **Scenarios** contains a list of scenarios and their descriptions.
350
- - **Predictions** contains a searchable list of runs.