crfm-helm 0.4.0__py3-none-any.whl → 0.5.10__py3-none-any.whl

This diff compares the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between versions exactly as they appear in the public registry.

This version of crfm-helm might be problematic.

Files changed (1033)
  1. crfm_helm-0.5.10.dist-info/METADATA +369 -0
  2. crfm_helm-0.5.10.dist-info/RECORD +1008 -0
  3. {crfm_helm-0.4.0.dist-info → crfm_helm-0.5.10.dist-info}/WHEEL +1 -1
  4. helm/benchmark/adaptation/adapter_spec.py +80 -29
  5. helm/benchmark/adaptation/adapters/adapter.py +2 -2
  6. helm/benchmark/adaptation/adapters/adapter_factory.py +39 -28
  7. helm/benchmark/adaptation/adapters/binary_ranking_adapter.py +1 -1
  8. helm/benchmark/adaptation/adapters/chat_adapter.py +49 -0
  9. helm/benchmark/adaptation/adapters/ehr_instruction_adapter.py +108 -0
  10. helm/benchmark/adaptation/adapters/generation_adapter.py +2 -1
  11. helm/benchmark/adaptation/adapters/in_context_learning_adapter.py +24 -8
  12. helm/benchmark/adaptation/adapters/language_modeling_adapter.py +3 -4
  13. helm/benchmark/adaptation/adapters/multimodal/generation_multimodal_adapter.py +4 -2
  14. helm/benchmark/adaptation/adapters/multimodal/in_context_learning_multimodal_adapter.py +2 -1
  15. helm/benchmark/adaptation/adapters/multimodal/multimodal_prompt.py +7 -0
  16. helm/benchmark/adaptation/adapters/multimodal/multiple_choice_joint_multimodal_adapter.py +112 -0
  17. helm/benchmark/adaptation/adapters/multimodal/test_in_context_learning_multimodal_adapter.py +6 -3
  18. helm/benchmark/adaptation/adapters/multimodal/test_multimodal_prompt.py +3 -1
  19. helm/benchmark/adaptation/adapters/multiple_choice_calibrated_adapter.py +1 -1
  20. helm/benchmark/adaptation/adapters/multiple_choice_joint_adapter.py +18 -8
  21. helm/benchmark/adaptation/adapters/multiple_choice_joint_chain_of_thought_adapter.py +87 -0
  22. helm/benchmark/adaptation/adapters/multiple_choice_separate_adapter.py +1 -1
  23. helm/benchmark/adaptation/adapters/test_adapter.py +5 -4
  24. helm/benchmark/adaptation/adapters/test_generation_adapter.py +46 -22
  25. helm/benchmark/adaptation/adapters/test_language_modeling_adapter.py +17 -29
  26. helm/benchmark/adaptation/adapters/test_multiple_choice_joint_adapter.py +138 -16
  27. helm/benchmark/adaptation/common_adapter_specs.py +443 -0
  28. helm/benchmark/adaptation/prompt.py +1 -1
  29. helm/benchmark/adaptation/request_state.py +6 -1
  30. helm/benchmark/adaptation/scenario_state.py +6 -2
  31. helm/benchmark/annotation/aci_bench_annotator.py +84 -0
  32. helm/benchmark/annotation/air_bench_annotator.py +79 -0
  33. helm/benchmark/annotation/alrage_annotator.py +90 -0
  34. helm/benchmark/annotation/annotator.py +48 -0
  35. helm/benchmark/annotation/annotator_factory.py +50 -0
  36. helm/benchmark/annotation/anthropic_red_team_annotator.py +57 -0
  37. helm/benchmark/annotation/autobencher_capabilities_annotator.py +107 -0
  38. helm/benchmark/annotation/autobencher_safety_annotator.py +98 -0
  39. helm/benchmark/annotation/bigcodebench_annotator.py +108 -0
  40. helm/benchmark/annotation/bird_sql_annotator.py +58 -0
  41. helm/benchmark/annotation/call_center_annotator.py +258 -0
  42. helm/benchmark/annotation/chw_care_plan_annotator.py +82 -0
  43. helm/benchmark/annotation/czech_bank_qa_annotator.py +78 -0
  44. helm/benchmark/annotation/dischargeme_annotator.py +96 -0
  45. helm/benchmark/annotation/ehr_sql_annotator.py +87 -0
  46. helm/benchmark/annotation/financebench_annotator.py +79 -0
  47. helm/benchmark/annotation/harm_bench_annotator.py +55 -0
  48. helm/benchmark/annotation/helpdesk_call_summarization_annotator.py +131 -0
  49. helm/benchmark/annotation/image2struct/image_compiler_annotator.py +93 -0
  50. helm/benchmark/annotation/image2struct/latex_compiler_annotator.py +59 -0
  51. helm/benchmark/annotation/image2struct/lilypond_compiler_annotator.py +86 -0
  52. helm/benchmark/annotation/image2struct/webpage_compiler_annotator.py +132 -0
  53. helm/benchmark/annotation/live_qa_annotator.py +76 -0
  54. helm/benchmark/annotation/med_dialog_annotator.py +88 -0
  55. helm/benchmark/annotation/medalign_annotator.py +89 -0
  56. helm/benchmark/annotation/medi_qa_annotator.py +87 -0
  57. helm/benchmark/annotation/medication_qa_annotator.py +86 -0
  58. helm/benchmark/annotation/mental_health_annotator.py +87 -0
  59. helm/benchmark/annotation/mimic_bhc_annotator.py +89 -0
  60. helm/benchmark/annotation/mimic_rrs_annotator.py +89 -0
  61. helm/benchmark/annotation/model_as_judge.py +309 -0
  62. helm/benchmark/annotation/mtsamples_procedures_annotator.py +87 -0
  63. helm/benchmark/annotation/mtsamples_replicate_annotator.py +90 -0
  64. helm/benchmark/annotation/omni_math/gpt_evaluation_template.txt +152 -0
  65. helm/benchmark/annotation/omni_math/gpt_evaluation_zero_shot_template.txt +36 -0
  66. helm/benchmark/annotation/omni_math_annotator.py +131 -0
  67. helm/benchmark/annotation/simple_safety_tests_annotator.py +50 -0
  68. helm/benchmark/annotation/spider_annotator.py +18 -0
  69. helm/benchmark/annotation/starr_patient_instructions_annotator.py +87 -0
  70. helm/benchmark/annotation/test_annotator_factory.py +26 -0
  71. helm/benchmark/annotation/test_dummy_annotator.py +44 -0
  72. helm/benchmark/annotation/wildbench/eval_template.pairwise.v2.md +75 -0
  73. helm/benchmark/annotation/wildbench/eval_template.score.v2.md +66 -0
  74. helm/benchmark/annotation/wildbench_annotator.py +119 -0
  75. helm/benchmark/annotation/xstest_annotator.py +100 -0
  76. helm/benchmark/annotation_executor.py +144 -0
  77. helm/benchmark/augmentations/cleva_perturbation.py +9 -8
  78. helm/benchmark/augmentations/contraction_expansion_perturbation.py +2 -2
  79. helm/benchmark/augmentations/contrast_sets_perturbation.py +2 -2
  80. helm/benchmark/augmentations/data_augmenter.py +0 -2
  81. helm/benchmark/augmentations/dialect_perturbation.py +4 -5
  82. helm/benchmark/augmentations/extra_space_perturbation.py +2 -2
  83. helm/benchmark/augmentations/filler_words_perturbation.py +2 -2
  84. helm/benchmark/augmentations/gender_perturbation.py +3 -3
  85. helm/benchmark/augmentations/lowercase_perturbation.py +2 -2
  86. helm/benchmark/augmentations/mild_mix_perturbation.py +6 -6
  87. helm/benchmark/augmentations/misspelling_perturbation.py +2 -2
  88. helm/benchmark/augmentations/person_name_perturbation.py +4 -5
  89. helm/benchmark/augmentations/perturbation.py +26 -4
  90. helm/benchmark/augmentations/perturbation_description.py +1 -1
  91. helm/benchmark/augmentations/space_perturbation.py +2 -2
  92. helm/benchmark/augmentations/suffix_perturbation.py +29 -0
  93. helm/benchmark/augmentations/synonym_perturbation.py +4 -3
  94. helm/benchmark/augmentations/test_perturbation.py +56 -19
  95. helm/benchmark/augmentations/translate_perturbation.py +31 -0
  96. helm/benchmark/augmentations/typos_perturbation.py +2 -2
  97. helm/benchmark/config_registry.py +7 -1
  98. helm/benchmark/data_preprocessor.py +2 -2
  99. helm/benchmark/executor.py +54 -25
  100. helm/benchmark/huggingface_registration.py +28 -10
  101. helm/benchmark/metrics/air_bench_metrics.py +3212 -0
  102. helm/benchmark/metrics/alrage_metric.py +35 -0
  103. helm/benchmark/metrics/annotation_metrics.py +108 -0
  104. helm/benchmark/metrics/basic_metrics.py +437 -667
  105. helm/benchmark/metrics/bbq_metrics.py +17 -6
  106. helm/benchmark/metrics/bias_metrics.py +18 -9
  107. helm/benchmark/metrics/bias_word_lists.py +1 -1
  108. helm/benchmark/metrics/bigcodebench_metrics.py +25 -0
  109. helm/benchmark/metrics/bird_sql_metrics.py +28 -0
  110. helm/benchmark/metrics/classification_metrics.py +107 -22
  111. helm/benchmark/metrics/cleva_accuracy_metrics.py +8 -5
  112. helm/benchmark/metrics/cleva_harms_metrics.py +12 -11
  113. helm/benchmark/metrics/code_metrics.py +5 -5
  114. helm/benchmark/metrics/code_metrics_helper.py +11 -3
  115. helm/benchmark/metrics/codeinsights_code_efficiency_metrics.py +186 -0
  116. helm/benchmark/metrics/codeinsights_code_evaluation_metrics.py +477 -0
  117. helm/benchmark/metrics/codeinsights_correct_code_metrics.py +366 -0
  118. helm/benchmark/metrics/codeinsights_edge_case_metrics.py +92 -0
  119. helm/benchmark/metrics/codeinsights_metric_specs.py +51 -0
  120. helm/benchmark/metrics/comet_metric.py +125 -0
  121. helm/benchmark/metrics/common_metric_specs.py +174 -0
  122. helm/benchmark/metrics/conv_fin_qa_calc_metrics.py +83 -0
  123. helm/benchmark/metrics/copyright_metrics.py +5 -5
  124. helm/benchmark/metrics/czech_bank_qa_metrics.py +29 -0
  125. helm/benchmark/metrics/decodingtrust_fairness_metrics.py +72 -0
  126. helm/benchmark/metrics/decodingtrust_ood_knowledge_metrics.py +66 -0
  127. helm/benchmark/metrics/decodingtrust_privacy_metrics.py +101 -0
  128. helm/benchmark/metrics/decodingtrust_stereotype_bias_metrics.py +202 -0
  129. helm/benchmark/metrics/disinformation_metrics.py +8 -114
  130. helm/benchmark/metrics/dry_run_metrics.py +35 -6
  131. helm/benchmark/metrics/efficiency_metrics.py +287 -0
  132. helm/benchmark/metrics/ehr_sql_metrics.py +159 -0
  133. helm/benchmark/metrics/evaluate_instances_metric.py +59 -0
  134. helm/benchmark/metrics/evaluate_reference_metrics.py +831 -0
  135. helm/benchmark/metrics/fin_qa_metrics.py +60 -0
  136. helm/benchmark/metrics/fin_qa_metrics_helper.py +398 -0
  137. helm/benchmark/metrics/gpqa_chain_of_thought_metric.py +115 -0
  138. helm/benchmark/metrics/gpt4_audio_critique_metrics.py +167 -0
  139. helm/benchmark/metrics/gpt4_audio_refusal_metrics.py +145 -0
  140. helm/benchmark/metrics/gpt4v_originality_critique_metrics.py +126 -0
  141. helm/benchmark/metrics/helpdesk_call_summarization_metrics.py +48 -0
  142. helm/benchmark/metrics/ifeval/instructions.py +1574 -0
  143. helm/benchmark/metrics/ifeval/instructions_registry.py +182 -0
  144. helm/benchmark/metrics/ifeval/instructions_registry.pyi +3 -0
  145. helm/benchmark/metrics/ifeval/instructions_util.py +153 -0
  146. helm/benchmark/metrics/ifeval_metrics.py +67 -0
  147. helm/benchmark/metrics/image_generation/aesthetics_metrics.py +54 -0
  148. helm/benchmark/metrics/image_generation/aesthetics_scorer.py +66 -0
  149. helm/benchmark/metrics/image_generation/clip_score_metrics.py +84 -0
  150. helm/benchmark/metrics/image_generation/denoised_runtime_metric.py +42 -0
  151. helm/benchmark/metrics/image_generation/detection_metrics.py +57 -0
  152. helm/benchmark/metrics/image_generation/detectors/base_detector.py +8 -0
  153. helm/benchmark/metrics/image_generation/detectors/vitdet.py +178 -0
  154. helm/benchmark/metrics/image_generation/efficiency_metrics.py +41 -0
  155. helm/benchmark/metrics/image_generation/fidelity_metrics.py +168 -0
  156. helm/benchmark/metrics/image_generation/fractal_dimension/__init__.py +0 -0
  157. helm/benchmark/metrics/image_generation/fractal_dimension/fractal_dimension_util.py +63 -0
  158. helm/benchmark/metrics/image_generation/fractal_dimension/test_fractal_dimension_util.py +33 -0
  159. helm/benchmark/metrics/image_generation/fractal_dimension_metric.py +50 -0
  160. helm/benchmark/metrics/image_generation/gender_metrics.py +58 -0
  161. helm/benchmark/metrics/image_generation/image_critique_metrics.py +284 -0
  162. helm/benchmark/metrics/image_generation/lpips_metrics.py +82 -0
  163. helm/benchmark/metrics/image_generation/multi_scale_ssim_metrics.py +82 -0
  164. helm/benchmark/metrics/image_generation/nsfw_detector.py +96 -0
  165. helm/benchmark/metrics/image_generation/nsfw_metrics.py +103 -0
  166. helm/benchmark/metrics/image_generation/nudity_metrics.py +38 -0
  167. helm/benchmark/metrics/image_generation/photorealism_critique_metrics.py +153 -0
  168. helm/benchmark/metrics/image_generation/psnr_metrics.py +78 -0
  169. helm/benchmark/metrics/image_generation/q16/__init__.py +0 -0
  170. helm/benchmark/metrics/image_generation/q16/q16_toxicity_detector.py +90 -0
  171. helm/benchmark/metrics/image_generation/q16/test_q16.py +20 -0
  172. helm/benchmark/metrics/image_generation/q16_toxicity_metrics.py +48 -0
  173. helm/benchmark/metrics/image_generation/skin_tone_metrics.py +164 -0
  174. helm/benchmark/metrics/image_generation/uiqi_metrics.py +92 -0
  175. helm/benchmark/metrics/image_generation/watermark/__init__.py +0 -0
  176. helm/benchmark/metrics/image_generation/watermark/test_watermark_detector.py +16 -0
  177. helm/benchmark/metrics/image_generation/watermark/watermark_detector.py +87 -0
  178. helm/benchmark/metrics/image_generation/watermark_metrics.py +48 -0
  179. helm/benchmark/metrics/instruction_following_critique_metrics.py +48 -5
  180. helm/benchmark/metrics/kpi_edgar_metrics.py +142 -0
  181. helm/benchmark/metrics/language_modeling_metrics.py +111 -0
  182. helm/benchmark/metrics/live_qa_metrics.py +35 -0
  183. helm/benchmark/metrics/llm_jury_metrics.py +58 -0
  184. helm/benchmark/metrics/lmkt_metric_specs.py +12 -0
  185. helm/benchmark/metrics/lmkt_metrics.py +47 -0
  186. helm/benchmark/metrics/machine_translation_metrics.py +89 -0
  187. helm/benchmark/metrics/medcalc_bench_metrics.py +137 -0
  188. helm/benchmark/metrics/medec_metrics.py +124 -0
  189. helm/benchmark/metrics/melt_bias_metric.py +234 -0
  190. helm/benchmark/metrics/melt_bias_word_lists.py +1367 -0
  191. helm/benchmark/metrics/melt_metric_specs.py +43 -0
  192. helm/benchmark/metrics/melt_toxicity_metric.py +107 -0
  193. helm/benchmark/metrics/metric.py +121 -175
  194. helm/benchmark/metrics/metric_name.py +0 -1
  195. helm/benchmark/metrics/metric_service.py +23 -7
  196. helm/benchmark/metrics/mimiciv_billing_code_metrics.py +127 -0
  197. helm/benchmark/metrics/nltk_helper.py +32 -0
  198. helm/benchmark/metrics/omni_math_metrics.py +44 -0
  199. helm/benchmark/metrics/openai_mrcr_metrics.py +52 -0
  200. helm/benchmark/metrics/output_processing_metric.py +60 -0
  201. helm/benchmark/metrics/output_processors.py +15 -0
  202. helm/benchmark/metrics/paraphrase_generation_metrics.py +5 -6
  203. helm/benchmark/metrics/prometheus_vision_critique_metrics.py +185 -0
  204. helm/benchmark/metrics/ranking_metrics.py +5 -5
  205. helm/benchmark/metrics/reference_metric.py +148 -0
  206. helm/benchmark/metrics/reka_vibe_critique_metrics.py +158 -0
  207. helm/benchmark/metrics/ruler_qa_metrics.py +34 -0
  208. helm/benchmark/metrics/safety_metrics.py +91 -0
  209. helm/benchmark/metrics/seahelm_metrics.py +201 -0
  210. helm/benchmark/metrics/seahelm_metrics_specs.py +10 -0
  211. helm/benchmark/metrics/spider_metrics.py +7 -0
  212. helm/benchmark/metrics/statistic.py +1 -1
  213. helm/benchmark/metrics/summac/model_summac.py +8 -11
  214. helm/benchmark/metrics/summarization_critique_metrics.py +4 -4
  215. helm/benchmark/metrics/summarization_metrics.py +150 -11
  216. helm/benchmark/metrics/test_bias_metrics.py +5 -1
  217. helm/benchmark/metrics/test_classification_metrics.py +145 -70
  218. helm/benchmark/metrics/test_disinformation_metrics.py +78 -0
  219. helm/benchmark/metrics/{test_basic_metrics.py → test_evaluate_reference_metrics.py} +20 -1
  220. helm/benchmark/metrics/test_metric.py +3 -3
  221. helm/benchmark/metrics/test_statistic.py +2 -2
  222. helm/benchmark/metrics/tokens/ai21_token_cost_estimator.py +1 -1
  223. helm/benchmark/metrics/tokens/auto_token_cost_estimator.py +6 -6
  224. helm/benchmark/metrics/tokens/cohere_token_cost_estimator.py +1 -1
  225. helm/benchmark/metrics/tokens/free_token_cost_estimator.py +1 -1
  226. helm/benchmark/metrics/tokens/gooseai_token_cost_estimator.py +11 -3
  227. helm/benchmark/metrics/tokens/openai_token_cost_estimator.py +1 -1
  228. helm/benchmark/metrics/tokens/test_ai21_token_cost_estimator.py +3 -3
  229. helm/benchmark/metrics/tokens/test_openai_token_cost_estimator.py +7 -7
  230. helm/benchmark/metrics/toxicity_metrics.py +37 -7
  231. helm/benchmark/metrics/toxicity_utils.py +23 -0
  232. helm/benchmark/metrics/ultra_suite_asr_classification_metrics.py +52 -0
  233. helm/benchmark/metrics/unitxt_metrics.py +107 -0
  234. helm/benchmark/metrics/vision_language/__init__.py +0 -0
  235. helm/benchmark/metrics/vision_language/emd_utils.py +347 -0
  236. helm/benchmark/metrics/vision_language/image_metrics.py +537 -0
  237. helm/benchmark/metrics/vision_language/image_utils.py +100 -0
  238. helm/benchmark/metrics/wildbench_metrics.py +54 -0
  239. helm/benchmark/model_deployment_registry.py +69 -5
  240. helm/benchmark/model_metadata_registry.py +58 -2
  241. helm/benchmark/multi_gpu_runner.py +133 -0
  242. helm/benchmark/presentation/contamination.py +3 -3
  243. helm/benchmark/presentation/create_plots.py +51 -20
  244. helm/benchmark/presentation/run_display.py +51 -12
  245. helm/benchmark/presentation/run_entry.py +2 -2
  246. helm/benchmark/presentation/schema.py +83 -66
  247. helm/benchmark/presentation/summarize.py +483 -388
  248. helm/benchmark/presentation/table.py +8 -8
  249. helm/benchmark/presentation/taxonomy_info.py +20 -0
  250. helm/benchmark/presentation/test_contamination.py +2 -2
  251. helm/benchmark/presentation/test_create_plots.py +4 -1
  252. helm/benchmark/presentation/test_run_entry.py +2 -2
  253. helm/benchmark/presentation/test_schema.py +11 -0
  254. helm/benchmark/presentation/test_summarize.py +148 -6
  255. helm/benchmark/presentation/torr_robustness_summarizer.py +178 -0
  256. helm/benchmark/reeval_run.py +202 -0
  257. helm/benchmark/reeval_runner.py +355 -0
  258. helm/benchmark/run.py +151 -87
  259. helm/benchmark/run_expander.py +418 -33
  260. helm/benchmark/run_spec.py +93 -0
  261. helm/benchmark/run_spec_factory.py +180 -0
  262. helm/benchmark/run_specs/__init__.py +0 -0
  263. helm/benchmark/run_specs/air_bench_run_specs.py +58 -0
  264. helm/benchmark/run_specs/arabic_run_specs.py +197 -0
  265. helm/benchmark/run_specs/audio_run_specs.py +657 -0
  266. helm/benchmark/run_specs/bluex_run_specs.py +40 -0
  267. helm/benchmark/run_specs/call_center_run_specs.py +201 -0
  268. helm/benchmark/run_specs/capabilities_run_specs.py +308 -0
  269. helm/benchmark/run_specs/classic_run_specs.py +1393 -0
  270. helm/benchmark/run_specs/cleva_run_specs.py +277 -0
  271. helm/benchmark/run_specs/codeinsights_run_specs.py +192 -0
  272. helm/benchmark/run_specs/decodingtrust_run_specs.py +316 -0
  273. helm/benchmark/run_specs/enem_challenge_specs.py +31 -0
  274. helm/benchmark/run_specs/enterprise_run_specs.py +280 -0
  275. helm/benchmark/run_specs/experimental_run_specs.py +224 -0
  276. helm/benchmark/run_specs/finance_run_specs.py +114 -0
  277. helm/benchmark/run_specs/healthqa_br_run_specs.py +40 -0
  278. helm/benchmark/run_specs/heim_run_specs.py +625 -0
  279. helm/benchmark/run_specs/imdb_ptbr_run_specs.py +30 -0
  280. helm/benchmark/run_specs/instruction_following_run_specs.py +129 -0
  281. helm/benchmark/run_specs/lite_run_specs.py +307 -0
  282. helm/benchmark/run_specs/lmkt_run_specs.py +144 -0
  283. helm/benchmark/run_specs/long_context_run_specs.py +188 -0
  284. helm/benchmark/run_specs/medhelm/__init__.py +0 -0
  285. helm/benchmark/run_specs/medhelm/benchmark_config.py +219 -0
  286. helm/benchmark/run_specs/medhelm_run_specs.py +1570 -0
  287. helm/benchmark/run_specs/melt_run_specs.py +783 -0
  288. helm/benchmark/run_specs/mmlu_clinical_afr_run_specs.py +49 -0
  289. helm/benchmark/run_specs/multilingual_run_specs.py +50 -0
  290. helm/benchmark/run_specs/oab_exams_specs.py +32 -0
  291. helm/benchmark/run_specs/safety_run_specs.py +191 -0
  292. helm/benchmark/run_specs/seahelm_run_specs.py +652 -0
  293. helm/benchmark/run_specs/simple_run_specs.py +104 -0
  294. helm/benchmark/run_specs/speech_disorder_audio_run_specs.py +167 -0
  295. helm/benchmark/run_specs/sql_run_specs.py +54 -0
  296. helm/benchmark/run_specs/tweetsentbr_run_specs.py +32 -0
  297. helm/benchmark/run_specs/unitxt_run_specs.py +51 -0
  298. helm/benchmark/run_specs/vlm_run_specs.py +1057 -0
  299. helm/benchmark/run_specs/winogrande_afr_run_specs.py +47 -0
  300. helm/benchmark/runner.py +63 -62
  301. helm/benchmark/runner_config_registry.py +21 -0
  302. helm/benchmark/scenarios/aci_bench_scenario.py +149 -0
  303. helm/benchmark/scenarios/air_bench_scenario.py +76 -0
  304. helm/benchmark/scenarios/alghafa_scenario.py +126 -0
  305. helm/benchmark/scenarios/alrage_scenario.py +54 -0
  306. helm/benchmark/scenarios/anthropic_hh_rlhf_scenario.py +27 -3
  307. helm/benchmark/scenarios/anthropic_red_team_scenario.py +82 -0
  308. helm/benchmark/scenarios/arabic_exams_scenario.py +114 -0
  309. helm/benchmark/scenarios/arabic_mmlu_scenario.py +82 -0
  310. helm/benchmark/scenarios/aratrust_scenario.py +95 -0
  311. helm/benchmark/scenarios/audio_language/__init__.py +0 -0
  312. helm/benchmark/scenarios/audio_language/air_bench_chat_scenario.py +130 -0
  313. helm/benchmark/scenarios/audio_language/air_bench_foundation_scenario.py +154 -0
  314. helm/benchmark/scenarios/audio_language/ami_scenario.py +96 -0
  315. helm/benchmark/scenarios/audio_language/audio_mnist_scenario.py +62 -0
  316. helm/benchmark/scenarios/audio_language/audio_pairs_scenario.py +62 -0
  317. helm/benchmark/scenarios/audio_language/audiocaps_scenario.py +59 -0
  318. helm/benchmark/scenarios/audio_language/casual_conversations2_scenario.py +152 -0
  319. helm/benchmark/scenarios/audio_language/common_voice_15_scenario.py +99 -0
  320. helm/benchmark/scenarios/audio_language/corebench_scenario.py +77 -0
  321. helm/benchmark/scenarios/audio_language/covost2_scenario.py +163 -0
  322. helm/benchmark/scenarios/audio_language/fleurs_fairness_scenario.py +83 -0
  323. helm/benchmark/scenarios/audio_language/fleurs_scenario.py +312 -0
  324. helm/benchmark/scenarios/audio_language/iemocap_audio_scenario.py +83 -0
  325. helm/benchmark/scenarios/audio_language/librispeech_fairness_scenario.py +96 -0
  326. helm/benchmark/scenarios/audio_language/librispeech_scenario.py +80 -0
  327. helm/benchmark/scenarios/audio_language/meld_audio_scenario.py +113 -0
  328. helm/benchmark/scenarios/audio_language/multilingual_librispeech_scenario.py +80 -0
  329. helm/benchmark/scenarios/audio_language/mustard_scenario.py +142 -0
  330. helm/benchmark/scenarios/audio_language/mutox_scenario.py +254 -0
  331. helm/benchmark/scenarios/audio_language/parade_scenario.py +97 -0
  332. helm/benchmark/scenarios/audio_language/speech_robust_bench_scenario.py +124 -0
  333. helm/benchmark/scenarios/audio_language/ultra_suite_asr_classification_scenario.py +74 -0
  334. helm/benchmark/scenarios/audio_language/ultra_suite_asr_transcription_scenario.py +70 -0
  335. helm/benchmark/scenarios/audio_language/ultra_suite_classification_scenario.py +79 -0
  336. helm/benchmark/scenarios/audio_language/ultra_suite_disorder_breakdown_scenario.py +78 -0
  337. helm/benchmark/scenarios/audio_language/ultra_suite_disorder_symptoms_scenario.py +78 -0
  338. helm/benchmark/scenarios/audio_language/vocal_sound_scenario.py +83 -0
  339. helm/benchmark/scenarios/audio_language/voice_jailbreak_attacks_scenario.py +87 -0
  340. helm/benchmark/scenarios/audio_language/voxceleb2_scenario.py +105 -0
  341. helm/benchmark/scenarios/autobencher_capabilities_scenario.py +68 -0
  342. helm/benchmark/scenarios/autobencher_safety_scenario.py +51 -0
  343. helm/benchmark/scenarios/babi_qa_scenario.py +16 -1
  344. helm/benchmark/scenarios/banking77_scenario.py +77 -0
  345. helm/benchmark/scenarios/bbq_scenario.py +17 -2
  346. helm/benchmark/scenarios/best_chatgpt_prompts.yaml +473 -0
  347. helm/benchmark/scenarios/big_bench_scenario.py +11 -1
  348. helm/benchmark/scenarios/bigcodebench_scenario.py +58 -0
  349. helm/benchmark/scenarios/bird_sql_scenario.py +112 -0
  350. helm/benchmark/scenarios/bird_sql_scenario_helper.py +118 -0
  351. helm/benchmark/scenarios/blimp_scenario.py +1 -1
  352. helm/benchmark/scenarios/bluex_scenario.py +70 -0
  353. helm/benchmark/scenarios/bold_scenario.py +18 -3
  354. helm/benchmark/scenarios/boolq_scenario.py +21 -1
  355. helm/benchmark/scenarios/call_center_scenario.py +84 -0
  356. helm/benchmark/scenarios/casehold_scenario.py +79 -0
  357. helm/benchmark/scenarios/chw_care_plan_scenario.py +129 -0
  358. helm/benchmark/scenarios/ci_mcqa_scenario.py +80 -0
  359. helm/benchmark/scenarios/civil_comments_scenario.py +14 -1
  360. helm/benchmark/scenarios/clear_scenario.py +180 -0
  361. helm/benchmark/scenarios/cleva_scenario.py +482 -3
  362. helm/benchmark/scenarios/code_scenario.py +46 -4
  363. helm/benchmark/scenarios/codeinsights_code_efficiency_scenario.py +197 -0
  364. helm/benchmark/scenarios/codeinsights_correct_code_scenario.py +78 -0
  365. helm/benchmark/scenarios/codeinsights_edge_case_scenario.py +192 -0
  366. helm/benchmark/scenarios/codeinsights_student_coding_scenario.py +162 -0
  367. helm/benchmark/scenarios/codeinsights_student_mistake_scenario.py +188 -0
  368. helm/benchmark/scenarios/commonsense_scenario.py +33 -1
  369. helm/benchmark/scenarios/compositional_instructions.yaml +70 -0
  370. helm/benchmark/scenarios/conv_fin_qa_calc_scenario.py +118 -0
  371. helm/benchmark/scenarios/copyright_scenario.py +35 -1
  372. helm/benchmark/scenarios/covid_dialog_scenario.py +10 -1
  373. helm/benchmark/scenarios/cti_to_mitre_scenario.py +261 -0
  374. helm/benchmark/scenarios/custom_mcqa_scenario.py +1 -1
  375. helm/benchmark/scenarios/czech_bank_qa_scenario.py +148 -0
  376. helm/benchmark/scenarios/decodingtrust_adv_demonstration_scenario.py +190 -0
  377. helm/benchmark/scenarios/decodingtrust_adv_robustness_scenario.py +143 -0
  378. helm/benchmark/scenarios/decodingtrust_fairness_scenario.py +98 -0
  379. helm/benchmark/scenarios/decodingtrust_machine_ethics_scenario.py +344 -0
  380. helm/benchmark/scenarios/decodingtrust_ood_robustness_scenario.py +217 -0
  381. helm/benchmark/scenarios/decodingtrust_privacy_scenario.py +571 -0
  382. helm/benchmark/scenarios/decodingtrust_stereotype_bias_scenario.py +80 -0
  383. helm/benchmark/scenarios/decodingtrust_toxicity_prompts_scenario.py +90 -0
  384. helm/benchmark/scenarios/dialogue_scenarios.py +13 -3
  385. helm/benchmark/scenarios/dischargeme_scenario.py +196 -0
  386. helm/benchmark/scenarios/disinformation_scenario.py +32 -1
  387. helm/benchmark/scenarios/dyck_language_scenario.py +25 -1
  388. helm/benchmark/scenarios/echr_judgment_classification_scenario.py +113 -0
  389. helm/benchmark/scenarios/ehr_sql_scenario.py +137 -0
  390. helm/benchmark/scenarios/ehrshot_scenario.py +1541 -0
  391. helm/benchmark/scenarios/enem_challenge_scenario.py +77 -0
  392. helm/benchmark/scenarios/entity_data_imputation_scenario.py +33 -3
  393. helm/benchmark/scenarios/entity_matching_scenario.py +26 -2
  394. helm/benchmark/scenarios/ewok_scenario.py +116 -0
  395. helm/benchmark/scenarios/exams_multilingual_scenario.py +115 -0
  396. helm/benchmark/scenarios/fin_qa_scenario.py +139 -0
  397. helm/benchmark/scenarios/financebench_scenario.py +74 -0
  398. helm/benchmark/scenarios/financial_phrasebank_scenario.py +115 -0
  399. helm/benchmark/scenarios/gold_commodity_news_scenario.py +145 -0
  400. helm/benchmark/scenarios/gpqa_scenario.py +98 -0
  401. helm/benchmark/scenarios/grammar.py +2 -2
  402. helm/benchmark/scenarios/grammar_scenario.py +21 -2
  403. helm/benchmark/scenarios/gsm_scenario.py +31 -1
  404. helm/benchmark/scenarios/harm_bench_gcg_transfer_scenario.py +61 -0
  405. helm/benchmark/scenarios/harm_bench_scenario.py +70 -0
  406. helm/benchmark/scenarios/headqa_scenario.py +158 -0
  407. helm/benchmark/scenarios/healthqa_br_scenario.py +80 -0
  408. helm/benchmark/scenarios/helpdesk_call_summarization_scenario.py +50 -0
  409. helm/benchmark/scenarios/ice_scenario.py +28 -4
  410. helm/benchmark/scenarios/ifeval_scenario.py +71 -0
  411. helm/benchmark/scenarios/image_generation/__init__.py +0 -0
  412. helm/benchmark/scenarios/image_generation/common_syntactic_processes_scenario.py +105 -0
  413. helm/benchmark/scenarios/image_generation/cub200_scenario.py +95 -0
  414. helm/benchmark/scenarios/image_generation/daily_dalle_scenario.py +124 -0
  415. helm/benchmark/scenarios/image_generation/demographic_stereotypes_scenario.py +82 -0
  416. helm/benchmark/scenarios/image_generation/detection_scenario.py +83 -0
  417. helm/benchmark/scenarios/image_generation/draw_bench_scenario.py +74 -0
  418. helm/benchmark/scenarios/image_generation/i2p_scenario.py +57 -0
  419. helm/benchmark/scenarios/image_generation/landing_page_scenario.py +46 -0
  420. helm/benchmark/scenarios/image_generation/logos_scenario.py +223 -0
  421. helm/benchmark/scenarios/image_generation/magazine_cover_scenario.py +91 -0
  422. helm/benchmark/scenarios/image_generation/mental_disorders_scenario.py +46 -0
  423. helm/benchmark/scenarios/image_generation/mscoco_scenario.py +91 -0
  424. helm/benchmark/scenarios/image_generation/paint_skills_scenario.py +72 -0
  425. helm/benchmark/scenarios/image_generation/parti_prompts_scenario.py +94 -0
  426. helm/benchmark/scenarios/image_generation/radiology_scenario.py +42 -0
  427. helm/benchmark/scenarios/image_generation/relational_understanding_scenario.py +52 -0
  428. helm/benchmark/scenarios/image_generation/time_most_significant_historical_figures_scenario.py +124 -0
  429. helm/benchmark/scenarios/image_generation/winoground_scenario.py +62 -0
  430. helm/benchmark/scenarios/imdb_ptbr_scenario.py +60 -0
  431. helm/benchmark/scenarios/imdb_scenario.py +26 -3
  432. helm/benchmark/scenarios/infinite_bench_en_mc_scenario.py +111 -0
  433. helm/benchmark/scenarios/infinite_bench_en_qa_scenario.py +85 -0
  434. helm/benchmark/scenarios/infinite_bench_en_sum_scenario.py +98 -0
  435. helm/benchmark/scenarios/interactive_qa_mmlu_scenario.py +2 -2
  436. helm/benchmark/scenarios/koala_scenario.py +21 -1
  437. helm/benchmark/scenarios/kpi_edgar_scenario.py +172 -0
  438. helm/benchmark/scenarios/legal_contract_summarization_scenario.py +149 -0
  439. helm/benchmark/scenarios/legal_opinion_sentiment_classification_scenario.py +77 -0
  440. helm/benchmark/scenarios/legal_summarization_scenario.py +61 -1
  441. helm/benchmark/scenarios/legal_support_scenario.py +24 -1
  442. helm/benchmark/scenarios/legalbench_scenario.py +45 -3
  443. helm/benchmark/scenarios/lex_glue_scenario.py +23 -2
  444. helm/benchmark/scenarios/lextreme_scenario.py +22 -1
  445. helm/benchmark/scenarios/live_qa_scenario.py +94 -0
  446. helm/benchmark/scenarios/lm_entry_scenario.py +185 -0
  447. helm/benchmark/scenarios/lmkt_scenarios.py +288 -0
  448. helm/benchmark/scenarios/lsat_qa_scenario.py +15 -1
  449. helm/benchmark/scenarios/madinah_qa_scenario.py +73 -0
  450. helm/benchmark/scenarios/math_scenario.py +81 -22
  451. helm/benchmark/scenarios/mbzuai_human_translated_arabic_mmlu.py +68 -0
  452. helm/benchmark/scenarios/me_q_sum_scenario.py +10 -1
  453. helm/benchmark/scenarios/med_dialog_scenario.py +56 -22
  454. helm/benchmark/scenarios/med_mcqa_scenario.py +24 -1
  455. helm/benchmark/scenarios/med_paragraph_simplification_scenario.py +10 -1
  456. helm/benchmark/scenarios/med_qa_scenario.py +30 -1
  457. helm/benchmark/scenarios/medalign_scenario.py +117 -0
  458. helm/benchmark/scenarios/medalign_scenario_helper.py +326 -0
  459. helm/benchmark/scenarios/medbullets_scenario.py +167 -0
  460. helm/benchmark/scenarios/medcalc_bench_scenario.py +149 -0
  461. helm/benchmark/scenarios/medec_scenario.py +148 -0
  462. helm/benchmark/scenarios/medhallu_scenario.py +95 -0
  463. helm/benchmark/scenarios/medhelm/__init__.py +0 -0
  464. helm/benchmark/scenarios/medhelm/judges.yaml +14 -0
  465. helm/benchmark/scenarios/medhelm_configurable_scenario.py +101 -0
  466. helm/benchmark/scenarios/medi_qa_scenario.py +134 -0
  467. helm/benchmark/scenarios/medication_qa_scenario.py +96 -0
  468. helm/benchmark/scenarios/melt_ir_scenario.py +171 -0
  469. helm/benchmark/scenarios/melt_knowledge_scenario.py +246 -0
  470. helm/benchmark/scenarios/melt_lm_scenarios.py +252 -0
  471. helm/benchmark/scenarios/melt_scenarios.py +793 -0
  472. helm/benchmark/scenarios/melt_srn_scenario.py +342 -0
  473. helm/benchmark/scenarios/melt_synthetic_reasoning_scenario.py +222 -0
  474. helm/benchmark/scenarios/melt_translation_scenario.py +152 -0
  475. helm/benchmark/scenarios/mental_health_scenario.py +146 -0
  476. helm/benchmark/scenarios/mimic_bhc_scenario.py +127 -0
  477. helm/benchmark/scenarios/mimic_rrs_scenario.py +121 -0
  478. helm/benchmark/scenarios/mimiciv_billing_code_scenario.py +99 -0
  479. helm/benchmark/scenarios/mmlu_clinical_afr_scenario.py +74 -0
  480. helm/benchmark/scenarios/mmlu_pro_scenario.py +113 -0
  481. helm/benchmark/scenarios/mmlu_scenario.py +32 -1
  482. helm/benchmark/scenarios/mmmlu_scenario.py +85 -0
  483. helm/benchmark/scenarios/msmarco_scenario.py +31 -1
  484. helm/benchmark/scenarios/mtsamples_procedures_scenario.py +166 -0
  485. helm/benchmark/scenarios/mtsamples_replicate_scenario.py +164 -0
  486. helm/benchmark/scenarios/n2c2_ct_matching_scenario.py +297 -0
  487. helm/benchmark/scenarios/narrativeqa_scenario.py +20 -1
  488. helm/benchmark/scenarios/natural_qa_scenario.py +33 -1
  489. helm/benchmark/scenarios/newsqa_scenario.py +1 -1
  490. helm/benchmark/scenarios/oab_exams_scenario.py +57 -0
  491. helm/benchmark/scenarios/omni_math_scenario.py +71 -0
  492. helm/benchmark/scenarios/open_assistant_scenario.py +33 -2
  493. helm/benchmark/scenarios/openai_mrcr_scenario.py +94 -0
  494. helm/benchmark/scenarios/opinions_qa_scenario.py +1 -5
  495. helm/benchmark/scenarios/pubmed_qa_scenario.py +81 -43
  496. helm/benchmark/scenarios/quac_scenario.py +24 -1
  497. helm/benchmark/scenarios/race_based_med_scenario.py +175 -0
  498. helm/benchmark/scenarios/raft_scenario.py +33 -3
  499. helm/benchmark/scenarios/real_toxicity_prompts_scenario.py +14 -1
  500. helm/benchmark/scenarios/ruler_qa_scenario_helper.py +171 -0
  501. helm/benchmark/scenarios/ruler_qa_scenarios.py +128 -0
  502. helm/benchmark/scenarios/scenario.py +44 -1
  503. helm/benchmark/scenarios/seahelm_scenario.py +2295 -0
  504. helm/benchmark/scenarios/self_instruct_scenario.py +29 -1
  505. helm/benchmark/scenarios/shc_bmt_scenario.py +97 -0
  506. helm/benchmark/scenarios/shc_cdi_scenario.py +95 -0
  507. helm/benchmark/scenarios/shc_conf_scenario.py +99 -0
  508. helm/benchmark/scenarios/shc_ent_scenario.py +98 -0
  509. helm/benchmark/scenarios/shc_gip_scenario.py +94 -0
  510. helm/benchmark/scenarios/shc_privacy_scenario.py +100 -0
  511. helm/benchmark/scenarios/shc_proxy_scenario.py +98 -0
  512. helm/benchmark/scenarios/shc_ptbm_scenario.py +104 -0
  513. helm/benchmark/scenarios/shc_sei_scenario.py +94 -0
  514. helm/benchmark/scenarios/shc_sequoia_scenario.py +98 -0
  515. helm/benchmark/scenarios/simple_safety_tests_scenario.py +44 -0
  516. helm/benchmark/scenarios/simple_scenarios.py +122 -1
  517. helm/benchmark/scenarios/situation_prompts.yaml +49 -0
  518. helm/benchmark/scenarios/spider_scenario.py +109 -0
  519. helm/benchmark/scenarios/starr_patient_instructions_scenario.py +119 -0
  520. helm/benchmark/scenarios/summarization_scenario.py +48 -1
  521. helm/benchmark/scenarios/sumosum_scenario.py +157 -0
  522. helm/benchmark/scenarios/synthetic_efficiency_scenario.py +22 -1
  523. helm/benchmark/scenarios/synthetic_reasoning_natural_scenario.py +24 -1
  524. helm/benchmark/scenarios/synthetic_reasoning_scenario.py +11 -1
  525. helm/benchmark/scenarios/test_air_bench_scenario.py +27 -0
  526. helm/benchmark/scenarios/test_alghafa_scenario.py +29 -0
  527. helm/benchmark/scenarios/test_alrage_scenario.py +23 -0
  528. helm/benchmark/scenarios/test_arabic_exams_scenario.py +21 -0
  529. helm/benchmark/scenarios/test_aratrust_scenario.py +21 -0
  530. helm/benchmark/scenarios/test_bigcodebench_scenario.py +26 -0
  531. helm/benchmark/scenarios/test_bluex_scenario.py +59 -0
  532. helm/benchmark/scenarios/test_commonsense_scenario.py +21 -0
  533. helm/benchmark/scenarios/test_czech_bank_qa_scenario.py +18 -0
  534. helm/benchmark/scenarios/test_enem_challenge_scenario.py +53 -0
  535. helm/benchmark/scenarios/test_ewok_scenario.py +29 -0
  536. helm/benchmark/scenarios/test_exams_multilingual_scenario.py +29 -0
  537. helm/benchmark/scenarios/test_financebench_scenario.py +26 -0
  538. helm/benchmark/scenarios/test_gold_commodity_news_scenario.py +18 -0
  539. helm/benchmark/scenarios/test_gpqa_scenario.py +44 -0
  540. helm/benchmark/scenarios/test_gsm_scenario.py +31 -0
  541. helm/benchmark/scenarios/test_healtha_br_scenario.py +57 -0
  542. helm/benchmark/scenarios/test_ifeval_scenario.py +36 -0
  543. helm/benchmark/scenarios/test_imdb_ptbr_scenario.py +27 -0
  544. helm/benchmark/scenarios/test_infinite_bench_en_qa_scenario.py +18 -0
  545. helm/benchmark/scenarios/test_infinite_bench_en_sum_scenario.py +31 -0
  546. helm/benchmark/scenarios/test_legalbench_scenario.py +30 -0
  547. helm/benchmark/scenarios/test_math_scenario.py +4 -3
  548. helm/benchmark/scenarios/test_med_qa_scenario.py +30 -0
  549. helm/benchmark/scenarios/test_mmlu_clinical_afr_scenario.py +21 -0
  550. helm/benchmark/scenarios/test_mmlu_pro_scenario.py +53 -0
  551. helm/benchmark/scenarios/test_mmlu_scenario.py +33 -0
  552. helm/benchmark/scenarios/test_narrativeqa_scenario.py +73 -0
  553. helm/benchmark/scenarios/test_oab_exams_scenario.py +51 -0
  554. helm/benchmark/scenarios/test_omni_math_scenario.py +27 -0
  555. helm/benchmark/scenarios/test_scenario.py +6 -3
  556. helm/benchmark/scenarios/test_simple_scenarios.py +50 -0
  557. helm/benchmark/scenarios/test_tweetsentbr_scenario.py +24 -0
  558. helm/benchmark/scenarios/test_wildbench_scenario.py +15 -0
  559. helm/benchmark/scenarios/test_winogrande_afr_scenario.py +19 -0
  560. helm/benchmark/scenarios/thai_exam_scenario.py +239 -0
  561. helm/benchmark/scenarios/the_pile_scenario.py +13 -1
  562. helm/benchmark/scenarios/truthful_qa_scenario.py +26 -2
  563. helm/benchmark/scenarios/tweetsentbr_scenario.py +66 -0
  564. helm/benchmark/scenarios/twitter_aae_scenario.py +20 -1
  565. helm/benchmark/scenarios/unitxt_scenario.py +62 -0
  566. helm/benchmark/scenarios/verifiability_judgment_scenario.py +4 -2
  567. helm/benchmark/scenarios/vicuna_scenario.py +22 -2
  568. helm/benchmark/scenarios/vision_language/a_okvqa_scenario.py +83 -0
  569. helm/benchmark/scenarios/vision_language/bingo_scenario.py +103 -0
  570. helm/benchmark/scenarios/vision_language/blink_scenario.py +140 -0
  571. helm/benchmark/scenarios/vision_language/crossmodal_3600_scenario.py +135 -0
  572. helm/benchmark/scenarios/vision_language/exams_v_scenario.py +104 -0
  573. helm/benchmark/scenarios/vision_language/fair_face_scenario.py +136 -0
  574. helm/benchmark/scenarios/vision_language/flickr30k_scenario.py +74 -0
  575. helm/benchmark/scenarios/vision_language/gqa_scenario.py +91 -0
  576. helm/benchmark/scenarios/vision_language/hateful_memes_scenario.py +94 -0
  577. helm/benchmark/scenarios/vision_language/heim_human_eval_scenario.py +113 -0
  578. helm/benchmark/scenarios/vision_language/image2struct/__init__.py +0 -0
  579. helm/benchmark/scenarios/vision_language/image2struct/chart2csv_scenario.py +55 -0
  580. helm/benchmark/scenarios/vision_language/image2struct/image2struct_scenario.py +225 -0
  581. helm/benchmark/scenarios/vision_language/image2struct/latex_scenario.py +21 -0
  582. helm/benchmark/scenarios/vision_language/image2struct/musicsheet_scenario.py +16 -0
  583. helm/benchmark/scenarios/vision_language/image2struct/utils_latex.py +339 -0
  584. helm/benchmark/scenarios/vision_language/image2struct/webpage/__init__.py +0 -0
  585. helm/benchmark/scenarios/vision_language/image2struct/webpage/driver.py +84 -0
  586. helm/benchmark/scenarios/vision_language/image2struct/webpage/jekyll_server.py +182 -0
  587. helm/benchmark/scenarios/vision_language/image2struct/webpage/utils.py +31 -0
  588. helm/benchmark/scenarios/vision_language/image2struct/webpage_scenario.py +256 -0
  589. helm/benchmark/scenarios/vision_language/math_vista_scenario.py +117 -0
  590. helm/benchmark/scenarios/vision_language/mementos_scenario.py +124 -0
  591. helm/benchmark/scenarios/vision_language/mm_safety_bench_scenario.py +103 -0
  592. helm/benchmark/scenarios/vision_language/mm_star_scenario.py +95 -0
  593. helm/benchmark/scenarios/vision_language/mme_scenario.py +148 -0
  594. helm/benchmark/scenarios/vision_language/mmmu_scenario.py +187 -0
  595. helm/benchmark/scenarios/vision_language/mscoco_captioning_scenario.py +92 -0
  596. helm/benchmark/scenarios/vision_language/mscoco_categorization_scenario.py +117 -0
  597. helm/benchmark/scenarios/vision_language/msr_vtt_scenario.py +75 -0
  598. helm/benchmark/scenarios/vision_language/multipanelvqa_scenario.py +169 -0
  599. helm/benchmark/scenarios/vision_language/originality_scenario.py +35 -0
  600. helm/benchmark/scenarios/vision_language/pairs_scenario.py +247 -0
  601. helm/benchmark/scenarios/vision_language/pope_scenario.py +105 -0
  602. helm/benchmark/scenarios/vision_language/real_world_qa_scenario.py +57 -0
  603. helm/benchmark/scenarios/vision_language/seed_bench_scenario.py +131 -0
  604. helm/benchmark/scenarios/vision_language/unicorn_scenario.py +108 -0
  605. helm/benchmark/scenarios/vision_language/vibe_eval_scenario.py +98 -0
  606. helm/benchmark/scenarios/vision_language/viz_wiz_scenario.py +4 -5
  607. helm/benchmark/scenarios/vision_language/vqa_rad_scenario.py +88 -0
  608. helm/benchmark/scenarios/vision_language/vqa_scenario.py +8 -4
  609. helm/benchmark/scenarios/wikifact_scenario.py +31 -1
  610. helm/benchmark/scenarios/wikitext_103_scenario.py +1 -1
  611. helm/benchmark/scenarios/wildbench_scenario.py +101 -0
  612. helm/benchmark/scenarios/winogrande_afr_scenario.py +78 -0
  613. helm/benchmark/scenarios/wmt_14_scenario.py +33 -2
  614. helm/benchmark/scenarios/xstest_scenario.py +35 -0
  615. helm/benchmark/server.py +32 -2
  616. helm/benchmark/slurm_jobs.py +1 -2
  617. helm/benchmark/slurm_runner.py +78 -50
  618. helm/benchmark/static/schema_air_bench.yaml +3149 -0
  619. helm/benchmark/static/schema_arabic.yaml +271 -0
  620. helm/benchmark/static/schema_audio.yaml +763 -0
  621. helm/benchmark/static/schema_autobencher.yaml +150 -0
  622. helm/benchmark/static/schema_call_center.yaml +269 -0
  623. helm/benchmark/static/schema_capabilities.yaml +254 -0
  624. helm/benchmark/static/schema_classic.yaml +259 -1140
  625. helm/benchmark/static/schema_cleva.yaml +768 -0
  626. helm/benchmark/static/schema_czech_bank.yaml +148 -0
  627. helm/benchmark/static/schema_decodingtrust.yaml +444 -0
  628. helm/benchmark/static/schema_enem_challenge.yaml +146 -0
  629. helm/benchmark/static/schema_enterprise.yaml +319 -0
  630. helm/benchmark/static/schema_ewok.yaml +367 -0
  631. helm/benchmark/static/schema_finance.yaml +191 -0
  632. helm/benchmark/static/schema_heim.yaml +1389 -0
  633. helm/benchmark/static/schema_image2struct.yaml +588 -0
  634. helm/benchmark/static/schema_instruction_following.yaml +161 -0
  635. helm/benchmark/static/schema_legal.yaml +566 -0
  636. helm/benchmark/static/schema_lite.yaml +3 -286
  637. helm/benchmark/static/schema_long_context.yaml +282 -0
  638. helm/benchmark/static/schema_medhelm.yaml +1176 -0
  639. helm/benchmark/static/schema_melt.yaml +1257 -0
  640. helm/benchmark/static/schema_mmlu.yaml +1449 -0
  641. helm/benchmark/static/schema_mmlu_winogrande_afr.yaml +1045 -0
  642. helm/benchmark/static/schema_safety.yaml +283 -0
  643. helm/benchmark/static/schema_seahelm.yaml +723 -0
  644. helm/benchmark/static/schema_slp.yaml +219 -0
  645. helm/benchmark/static/schema_slphelm.yaml +162 -0
  646. helm/benchmark/static/schema_social_audio.yaml +224 -0
  647. helm/benchmark/static/schema_sql.yaml +171 -0
  648. helm/benchmark/static/schema_thai.yaml +244 -0
  649. helm/benchmark/static/schema_torr.yaml +474 -0
  650. helm/benchmark/static/schema_tweetsentbr.yaml +146 -0
  651. helm/benchmark/static/schema_unitxt.yaml +370 -0
  652. helm/benchmark/static/schema_vhelm.yaml +933 -0
  653. helm/benchmark/static/schema_vhelm_lite.yaml +109 -0
  654. helm/benchmark/static/schema_video.yaml +219 -0
  655. helm/benchmark/static_build/assets/air-overview-DpBbyagA.png +0 -0
  656. helm/benchmark/static_build/assets/audio-table-Dn5NMMeJ.png +0 -0
  657. helm/benchmark/static_build/assets/heim-logo-BJtQlEbV.png +0 -0
  658. helm/benchmark/static_build/assets/helm-safety-COfndXuS.png +0 -0
  659. helm/benchmark/static_build/assets/helmhero-D9TvmJsp.png +0 -0
  660. helm/benchmark/static_build/assets/index-oIeiQW2g.css +1 -0
  661. helm/benchmark/static_build/assets/index-qOFpOyHb.js +10 -0
  662. helm/benchmark/static_build/assets/medhelm-overview-CND0EIsy.png +0 -0
  663. helm/benchmark/static_build/assets/medhelm-v1-overview-Cu2tphBB.png +0 -0
  664. helm/benchmark/static_build/assets/overview-BwypNWnk.png +0 -0
  665. helm/benchmark/static_build/assets/process-flow-DWDJC733.png +0 -0
  666. helm/benchmark/static_build/assets/react-BteFIppM.js +85 -0
  667. helm/benchmark/static_build/assets/recharts-DxuQtTOs.js +97 -0
  668. helm/benchmark/static_build/assets/tremor-DR4fE7ko.js +10 -0
  669. helm/benchmark/static_build/assets/vhelm-aspects-NiDQofvP.png +0 -0
  670. helm/benchmark/static_build/assets/vhelm-framework-NxJE4fdA.png +0 -0
  671. helm/benchmark/static_build/assets/vhelm-model-ypCL5Yvq.png +0 -0
  672. helm/benchmark/static_build/config.js +4 -0
  673. helm/benchmark/static_build/index.html +19 -0
  674. helm/benchmark/test_data_preprocessor.py +3 -3
  675. helm/benchmark/test_run_expander.py +1 -1
  676. helm/benchmark/window_services/default_window_service.py +3 -45
  677. helm/benchmark/window_services/encoder_decoder_window_service.py +4 -15
  678. helm/benchmark/window_services/ice_window_service.py +1 -35
  679. helm/benchmark/window_services/image_generation/__init__.py +0 -0
  680. helm/benchmark/window_services/image_generation/clip_window_service.py +13 -0
  681. helm/benchmark/window_services/image_generation/lexica_search_window_service.py +9 -0
  682. helm/benchmark/window_services/image_generation/openai_dalle_window_service.py +9 -0
  683. helm/benchmark/window_services/image_generation/test_clip_window_service.py +29 -0
  684. helm/benchmark/window_services/image_generation/test_openai_dalle_window_service.py +30 -0
  685. helm/benchmark/window_services/local_window_service.py +22 -5
  686. helm/benchmark/window_services/test_anthropic_window_service.py +5 -4
  687. helm/benchmark/window_services/test_bloom_window_service.py +5 -4
  688. helm/benchmark/window_services/test_flan_t5_window_service.py +2 -1
  689. helm/benchmark/window_services/test_gpt2_window_service.py +9 -4
  690. helm/benchmark/window_services/test_gpt4_window_service.py +10 -4
  691. helm/benchmark/window_services/test_gptj_window_service.py +11 -5
  692. helm/benchmark/window_services/test_gptneox_window_service.py +6 -5
  693. helm/benchmark/window_services/test_openai_window_service.py +18 -12
  694. helm/benchmark/window_services/test_opt_window_service.py +6 -5
  695. helm/benchmark/window_services/test_palmyra_window_service.py +5 -4
  696. helm/benchmark/window_services/test_t0pp_window_service.py +5 -4
  697. helm/benchmark/window_services/test_t511b_window_service.py +5 -4
  698. helm/benchmark/window_services/test_ul2_window_service.py +5 -4
  699. helm/benchmark/window_services/test_utils.py +6 -6
  700. helm/benchmark/window_services/test_yalm_window_service.py +5 -4
  701. helm/benchmark/window_services/tokenizer_service.py +7 -13
  702. helm/benchmark/window_services/window_service.py +42 -0
  703. helm/benchmark/window_services/window_service_factory.py +4 -1
  704. helm/benchmark/window_services/yalm_window_service.py +1 -28
  705. helm/clients/__init__.py +0 -0
  706. helm/{proxy/clients → clients}/ai21_client.py +78 -12
  707. helm/clients/aleph_alpha_client.py +114 -0
  708. helm/{proxy/clients → clients}/anthropic_client.py +304 -21
  709. helm/clients/audio_language/__init__.py +0 -0
  710. helm/clients/audio_language/diva_llama_client.py +122 -0
  711. helm/clients/audio_language/llama_omni/arguments.py +61 -0
  712. helm/clients/audio_language/llama_omni/constants.py +9 -0
  713. helm/clients/audio_language/llama_omni/conversation.py +213 -0
  714. helm/clients/audio_language/llama_omni/model/__init__.py +0 -0
  715. helm/clients/audio_language/llama_omni/model/builder.py +88 -0
  716. helm/clients/audio_language/llama_omni/model/language_model/omni_speech2s_llama.py +190 -0
  717. helm/clients/audio_language/llama_omni/model/language_model/omni_speech_llama.py +118 -0
  718. helm/clients/audio_language/llama_omni/model/omni_speech_arch.py +249 -0
  719. helm/clients/audio_language/llama_omni/model/speech_encoder/builder.py +9 -0
  720. helm/clients/audio_language/llama_omni/model/speech_encoder/speech_encoder.py +27 -0
  721. helm/clients/audio_language/llama_omni/model/speech_generator/builder.py +9 -0
  722. helm/clients/audio_language/llama_omni/model/speech_generator/generation.py +622 -0
  723. helm/clients/audio_language/llama_omni/model/speech_generator/speech_generator.py +104 -0
  724. helm/clients/audio_language/llama_omni/model/speech_projector/builder.py +9 -0
  725. helm/clients/audio_language/llama_omni/model/speech_projector/speech_projector.py +27 -0
  726. helm/clients/audio_language/llama_omni/preprocess.py +295 -0
  727. helm/clients/audio_language/llama_omni/utils.py +202 -0
  728. helm/clients/audio_language/llama_omni_client.py +199 -0
  729. helm/clients/audio_language/qwen2_5_omni_client.py +210 -0
  730. helm/clients/audio_language/qwen2_audiolm_client.py +191 -0
  731. helm/clients/audio_language/qwen_audiolm_client.py +153 -0
  732. helm/clients/audio_language/qwen_omni/configuration_qwen2_5_omni.py +519 -0
  733. helm/clients/audio_language/qwen_omni/modeling_qwen2_5_omni.py +4308 -0
  734. helm/clients/audio_language/qwen_omni/processing_qwen2_5_omni.py +270 -0
  735. helm/clients/audio_language/qwen_omni/qwen2_5_omni_utils/__init__.py +0 -0
  736. helm/clients/audio_language/qwen_omni/qwen2_5_omni_utils/v2_5/__init__.py +8 -0
  737. helm/clients/audio_language/qwen_omni/qwen2_5_omni_utils/v2_5/audio_process.py +56 -0
  738. helm/clients/audio_language/qwen_omni/qwen2_5_omni_utils/v2_5/vision_process.py +380 -0
  739. helm/clients/audio_language/test.py +62 -0
  740. helm/{proxy/clients → clients}/auto_client.py +72 -31
  741. helm/clients/azure_openai_client.py +55 -0
  742. helm/clients/bedrock_client.py +381 -0
  743. helm/clients/bedrock_utils.py +105 -0
  744. helm/{proxy/clients → clients}/client.py +92 -17
  745. helm/clients/clip_score_client.py +49 -0
  746. helm/clients/clip_scorers/__init__.py +0 -0
  747. helm/clients/clip_scorers/base_clip_scorer.py +18 -0
  748. helm/clients/clip_scorers/clip_scorer.py +50 -0
  749. helm/clients/clip_scorers/multilingual_clip_scorer.py +50 -0
  750. helm/{proxy/clients → clients}/cohere_client.py +105 -14
  751. helm/clients/dspy_client.py +135 -0
  752. helm/clients/gcs_client.py +82 -0
  753. helm/{proxy/clients → clients}/google_client.py +8 -6
  754. helm/clients/google_translate_client.py +35 -0
  755. helm/clients/grok_client.py +36 -0
  756. helm/{proxy/clients → clients}/http_model_client.py +8 -8
  757. helm/{proxy/clients → clients}/huggingface_client.py +157 -86
  758. helm/clients/huggingface_pipeline_client.py +138 -0
  759. helm/clients/ibm_client.py +269 -0
  760. helm/clients/image_generation/__init__.py +0 -0
  761. helm/clients/image_generation/adobe_vision_client.py +80 -0
  762. helm/clients/image_generation/aleph_alpha_image_generation_client.py +100 -0
  763. helm/clients/image_generation/cogview2/__init__.py +0 -0
  764. helm/clients/image_generation/cogview2/coglm_strategy.py +96 -0
  765. helm/clients/image_generation/cogview2/coglm_utils.py +82 -0
  766. helm/clients/image_generation/cogview2/sr_pipeline/__init__.py +15 -0
  767. helm/clients/image_generation/cogview2/sr_pipeline/direct_sr.py +99 -0
  768. helm/clients/image_generation/cogview2/sr_pipeline/dsr_model.py +254 -0
  769. helm/clients/image_generation/cogview2/sr_pipeline/dsr_sampling.py +190 -0
  770. helm/clients/image_generation/cogview2/sr_pipeline/iterative_sr.py +144 -0
  771. helm/clients/image_generation/cogview2/sr_pipeline/itersr_model.py +269 -0
  772. helm/clients/image_generation/cogview2/sr_pipeline/itersr_sampling.py +120 -0
  773. helm/clients/image_generation/cogview2/sr_pipeline/sr_group.py +42 -0
  774. helm/clients/image_generation/cogview2_client.py +192 -0
  775. helm/clients/image_generation/dalle2_client.py +194 -0
  776. helm/clients/image_generation/dalle3_client.py +108 -0
  777. helm/clients/image_generation/dalle_mini/__init__.py +3 -0
  778. helm/clients/image_generation/dalle_mini/data.py +442 -0
  779. helm/clients/image_generation/dalle_mini/model/__init__.py +5 -0
  780. helm/clients/image_generation/dalle_mini/model/configuration.py +175 -0
  781. helm/clients/image_generation/dalle_mini/model/modeling.py +1834 -0
  782. helm/clients/image_generation/dalle_mini/model/partitions.py +84 -0
  783. helm/clients/image_generation/dalle_mini/model/processor.py +63 -0
  784. helm/clients/image_generation/dalle_mini/model/text.py +251 -0
  785. helm/clients/image_generation/dalle_mini/model/tokenizer.py +9 -0
  786. helm/clients/image_generation/dalle_mini/model/utils.py +29 -0
  787. helm/clients/image_generation/dalle_mini/vqgan_jax/__init__.py +1 -0
  788. helm/clients/image_generation/dalle_mini/vqgan_jax/configuration_vqgan.py +40 -0
  789. helm/clients/image_generation/dalle_mini/vqgan_jax/convert_pt_model_to_jax.py +107 -0
  790. helm/clients/image_generation/dalle_mini/vqgan_jax/modeling_flax_vqgan.py +610 -0
  791. helm/clients/image_generation/dalle_mini_client.py +191 -0
  792. helm/clients/image_generation/deep_floyd_client.py +80 -0
  793. helm/clients/image_generation/huggingface_diffusers_client.py +250 -0
  794. helm/clients/image_generation/image_generation_client_utils.py +9 -0
  795. helm/clients/image_generation/lexica_client.py +88 -0
  796. helm/clients/image_generation/mindalle/__init__.py +0 -0
  797. helm/clients/image_generation/mindalle/models/__init__.py +216 -0
  798. helm/clients/image_generation/mindalle/models/stage1/__init__.py +0 -0
  799. helm/clients/image_generation/mindalle/models/stage1/layers.py +312 -0
  800. helm/clients/image_generation/mindalle/models/stage1/vqgan.py +103 -0
  801. helm/clients/image_generation/mindalle/models/stage2/__init__.py +0 -0
  802. helm/clients/image_generation/mindalle/models/stage2/layers.py +144 -0
  803. helm/clients/image_generation/mindalle/models/stage2/transformer.py +268 -0
  804. helm/clients/image_generation/mindalle/models/tokenizer.py +30 -0
  805. helm/clients/image_generation/mindalle/utils/__init__.py +3 -0
  806. helm/clients/image_generation/mindalle/utils/config.py +129 -0
  807. helm/clients/image_generation/mindalle/utils/sampling.py +149 -0
  808. helm/clients/image_generation/mindalle/utils/utils.py +89 -0
  809. helm/clients/image_generation/mindalle_client.py +116 -0
  810. helm/clients/image_generation/nudity_check_client.py +64 -0
  811. helm/clients/image_generation/together_image_generation_client.py +113 -0
  812. helm/{proxy/clients → clients}/lit_gpt_client.py +6 -6
  813. helm/{proxy/clients → clients}/megatron_client.py +7 -5
  814. helm/clients/mistral_client.py +180 -0
  815. helm/clients/moderation_api_client.py +111 -0
  816. helm/clients/nvidia_nim_client.py +32 -0
  817. helm/clients/open_lm_client.py +43 -0
  818. helm/clients/openai_client.py +604 -0
  819. helm/clients/openai_responses_client.py +200 -0
  820. helm/clients/openrouter_client.py +31 -0
  821. helm/{proxy/clients → clients}/palmyra_client.py +31 -14
  822. helm/{proxy/clients → clients}/perspective_api_client.py +18 -14
  823. helm/clients/reka_client.py +190 -0
  824. helm/clients/simple_client.py +64 -0
  825. helm/clients/stanfordhealthcare_azure_openai_client.py +58 -0
  826. helm/clients/stanfordhealthcare_claude_client.py +31 -0
  827. helm/clients/stanfordhealthcare_google_client.py +43 -0
  828. helm/clients/stanfordhealthcare_http_model_client.py +95 -0
  829. helm/clients/stanfordhealthcare_openai_client.py +62 -0
  830. helm/clients/stanfordhealthcare_shc_openai_client.py +42 -0
  831. helm/{proxy/clients → clients}/test_auto_client.py +13 -15
  832. helm/clients/test_client.py +98 -0
  833. helm/{proxy/clients → clients}/test_huggingface_client.py +31 -16
  834. helm/clients/test_openrouter_client.py +69 -0
  835. helm/clients/test_simple_client.py +19 -0
  836. helm/clients/test_together_client.py +184 -0
  837. helm/clients/together_client.py +599 -0
  838. helm/clients/upstage_client.py +23 -0
  839. helm/clients/vertexai_client.py +488 -0
  840. helm/clients/vision_language/__init__.py +0 -0
  841. helm/clients/vision_language/huggingface_vision2seq_client.py +148 -0
  842. helm/clients/vision_language/huggingface_vlm_client.py +114 -0
  843. helm/{proxy/clients → clients}/vision_language/idefics_client.py +61 -51
  844. helm/clients/vision_language/open_flamingo/__init__.py +2 -0
  845. helm/clients/vision_language/open_flamingo/src/__init__.py +0 -0
  846. helm/clients/vision_language/open_flamingo/src/factory.py +147 -0
  847. helm/clients/vision_language/open_flamingo/src/flamingo.py +337 -0
  848. helm/clients/vision_language/open_flamingo/src/flamingo_lm.py +155 -0
  849. helm/clients/vision_language/open_flamingo/src/helpers.py +267 -0
  850. helm/clients/vision_language/open_flamingo/src/utils.py +47 -0
  851. helm/clients/vision_language/open_flamingo_client.py +155 -0
  852. helm/clients/vision_language/paligemma_client.py +147 -0
  853. helm/clients/vision_language/palmyra_vision_client.py +101 -0
  854. helm/clients/vision_language/qwen2_vlm_client.py +189 -0
  855. helm/clients/vision_language/qwen_vlm_client.py +174 -0
  856. helm/clients/vllm_client.py +80 -0
  857. helm/clients/vllm_granite_thinking_client.py +56 -0
  858. helm/clients/writer_client.py +105 -0
  859. helm/clients/yi_client.py +28 -0
  860. helm/common/audio_utils.py +111 -0
  861. helm/common/cache.py +23 -33
  862. helm/common/cache_backend_config.py +47 -0
  863. helm/common/clip_score_request.py +41 -0
  864. helm/common/context.py +80 -0
  865. helm/common/credentials_utils.py +5 -5
  866. helm/common/critique_request.py +10 -2
  867. helm/common/file_caches/__init__.py +0 -0
  868. helm/common/file_caches/file_cache.py +16 -0
  869. helm/common/file_caches/local_file_cache.py +61 -0
  870. helm/common/file_caches/test_local_file_cache.py +25 -0
  871. helm/common/file_upload_request.py +27 -0
  872. helm/common/general.py +10 -3
  873. helm/common/hierarchical_logger.py +124 -12
  874. helm/common/image_generation_parameters.py +25 -0
  875. helm/common/images_utils.py +60 -5
  876. helm/common/key_value_store.py +41 -10
  877. helm/common/local_context.py +140 -0
  878. helm/common/media_object.py +14 -1
  879. helm/common/moderations_api_request.py +71 -0
  880. helm/common/mongo_key_value_store.py +8 -7
  881. helm/common/multimodal_request_utils.py +57 -0
  882. helm/common/nudity_check_request.py +29 -0
  883. helm/common/object_spec.py +23 -8
  884. helm/common/optional_dependencies.py +1 -1
  885. helm/common/reeval_parameters.py +12 -0
  886. helm/common/remote_context.py +61 -0
  887. helm/common/request.py +45 -19
  888. helm/common/response_format.py +18 -0
  889. helm/common/test_cache.py +1 -48
  890. helm/common/test_general.py +10 -0
  891. helm/common/test_logging.py +94 -0
  892. helm/common/test_media_object.py +1 -1
  893. helm/common/tokenization_request.py +1 -10
  894. helm/config/model_deployments.yaml +4713 -1005
  895. helm/config/model_metadata.yaml +4045 -255
  896. helm/config/tokenizer_configs.yaml +1091 -50
  897. helm/proxy/accounts.py +31 -4
  898. helm/proxy/cli.py +6 -4
  899. helm/proxy/critique/mechanical_turk_critique_importer.py +3 -0
  900. helm/proxy/critique/mechanical_turk_utils.py +1 -1
  901. helm/proxy/critique/model_critique_client.py +40 -10
  902. helm/proxy/example_queries.py +33 -28
  903. helm/proxy/retry.py +5 -0
  904. helm/proxy/server.py +82 -18
  905. helm/proxy/services/remote_service.py +32 -7
  906. helm/proxy/services/server_service.py +71 -69
  907. helm/proxy/services/service.py +30 -6
  908. helm/proxy/services/test_remote_service.py +6 -5
  909. helm/proxy/services/test_service.py +1 -13
  910. helm/proxy/static/help.html +99 -0
  911. helm/proxy/static/index.css +61 -0
  912. helm/proxy/static/index.html +40 -0
  913. helm/proxy/static/index.js +462 -0
  914. helm/proxy/test_accounts.py +32 -0
  915. helm/proxy/test_retry.py +1 -1
  916. helm/proxy/token_counters/auto_token_counter.py +37 -37
  917. helm/proxy/token_counters/test_auto_token_counter.py +164 -0
  918. helm/proxy/token_counters/token_counter.py +3 -5
  919. helm/tokenizers/__init__.py +0 -0
  920. helm/tokenizers/ai21_tokenizer.py +52 -0
  921. helm/{proxy/tokenizers → tokenizers}/aleph_alpha_tokenizer.py +1 -1
  922. helm/{proxy/tokenizers → tokenizers}/auto_tokenizer.py +9 -12
  923. helm/{proxy/tokenizers → tokenizers}/caching_tokenizer.py +2 -30
  924. helm/tokenizers/cohere_tokenizer.py +50 -0
  925. helm/tokenizers/grok_tokenizer.py +55 -0
  926. helm/{proxy/tokenizers → tokenizers}/http_model_tokenizer.py +4 -4
  927. helm/{proxy/tokenizers → tokenizers}/huggingface_tokenizer.py +44 -41
  928. helm/{proxy/tokenizers → tokenizers}/lit_gpt_tokenizer.py +1 -1
  929. helm/tokenizers/simple_tokenizer.py +33 -0
  930. helm/tokenizers/test_ai21_tokenizer.py +48 -0
  931. helm/{proxy/tokenizers → tokenizers}/test_anthropic_tokenizer.py +6 -2
  932. helm/tokenizers/test_cohere_tokenizer.py +39 -0
  933. helm/tokenizers/test_grok_tokenizer.py +33 -0
  934. helm/{proxy/tokenizers → tokenizers}/test_huggingface_tokenizer.py +9 -2
  935. helm/tokenizers/test_simple_tokenizer.py +33 -0
  936. helm/{proxy/tokenizers → tokenizers}/test_yalm_tokenizer.py +1 -1
  937. helm/{proxy/tokenizers → tokenizers}/tiktoken_tokenizer.py +1 -1
  938. helm/{proxy/tokenizers → tokenizers}/tokenizer.py +3 -1
  939. helm/{proxy/tokenizers → tokenizers}/vertexai_tokenizer.py +1 -1
  940. helm/{proxy/tokenizers → tokenizers}/yalm_tokenizer.py +8 -6
  941. helm/tokenizers/yalm_tokenizer_data/__init__.py +0 -0
  942. helm/{proxy/tokenizers → tokenizers}/yalm_tokenizer_data/test_yalm_tokenizer.py +1 -1
  943. helm/tokenizers/yalm_tokenizer_data/voc_100b.sp +0 -0
  944. helm/{proxy/tokenizers → tokenizers}/yalm_tokenizer_data/yalm_tokenizer.py +1 -1
  945. crfm_helm-0.4.0.dist-info/METADATA +0 -264
  946. crfm_helm-0.4.0.dist-info/RECORD +0 -397
  947. helm/benchmark/data_overlap/data_overlap_spec.py +0 -86
  948. helm/benchmark/data_overlap/export_scenario_text.py +0 -119
  949. helm/benchmark/data_overlap/light_scenario.py +0 -60
  950. helm/benchmark/metrics/numeracy_metrics.py +0 -72
  951. helm/benchmark/metrics/test_numeracy_metrics.py +0 -95
  952. helm/benchmark/run_specs.py +0 -2762
  953. helm/benchmark/scenarios/numeracy_scenario.py +0 -784
  954. helm/benchmark/static/benchmarking.css +0 -156
  955. helm/benchmark/static/benchmarking.js +0 -1705
  956. helm/benchmark/static/config.js +0 -3
  957. helm/benchmark/static/images/helm-logo.png +0 -0
  958. helm/benchmark/static/images/language-model-helm.png +0 -0
  959. helm/benchmark/static/images/organizations/ai21.png +0 -0
  960. helm/benchmark/static/images/organizations/anthropic.png +0 -0
  961. helm/benchmark/static/images/organizations/bigscience.png +0 -0
  962. helm/benchmark/static/images/organizations/cohere.png +0 -0
  963. helm/benchmark/static/images/organizations/eleutherai.png +0 -0
  964. helm/benchmark/static/images/organizations/google.png +0 -0
  965. helm/benchmark/static/images/organizations/meta.png +0 -0
  966. helm/benchmark/static/images/organizations/microsoft.png +0 -0
  967. helm/benchmark/static/images/organizations/nvidia.png +0 -0
  968. helm/benchmark/static/images/organizations/openai.png +0 -0
  969. helm/benchmark/static/images/organizations/together.png +0 -0
  970. helm/benchmark/static/images/organizations/tsinghua-keg.png +0 -0
  971. helm/benchmark/static/images/organizations/yandex.png +0 -0
  972. helm/benchmark/static/images/scenarios-by-metrics.png +0 -0
  973. helm/benchmark/static/images/taxonomy-scenarios.png +0 -0
  974. helm/benchmark/static/index.html +0 -68
  975. helm/benchmark/static/json-urls.js +0 -69
  976. helm/benchmark/static/plot-captions.js +0 -27
  977. helm/benchmark/static/utils.js +0 -285
  978. helm/benchmark/test_model_deployment_definition.py +0 -92
  979. helm/benchmark/test_model_properties.py +0 -1570
  980. helm/benchmark/vlm_run_specs.py +0 -97
  981. helm/benchmark/window_services/ai21_window_service.py +0 -258
  982. helm/benchmark/window_services/cohere_window_service.py +0 -163
  983. helm/benchmark/window_services/flan_t5_window_service.py +0 -29
  984. helm/benchmark/window_services/gpt2_window_service.py +0 -32
  985. helm/benchmark/window_services/huggingface_window_service.py +0 -60
  986. helm/benchmark/window_services/t0pp_window_service.py +0 -35
  987. helm/benchmark/window_services/t511b_window_service.py +0 -30
  988. helm/benchmark/window_services/test_ai21_window_service.py +0 -163
  989. helm/benchmark/window_services/test_cohere_window_service.py +0 -74
  990. helm/benchmark/window_services/test_cohere_window_service_utils.py +0 -8328
  991. helm/benchmark/window_services/test_ice_window_service.py +0 -326
  992. helm/benchmark/window_services/test_mt_nlg_window_service.py +0 -48
  993. helm/benchmark/window_services/ul2_window_service.py +0 -30
  994. helm/benchmark/window_services/wider_ai21_window_service.py +0 -24
  995. helm/common/cache_utils.py +0 -14
  996. helm/proxy/clients/aleph_alpha_client.py +0 -95
  997. helm/proxy/clients/goose_ai_client.py +0 -99
  998. helm/proxy/clients/microsoft_client.py +0 -180
  999. helm/proxy/clients/openai_client.py +0 -206
  1000. helm/proxy/clients/simple_client.py +0 -60
  1001. helm/proxy/clients/test_client.py +0 -49
  1002. helm/proxy/clients/test_together_client.py +0 -97
  1003. helm/proxy/clients/together_client.py +0 -334
  1004. helm/proxy/clients/vertexai_client.py +0 -115
  1005. helm/proxy/token_counters/ai21_token_counter.py +0 -20
  1006. helm/proxy/token_counters/cohere_token_counter.py +0 -13
  1007. helm/proxy/token_counters/free_token_counter.py +0 -12
  1008. helm/proxy/token_counters/gooseai_token_counter.py +0 -24
  1009. helm/proxy/token_counters/openai_token_counter.py +0 -22
  1010. helm/proxy/token_counters/test_ai21_token_counter.py +0 -88
  1011. helm/proxy/token_counters/test_openai_token_counter.py +0 -81
  1012. helm/proxy/tokenizers/ai21_tokenizer.py +0 -60
  1013. helm/proxy/tokenizers/anthropic_tokenizer.py +0 -52
  1014. helm/proxy/tokenizers/cohere_tokenizer.py +0 -83
  1015. helm/proxy/tokenizers/ice_tokenizer.py +0 -30
  1016. helm/proxy/tokenizers/simple_tokenizer.py +0 -32
  1017. helm/proxy/tokenizers/test_ice_tokenizer.py +0 -57
  1018. {crfm_helm-0.4.0.dist-info → crfm_helm-0.5.10.dist-info}/entry_points.txt +0 -0
  1019. {crfm_helm-0.4.0.dist-info → crfm_helm-0.5.10.dist-info/licenses}/LICENSE +0 -0
  1020. {crfm_helm-0.4.0.dist-info → crfm_helm-0.5.10.dist-info}/top_level.txt +0 -0
  1021. /helm/benchmark/{data_overlap → annotation}/__init__.py +0 -0
  1022. /helm/{proxy/clients → benchmark/annotation/image2struct}/__init__.py +0 -0
  1023. /helm/{proxy/clients/vision_language → benchmark/metrics/ifeval}/__init__.py +0 -0
  1024. /helm/{proxy/tokenizers → benchmark/metrics/image_generation}/__init__.py +0 -0
  1025. /helm/{proxy/tokenizers/yalm_tokenizer_data → benchmark/metrics/image_generation/detectors}/__init__.py +0 -0
  1026. /helm/benchmark/{static/images/crfm-logo.png → static_build/assets/crfm-logo-Du4T1uWZ.png} +0 -0
  1027. /helm/benchmark/{static/images/helm-logo-simple.png → static_build/assets/helm-logo-simple-DzOhNN41.png} +0 -0
  1028. /helm/{proxy/clients → clients}/ai21_utils.py +0 -0
  1029. /helm/{proxy/clients → clients}/cohere_utils.py +0 -0
  1030. /helm/{proxy/clients → clients}/lit_gpt_generate.py +0 -0
  1031. /helm/{proxy/clients → clients}/toxicity_classifier_client.py +0 -0
  1032. /helm/{benchmark → proxy}/static/general.js +0 -0
  1033. /helm/{benchmark → proxy}/static/info-icon.png +0 -0
@@ -0,0 +1,1570 @@
"""Run spec functions for the MedHELM leaderboard.

Website: https://crfm.stanford.edu/helm/medhelm/
"""

import importlib.resources as pkg_resources

import os
from typing import Dict, Union, Optional

import yaml

from helm.benchmark.adaptation.adapter_spec import (
    ADAPT_MULTIPLE_CHOICE_JOINT,
)
from helm.benchmark.adaptation.common_adapter_specs import (
    get_generation_adapter_spec,
    get_multiple_choice_adapter_spec,
)
from helm.benchmark.annotation.annotator import AnnotatorSpec
from helm.benchmark.annotation.model_as_judge import AnnotatorModelInfo
from helm.benchmark.metrics.common_metric_specs import (
    get_basic_metric_specs,
    get_exact_match_metric_specs,
    get_open_ended_generation_metric_specs,
    get_summarization_metric_specs,
    get_generic_metric_specs,
)
from helm.benchmark.metrics.metric import MetricSpec
from helm.benchmark.run_spec import RunSpec, run_spec_function
from helm.benchmark.run_specs.medhelm.benchmark_config import get_benchmark_config_from_path
from helm.benchmark.scenarios.scenario import ScenarioSpec
from helm.common.gpu_utils import get_torch_device_name


def get_judges_config(jury_config_path: Optional[str]) -> dict:
    package = "helm.benchmark.scenarios.medhelm"
    default_config_path = str(pkg_resources.files(package).joinpath("judges.yaml"))

    if jury_config_path is None:
        # Use the default config bundled with the package
        jury_config_path = default_config_path

    assert os.path.exists(jury_config_path), (
        f"Judges config file not found: {jury_config_path}. "
        f"If you are providing a custom config, make sure it follows the format specified in "
        f"the default file: {default_config_path}"
    )

    with open(jury_config_path, "r") as f:
        config = yaml.safe_load(f)

    return config


def get_annotator_models_from_config(jury_config_path: Optional[str]) -> Dict[str, AnnotatorModelInfo]:
    config = get_judges_config(jury_config_path)
    annotator_models = {
        judge["name"]: AnnotatorModelInfo(
            model_name=judge["model"],
            model_deployment=judge["model_deployment"],
        )
        for judge in config["judges"]
    }
    return annotator_models

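# Editor's note: an illustrative sketch (not part of this file) of the YAML shape
# these two helpers appear to expect. The "judges", "name", "model", and
# "model_deployment" keys come from the dict accesses above; the judge entries
# themselves are hypothetical placeholders, not the bundled defaults.
#
#   judges:
#     - name: gpt_judge
#       model: openai/gpt-4o-2024-05-13
#       model_deployment: openai/gpt-4o-2024-05-13
#     - name: claude_judge
#       model: anthropic/claude-3-5-sonnet-20241022
#       model_deployment: anthropic/claude-3-5-sonnet-20241022
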
@run_spec_function("medhelm_configurable_benchmark")
def get_medhelm_configurable_benchmark_spec(config_path: str) -> RunSpec:
    benchmark_config = get_benchmark_config_from_path(config_path)
    scenario_spec = ScenarioSpec(
        class_name="helm.benchmark.scenarios.medhelm_configurable_scenario.MedHELMConfigurableScenario",
        args={"name": benchmark_config.name, "config_path": config_path},
    )

    adapter_spec = get_generation_adapter_spec(
        max_tokens=benchmark_config.max_tokens,
        max_train_instances=0,
        stop_sequences=[],
    )
    annotator_specs = benchmark_config.get_annotator_specs()
    metric_specs = benchmark_config.get_metric_specs()

    return RunSpec(
        name=benchmark_config.name,
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        annotators=annotator_specs,
        metric_specs=metric_specs,
        groups=[benchmark_config.name],
    )

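# Editor's note: a function registered with @run_spec_function is typically
# selected through a HELM run entry of the form "<name>:<arg>=<value>". A
# hypothetical invocation (the config filename is a placeholder, and flag
# spellings can vary across HELM versions):
#
#   helm-run --run-entries "medhelm_configurable_benchmark:config_path=benchmark_config.yaml" \
#       --suite my-suite --max-eval-instances 10
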
@run_spec_function("medcalc_bench")
def get_medcalc_bench_spec() -> RunSpec:
    scenario_spec = ScenarioSpec(class_name="helm.benchmark.scenarios.medcalc_bench_scenario.MedCalcBenchScenario")

    adapter_spec = get_generation_adapter_spec(
        instructions="Given a patient note and a clinical question, compute the requested medical value.",
        input_noun=None,
        newline_after_input_noun=False,
        output_noun="Answer only the requested quantity without units. No explanation needed",
        max_tokens=10,
        max_train_instances=0,
        stop_sequences=[],
    )

    metric_specs = [
        MetricSpec(
            class_name="helm.benchmark.metrics.medcalc_bench_metrics.MedCalcBenchMetric",
            args={},
        )
    ] + get_exact_match_metric_specs()

    return RunSpec(
        name="medcalc_bench",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        metric_specs=metric_specs,
        groups=["medcalc_bench"],
    )


@run_spec_function("clear")
def get_clear_spec(condition: str, data_path: str) -> RunSpec:
    scenario_spec = ScenarioSpec(
        class_name="helm.benchmark.scenarios.clear_scenario.CLEARScenario",
        args={
            "condition": condition,
            "data_path": data_path,
        },
    )

    condition_display = condition.replace("_", " ")

    adapter_spec = get_multiple_choice_adapter_spec(
        method=ADAPT_MULTIPLE_CHOICE_JOINT,
        instructions=(
            f"Answer 'A' for 'Has a history of {condition_display}', "
            f"'B' for 'Does not have a history of {condition_display}', or "
            f"'C' for 'Uncertain'"
        ),
        input_noun=None,
        output_noun="Respond only with 'A', 'B', or 'C'. Do not add any other text, punctuation, or symbols",
        max_train_instances=0,
        max_tokens=1,
    )

    return RunSpec(
        name=f"clear:condition={condition}",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        metric_specs=get_exact_match_metric_specs(),
        groups=["clear"],
    )

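# Editor's note: "clear" takes two required arguments, so a run entry names both,
# comma-separated. The condition and path below are hypothetical placeholders:
#
#   clear:condition=alcohol_dependence,data_path=/path/to/clear_data
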
@run_spec_function("mtsamples_replicate")
def get_mtsamples_spec(jury_config_path: Optional[str] = None) -> RunSpec:
    scenario_spec = ScenarioSpec(
        class_name="helm.benchmark.scenarios.mtsamples_replicate_scenario.MTSamplesReplicateScenario"
    )

    adapter_spec = get_generation_adapter_spec(
        instructions="Given various information about a patient, return a reasonable treatment plan for the patient.",
        input_noun=None,
        newline_after_input_noun=False,
        output_noun="Answer",
        max_tokens=512,
        max_train_instances=0,
        stop_sequences=[],
    )

    annotator_models = get_annotator_models_from_config(jury_config_path)

    annotator_specs = [
        AnnotatorSpec(
            class_name="helm.benchmark.annotation.mtsamples_replicate_annotator.MTSamplesReplicateAnnotator",
            args={
                "annotator_models": annotator_models,
            },
        )
    ]

    metric_args = {
        "task": "mtsamples_replicate",
        "device": get_torch_device_name(),
        "bertscore_model": "distilbert-base-uncased",
        "rescale_with_baseline": False,
    }

    metric_specs = get_summarization_metric_specs(metric_args) + [
        MetricSpec(
            class_name="helm.benchmark.metrics.llm_jury_metrics.LLMJuryMetric",
            args={
                "metric_name": "mtsamples_replicate_accuracy",
                "scenario_name": "mtsamples_replicate",
                "annotator_models": annotator_models,
                "default_score": 1.0,
            },
        )
    ]

    return RunSpec(
        name="mtsamples_replicate",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        annotators=annotator_specs,
        metric_specs=metric_specs,
        groups=["mtsamples_replicate"],
    )

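# Editor's note: the annotator plus BERTScore plus LLM-jury wiring above is
# repeated almost verbatim by most open-ended specs below. A minimal sketch of a
# helper that could factor out the jury metric (illustrative only; this helper is
# not part of the original file, and the argument names are taken from the
# LLMJuryMetric call sites in this module):
def _make_jury_metric_spec(task: str, annotator_models: Dict[str, AnnotatorModelInfo]) -> MetricSpec:
    return MetricSpec(
        class_name="helm.benchmark.metrics.llm_jury_metrics.LLMJuryMetric",
        args={
            "metric_name": f"{task}_accuracy",
            "scenario_name": task,
            "annotator_models": annotator_models,
            "default_score": 1.0,
        },
    )
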
@run_spec_function("medec")
def get_medec_run_spec() -> RunSpec:
    """
    RunSpec for the MEDEC dataset.
    This configuration evaluates the model's ability to detect and correct
    medical errors in clinical notes.
    """
    # Define the scenario
    scenario_spec = ScenarioSpec(
        class_name="helm.benchmark.scenarios.medec_scenario.MedecScenario",
        args={},
    )

    # Define the adapter
    adapter_spec = get_generation_adapter_spec(
        instructions=(
            "The following is a medical narrative about a patient. "
            "You are a skilled medical doctor reviewing the clinical text. "
            "The text is either correct or contains one error. "
            "The text has a sentence per line. Each line starts with the "
            "sentence ID, followed by a space character then the sentence to check. "
            "Check every sentence of the text. "
            "If the text is correct return the following output: CORRECT. "
            "If the text has a medical error, return the sentence ID of the "
            "sentence containing the error, followed by a space, "
            "and a corrected version of the sentence."
        ),
        input_noun="Clinical Note",
        output_noun="Answer",
        max_tokens=256,
        max_train_instances=0,
        stop_sequences=[],
    )

    # Define the metrics
    metric_specs = [
        MetricSpec(
            class_name="helm.benchmark.metrics.medec_metrics.MedecMetric",
            args={},
        )
    ] + get_basic_metric_specs([])

    # Return the RunSpec
    return RunSpec(
        name="medec",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        metric_specs=metric_specs,
        groups=["clinical", "medec"],
    )

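# Editor's note: under the output contract above, a response is either the single
# word "CORRECT" or "<sentence ID> <corrected sentence>". Hypothetical examples
# (not drawn from the dataset):
#
#   CORRECT
#   3 The patient was started on amoxicillin for the ear infection.
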
@run_spec_function("ehrshot")
def get_ehrshot_spec(subject: str, data_path: str, max_length: int = 100000) -> RunSpec:
    scenario_spec = ScenarioSpec(
        class_name="helm.benchmark.scenarios.ehrshot_scenario.EHRSHOTScenario",
        args={
            "subject": subject,
            "max_length": max_length,
            "data_path": data_path,
        },
    )

    adapter_spec = get_multiple_choice_adapter_spec(
        method=ADAPT_MULTIPLE_CHOICE_JOINT,
        instructions="Answer A for yes, B for no.",
        input_noun="",
        output_noun="Respond with only 'A' for yes or 'B' for no. Do not add any other text, punctuation, or symbols",
        max_train_instances=0,
        max_tokens=1,
    )

    return RunSpec(
        name=f"ehrshot:subject={subject}",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        metric_specs=get_exact_match_metric_specs(),
        groups=["ehrshot"],
    )


@run_spec_function("head_qa")
def get_head_qa_run_spec(language: str = "en", category: Union[str, None] = None) -> RunSpec:
    """
    RunSpec for the HEAD-QA dataset.
    This configuration evaluates the model's ability to answer challenging multiple-choice biomedical questions.
    """
    # Define the scenario
    scenario_spec = ScenarioSpec(
        class_name="helm.benchmark.scenarios.headqa_scenario.HeadQAScenario",
        args={"language": language, "category": category},
    )

    # Define the adapter
    adapter_spec = get_multiple_choice_adapter_spec(
        method=ADAPT_MULTIPLE_CHOICE_JOINT,
        instructions=(
            "You are a highly knowledgeable AI assistant specializing in biomedical sciences. Your task is to answer "
            "multiple-choice questions accurately based on the options provided. "
            "Each question will relate to biomedical concepts, "
            "and you will be asked to choose the most appropriate answer.\n\n"
            "Select the correct answer by outputting only the letter corresponding to your choice (A, B, C, or D)."
        ),
        input_noun="Question",
        output_noun="Answer",
        max_tokens=1,
        max_train_instances=0,
    )

    # Define the metrics
    metric_specs = get_exact_match_metric_specs()

    # Return the RunSpec
    return RunSpec(
        name=f"head_qa:language={language},category={category}",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        metric_specs=metric_specs,
        groups=["biomedical", "head_qa"],
    )

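# Editor's note: both head_qa arguments are optional, so run entries can pin a
# language and category. Illustrative entries (the category value is a
# hypothetical example):
#
#   head_qa:language=en
#   head_qa:language=es,category=pharmacology
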
@run_spec_function("medbullets")
def get_medbullets_run_spec() -> RunSpec:
    """
    RunSpec for the MedBullets dataset.
    This configuration evaluates the model's ability to answer challenging multiple-choice clinical questions.
    """
    # Define the scenario
    scenario_spec = ScenarioSpec(
        class_name="helm.benchmark.scenarios.medbullets_scenario.MedBulletsScenario",
        args={},
    )

    # Define the adapter
    adapter_spec = get_multiple_choice_adapter_spec(
        method=ADAPT_MULTIPLE_CHOICE_JOINT,
        instructions=(
            "You are a highly knowledgeable AI assistant specializing in medicine. "
            "Your task is to answer medical questions similar to those found on the USMLE Step 2/3 exams. "
            "You will be provided with a clinical scenario followed by several multiple-choice options.\n\n"
            "Select the correct answer by outputting only the letter corresponding to your choice (A, B, C, D, or E)."
        ),
        input_noun="Clinical Scenario",
        output_noun="Answer",
        max_tokens=1,
        max_train_instances=0,
    )

    # Define the metrics
    metric_specs = get_exact_match_metric_specs()

    # Return the RunSpec
    return RunSpec(
        name="medbullets",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        metric_specs=metric_specs,
        groups=["clinical", "medbullets"],
    )


@run_spec_function("medhelm_med_qa")
def get_medhelm_med_qa_spec() -> RunSpec:
    scenario_spec = ScenarioSpec(class_name="helm.benchmark.scenarios.med_qa_scenario.MedQAScenario", args={})

    adapter_spec = get_multiple_choice_adapter_spec(
        method=ADAPT_MULTIPLE_CHOICE_JOINT,
        instructions="Give a letter answer among A, B, C or D. Do not include any explanation or additional text.",
        input_noun="Question",
        output_noun="Respond only with 'A', 'B', 'C' or 'D'. Do not add any other text, punctuation, or symbols.",
        max_tokens=1,
        max_train_instances=0,
    )

    return RunSpec(
        name="med_qa",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        metric_specs=get_exact_match_metric_specs(),
        groups=["med_qa"],
    )


@run_spec_function("medhelm_med_mcqa")
def get_medhelm_med_mcqa_spec() -> RunSpec:
    scenario_spec = ScenarioSpec(class_name="helm.benchmark.scenarios.med_mcqa_scenario.MedMCQAScenario", args={})

    adapter_spec = get_multiple_choice_adapter_spec(
        method=ADAPT_MULTIPLE_CHOICE_JOINT,
        instructions="Give a letter answer among A, B, C or D. Do not include any explanation or additional text.",
        input_noun="Question",
        output_noun="Respond only with 'A', 'B', 'C' or 'D'. Do not add any other text, punctuation, or symbols.",
        max_tokens=1,
        max_train_instances=0,
    )

    return RunSpec(
        name="med_mcqa",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        metric_specs=get_exact_match_metric_specs(),
        groups=["med_mcqa"],
    )


@run_spec_function("medbullets_freetext")
def get_medbullets_freetext_run_spec() -> RunSpec:
    """RunSpec for the MedBullets Free-text dataset."""
    # Define the scenario
    scenario_spec = ScenarioSpec(
        class_name="helm.benchmark.scenarios.medbullets_scenario.MedBulletsFreeTextScenario",
        args={},
    )

    # Define the adapter
    adapter_spec = get_generation_adapter_spec(
        instructions=(
            "You are a helpful and highly knowledgeable AI assistant specializing in medicine. "
            "Your task is to answer medical questions similar to those found on the USMLE Step 2/3 exams. "
            "You will be provided with a clinical scenario, "
            "and for each question, you must:\n"
            "- Provide an answer to the question.\n"
            "- Give a concise explanation for why that answer is correct, based on the clinical scenario provided."
        ),
        input_noun="Clinical Scenario",
        output_noun="Answer",
    )

    # Define the metrics
    metric_specs = get_open_ended_generation_metric_specs()

    # Return the RunSpec
    return RunSpec(
        name="medbullets-freetext",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        metric_specs=metric_specs,
        groups=["clinical", "medbullets-freetext"],
    )

@run_spec_function("medalign")
def get_medalign_spec(data_path: str, jury_config_path: Optional[str] = None, max_length: int = 100000) -> RunSpec:
    scenario_spec = ScenarioSpec(
        class_name="helm.benchmark.scenarios.medalign_scenario.MedalignScenario",
        args={
            "max_length": max_length,
            "data_path": data_path,
        },
    )

    adapter_spec = get_generation_adapter_spec(
        instructions="",
        input_noun=None,
        newline_after_input_noun=False,
        output_noun=None,
        max_tokens=256,
        stop_sequences=[],
        max_train_instances=0,
    )

    annotator_models = get_annotator_models_from_config(jury_config_path)

    annotator_specs = [
        AnnotatorSpec(
            class_name="helm.benchmark.annotation.medalign_annotator.MedalignAnnotator",
            args={
                "annotator_models": annotator_models,
            },
        )
    ]

    metric_args = {
        "task": "medalign",
        "device": get_torch_device_name(),
        "bertscore_model": "distilbert-base-uncased",
        "rescale_with_baseline": False,
    }
    metric_specs = get_summarization_metric_specs(metric_args) + [
        MetricSpec(
            class_name="helm.benchmark.metrics.llm_jury_metrics.LLMJuryMetric",
            args={
                "metric_name": "medalign_accuracy",
                "scenario_name": "medalign",
                "annotator_models": annotator_models,
                "default_score": 1.0,
            },
        )
    ]

    return RunSpec(
        name="medalign",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        annotators=annotator_specs,
        metric_specs=metric_specs,
        groups=["medalign"],
    )

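# Editor's note: medalign reads a local dataset, so data_path is required, while
# jury_config_path and max_length are optional. A hypothetical run entry (the
# path is a placeholder):
#
#   medalign:data_path=/path/to/medalign,max_length=50000
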
@run_spec_function("shc_ptbm_med")
def get_shc_ptbm_spec(data_path: str) -> RunSpec:
    scenario_spec = ScenarioSpec(
        class_name="helm.benchmark.scenarios.shc_ptbm_scenario.SHCPTBMMedScenario",
        args={"data_path": data_path},
    )

    adapter_spec = get_multiple_choice_adapter_spec(
        method=ADAPT_MULTIPLE_CHOICE_JOINT,
        instructions="Answer A or B.",
        input_noun="",
        output_noun="",
    )

    return RunSpec(
        name="shc_ptbm_med",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        metric_specs=get_exact_match_metric_specs(),
        groups=["shc_ptbm_med"],
    )


@run_spec_function("shc_sei_med")
def get_shc_sei_spec(data_path: str) -> RunSpec:
    scenario_spec = ScenarioSpec(
        class_name="helm.benchmark.scenarios.shc_sei_scenario.SHCSEIMedScenario",
        args={"data_path": data_path},
    )

    adapter_spec = get_multiple_choice_adapter_spec(
        method=ADAPT_MULTIPLE_CHOICE_JOINT,
        instructions="Answer A or B.",
        input_noun="",
        output_noun="",
    )

    return RunSpec(
        name="shc_sei_med",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        metric_specs=get_exact_match_metric_specs(),
        groups=["shc_sei_med"],
    )


@run_spec_function("dischargeme")
def get_dischargeme_spec(data_path: str, jury_config_path: Optional[str] = None) -> RunSpec:
    scenario_spec = ScenarioSpec(
        class_name="helm.benchmark.scenarios.dischargeme_scenario.DischargeMeScenario",
        args={
            "data_path": data_path,
        },
    )

    adapter_spec = get_generation_adapter_spec(
        instructions=(
            "Given a discharge text, a radiology report text, and a target "
            "document of either discharge instructions or a brief hospital course, "
            "return the generated target document from the context provided."
        ),
        input_noun=None,
        newline_after_input_noun=False,
        output_noun="Answer",
        max_tokens=300,
        stop_sequences=[],
        max_train_instances=0,
    )

    annotator_models = get_annotator_models_from_config(jury_config_path)

    annotator_specs = [
        AnnotatorSpec(
            class_name="helm.benchmark.annotation.dischargeme_annotator.DischargeMeAnnotator",
            args={
                "annotator_models": annotator_models,
            },
        )
    ]

    metric_args = {
        "task": "dischargeme",
        "device": get_torch_device_name(),
        "bertscore_model": "distilbert-base-uncased",
        "rescale_with_baseline": False,
    }
    metric_specs = get_summarization_metric_specs(metric_args) + [
        MetricSpec(
            class_name="helm.benchmark.metrics.llm_jury_metrics.LLMJuryMetric",
            args={
                "metric_name": "dischargeme_accuracy",
                "scenario_name": "dischargeme",
                "annotator_models": annotator_models,
                "default_score": 1.0,
            },
        )
    ]
    return RunSpec(
        name="dischargeme",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        annotators=annotator_specs,
        metric_specs=metric_specs,
        groups=["dischargeme"],
    )

@run_spec_function("aci_bench")
def get_aci_bench_run_spec(jury_config_path: Optional[str] = None) -> RunSpec:
    """
    RunSpec for the ACI-Bench dataset.
    This configuration evaluates the model's ability to summarize
    doctor-patient dialogues into structured clinical notes.
    """
    scenario_spec = ScenarioSpec(
        class_name="helm.benchmark.scenarios.aci_bench_scenario.ACIBenchScenario",
        args={},
    )

    # Define the adapter
    adapter_spec = get_generation_adapter_spec(
        instructions=(
            "Summarize the conversation to generate a clinical note with four sections:\n"
            "1. HISTORY OF PRESENT ILLNESS\n"
            "2. PHYSICAL EXAM\n"
            "3. RESULTS\n"
            "4. ASSESSMENT AND PLAN\n\n"
            "The conversation is:"
        ),
        input_noun="Conversation",
        output_noun="Clinical Note",
        max_tokens=768,  # avg tokens in response is 618.9
        max_train_instances=0,
        stop_sequences=[],
    )

    annotator_models = get_annotator_models_from_config(jury_config_path)

    annotator_specs = [
        AnnotatorSpec(
            class_name="helm.benchmark.annotation.aci_bench_annotator.ACIBenchAnnotator",
            args={
                "annotator_models": annotator_models,
            },
        )
    ]

    # Define the metrics
    metric_args = {
        "task": "aci_bench",
        "device": get_torch_device_name(),
        "bertscore_model": "distilbert-base-uncased",
        "rescale_with_baseline": False,
    }
    metric_specs = get_summarization_metric_specs(metric_args) + [
        MetricSpec(
            class_name="helm.benchmark.metrics.llm_jury_metrics.LLMJuryMetric",
            args={
                "metric_name": "aci_bench_accuracy",
                "scenario_name": "aci_bench",
                "annotator_models": annotator_models,
                "default_score": 1.0,
            },
        )
    ]

    # Return the RunSpec
    return RunSpec(
        name="aci_bench",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        annotators=annotator_specs,
        metric_specs=metric_specs,
        groups=["clinical", "aci_bench"],
    )

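# Editor's note: given the instructions above, a well-formed ACI-Bench response
# is a note organized under the four numbered section headers. Illustrative
# skeleton (section bodies elided):
#
#   1. HISTORY OF PRESENT ILLNESS
#   ...
#   2. PHYSICAL EXAM
#   ...
#   3. RESULTS
#   ...
#   4. ASSESSMENT AND PLAN
#   ...
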
@run_spec_function("mtsamples_procedures")
def get_mtsamples_procedures_spec(jury_config_path: Optional[str] = None) -> RunSpec:
    scenario_spec = ScenarioSpec(
        class_name="helm.benchmark.scenarios.mtsamples_procedures_scenario.MTSamplesProceduresScenario"
    )

    adapter_spec = get_generation_adapter_spec(
        instructions="Here is information about a patient; return a reasonable treatment plan for the patient.",
        input_noun="Patient Notes",
        newline_after_input_noun=False,
        output_noun="Answer",
        max_tokens=512,
        max_train_instances=0,
        stop_sequences=[],
    )
    annotator_models = get_annotator_models_from_config(jury_config_path)

    annotator_specs = [
        AnnotatorSpec(
            class_name="helm.benchmark.annotation.mtsamples_procedures_annotator.MTSamplesProceduresAnnotator",
            args={
                "annotator_models": annotator_models,
            },
        )
    ]

    metric_args = {
        "task": "mtsamples_procedures",
        "device": get_torch_device_name(),
        "bertscore_model": "distilbert-base-uncased",
        "rescale_with_baseline": False,
    }

    metric_specs = get_summarization_metric_specs(metric_args) + [
        MetricSpec(
            class_name="helm.benchmark.metrics.llm_jury_metrics.LLMJuryMetric",
            args={
                "metric_name": "mtsamples_procedures_accuracy",
                "scenario_name": "mtsamples_procedures",
                "annotator_models": annotator_models,
                "default_score": 1.0,
            },
        )
    ]

    return RunSpec(
        name="mtsamples_procedures",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        annotators=annotator_specs,
        metric_specs=metric_specs,
        groups=["mtsamples_procedures"],
    )

@run_spec_function("mimic_rrs")
def get_mimic_rrs_spec(data_path: str, jury_config_path: Optional[str] = None) -> RunSpec:
    scenario_spec = ScenarioSpec(
        class_name="helm.benchmark.scenarios.mimic_rrs_scenario.MIMICRRSScenario",
        args={"data_path": data_path},
    )

    adapter_spec = get_generation_adapter_spec(
        instructions=(
            "Generate the impression section of the radiology report based on its findings. "
            "This will not be used to diagnose nor treat any patients. Be as concise as possible."
        ),
        input_noun="Findings",
        output_noun="Impression",
        newline_after_input_noun=True,
        newline_after_output_noun=True,
        max_tokens=128,
        max_train_instances=0,
        stop_sequences=[],
    )

    annotator_models = get_annotator_models_from_config(jury_config_path)

    annotator_specs = [
        AnnotatorSpec(
            class_name="helm.benchmark.annotation.mimic_rrs_annotator.MIMICRRSAnnotator",
            args={
                "annotator_models": annotator_models,
            },
        )
    ]

    metric_args = {
        "task": "mimic_rrs",
        "device": get_torch_device_name(),
        "bertscore_model": "distilbert-base-uncased",
        "rescale_with_baseline": False,
    }
    metric_specs = get_summarization_metric_specs(metric_args) + [
        MetricSpec(
            class_name="helm.benchmark.metrics.llm_jury_metrics.LLMJuryMetric",
            args={
                "metric_name": "mimic_rrs_accuracy",
                "scenario_name": "mimic_rrs",
                "annotator_models": annotator_models,
                "default_score": 1.0,
            },
        )
    ]
    return RunSpec(
        name="mimic_rrs",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        annotators=annotator_specs,
        metric_specs=metric_specs,
        groups=["mimic_rrs"],
    )


@run_spec_function("mimic_bhc")
def get_mimic_bhc_spec(data_path: str, jury_config_path: Optional[str] = None) -> RunSpec:
    scenario_spec = ScenarioSpec(
        class_name="helm.benchmark.scenarios.mimic_bhc_scenario.MIMICBHCScenario",
        args={"data_path": data_path},
    )

    adapter_spec = get_generation_adapter_spec(
        instructions="Summarize the clinical note into a brief hospital course.",
        input_noun="Clinical Note",
        output_noun="Brief Hospital Course",
        newline_after_input_noun=True,
        newline_after_output_noun=True,
        max_tokens=1024,
        max_train_instances=0,
        stop_sequences=[],
    )

    annotator_models = get_annotator_models_from_config(jury_config_path)

    annotator_specs = [
        AnnotatorSpec(
            class_name="helm.benchmark.annotation.mimic_bhc_annotator.MIMICBHCAnnotator",
            args={
                "annotator_models": annotator_models,
            },
        )
    ]

    metric_args = {
        "task": "mimic_bhc",
        "device": get_torch_device_name(),
        "bertscore_model": "distilbert-base-uncased",
        "rescale_with_baseline": False,
    }
    metric_specs = get_summarization_metric_specs(metric_args) + [
        MetricSpec(
            class_name="helm.benchmark.metrics.llm_jury_metrics.LLMJuryMetric",
            args={
                "metric_name": "mimic_bhc_accuracy",
                "scenario_name": "mimic_bhc",
                "annotator_models": annotator_models,
                "default_score": 1.0,
            },
        )
    ]
    return RunSpec(
        name="mimic_bhc",
        annotators=annotator_specs,
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        metric_specs=metric_specs,
        groups=["mimic_bhc"],
    )

@run_spec_function("chw_care_plan")
def get_chw_care_plan_run_spec(data_path: str, jury_config_path: Optional[str] = None) -> RunSpec:
    """
    RunSpec for the chw_care_plan dataset.
    This configuration evaluates the model's ability to convert
    a patient note into a specified care plan format.
    """
    scenario_spec = ScenarioSpec(
        class_name="helm.benchmark.scenarios.chw_care_plan_scenario.CHWCarePlanScenario",
        args={"data_path": data_path},
    )

    adapter_spec = get_generation_adapter_spec(
        instructions=(
            "Follow the instructions provided regarding conversion of a patient note into a specified format."
        ),
        input_noun="",
        output_noun="",
        max_tokens=768,
        max_train_instances=0,
        stop_sequences=[],
    )

    annotator_models = get_annotator_models_from_config(jury_config_path)

    annotator_specs = [
        AnnotatorSpec(
            class_name="helm.benchmark.annotation.chw_care_plan_annotator.CHWCarePlanAnnotator",
            args={
                "annotator_models": annotator_models,
            },
        )
    ]

    metric_args = {
        "task": "chw_care_plan",
        "device": get_torch_device_name(),
        "bertscore_model": "distilbert-base-uncased",
        "rescale_with_baseline": False,
    }
    metric_specs = get_summarization_metric_specs(metric_args) + [
        MetricSpec(
            class_name="helm.benchmark.metrics.llm_jury_metrics.LLMJuryMetric",
            args={
                "metric_name": "chw_care_plan_accuracy",
                "scenario_name": "chw_care_plan",
                "annotator_models": annotator_models,
                "default_score": 1.0,
            },
        )
    ]
    # Return the RunSpec
    return RunSpec(
        name="chw_care_plan",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        annotators=annotator_specs,
        metric_specs=metric_specs,
        groups=["clinical", "chw_care_plan"],
    )

@run_spec_function("medication_qa")
def get_medication_qa_spec(jury_config_path: Optional[str] = None) -> RunSpec:
    scenario_spec = ScenarioSpec(class_name="helm.benchmark.scenarios.medication_qa_scenario.MedicationQAScenario")

    adapter_spec = get_generation_adapter_spec(
        instructions="Please answer the following consumer health question.",
        input_noun="Question",
        output_noun="Answer",
        max_train_instances=0,
        max_tokens=512,
        stop_sequences=[],
    )
    annotator_models = get_annotator_models_from_config(jury_config_path)

    annotator_specs = [
        AnnotatorSpec(
            class_name="helm.benchmark.annotation.medication_qa_annotator.MedicationQAAnnotator",
            args={
                "annotator_models": annotator_models,
            },
        )
    ]
    metric_args = {
        "task": "medication_qa",
        "device": get_torch_device_name(),
        "bertscore_model": "distilbert-base-uncased",
        "rescale_with_baseline": False,
    }
    metric_specs = get_summarization_metric_specs(metric_args) + [
        MetricSpec(
            class_name="helm.benchmark.metrics.llm_jury_metrics.LLMJuryMetric",
            args={
                "metric_name": "medication_qa_accuracy",
                "scenario_name": "medication_qa",
                "annotator_models": annotator_models,
                "default_score": 1.0,
            },
        )
    ]
    return RunSpec(
        name="medication_qa",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        annotators=annotator_specs,
        metric_specs=metric_specs,
        groups=["medication_qa"],
    )


@run_spec_function("starr_patient_instructions")
def get_starr_patient_instructions_run_spec(data_path: str, jury_config_path: Optional[str] = None) -> RunSpec:
    scenario_spec = ScenarioSpec(
        class_name="helm.benchmark.scenarios.starr_patient_instructions_scenario.StarrPatientInstructionsScenario",
        args={"data_path": data_path},
    )

    adapter_spec = get_generation_adapter_spec(
        instructions=(
            "You are a medical professional tasked with generating personalized post-procedure "
            "patient instructions. Given the following case details which include the patient's "
            "diagnosis, the planned procedure, the history & physical note, and the operative report, "
            "generate clear and actionable instructions for the patient to follow after their procedure. "
            "Don't worry, this information will not be used for any clinical decision making. "
            "This will not be used to diagnose nor treat any patients."
        ),
        input_noun="Case Details",
        output_noun="Patient Instructions",
        max_tokens=256,
        max_train_instances=0,
        stop_sequences=[],
    )
    annotator_models = get_annotator_models_from_config(jury_config_path)

    annotator_specs = [
        AnnotatorSpec(
            class_name=(
                "helm.benchmark.annotation.starr_patient_instructions_annotator.StarrPatientInstructionsAnnotator"
            ),
            args={
                "annotator_models": annotator_models,
            },
        )
    ]

    metric_args = {
        "task": "starr_patient_instructions",
        "device": get_torch_device_name(),
        "bertscore_model": "distilbert-base-uncased",
        "rescale_with_baseline": False,
    }
    metric_specs = get_summarization_metric_specs(metric_args) + [
        MetricSpec(
            class_name="helm.benchmark.metrics.llm_jury_metrics.LLMJuryMetric",
            args={
                "metric_name": "starr_patient_instructions_accuracy",
                "scenario_name": "starr_patient_instructions",
                "annotator_models": annotator_models,
                "default_score": 1.0,
            },
        )
    ]
    return RunSpec(
        name="starr_patient_instructions",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        annotators=annotator_specs,
        metric_specs=metric_specs,
        groups=["starr_patient_instructions"],
    )

@run_spec_function("med_dialog")
def get_med_dialog_spec(subset: str, jury_config_path: Optional[str] = None) -> RunSpec:
    scenario_spec = ScenarioSpec(
        class_name="helm.benchmark.scenarios.med_dialog_scenario.MedDialogScenario", args={"subset": subset}
    )

    adapter_spec = get_generation_adapter_spec(
        instructions="Generate a one sentence summary of this patient-doctor conversation.",
        input_noun="Patient-Doctor",
        output_noun="Summary",
        max_tokens=80,
        max_train_instances=0,
        stop_sequences=[],
    )

    annotator_models = get_annotator_models_from_config(jury_config_path)

    annotator_specs = [
        AnnotatorSpec(
            class_name="helm.benchmark.annotation.med_dialog_annotator.MedDialogAnnotator",
            args={
                "annotator_models": annotator_models,
            },
        )
    ]

    metric_args = {
        "task": "med_dialog",
        "device": get_torch_device_name(),
        "bertscore_model": "distilbert-base-uncased",
        "rescale_with_baseline": False,
    }
    metric_specs = get_summarization_metric_specs(metric_args) + [
        MetricSpec(
            class_name="helm.benchmark.metrics.llm_jury_metrics.LLMJuryMetric",
            args={
                "metric_name": "med_dialog_accuracy",
                "scenario_name": "med_dialog",
                "annotator_models": annotator_models,
                "default_score": 1.0,
            },
        )
    ]
    return RunSpec(
        name=f"med_dialog:subset={subset}",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        annotators=annotator_specs,
        metric_specs=metric_specs,
        groups=["med_dialog"],
    )

@run_spec_function("shc_conf_med")
def get_shc_conf_spec(data_path: str) -> RunSpec:
    scenario_spec = ScenarioSpec(
        class_name="helm.benchmark.scenarios.shc_conf_scenario.SHCCONFMedScenario",
        args={"data_path": data_path},
    )

    adapter_spec = get_multiple_choice_adapter_spec(
        method=ADAPT_MULTIPLE_CHOICE_JOINT,
        instructions="Answer A or B.",
        input_noun="",
        output_noun="",
    )

    return RunSpec(
        name="shc_conf_med",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        metric_specs=get_exact_match_metric_specs(),
        groups=["shc_conf_med"],
    )


@run_spec_function("medi_qa")
def get_medi_qa_spec(jury_config_path: Optional[str] = None) -> RunSpec:
    scenario_spec = ScenarioSpec(class_name="helm.benchmark.scenarios.medi_qa_scenario.MediQAScenario", args={})

    adapter_spec = get_generation_adapter_spec(
        instructions="Answer the following consumer health question.",
        input_noun="Question",
        output_noun="Answer",
        max_tokens=1024,
        max_train_instances=0,
        stop_sequences=[],
    )

    annotator_models = get_annotator_models_from_config(jury_config_path)

    annotator_specs = [
        AnnotatorSpec(
            class_name="helm.benchmark.annotation.medi_qa_annotator.MediQAAnnotator",
            args={
                "annotator_models": annotator_models,
            },
        )
    ]

    metric_args = {
        "task": "medi_qa",
        "device": get_torch_device_name(),
        "bertscore_model": "distilbert-base-uncased",
        "rescale_with_baseline": False,
    }
    metric_specs = get_summarization_metric_specs(metric_args) + [
        MetricSpec(
            class_name="helm.benchmark.metrics.llm_jury_metrics.LLMJuryMetric",
            args={
                "metric_name": "medi_qa_accuracy",
                "scenario_name": "medi_qa",
                "annotator_models": annotator_models,
                "default_score": 1.0,
            },
        )
    ]
    return RunSpec(
        name="medi_qa",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        annotators=annotator_specs,
        metric_specs=metric_specs,
        groups=["medi_qa"],
    )


@run_spec_function("mental_health")
def get_mental_health_spec(data_path: str, jury_config_path: Optional[str] = None) -> RunSpec:
    """
    Returns the run specification for the mental health counseling scenario.
    This scenario evaluates a model's ability to generate appropriate counseling responses
    in mental health conversations.
    """
    scenario_spec = ScenarioSpec(
        class_name="helm.benchmark.scenarios.mental_health_scenario.MentalHealthScenario",
        args={"data_path": data_path},
    )

    adapter_spec = get_generation_adapter_spec(
        instructions=(
            "Given a mental health conversation history, generate an empathetic and appropriate counselor response."
        ),
        input_noun=None,  # No specific input noun needed as format is defined in scenario
        newline_after_input_noun=False,
        output_noun="Counselor response",
        max_tokens=512,
        stop_sequences=[],
    )
    annotator_models = get_annotator_models_from_config(jury_config_path)

    annotator_specs = [
        AnnotatorSpec(
            class_name="helm.benchmark.annotation.mental_health_annotator.MentalHealthAnnotator",
            args={
                "annotator_models": annotator_models,
            },
        )
    ]

    metric_args = {
        "task": "mental_health",
        "device": get_torch_device_name(),
        "bertscore_model": "distilbert-base-uncased",
        "rescale_with_baseline": False,
    }
    metric_specs = get_summarization_metric_specs(metric_args) + [
        MetricSpec(
            class_name="helm.benchmark.metrics.llm_jury_metrics.LLMJuryMetric",
            args={
                "metric_name": "mental_health_accuracy",
                "scenario_name": "mental_health",
                "annotator_models": annotator_models,
                "default_score": 1.0,
            },
        )
    ]

    return RunSpec(
        name="mental_health",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        annotators=annotator_specs,
        metric_specs=metric_specs,
        groups=["mental_health"],
    )

@run_spec_function("pubmed_qa")
def get_pubmed_qa_spec() -> RunSpec:
    scenario_spec = ScenarioSpec(class_name="helm.benchmark.scenarios.pubmed_qa_scenario.PubMedQAScenario", args={})

    adapter_spec = get_multiple_choice_adapter_spec(
        method=ADAPT_MULTIPLE_CHOICE_JOINT,
        instructions=(
            "Answer A for yes, B for no or C for maybe. "
            "Do not include any explanation or additional text. "
            "Output only the letter on a single line."
        ),
        input_noun="Question",
        output_noun="Answer",
        max_train_instances=0,
    )

    return RunSpec(
        name="pubmed_qa",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        metric_specs=get_exact_match_metric_specs(),
        groups=["pubmed_qa"],
    )


@run_spec_function("ehr_sql")
def get_ehr_sql_run_spec() -> RunSpec:
    """
    RunSpec for the EHR SQL dataset.
    This configuration evaluates the model's ability to generate accurate SQL queries from natural language questions.
    """

    # Define the scenario
    scenario_spec = ScenarioSpec(
        class_name="helm.benchmark.scenarios.ehr_sql_scenario.EhrSqlScenario",
        args={},
    )

    # Define the adapter
    adapter_spec = get_generation_adapter_spec(
        instructions=(
            "You are a highly skilled AI specializing in medical SQL queries. "
            "Given a database schema and a medical question, generate a valid SQL query "
            "that retrieves the required information from the database. "
            "Output only the SQL query without explanations.\n\n"
            "Input: A database schema followed by a natural language question.\n"
            "Output: A valid SQL query ending with ;. Only return SQL query, don't add additional text.\n\n"
            "If the question is unanswerable, return an empty string without additional text or comments."
        ),
        input_noun="Medical Question + Schema",
        output_noun="SQL Query",
        max_tokens=1024,
        temperature=0,
        max_train_instances=0,
        stop_sequences=[],
    )

    annotator_specs = [AnnotatorSpec(class_name="helm.benchmark.annotation.ehr_sql_annotator.EhrSqlAnnotator")]

    # Define the metrics
    metric_specs = [
        MetricSpec(class_name="helm.benchmark.metrics.ehr_sql_metrics.EhrSqlMetric", args={})
    ] + get_exact_match_metric_specs()

    # Return the RunSpec
    return RunSpec(
        name="ehr_sql",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        annotators=annotator_specs,
        metric_specs=metric_specs,
        groups=["ehr_sql"],
    )

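# Editor's note: under the ehr_sql contract above, the model should emit only a
# terminated SQL query, or an empty string when the question is unanswerable.
# A hypothetical, schema-dependent example:
#
#   SELECT COUNT(*) FROM admissions WHERE admission_type = 'EMERGENCY';
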
@run_spec_function("shc_bmt_med")
def get_shc_bmt_spec(data_path: str) -> RunSpec:
    scenario_spec = ScenarioSpec(
        class_name="helm.benchmark.scenarios.shc_bmt_scenario.SHCBMTMedScenario",
        args={"data_path": data_path},
    )

    adapter_spec = get_multiple_choice_adapter_spec(
        method=ADAPT_MULTIPLE_CHOICE_JOINT,
        instructions="Answer A or B.",
        input_noun="",
        output_noun="",
    )

    return RunSpec(
        name="shc_bmt_med",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        metric_specs=get_exact_match_metric_specs(),
        groups=["shc_bmt_med"],
    )


@run_spec_function("race_based_med")
def get_race_based_med_spec() -> RunSpec:
    scenario_spec = ScenarioSpec(
        class_name="helm.benchmark.scenarios.race_based_med_scenario.RaceBasedMedScenario", args={}
    )

    adapter_spec = get_multiple_choice_adapter_spec(
        method=ADAPT_MULTIPLE_CHOICE_JOINT,
        instructions="Answer A for yes, B for no.",
        input_noun=None,
        output_noun="Respond with only 'A' for yes or 'B' for no. Do not add any other text, punctuation, or symbols",
        max_train_instances=0,
    )

    return RunSpec(
        name="race_based_med",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        metric_specs=get_exact_match_metric_specs(),
        groups=["race_based_med"],
    )


@run_spec_function("medhallu")
def get_medhallu_spec() -> RunSpec:
    scenario_spec = ScenarioSpec(class_name="helm.benchmark.scenarios.medhallu_scenario.MedHalluScenario", args={})

    adapter_spec = get_generation_adapter_spec(
        instructions="""You are an AI assistant with extensive knowledge in the field of medicine. I want you to act as an answer judge. Given a question and an answer, your objective is to determine if the provided answer contains non-factual or hallucinated information. You SHOULD give your judgement based on the following hallucination types and the world knowledge.

You are trying to determine if there is a factual contradiction between the answer and the world knowledge. Some information in the answer might be fabricated. Answer '0' if the answer is factual and '1' if the answer is hallucinated. You can categorize the answer as hallucinated if it falls into any of the following categories:
Type: Misinterpretation of #Question#: These are hallucinated answers that misunderstand the question, leading to an off-topic or irrelevant response.
Type: Incomplete Information: These are hallucinated answers that point out what is not true without providing correct information.
Type: Mechanism and Pathway Misattribution: These are hallucinated answers that falsely attribute biological mechanisms, molecular pathways, or disease processes in a way that contradicts established medical knowledge.
Type: Methodological and Evidence Fabrication: These are hallucinated answers that invent false research methods, statistical data, or specific clinical outcomes.

Do not return anything else, just the answer.
Return just an integer value, '0' if the answer is factual and '1' if the answer is hallucinated. No letter or word, just the integer value.""",  # noqa: E501
        input_noun=None,
        output_noun=(
            """Return just an integer value, '0' if the answer is factual and '1' if the answer is hallucinated.
No letter or word, just the integer value.

Your Judgment"""  # noqa: E501
        ),
        max_train_instances=0,
        stop_sequences=[],
    )

    return RunSpec(
        name="medhallu",
        scenario_spec=scenario_spec,
        adapter_spec=adapter_spec,
        metric_specs=get_exact_match_metric_specs(),
        groups=["medhallu"],
    )

1379
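Because `medhallu` is scored with exact match, the prompt repeatedly demands a bare integer. If a model still wraps the digit in prose, a more tolerant extraction step (hypothetical, not part of this diff) could be sketched as:

    import re

    def extract_binary_judgment(text: str) -> str:
        """Illustrative sketch only: return the first standalone '0' or '1'."""
        match = re.search(r"\b([01])\b", text)
        return match.group(1) if match else ""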
+ @run_spec_function("n2c2_ct_matching")
1380
+ def get_n2c2_ct_matching_spec(data_path: str, subject: str) -> RunSpec:
1381
+ scenario_spec = ScenarioSpec(
1382
+ class_name="helm.benchmark.scenarios.n2c2_ct_matching_scenario.N2C2CTMatchingScenario",
1383
+ args={"data_path": data_path, "subject": subject},
1384
+ )
1385
+
1386
+ adapter_spec = get_multiple_choice_adapter_spec(
1387
+ method=ADAPT_MULTIPLE_CHOICE_JOINT,
1388
+ instructions="Answer A for yes, B for no.",
1389
+ input_noun="",
1390
+ output_noun="Answer A for yes, B for no. Do not add any other text, punctuation, or symbols",
1391
+ max_train_instances=0,
1392
+ )
1393
+
1394
+ return RunSpec(
1395
+ name=f"n2c2_ct_matching:subject={subject}",
1396
+ scenario_spec=scenario_spec,
1397
+ adapter_spec=adapter_spec,
1398
+ metric_specs=get_exact_match_metric_specs(),
1399
+ groups=["n2c2_ct_matching"],
1400
+ )
1401
+
1402
+
1403
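`n2c2_ct_matching` is parameterized by both `data_path` and a `subject` (one eligibility criterion per run, as the run name `n2c2_ct_matching:subject=...` reflects). Assuming the standard CLI, with placeholder values for the path, criterion, and model, a run entry would look roughly like:

    helm-run --run-entries n2c2_ct_matching:data_path=/path/to/n2c2,subject=ABDOMINAL,model=openai/gpt-4o --suite my-suite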
+ @run_spec_function("shc_gip_med")
1404
+ def get_shc_gip_spec(data_path: str) -> RunSpec:
1405
+ scenario_spec = ScenarioSpec(
1406
+ class_name="helm.benchmark.scenarios.shc_gip_scenario.SHCGIPMedScenario", args={"data_path": data_path}
1407
+ )
1408
+
1409
+ adapter_spec = get_multiple_choice_adapter_spec(
1410
+ method=ADAPT_MULTIPLE_CHOICE_JOINT,
1411
+ instructions="Answer A or B.",
1412
+ input_noun="",
1413
+ output_noun="",
1414
+ )
1415
+
1416
+ return RunSpec(
1417
+ name="shc_gip_med",
1418
+ scenario_spec=scenario_spec,
1419
+ adapter_spec=adapter_spec,
1420
+ metric_specs=get_exact_match_metric_specs(),
1421
+ groups=["shc_gip_med"],
1422
+ )
1423
+
1424
+
1425
+ @run_spec_function("mimiciv_billing_code")
1426
+ def get_mimiciv_billing_code_spec(data_path: str) -> RunSpec:
1427
+ scenario_spec = ScenarioSpec(
1428
+ class_name="helm.benchmark.scenarios.mimiciv_billing_code_scenario.MIMICIVBillingCodeScenario",
1429
+ args={
1430
+ "data_path": data_path,
1431
+ },
1432
+ )
1433
+ adapter_spec = get_generation_adapter_spec(
1434
+ instructions="Given the following clinical note, identify all relevant ICD-10 codes.",
1435
+ input_noun="Note",
1436
+ output_noun="Predicted ICD-10 Codes",
1437
+ newline_after_input_noun=True,
1438
+ newline_after_output_noun=True,
1439
+ max_tokens=256,
1440
+ max_train_instances=0,
1441
+ stop_sequences=[],
1442
+ )
1443
+ # Define the metrics
1444
+ metric_specs = [
1445
+ MetricSpec(
1446
+ class_name="helm.benchmark.metrics.mimiciv_billing_code_metrics.MIMICIVBillingCodeMetric",
1447
+ args={},
1448
+ )
1449
+ ] + get_generic_metric_specs()
1450
+
1451
+ # Return the RunSpec
1452
+ return RunSpec(
1453
+ name="mimiciv_billing_code",
1454
+ scenario_spec=scenario_spec,
1455
+ adapter_spec=adapter_spec,
1456
+ metric_specs=metric_specs,
1457
+ groups=["mimiciv_billing_code"],
1458
+ )
1459
+
1460
+
1461
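`mimiciv_billing_code` pairs a custom `MIMICIVBillingCodeMetric` with the generic metrics because exact match is too strict for multi-label code prediction. The real metric is not shown in this hunk; as an illustrative sketch of the kind of set-based scoring one would expect for ICD-10 prediction:

    def code_set_f1(predicted: set, gold: set) -> float:
        """Illustrative sketch only: F1 over predicted vs. gold ICD-10 code sets."""
        if not predicted or not gold:
            return 0.0
        true_positives = len(predicted & gold)
        if true_positives == 0:
            return 0.0
        precision = true_positives / len(predicted)
        recall = true_positives / len(gold)
        return 2 * precision * recall / (precision + recall)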
+ @run_spec_function("shc_sequoia_med")
1462
+ def get_shc_sequoia_spec(data_path: str) -> RunSpec:
1463
+ scenario_spec = ScenarioSpec(
1464
+ class_name="helm.benchmark.scenarios.shc_sequoia_scenario.SHCSequoiaMedScenario", args={"data_path": data_path}
1465
+ )
1466
+
1467
+ adapter_spec = get_multiple_choice_adapter_spec(
1468
+ method=ADAPT_MULTIPLE_CHOICE_JOINT,
1469
+ instructions="Answer A or B.",
1470
+ input_noun="",
1471
+ output_noun="",
1472
+ )
1473
+
1474
+ return RunSpec(
1475
+ name="shc_sequoia_med",
1476
+ scenario_spec=scenario_spec,
1477
+ adapter_spec=adapter_spec,
1478
+ metric_specs=get_exact_match_metric_specs(),
1479
+ groups=["shc_sequoia_med"],
1480
+ )
1481
+
1482
+
1483
+ @run_spec_function("shc_cdi_med")
1484
+ def get_shc_cdi_spec(data_path: str) -> RunSpec:
1485
+ scenario_spec = ScenarioSpec(
1486
+ class_name="helm.benchmark.scenarios.shc_cdi_scenario.SHCCDIMedScenario", args={"data_path": data_path}
1487
+ )
1488
+
1489
+ adapter_spec = get_multiple_choice_adapter_spec(
1490
+ method=ADAPT_MULTIPLE_CHOICE_JOINT,
1491
+ instructions="Answer A or B.",
1492
+ input_noun="",
1493
+ output_noun="",
1494
+ )
1495
+
1496
+ return RunSpec(
1497
+ name="shc_cdi_med",
1498
+ scenario_spec=scenario_spec,
1499
+ adapter_spec=adapter_spec,
1500
+ metric_specs=get_exact_match_metric_specs(),
1501
+ groups=["shc_cdi_med"],
1502
+ )
1503
+
1504
+
1505
+ @run_spec_function("shc_ent_med")
1506
+ def get_shc_ent_spec(data_path: str) -> RunSpec:
1507
+ scenario_spec = ScenarioSpec(
1508
+ class_name="helm.benchmark.scenarios.shc_ent_scenario.SHCENTMedScenario", args={"data_path": data_path}
1509
+ )
1510
+
1511
+ adapter_spec = get_multiple_choice_adapter_spec(
1512
+ method=ADAPT_MULTIPLE_CHOICE_JOINT,
1513
+ instructions="Answer A, B, or C.",
1514
+ input_noun="",
1515
+ output_noun="",
1516
+ )
1517
+
1518
+ return RunSpec(
1519
+ name="shc_ent_med",
1520
+ scenario_spec=scenario_spec,
1521
+ adapter_spec=adapter_spec,
1522
+ metric_specs=get_exact_match_metric_specs(),
1523
+ groups=["shc_ent_med"],
1524
+ )
1525
+
1526
+
1527
+ @run_spec_function("shc_privacy_med")
1528
+ def get_shc_privacy_spec(data_path: str) -> RunSpec:
1529
+ scenario_spec = ScenarioSpec(
1530
+ class_name="helm.benchmark.scenarios.shc_privacy_scenario.SHCPRIVACYMedScenario",
1531
+ args={"data_path": data_path},
1532
+ )
1533
+
1534
+ adapter_spec = get_multiple_choice_adapter_spec(
1535
+ method=ADAPT_MULTIPLE_CHOICE_JOINT,
1536
+ instructions="Answer A or B.",
1537
+ input_noun="",
1538
+ output_noun="",
1539
+ )
1540
+
1541
+ return RunSpec(
1542
+ name="shc_privacy_med",
1543
+ scenario_spec=scenario_spec,
1544
+ adapter_spec=adapter_spec,
1545
+ metric_specs=get_exact_match_metric_specs(),
1546
+ groups=["shc_privacy_med"],
1547
+ )
1548
+
1549
+
1550
+ @run_spec_function("shc_proxy_med")
1551
+ def get_shc_proxy_spec(data_path: str) -> RunSpec:
1552
+ scenario_spec = ScenarioSpec(
1553
+ class_name="helm.benchmark.scenarios.shc_proxy_scenario.SHCPROXYMedScenario",
1554
+ args={"data_path": data_path},
1555
+ )
1556
+
1557
+ adapter_spec = get_multiple_choice_adapter_spec(
1558
+ method=ADAPT_MULTIPLE_CHOICE_JOINT,
1559
+ instructions="Answer A, B, or C.",
1560
+ input_noun="",
1561
+ output_noun="",
1562
+ )
1563
+
1564
+ return RunSpec(
1565
+ name="shc_proxy_med",
1566
+ scenario_spec=scenario_spec,
1567
+ adapter_spec=adapter_spec,
1568
+ metric_specs=get_exact_match_metric_specs(),
1569
+ groups=["shc_proxy_med"],
1570
+ )