crfm-helm 0.4.0-py3-none-any.whl → 0.5.10-py3-none-any.whl

This diff shows the changes between two publicly available versions of the package as released to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.

Potentially problematic release.

This version of crfm-helm might be problematic.
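A diff like this can also be reproduced locally. The following is a minimal sketch, assuming network access, the pip command-line tool, and a recent Python; the directory names and helper functions are illustrative and are not part of crfm-helm itself. It downloads both wheels from PyPI and compares their file lists (a wheel is a zip archive):

    import subprocess
    import zipfile
    from pathlib import Path

    PACKAGE = "crfm-helm"
    OLD, NEW = "0.4.0", "0.5.10"

    def fetch_wheel(version: str, dest: Path) -> Path:
        """Download one specific wheel from PyPI, without its dependencies."""
        dest.mkdir(parents=True, exist_ok=True)
        subprocess.run(
            ["pip", "download", f"{PACKAGE}=={version}",
             "--no-deps", "--only-binary", ":all:", "-d", str(dest)],
            check=True,
        )
        return next(dest.glob("*.whl"))

    def file_set(wheel: Path) -> set:
        """List the paths contained in a wheel (wheels are zip archives)."""
        with zipfile.ZipFile(wheel) as zf:
            return set(zf.namelist())

    old_files = file_set(fetch_wheel(OLD, Path("wheel-old")))
    new_files = file_set(fetch_wheel(NEW, Path("wheel-new")))
    print(f"added:   {len(new_files - old_files)} files")
    print(f"removed: {len(old_files - new_files)} files")
    print(f"common:  {len(new_files & old_files)} files")

Comparing file contents (rather than just file lists) would additionally require extracting both archives and diffing the shared paths.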

Files changed (1033). Each entry shows the file path with lines added (+) and removed (-); a short parsing sketch follows the listing.
  1. crfm_helm-0.5.10.dist-info/METADATA +369 -0
  2. crfm_helm-0.5.10.dist-info/RECORD +1008 -0
  3. {crfm_helm-0.4.0.dist-info → crfm_helm-0.5.10.dist-info}/WHEEL +1 -1
  4. helm/benchmark/adaptation/adapter_spec.py +80 -29
  5. helm/benchmark/adaptation/adapters/adapter.py +2 -2
  6. helm/benchmark/adaptation/adapters/adapter_factory.py +39 -28
  7. helm/benchmark/adaptation/adapters/binary_ranking_adapter.py +1 -1
  8. helm/benchmark/adaptation/adapters/chat_adapter.py +49 -0
  9. helm/benchmark/adaptation/adapters/ehr_instruction_adapter.py +108 -0
  10. helm/benchmark/adaptation/adapters/generation_adapter.py +2 -1
  11. helm/benchmark/adaptation/adapters/in_context_learning_adapter.py +24 -8
  12. helm/benchmark/adaptation/adapters/language_modeling_adapter.py +3 -4
  13. helm/benchmark/adaptation/adapters/multimodal/generation_multimodal_adapter.py +4 -2
  14. helm/benchmark/adaptation/adapters/multimodal/in_context_learning_multimodal_adapter.py +2 -1
  15. helm/benchmark/adaptation/adapters/multimodal/multimodal_prompt.py +7 -0
  16. helm/benchmark/adaptation/adapters/multimodal/multiple_choice_joint_multimodal_adapter.py +112 -0
  17. helm/benchmark/adaptation/adapters/multimodal/test_in_context_learning_multimodal_adapter.py +6 -3
  18. helm/benchmark/adaptation/adapters/multimodal/test_multimodal_prompt.py +3 -1
  19. helm/benchmark/adaptation/adapters/multiple_choice_calibrated_adapter.py +1 -1
  20. helm/benchmark/adaptation/adapters/multiple_choice_joint_adapter.py +18 -8
  21. helm/benchmark/adaptation/adapters/multiple_choice_joint_chain_of_thought_adapter.py +87 -0
  22. helm/benchmark/adaptation/adapters/multiple_choice_separate_adapter.py +1 -1
  23. helm/benchmark/adaptation/adapters/test_adapter.py +5 -4
  24. helm/benchmark/adaptation/adapters/test_generation_adapter.py +46 -22
  25. helm/benchmark/adaptation/adapters/test_language_modeling_adapter.py +17 -29
  26. helm/benchmark/adaptation/adapters/test_multiple_choice_joint_adapter.py +138 -16
  27. helm/benchmark/adaptation/common_adapter_specs.py +443 -0
  28. helm/benchmark/adaptation/prompt.py +1 -1
  29. helm/benchmark/adaptation/request_state.py +6 -1
  30. helm/benchmark/adaptation/scenario_state.py +6 -2
  31. helm/benchmark/annotation/aci_bench_annotator.py +84 -0
  32. helm/benchmark/annotation/air_bench_annotator.py +79 -0
  33. helm/benchmark/annotation/alrage_annotator.py +90 -0
  34. helm/benchmark/annotation/annotator.py +48 -0
  35. helm/benchmark/annotation/annotator_factory.py +50 -0
  36. helm/benchmark/annotation/anthropic_red_team_annotator.py +57 -0
  37. helm/benchmark/annotation/autobencher_capabilities_annotator.py +107 -0
  38. helm/benchmark/annotation/autobencher_safety_annotator.py +98 -0
  39. helm/benchmark/annotation/bigcodebench_annotator.py +108 -0
  40. helm/benchmark/annotation/bird_sql_annotator.py +58 -0
  41. helm/benchmark/annotation/call_center_annotator.py +258 -0
  42. helm/benchmark/annotation/chw_care_plan_annotator.py +82 -0
  43. helm/benchmark/annotation/czech_bank_qa_annotator.py +78 -0
  44. helm/benchmark/annotation/dischargeme_annotator.py +96 -0
  45. helm/benchmark/annotation/ehr_sql_annotator.py +87 -0
  46. helm/benchmark/annotation/financebench_annotator.py +79 -0
  47. helm/benchmark/annotation/harm_bench_annotator.py +55 -0
  48. helm/benchmark/annotation/helpdesk_call_summarization_annotator.py +131 -0
  49. helm/benchmark/annotation/image2struct/image_compiler_annotator.py +93 -0
  50. helm/benchmark/annotation/image2struct/latex_compiler_annotator.py +59 -0
  51. helm/benchmark/annotation/image2struct/lilypond_compiler_annotator.py +86 -0
  52. helm/benchmark/annotation/image2struct/webpage_compiler_annotator.py +132 -0
  53. helm/benchmark/annotation/live_qa_annotator.py +76 -0
  54. helm/benchmark/annotation/med_dialog_annotator.py +88 -0
  55. helm/benchmark/annotation/medalign_annotator.py +89 -0
  56. helm/benchmark/annotation/medi_qa_annotator.py +87 -0
  57. helm/benchmark/annotation/medication_qa_annotator.py +86 -0
  58. helm/benchmark/annotation/mental_health_annotator.py +87 -0
  59. helm/benchmark/annotation/mimic_bhc_annotator.py +89 -0
  60. helm/benchmark/annotation/mimic_rrs_annotator.py +89 -0
  61. helm/benchmark/annotation/model_as_judge.py +309 -0
  62. helm/benchmark/annotation/mtsamples_procedures_annotator.py +87 -0
  63. helm/benchmark/annotation/mtsamples_replicate_annotator.py +90 -0
  64. helm/benchmark/annotation/omni_math/gpt_evaluation_template.txt +152 -0
  65. helm/benchmark/annotation/omni_math/gpt_evaluation_zero_shot_template.txt +36 -0
  66. helm/benchmark/annotation/omni_math_annotator.py +131 -0
  67. helm/benchmark/annotation/simple_safety_tests_annotator.py +50 -0
  68. helm/benchmark/annotation/spider_annotator.py +18 -0
  69. helm/benchmark/annotation/starr_patient_instructions_annotator.py +87 -0
  70. helm/benchmark/annotation/test_annotator_factory.py +26 -0
  71. helm/benchmark/annotation/test_dummy_annotator.py +44 -0
  72. helm/benchmark/annotation/wildbench/eval_template.pairwise.v2.md +75 -0
  73. helm/benchmark/annotation/wildbench/eval_template.score.v2.md +66 -0
  74. helm/benchmark/annotation/wildbench_annotator.py +119 -0
  75. helm/benchmark/annotation/xstest_annotator.py +100 -0
  76. helm/benchmark/annotation_executor.py +144 -0
  77. helm/benchmark/augmentations/cleva_perturbation.py +9 -8
  78. helm/benchmark/augmentations/contraction_expansion_perturbation.py +2 -2
  79. helm/benchmark/augmentations/contrast_sets_perturbation.py +2 -2
  80. helm/benchmark/augmentations/data_augmenter.py +0 -2
  81. helm/benchmark/augmentations/dialect_perturbation.py +4 -5
  82. helm/benchmark/augmentations/extra_space_perturbation.py +2 -2
  83. helm/benchmark/augmentations/filler_words_perturbation.py +2 -2
  84. helm/benchmark/augmentations/gender_perturbation.py +3 -3
  85. helm/benchmark/augmentations/lowercase_perturbation.py +2 -2
  86. helm/benchmark/augmentations/mild_mix_perturbation.py +6 -6
  87. helm/benchmark/augmentations/misspelling_perturbation.py +2 -2
  88. helm/benchmark/augmentations/person_name_perturbation.py +4 -5
  89. helm/benchmark/augmentations/perturbation.py +26 -4
  90. helm/benchmark/augmentations/perturbation_description.py +1 -1
  91. helm/benchmark/augmentations/space_perturbation.py +2 -2
  92. helm/benchmark/augmentations/suffix_perturbation.py +29 -0
  93. helm/benchmark/augmentations/synonym_perturbation.py +4 -3
  94. helm/benchmark/augmentations/test_perturbation.py +56 -19
  95. helm/benchmark/augmentations/translate_perturbation.py +31 -0
  96. helm/benchmark/augmentations/typos_perturbation.py +2 -2
  97. helm/benchmark/config_registry.py +7 -1
  98. helm/benchmark/data_preprocessor.py +2 -2
  99. helm/benchmark/executor.py +54 -25
  100. helm/benchmark/huggingface_registration.py +28 -10
  101. helm/benchmark/metrics/air_bench_metrics.py +3212 -0
  102. helm/benchmark/metrics/alrage_metric.py +35 -0
  103. helm/benchmark/metrics/annotation_metrics.py +108 -0
  104. helm/benchmark/metrics/basic_metrics.py +437 -667
  105. helm/benchmark/metrics/bbq_metrics.py +17 -6
  106. helm/benchmark/metrics/bias_metrics.py +18 -9
  107. helm/benchmark/metrics/bias_word_lists.py +1 -1
  108. helm/benchmark/metrics/bigcodebench_metrics.py +25 -0
  109. helm/benchmark/metrics/bird_sql_metrics.py +28 -0
  110. helm/benchmark/metrics/classification_metrics.py +107 -22
  111. helm/benchmark/metrics/cleva_accuracy_metrics.py +8 -5
  112. helm/benchmark/metrics/cleva_harms_metrics.py +12 -11
  113. helm/benchmark/metrics/code_metrics.py +5 -5
  114. helm/benchmark/metrics/code_metrics_helper.py +11 -3
  115. helm/benchmark/metrics/codeinsights_code_efficiency_metrics.py +186 -0
  116. helm/benchmark/metrics/codeinsights_code_evaluation_metrics.py +477 -0
  117. helm/benchmark/metrics/codeinsights_correct_code_metrics.py +366 -0
  118. helm/benchmark/metrics/codeinsights_edge_case_metrics.py +92 -0
  119. helm/benchmark/metrics/codeinsights_metric_specs.py +51 -0
  120. helm/benchmark/metrics/comet_metric.py +125 -0
  121. helm/benchmark/metrics/common_metric_specs.py +174 -0
  122. helm/benchmark/metrics/conv_fin_qa_calc_metrics.py +83 -0
  123. helm/benchmark/metrics/copyright_metrics.py +5 -5
  124. helm/benchmark/metrics/czech_bank_qa_metrics.py +29 -0
  125. helm/benchmark/metrics/decodingtrust_fairness_metrics.py +72 -0
  126. helm/benchmark/metrics/decodingtrust_ood_knowledge_metrics.py +66 -0
  127. helm/benchmark/metrics/decodingtrust_privacy_metrics.py +101 -0
  128. helm/benchmark/metrics/decodingtrust_stereotype_bias_metrics.py +202 -0
  129. helm/benchmark/metrics/disinformation_metrics.py +8 -114
  130. helm/benchmark/metrics/dry_run_metrics.py +35 -6
  131. helm/benchmark/metrics/efficiency_metrics.py +287 -0
  132. helm/benchmark/metrics/ehr_sql_metrics.py +159 -0
  133. helm/benchmark/metrics/evaluate_instances_metric.py +59 -0
  134. helm/benchmark/metrics/evaluate_reference_metrics.py +831 -0
  135. helm/benchmark/metrics/fin_qa_metrics.py +60 -0
  136. helm/benchmark/metrics/fin_qa_metrics_helper.py +398 -0
  137. helm/benchmark/metrics/gpqa_chain_of_thought_metric.py +115 -0
  138. helm/benchmark/metrics/gpt4_audio_critique_metrics.py +167 -0
  139. helm/benchmark/metrics/gpt4_audio_refusal_metrics.py +145 -0
  140. helm/benchmark/metrics/gpt4v_originality_critique_metrics.py +126 -0
  141. helm/benchmark/metrics/helpdesk_call_summarization_metrics.py +48 -0
  142. helm/benchmark/metrics/ifeval/instructions.py +1574 -0
  143. helm/benchmark/metrics/ifeval/instructions_registry.py +182 -0
  144. helm/benchmark/metrics/ifeval/instructions_registry.pyi +3 -0
  145. helm/benchmark/metrics/ifeval/instructions_util.py +153 -0
  146. helm/benchmark/metrics/ifeval_metrics.py +67 -0
  147. helm/benchmark/metrics/image_generation/aesthetics_metrics.py +54 -0
  148. helm/benchmark/metrics/image_generation/aesthetics_scorer.py +66 -0
  149. helm/benchmark/metrics/image_generation/clip_score_metrics.py +84 -0
  150. helm/benchmark/metrics/image_generation/denoised_runtime_metric.py +42 -0
  151. helm/benchmark/metrics/image_generation/detection_metrics.py +57 -0
  152. helm/benchmark/metrics/image_generation/detectors/base_detector.py +8 -0
  153. helm/benchmark/metrics/image_generation/detectors/vitdet.py +178 -0
  154. helm/benchmark/metrics/image_generation/efficiency_metrics.py +41 -0
  155. helm/benchmark/metrics/image_generation/fidelity_metrics.py +168 -0
  156. helm/benchmark/metrics/image_generation/fractal_dimension/__init__.py +0 -0
  157. helm/benchmark/metrics/image_generation/fractal_dimension/fractal_dimension_util.py +63 -0
  158. helm/benchmark/metrics/image_generation/fractal_dimension/test_fractal_dimension_util.py +33 -0
  159. helm/benchmark/metrics/image_generation/fractal_dimension_metric.py +50 -0
  160. helm/benchmark/metrics/image_generation/gender_metrics.py +58 -0
  161. helm/benchmark/metrics/image_generation/image_critique_metrics.py +284 -0
  162. helm/benchmark/metrics/image_generation/lpips_metrics.py +82 -0
  163. helm/benchmark/metrics/image_generation/multi_scale_ssim_metrics.py +82 -0
  164. helm/benchmark/metrics/image_generation/nsfw_detector.py +96 -0
  165. helm/benchmark/metrics/image_generation/nsfw_metrics.py +103 -0
  166. helm/benchmark/metrics/image_generation/nudity_metrics.py +38 -0
  167. helm/benchmark/metrics/image_generation/photorealism_critique_metrics.py +153 -0
  168. helm/benchmark/metrics/image_generation/psnr_metrics.py +78 -0
  169. helm/benchmark/metrics/image_generation/q16/__init__.py +0 -0
  170. helm/benchmark/metrics/image_generation/q16/q16_toxicity_detector.py +90 -0
  171. helm/benchmark/metrics/image_generation/q16/test_q16.py +20 -0
  172. helm/benchmark/metrics/image_generation/q16_toxicity_metrics.py +48 -0
  173. helm/benchmark/metrics/image_generation/skin_tone_metrics.py +164 -0
  174. helm/benchmark/metrics/image_generation/uiqi_metrics.py +92 -0
  175. helm/benchmark/metrics/image_generation/watermark/__init__.py +0 -0
  176. helm/benchmark/metrics/image_generation/watermark/test_watermark_detector.py +16 -0
  177. helm/benchmark/metrics/image_generation/watermark/watermark_detector.py +87 -0
  178. helm/benchmark/metrics/image_generation/watermark_metrics.py +48 -0
  179. helm/benchmark/metrics/instruction_following_critique_metrics.py +48 -5
  180. helm/benchmark/metrics/kpi_edgar_metrics.py +142 -0
  181. helm/benchmark/metrics/language_modeling_metrics.py +111 -0
  182. helm/benchmark/metrics/live_qa_metrics.py +35 -0
  183. helm/benchmark/metrics/llm_jury_metrics.py +58 -0
  184. helm/benchmark/metrics/lmkt_metric_specs.py +12 -0
  185. helm/benchmark/metrics/lmkt_metrics.py +47 -0
  186. helm/benchmark/metrics/machine_translation_metrics.py +89 -0
  187. helm/benchmark/metrics/medcalc_bench_metrics.py +137 -0
  188. helm/benchmark/metrics/medec_metrics.py +124 -0
  189. helm/benchmark/metrics/melt_bias_metric.py +234 -0
  190. helm/benchmark/metrics/melt_bias_word_lists.py +1367 -0
  191. helm/benchmark/metrics/melt_metric_specs.py +43 -0
  192. helm/benchmark/metrics/melt_toxicity_metric.py +107 -0
  193. helm/benchmark/metrics/metric.py +121 -175
  194. helm/benchmark/metrics/metric_name.py +0 -1
  195. helm/benchmark/metrics/metric_service.py +23 -7
  196. helm/benchmark/metrics/mimiciv_billing_code_metrics.py +127 -0
  197. helm/benchmark/metrics/nltk_helper.py +32 -0
  198. helm/benchmark/metrics/omni_math_metrics.py +44 -0
  199. helm/benchmark/metrics/openai_mrcr_metrics.py +52 -0
  200. helm/benchmark/metrics/output_processing_metric.py +60 -0
  201. helm/benchmark/metrics/output_processors.py +15 -0
  202. helm/benchmark/metrics/paraphrase_generation_metrics.py +5 -6
  203. helm/benchmark/metrics/prometheus_vision_critique_metrics.py +185 -0
  204. helm/benchmark/metrics/ranking_metrics.py +5 -5
  205. helm/benchmark/metrics/reference_metric.py +148 -0
  206. helm/benchmark/metrics/reka_vibe_critique_metrics.py +158 -0
  207. helm/benchmark/metrics/ruler_qa_metrics.py +34 -0
  208. helm/benchmark/metrics/safety_metrics.py +91 -0
  209. helm/benchmark/metrics/seahelm_metrics.py +201 -0
  210. helm/benchmark/metrics/seahelm_metrics_specs.py +10 -0
  211. helm/benchmark/metrics/spider_metrics.py +7 -0
  212. helm/benchmark/metrics/statistic.py +1 -1
  213. helm/benchmark/metrics/summac/model_summac.py +8 -11
  214. helm/benchmark/metrics/summarization_critique_metrics.py +4 -4
  215. helm/benchmark/metrics/summarization_metrics.py +150 -11
  216. helm/benchmark/metrics/test_bias_metrics.py +5 -1
  217. helm/benchmark/metrics/test_classification_metrics.py +145 -70
  218. helm/benchmark/metrics/test_disinformation_metrics.py +78 -0
  219. helm/benchmark/metrics/{test_basic_metrics.py → test_evaluate_reference_metrics.py} +20 -1
  220. helm/benchmark/metrics/test_metric.py +3 -3
  221. helm/benchmark/metrics/test_statistic.py +2 -2
  222. helm/benchmark/metrics/tokens/ai21_token_cost_estimator.py +1 -1
  223. helm/benchmark/metrics/tokens/auto_token_cost_estimator.py +6 -6
  224. helm/benchmark/metrics/tokens/cohere_token_cost_estimator.py +1 -1
  225. helm/benchmark/metrics/tokens/free_token_cost_estimator.py +1 -1
  226. helm/benchmark/metrics/tokens/gooseai_token_cost_estimator.py +11 -3
  227. helm/benchmark/metrics/tokens/openai_token_cost_estimator.py +1 -1
  228. helm/benchmark/metrics/tokens/test_ai21_token_cost_estimator.py +3 -3
  229. helm/benchmark/metrics/tokens/test_openai_token_cost_estimator.py +7 -7
  230. helm/benchmark/metrics/toxicity_metrics.py +37 -7
  231. helm/benchmark/metrics/toxicity_utils.py +23 -0
  232. helm/benchmark/metrics/ultra_suite_asr_classification_metrics.py +52 -0
  233. helm/benchmark/metrics/unitxt_metrics.py +107 -0
  234. helm/benchmark/metrics/vision_language/__init__.py +0 -0
  235. helm/benchmark/metrics/vision_language/emd_utils.py +347 -0
  236. helm/benchmark/metrics/vision_language/image_metrics.py +537 -0
  237. helm/benchmark/metrics/vision_language/image_utils.py +100 -0
  238. helm/benchmark/metrics/wildbench_metrics.py +54 -0
  239. helm/benchmark/model_deployment_registry.py +69 -5
  240. helm/benchmark/model_metadata_registry.py +58 -2
  241. helm/benchmark/multi_gpu_runner.py +133 -0
  242. helm/benchmark/presentation/contamination.py +3 -3
  243. helm/benchmark/presentation/create_plots.py +51 -20
  244. helm/benchmark/presentation/run_display.py +51 -12
  245. helm/benchmark/presentation/run_entry.py +2 -2
  246. helm/benchmark/presentation/schema.py +83 -66
  247. helm/benchmark/presentation/summarize.py +483 -388
  248. helm/benchmark/presentation/table.py +8 -8
  249. helm/benchmark/presentation/taxonomy_info.py +20 -0
  250. helm/benchmark/presentation/test_contamination.py +2 -2
  251. helm/benchmark/presentation/test_create_plots.py +4 -1
  252. helm/benchmark/presentation/test_run_entry.py +2 -2
  253. helm/benchmark/presentation/test_schema.py +11 -0
  254. helm/benchmark/presentation/test_summarize.py +148 -6
  255. helm/benchmark/presentation/torr_robustness_summarizer.py +178 -0
  256. helm/benchmark/reeval_run.py +202 -0
  257. helm/benchmark/reeval_runner.py +355 -0
  258. helm/benchmark/run.py +151 -87
  259. helm/benchmark/run_expander.py +418 -33
  260. helm/benchmark/run_spec.py +93 -0
  261. helm/benchmark/run_spec_factory.py +180 -0
  262. helm/benchmark/run_specs/__init__.py +0 -0
  263. helm/benchmark/run_specs/air_bench_run_specs.py +58 -0
  264. helm/benchmark/run_specs/arabic_run_specs.py +197 -0
  265. helm/benchmark/run_specs/audio_run_specs.py +657 -0
  266. helm/benchmark/run_specs/bluex_run_specs.py +40 -0
  267. helm/benchmark/run_specs/call_center_run_specs.py +201 -0
  268. helm/benchmark/run_specs/capabilities_run_specs.py +308 -0
  269. helm/benchmark/run_specs/classic_run_specs.py +1393 -0
  270. helm/benchmark/run_specs/cleva_run_specs.py +277 -0
  271. helm/benchmark/run_specs/codeinsights_run_specs.py +192 -0
  272. helm/benchmark/run_specs/decodingtrust_run_specs.py +316 -0
  273. helm/benchmark/run_specs/enem_challenge_specs.py +31 -0
  274. helm/benchmark/run_specs/enterprise_run_specs.py +280 -0
  275. helm/benchmark/run_specs/experimental_run_specs.py +224 -0
  276. helm/benchmark/run_specs/finance_run_specs.py +114 -0
  277. helm/benchmark/run_specs/healthqa_br_run_specs.py +40 -0
  278. helm/benchmark/run_specs/heim_run_specs.py +625 -0
  279. helm/benchmark/run_specs/imdb_ptbr_run_specs.py +30 -0
  280. helm/benchmark/run_specs/instruction_following_run_specs.py +129 -0
  281. helm/benchmark/run_specs/lite_run_specs.py +307 -0
  282. helm/benchmark/run_specs/lmkt_run_specs.py +144 -0
  283. helm/benchmark/run_specs/long_context_run_specs.py +188 -0
  284. helm/benchmark/run_specs/medhelm/__init__.py +0 -0
  285. helm/benchmark/run_specs/medhelm/benchmark_config.py +219 -0
  286. helm/benchmark/run_specs/medhelm_run_specs.py +1570 -0
  287. helm/benchmark/run_specs/melt_run_specs.py +783 -0
  288. helm/benchmark/run_specs/mmlu_clinical_afr_run_specs.py +49 -0
  289. helm/benchmark/run_specs/multilingual_run_specs.py +50 -0
  290. helm/benchmark/run_specs/oab_exams_specs.py +32 -0
  291. helm/benchmark/run_specs/safety_run_specs.py +191 -0
  292. helm/benchmark/run_specs/seahelm_run_specs.py +652 -0
  293. helm/benchmark/run_specs/simple_run_specs.py +104 -0
  294. helm/benchmark/run_specs/speech_disorder_audio_run_specs.py +167 -0
  295. helm/benchmark/run_specs/sql_run_specs.py +54 -0
  296. helm/benchmark/run_specs/tweetsentbr_run_specs.py +32 -0
  297. helm/benchmark/run_specs/unitxt_run_specs.py +51 -0
  298. helm/benchmark/run_specs/vlm_run_specs.py +1057 -0
  299. helm/benchmark/run_specs/winogrande_afr_run_specs.py +47 -0
  300. helm/benchmark/runner.py +63 -62
  301. helm/benchmark/runner_config_registry.py +21 -0
  302. helm/benchmark/scenarios/aci_bench_scenario.py +149 -0
  303. helm/benchmark/scenarios/air_bench_scenario.py +76 -0
  304. helm/benchmark/scenarios/alghafa_scenario.py +126 -0
  305. helm/benchmark/scenarios/alrage_scenario.py +54 -0
  306. helm/benchmark/scenarios/anthropic_hh_rlhf_scenario.py +27 -3
  307. helm/benchmark/scenarios/anthropic_red_team_scenario.py +82 -0
  308. helm/benchmark/scenarios/arabic_exams_scenario.py +114 -0
  309. helm/benchmark/scenarios/arabic_mmlu_scenario.py +82 -0
  310. helm/benchmark/scenarios/aratrust_scenario.py +95 -0
  311. helm/benchmark/scenarios/audio_language/__init__.py +0 -0
  312. helm/benchmark/scenarios/audio_language/air_bench_chat_scenario.py +130 -0
  313. helm/benchmark/scenarios/audio_language/air_bench_foundation_scenario.py +154 -0
  314. helm/benchmark/scenarios/audio_language/ami_scenario.py +96 -0
  315. helm/benchmark/scenarios/audio_language/audio_mnist_scenario.py +62 -0
  316. helm/benchmark/scenarios/audio_language/audio_pairs_scenario.py +62 -0
  317. helm/benchmark/scenarios/audio_language/audiocaps_scenario.py +59 -0
  318. helm/benchmark/scenarios/audio_language/casual_conversations2_scenario.py +152 -0
  319. helm/benchmark/scenarios/audio_language/common_voice_15_scenario.py +99 -0
  320. helm/benchmark/scenarios/audio_language/corebench_scenario.py +77 -0
  321. helm/benchmark/scenarios/audio_language/covost2_scenario.py +163 -0
  322. helm/benchmark/scenarios/audio_language/fleurs_fairness_scenario.py +83 -0
  323. helm/benchmark/scenarios/audio_language/fleurs_scenario.py +312 -0
  324. helm/benchmark/scenarios/audio_language/iemocap_audio_scenario.py +83 -0
  325. helm/benchmark/scenarios/audio_language/librispeech_fairness_scenario.py +96 -0
  326. helm/benchmark/scenarios/audio_language/librispeech_scenario.py +80 -0
  327. helm/benchmark/scenarios/audio_language/meld_audio_scenario.py +113 -0
  328. helm/benchmark/scenarios/audio_language/multilingual_librispeech_scenario.py +80 -0
  329. helm/benchmark/scenarios/audio_language/mustard_scenario.py +142 -0
  330. helm/benchmark/scenarios/audio_language/mutox_scenario.py +254 -0
  331. helm/benchmark/scenarios/audio_language/parade_scenario.py +97 -0
  332. helm/benchmark/scenarios/audio_language/speech_robust_bench_scenario.py +124 -0
  333. helm/benchmark/scenarios/audio_language/ultra_suite_asr_classification_scenario.py +74 -0
  334. helm/benchmark/scenarios/audio_language/ultra_suite_asr_transcription_scenario.py +70 -0
  335. helm/benchmark/scenarios/audio_language/ultra_suite_classification_scenario.py +79 -0
  336. helm/benchmark/scenarios/audio_language/ultra_suite_disorder_breakdown_scenario.py +78 -0
  337. helm/benchmark/scenarios/audio_language/ultra_suite_disorder_symptoms_scenario.py +78 -0
  338. helm/benchmark/scenarios/audio_language/vocal_sound_scenario.py +83 -0
  339. helm/benchmark/scenarios/audio_language/voice_jailbreak_attacks_scenario.py +87 -0
  340. helm/benchmark/scenarios/audio_language/voxceleb2_scenario.py +105 -0
  341. helm/benchmark/scenarios/autobencher_capabilities_scenario.py +68 -0
  342. helm/benchmark/scenarios/autobencher_safety_scenario.py +51 -0
  343. helm/benchmark/scenarios/babi_qa_scenario.py +16 -1
  344. helm/benchmark/scenarios/banking77_scenario.py +77 -0
  345. helm/benchmark/scenarios/bbq_scenario.py +17 -2
  346. helm/benchmark/scenarios/best_chatgpt_prompts.yaml +473 -0
  347. helm/benchmark/scenarios/big_bench_scenario.py +11 -1
  348. helm/benchmark/scenarios/bigcodebench_scenario.py +58 -0
  349. helm/benchmark/scenarios/bird_sql_scenario.py +112 -0
  350. helm/benchmark/scenarios/bird_sql_scenario_helper.py +118 -0
  351. helm/benchmark/scenarios/blimp_scenario.py +1 -1
  352. helm/benchmark/scenarios/bluex_scenario.py +70 -0
  353. helm/benchmark/scenarios/bold_scenario.py +18 -3
  354. helm/benchmark/scenarios/boolq_scenario.py +21 -1
  355. helm/benchmark/scenarios/call_center_scenario.py +84 -0
  356. helm/benchmark/scenarios/casehold_scenario.py +79 -0
  357. helm/benchmark/scenarios/chw_care_plan_scenario.py +129 -0
  358. helm/benchmark/scenarios/ci_mcqa_scenario.py +80 -0
  359. helm/benchmark/scenarios/civil_comments_scenario.py +14 -1
  360. helm/benchmark/scenarios/clear_scenario.py +180 -0
  361. helm/benchmark/scenarios/cleva_scenario.py +482 -3
  362. helm/benchmark/scenarios/code_scenario.py +46 -4
  363. helm/benchmark/scenarios/codeinsights_code_efficiency_scenario.py +197 -0
  364. helm/benchmark/scenarios/codeinsights_correct_code_scenario.py +78 -0
  365. helm/benchmark/scenarios/codeinsights_edge_case_scenario.py +192 -0
  366. helm/benchmark/scenarios/codeinsights_student_coding_scenario.py +162 -0
  367. helm/benchmark/scenarios/codeinsights_student_mistake_scenario.py +188 -0
  368. helm/benchmark/scenarios/commonsense_scenario.py +33 -1
  369. helm/benchmark/scenarios/compositional_instructions.yaml +70 -0
  370. helm/benchmark/scenarios/conv_fin_qa_calc_scenario.py +118 -0
  371. helm/benchmark/scenarios/copyright_scenario.py +35 -1
  372. helm/benchmark/scenarios/covid_dialog_scenario.py +10 -1
  373. helm/benchmark/scenarios/cti_to_mitre_scenario.py +261 -0
  374. helm/benchmark/scenarios/custom_mcqa_scenario.py +1 -1
  375. helm/benchmark/scenarios/czech_bank_qa_scenario.py +148 -0
  376. helm/benchmark/scenarios/decodingtrust_adv_demonstration_scenario.py +190 -0
  377. helm/benchmark/scenarios/decodingtrust_adv_robustness_scenario.py +143 -0
  378. helm/benchmark/scenarios/decodingtrust_fairness_scenario.py +98 -0
  379. helm/benchmark/scenarios/decodingtrust_machine_ethics_scenario.py +344 -0
  380. helm/benchmark/scenarios/decodingtrust_ood_robustness_scenario.py +217 -0
  381. helm/benchmark/scenarios/decodingtrust_privacy_scenario.py +571 -0
  382. helm/benchmark/scenarios/decodingtrust_stereotype_bias_scenario.py +80 -0
  383. helm/benchmark/scenarios/decodingtrust_toxicity_prompts_scenario.py +90 -0
  384. helm/benchmark/scenarios/dialogue_scenarios.py +13 -3
  385. helm/benchmark/scenarios/dischargeme_scenario.py +196 -0
  386. helm/benchmark/scenarios/disinformation_scenario.py +32 -1
  387. helm/benchmark/scenarios/dyck_language_scenario.py +25 -1
  388. helm/benchmark/scenarios/echr_judgment_classification_scenario.py +113 -0
  389. helm/benchmark/scenarios/ehr_sql_scenario.py +137 -0
  390. helm/benchmark/scenarios/ehrshot_scenario.py +1541 -0
  391. helm/benchmark/scenarios/enem_challenge_scenario.py +77 -0
  392. helm/benchmark/scenarios/entity_data_imputation_scenario.py +33 -3
  393. helm/benchmark/scenarios/entity_matching_scenario.py +26 -2
  394. helm/benchmark/scenarios/ewok_scenario.py +116 -0
  395. helm/benchmark/scenarios/exams_multilingual_scenario.py +115 -0
  396. helm/benchmark/scenarios/fin_qa_scenario.py +139 -0
  397. helm/benchmark/scenarios/financebench_scenario.py +74 -0
  398. helm/benchmark/scenarios/financial_phrasebank_scenario.py +115 -0
  399. helm/benchmark/scenarios/gold_commodity_news_scenario.py +145 -0
  400. helm/benchmark/scenarios/gpqa_scenario.py +98 -0
  401. helm/benchmark/scenarios/grammar.py +2 -2
  402. helm/benchmark/scenarios/grammar_scenario.py +21 -2
  403. helm/benchmark/scenarios/gsm_scenario.py +31 -1
  404. helm/benchmark/scenarios/harm_bench_gcg_transfer_scenario.py +61 -0
  405. helm/benchmark/scenarios/harm_bench_scenario.py +70 -0
  406. helm/benchmark/scenarios/headqa_scenario.py +158 -0
  407. helm/benchmark/scenarios/healthqa_br_scenario.py +80 -0
  408. helm/benchmark/scenarios/helpdesk_call_summarization_scenario.py +50 -0
  409. helm/benchmark/scenarios/ice_scenario.py +28 -4
  410. helm/benchmark/scenarios/ifeval_scenario.py +71 -0
  411. helm/benchmark/scenarios/image_generation/__init__.py +0 -0
  412. helm/benchmark/scenarios/image_generation/common_syntactic_processes_scenario.py +105 -0
  413. helm/benchmark/scenarios/image_generation/cub200_scenario.py +95 -0
  414. helm/benchmark/scenarios/image_generation/daily_dalle_scenario.py +124 -0
  415. helm/benchmark/scenarios/image_generation/demographic_stereotypes_scenario.py +82 -0
  416. helm/benchmark/scenarios/image_generation/detection_scenario.py +83 -0
  417. helm/benchmark/scenarios/image_generation/draw_bench_scenario.py +74 -0
  418. helm/benchmark/scenarios/image_generation/i2p_scenario.py +57 -0
  419. helm/benchmark/scenarios/image_generation/landing_page_scenario.py +46 -0
  420. helm/benchmark/scenarios/image_generation/logos_scenario.py +223 -0
  421. helm/benchmark/scenarios/image_generation/magazine_cover_scenario.py +91 -0
  422. helm/benchmark/scenarios/image_generation/mental_disorders_scenario.py +46 -0
  423. helm/benchmark/scenarios/image_generation/mscoco_scenario.py +91 -0
  424. helm/benchmark/scenarios/image_generation/paint_skills_scenario.py +72 -0
  425. helm/benchmark/scenarios/image_generation/parti_prompts_scenario.py +94 -0
  426. helm/benchmark/scenarios/image_generation/radiology_scenario.py +42 -0
  427. helm/benchmark/scenarios/image_generation/relational_understanding_scenario.py +52 -0
  428. helm/benchmark/scenarios/image_generation/time_most_significant_historical_figures_scenario.py +124 -0
  429. helm/benchmark/scenarios/image_generation/winoground_scenario.py +62 -0
  430. helm/benchmark/scenarios/imdb_ptbr_scenario.py +60 -0
  431. helm/benchmark/scenarios/imdb_scenario.py +26 -3
  432. helm/benchmark/scenarios/infinite_bench_en_mc_scenario.py +111 -0
  433. helm/benchmark/scenarios/infinite_bench_en_qa_scenario.py +85 -0
  434. helm/benchmark/scenarios/infinite_bench_en_sum_scenario.py +98 -0
  435. helm/benchmark/scenarios/interactive_qa_mmlu_scenario.py +2 -2
  436. helm/benchmark/scenarios/koala_scenario.py +21 -1
  437. helm/benchmark/scenarios/kpi_edgar_scenario.py +172 -0
  438. helm/benchmark/scenarios/legal_contract_summarization_scenario.py +149 -0
  439. helm/benchmark/scenarios/legal_opinion_sentiment_classification_scenario.py +77 -0
  440. helm/benchmark/scenarios/legal_summarization_scenario.py +61 -1
  441. helm/benchmark/scenarios/legal_support_scenario.py +24 -1
  442. helm/benchmark/scenarios/legalbench_scenario.py +45 -3
  443. helm/benchmark/scenarios/lex_glue_scenario.py +23 -2
  444. helm/benchmark/scenarios/lextreme_scenario.py +22 -1
  445. helm/benchmark/scenarios/live_qa_scenario.py +94 -0
  446. helm/benchmark/scenarios/lm_entry_scenario.py +185 -0
  447. helm/benchmark/scenarios/lmkt_scenarios.py +288 -0
  448. helm/benchmark/scenarios/lsat_qa_scenario.py +15 -1
  449. helm/benchmark/scenarios/madinah_qa_scenario.py +73 -0
  450. helm/benchmark/scenarios/math_scenario.py +81 -22
  451. helm/benchmark/scenarios/mbzuai_human_translated_arabic_mmlu.py +68 -0
  452. helm/benchmark/scenarios/me_q_sum_scenario.py +10 -1
  453. helm/benchmark/scenarios/med_dialog_scenario.py +56 -22
  454. helm/benchmark/scenarios/med_mcqa_scenario.py +24 -1
  455. helm/benchmark/scenarios/med_paragraph_simplification_scenario.py +10 -1
  456. helm/benchmark/scenarios/med_qa_scenario.py +30 -1
  457. helm/benchmark/scenarios/medalign_scenario.py +117 -0
  458. helm/benchmark/scenarios/medalign_scenario_helper.py +326 -0
  459. helm/benchmark/scenarios/medbullets_scenario.py +167 -0
  460. helm/benchmark/scenarios/medcalc_bench_scenario.py +149 -0
  461. helm/benchmark/scenarios/medec_scenario.py +148 -0
  462. helm/benchmark/scenarios/medhallu_scenario.py +95 -0
  463. helm/benchmark/scenarios/medhelm/__init__.py +0 -0
  464. helm/benchmark/scenarios/medhelm/judges.yaml +14 -0
  465. helm/benchmark/scenarios/medhelm_configurable_scenario.py +101 -0
  466. helm/benchmark/scenarios/medi_qa_scenario.py +134 -0
  467. helm/benchmark/scenarios/medication_qa_scenario.py +96 -0
  468. helm/benchmark/scenarios/melt_ir_scenario.py +171 -0
  469. helm/benchmark/scenarios/melt_knowledge_scenario.py +246 -0
  470. helm/benchmark/scenarios/melt_lm_scenarios.py +252 -0
  471. helm/benchmark/scenarios/melt_scenarios.py +793 -0
  472. helm/benchmark/scenarios/melt_srn_scenario.py +342 -0
  473. helm/benchmark/scenarios/melt_synthetic_reasoning_scenario.py +222 -0
  474. helm/benchmark/scenarios/melt_translation_scenario.py +152 -0
  475. helm/benchmark/scenarios/mental_health_scenario.py +146 -0
  476. helm/benchmark/scenarios/mimic_bhc_scenario.py +127 -0
  477. helm/benchmark/scenarios/mimic_rrs_scenario.py +121 -0
  478. helm/benchmark/scenarios/mimiciv_billing_code_scenario.py +99 -0
  479. helm/benchmark/scenarios/mmlu_clinical_afr_scenario.py +74 -0
  480. helm/benchmark/scenarios/mmlu_pro_scenario.py +113 -0
  481. helm/benchmark/scenarios/mmlu_scenario.py +32 -1
  482. helm/benchmark/scenarios/mmmlu_scenario.py +85 -0
  483. helm/benchmark/scenarios/msmarco_scenario.py +31 -1
  484. helm/benchmark/scenarios/mtsamples_procedures_scenario.py +166 -0
  485. helm/benchmark/scenarios/mtsamples_replicate_scenario.py +164 -0
  486. helm/benchmark/scenarios/n2c2_ct_matching_scenario.py +297 -0
  487. helm/benchmark/scenarios/narrativeqa_scenario.py +20 -1
  488. helm/benchmark/scenarios/natural_qa_scenario.py +33 -1
  489. helm/benchmark/scenarios/newsqa_scenario.py +1 -1
  490. helm/benchmark/scenarios/oab_exams_scenario.py +57 -0
  491. helm/benchmark/scenarios/omni_math_scenario.py +71 -0
  492. helm/benchmark/scenarios/open_assistant_scenario.py +33 -2
  493. helm/benchmark/scenarios/openai_mrcr_scenario.py +94 -0
  494. helm/benchmark/scenarios/opinions_qa_scenario.py +1 -5
  495. helm/benchmark/scenarios/pubmed_qa_scenario.py +81 -43
  496. helm/benchmark/scenarios/quac_scenario.py +24 -1
  497. helm/benchmark/scenarios/race_based_med_scenario.py +175 -0
  498. helm/benchmark/scenarios/raft_scenario.py +33 -3
  499. helm/benchmark/scenarios/real_toxicity_prompts_scenario.py +14 -1
  500. helm/benchmark/scenarios/ruler_qa_scenario_helper.py +171 -0
  501. helm/benchmark/scenarios/ruler_qa_scenarios.py +128 -0
  502. helm/benchmark/scenarios/scenario.py +44 -1
  503. helm/benchmark/scenarios/seahelm_scenario.py +2295 -0
  504. helm/benchmark/scenarios/self_instruct_scenario.py +29 -1
  505. helm/benchmark/scenarios/shc_bmt_scenario.py +97 -0
  506. helm/benchmark/scenarios/shc_cdi_scenario.py +95 -0
  507. helm/benchmark/scenarios/shc_conf_scenario.py +99 -0
  508. helm/benchmark/scenarios/shc_ent_scenario.py +98 -0
  509. helm/benchmark/scenarios/shc_gip_scenario.py +94 -0
  510. helm/benchmark/scenarios/shc_privacy_scenario.py +100 -0
  511. helm/benchmark/scenarios/shc_proxy_scenario.py +98 -0
  512. helm/benchmark/scenarios/shc_ptbm_scenario.py +104 -0
  513. helm/benchmark/scenarios/shc_sei_scenario.py +94 -0
  514. helm/benchmark/scenarios/shc_sequoia_scenario.py +98 -0
  515. helm/benchmark/scenarios/simple_safety_tests_scenario.py +44 -0
  516. helm/benchmark/scenarios/simple_scenarios.py +122 -1
  517. helm/benchmark/scenarios/situation_prompts.yaml +49 -0
  518. helm/benchmark/scenarios/spider_scenario.py +109 -0
  519. helm/benchmark/scenarios/starr_patient_instructions_scenario.py +119 -0
  520. helm/benchmark/scenarios/summarization_scenario.py +48 -1
  521. helm/benchmark/scenarios/sumosum_scenario.py +157 -0
  522. helm/benchmark/scenarios/synthetic_efficiency_scenario.py +22 -1
  523. helm/benchmark/scenarios/synthetic_reasoning_natural_scenario.py +24 -1
  524. helm/benchmark/scenarios/synthetic_reasoning_scenario.py +11 -1
  525. helm/benchmark/scenarios/test_air_bench_scenario.py +27 -0
  526. helm/benchmark/scenarios/test_alghafa_scenario.py +29 -0
  527. helm/benchmark/scenarios/test_alrage_scenario.py +23 -0
  528. helm/benchmark/scenarios/test_arabic_exams_scenario.py +21 -0
  529. helm/benchmark/scenarios/test_aratrust_scenario.py +21 -0
  530. helm/benchmark/scenarios/test_bigcodebench_scenario.py +26 -0
  531. helm/benchmark/scenarios/test_bluex_scenario.py +59 -0
  532. helm/benchmark/scenarios/test_commonsense_scenario.py +21 -0
  533. helm/benchmark/scenarios/test_czech_bank_qa_scenario.py +18 -0
  534. helm/benchmark/scenarios/test_enem_challenge_scenario.py +53 -0
  535. helm/benchmark/scenarios/test_ewok_scenario.py +29 -0
  536. helm/benchmark/scenarios/test_exams_multilingual_scenario.py +29 -0
  537. helm/benchmark/scenarios/test_financebench_scenario.py +26 -0
  538. helm/benchmark/scenarios/test_gold_commodity_news_scenario.py +18 -0
  539. helm/benchmark/scenarios/test_gpqa_scenario.py +44 -0
  540. helm/benchmark/scenarios/test_gsm_scenario.py +31 -0
  541. helm/benchmark/scenarios/test_healtha_br_scenario.py +57 -0
  542. helm/benchmark/scenarios/test_ifeval_scenario.py +36 -0
  543. helm/benchmark/scenarios/test_imdb_ptbr_scenario.py +27 -0
  544. helm/benchmark/scenarios/test_infinite_bench_en_qa_scenario.py +18 -0
  545. helm/benchmark/scenarios/test_infinite_bench_en_sum_scenario.py +31 -0
  546. helm/benchmark/scenarios/test_legalbench_scenario.py +30 -0
  547. helm/benchmark/scenarios/test_math_scenario.py +4 -3
  548. helm/benchmark/scenarios/test_med_qa_scenario.py +30 -0
  549. helm/benchmark/scenarios/test_mmlu_clinical_afr_scenario.py +21 -0
  550. helm/benchmark/scenarios/test_mmlu_pro_scenario.py +53 -0
  551. helm/benchmark/scenarios/test_mmlu_scenario.py +33 -0
  552. helm/benchmark/scenarios/test_narrativeqa_scenario.py +73 -0
  553. helm/benchmark/scenarios/test_oab_exams_scenario.py +51 -0
  554. helm/benchmark/scenarios/test_omni_math_scenario.py +27 -0
  555. helm/benchmark/scenarios/test_scenario.py +6 -3
  556. helm/benchmark/scenarios/test_simple_scenarios.py +50 -0
  557. helm/benchmark/scenarios/test_tweetsentbr_scenario.py +24 -0
  558. helm/benchmark/scenarios/test_wildbench_scenario.py +15 -0
  559. helm/benchmark/scenarios/test_winogrande_afr_scenario.py +19 -0
  560. helm/benchmark/scenarios/thai_exam_scenario.py +239 -0
  561. helm/benchmark/scenarios/the_pile_scenario.py +13 -1
  562. helm/benchmark/scenarios/truthful_qa_scenario.py +26 -2
  563. helm/benchmark/scenarios/tweetsentbr_scenario.py +66 -0
  564. helm/benchmark/scenarios/twitter_aae_scenario.py +20 -1
  565. helm/benchmark/scenarios/unitxt_scenario.py +62 -0
  566. helm/benchmark/scenarios/verifiability_judgment_scenario.py +4 -2
  567. helm/benchmark/scenarios/vicuna_scenario.py +22 -2
  568. helm/benchmark/scenarios/vision_language/a_okvqa_scenario.py +83 -0
  569. helm/benchmark/scenarios/vision_language/bingo_scenario.py +103 -0
  570. helm/benchmark/scenarios/vision_language/blink_scenario.py +140 -0
  571. helm/benchmark/scenarios/vision_language/crossmodal_3600_scenario.py +135 -0
  572. helm/benchmark/scenarios/vision_language/exams_v_scenario.py +104 -0
  573. helm/benchmark/scenarios/vision_language/fair_face_scenario.py +136 -0
  574. helm/benchmark/scenarios/vision_language/flickr30k_scenario.py +74 -0
  575. helm/benchmark/scenarios/vision_language/gqa_scenario.py +91 -0
  576. helm/benchmark/scenarios/vision_language/hateful_memes_scenario.py +94 -0
  577. helm/benchmark/scenarios/vision_language/heim_human_eval_scenario.py +113 -0
  578. helm/benchmark/scenarios/vision_language/image2struct/__init__.py +0 -0
  579. helm/benchmark/scenarios/vision_language/image2struct/chart2csv_scenario.py +55 -0
  580. helm/benchmark/scenarios/vision_language/image2struct/image2struct_scenario.py +225 -0
  581. helm/benchmark/scenarios/vision_language/image2struct/latex_scenario.py +21 -0
  582. helm/benchmark/scenarios/vision_language/image2struct/musicsheet_scenario.py +16 -0
  583. helm/benchmark/scenarios/vision_language/image2struct/utils_latex.py +339 -0
  584. helm/benchmark/scenarios/vision_language/image2struct/webpage/__init__.py +0 -0
  585. helm/benchmark/scenarios/vision_language/image2struct/webpage/driver.py +84 -0
  586. helm/benchmark/scenarios/vision_language/image2struct/webpage/jekyll_server.py +182 -0
  587. helm/benchmark/scenarios/vision_language/image2struct/webpage/utils.py +31 -0
  588. helm/benchmark/scenarios/vision_language/image2struct/webpage_scenario.py +256 -0
  589. helm/benchmark/scenarios/vision_language/math_vista_scenario.py +117 -0
  590. helm/benchmark/scenarios/vision_language/mementos_scenario.py +124 -0
  591. helm/benchmark/scenarios/vision_language/mm_safety_bench_scenario.py +103 -0
  592. helm/benchmark/scenarios/vision_language/mm_star_scenario.py +95 -0
  593. helm/benchmark/scenarios/vision_language/mme_scenario.py +148 -0
  594. helm/benchmark/scenarios/vision_language/mmmu_scenario.py +187 -0
  595. helm/benchmark/scenarios/vision_language/mscoco_captioning_scenario.py +92 -0
  596. helm/benchmark/scenarios/vision_language/mscoco_categorization_scenario.py +117 -0
  597. helm/benchmark/scenarios/vision_language/msr_vtt_scenario.py +75 -0
  598. helm/benchmark/scenarios/vision_language/multipanelvqa_scenario.py +169 -0
  599. helm/benchmark/scenarios/vision_language/originality_scenario.py +35 -0
  600. helm/benchmark/scenarios/vision_language/pairs_scenario.py +247 -0
  601. helm/benchmark/scenarios/vision_language/pope_scenario.py +105 -0
  602. helm/benchmark/scenarios/vision_language/real_world_qa_scenario.py +57 -0
  603. helm/benchmark/scenarios/vision_language/seed_bench_scenario.py +131 -0
  604. helm/benchmark/scenarios/vision_language/unicorn_scenario.py +108 -0
  605. helm/benchmark/scenarios/vision_language/vibe_eval_scenario.py +98 -0
  606. helm/benchmark/scenarios/vision_language/viz_wiz_scenario.py +4 -5
  607. helm/benchmark/scenarios/vision_language/vqa_rad_scenario.py +88 -0
  608. helm/benchmark/scenarios/vision_language/vqa_scenario.py +8 -4
  609. helm/benchmark/scenarios/wikifact_scenario.py +31 -1
  610. helm/benchmark/scenarios/wikitext_103_scenario.py +1 -1
  611. helm/benchmark/scenarios/wildbench_scenario.py +101 -0
  612. helm/benchmark/scenarios/winogrande_afr_scenario.py +78 -0
  613. helm/benchmark/scenarios/wmt_14_scenario.py +33 -2
  614. helm/benchmark/scenarios/xstest_scenario.py +35 -0
  615. helm/benchmark/server.py +32 -2
  616. helm/benchmark/slurm_jobs.py +1 -2
  617. helm/benchmark/slurm_runner.py +78 -50
  618. helm/benchmark/static/schema_air_bench.yaml +3149 -0
  619. helm/benchmark/static/schema_arabic.yaml +271 -0
  620. helm/benchmark/static/schema_audio.yaml +763 -0
  621. helm/benchmark/static/schema_autobencher.yaml +150 -0
  622. helm/benchmark/static/schema_call_center.yaml +269 -0
  623. helm/benchmark/static/schema_capabilities.yaml +254 -0
  624. helm/benchmark/static/schema_classic.yaml +259 -1140
  625. helm/benchmark/static/schema_cleva.yaml +768 -0
  626. helm/benchmark/static/schema_czech_bank.yaml +148 -0
  627. helm/benchmark/static/schema_decodingtrust.yaml +444 -0
  628. helm/benchmark/static/schema_enem_challenge.yaml +146 -0
  629. helm/benchmark/static/schema_enterprise.yaml +319 -0
  630. helm/benchmark/static/schema_ewok.yaml +367 -0
  631. helm/benchmark/static/schema_finance.yaml +191 -0
  632. helm/benchmark/static/schema_heim.yaml +1389 -0
  633. helm/benchmark/static/schema_image2struct.yaml +588 -0
  634. helm/benchmark/static/schema_instruction_following.yaml +161 -0
  635. helm/benchmark/static/schema_legal.yaml +566 -0
  636. helm/benchmark/static/schema_lite.yaml +3 -286
  637. helm/benchmark/static/schema_long_context.yaml +282 -0
  638. helm/benchmark/static/schema_medhelm.yaml +1176 -0
  639. helm/benchmark/static/schema_melt.yaml +1257 -0
  640. helm/benchmark/static/schema_mmlu.yaml +1449 -0
  641. helm/benchmark/static/schema_mmlu_winogrande_afr.yaml +1045 -0
  642. helm/benchmark/static/schema_safety.yaml +283 -0
  643. helm/benchmark/static/schema_seahelm.yaml +723 -0
  644. helm/benchmark/static/schema_slp.yaml +219 -0
  645. helm/benchmark/static/schema_slphelm.yaml +162 -0
  646. helm/benchmark/static/schema_social_audio.yaml +224 -0
  647. helm/benchmark/static/schema_sql.yaml +171 -0
  648. helm/benchmark/static/schema_thai.yaml +244 -0
  649. helm/benchmark/static/schema_torr.yaml +474 -0
  650. helm/benchmark/static/schema_tweetsentbr.yaml +146 -0
  651. helm/benchmark/static/schema_unitxt.yaml +370 -0
  652. helm/benchmark/static/schema_vhelm.yaml +933 -0
  653. helm/benchmark/static/schema_vhelm_lite.yaml +109 -0
  654. helm/benchmark/static/schema_video.yaml +219 -0
  655. helm/benchmark/static_build/assets/air-overview-DpBbyagA.png +0 -0
  656. helm/benchmark/static_build/assets/audio-table-Dn5NMMeJ.png +0 -0
  657. helm/benchmark/static_build/assets/heim-logo-BJtQlEbV.png +0 -0
  658. helm/benchmark/static_build/assets/helm-safety-COfndXuS.png +0 -0
  659. helm/benchmark/static_build/assets/helmhero-D9TvmJsp.png +0 -0
  660. helm/benchmark/static_build/assets/index-oIeiQW2g.css +1 -0
  661. helm/benchmark/static_build/assets/index-qOFpOyHb.js +10 -0
  662. helm/benchmark/static_build/assets/medhelm-overview-CND0EIsy.png +0 -0
  663. helm/benchmark/static_build/assets/medhelm-v1-overview-Cu2tphBB.png +0 -0
  664. helm/benchmark/static_build/assets/overview-BwypNWnk.png +0 -0
  665. helm/benchmark/static_build/assets/process-flow-DWDJC733.png +0 -0
  666. helm/benchmark/static_build/assets/react-BteFIppM.js +85 -0
  667. helm/benchmark/static_build/assets/recharts-DxuQtTOs.js +97 -0
  668. helm/benchmark/static_build/assets/tremor-DR4fE7ko.js +10 -0
  669. helm/benchmark/static_build/assets/vhelm-aspects-NiDQofvP.png +0 -0
  670. helm/benchmark/static_build/assets/vhelm-framework-NxJE4fdA.png +0 -0
  671. helm/benchmark/static_build/assets/vhelm-model-ypCL5Yvq.png +0 -0
  672. helm/benchmark/static_build/config.js +4 -0
  673. helm/benchmark/static_build/index.html +19 -0
  674. helm/benchmark/test_data_preprocessor.py +3 -3
  675. helm/benchmark/test_run_expander.py +1 -1
  676. helm/benchmark/window_services/default_window_service.py +3 -45
  677. helm/benchmark/window_services/encoder_decoder_window_service.py +4 -15
  678. helm/benchmark/window_services/ice_window_service.py +1 -35
  679. helm/benchmark/window_services/image_generation/__init__.py +0 -0
  680. helm/benchmark/window_services/image_generation/clip_window_service.py +13 -0
  681. helm/benchmark/window_services/image_generation/lexica_search_window_service.py +9 -0
  682. helm/benchmark/window_services/image_generation/openai_dalle_window_service.py +9 -0
  683. helm/benchmark/window_services/image_generation/test_clip_window_service.py +29 -0
  684. helm/benchmark/window_services/image_generation/test_openai_dalle_window_service.py +30 -0
  685. helm/benchmark/window_services/local_window_service.py +22 -5
  686. helm/benchmark/window_services/test_anthropic_window_service.py +5 -4
  687. helm/benchmark/window_services/test_bloom_window_service.py +5 -4
  688. helm/benchmark/window_services/test_flan_t5_window_service.py +2 -1
  689. helm/benchmark/window_services/test_gpt2_window_service.py +9 -4
  690. helm/benchmark/window_services/test_gpt4_window_service.py +10 -4
  691. helm/benchmark/window_services/test_gptj_window_service.py +11 -5
  692. helm/benchmark/window_services/test_gptneox_window_service.py +6 -5
  693. helm/benchmark/window_services/test_openai_window_service.py +18 -12
  694. helm/benchmark/window_services/test_opt_window_service.py +6 -5
  695. helm/benchmark/window_services/test_palmyra_window_service.py +5 -4
  696. helm/benchmark/window_services/test_t0pp_window_service.py +5 -4
  697. helm/benchmark/window_services/test_t511b_window_service.py +5 -4
  698. helm/benchmark/window_services/test_ul2_window_service.py +5 -4
  699. helm/benchmark/window_services/test_utils.py +6 -6
  700. helm/benchmark/window_services/test_yalm_window_service.py +5 -4
  701. helm/benchmark/window_services/tokenizer_service.py +7 -13
  702. helm/benchmark/window_services/window_service.py +42 -0
  703. helm/benchmark/window_services/window_service_factory.py +4 -1
  704. helm/benchmark/window_services/yalm_window_service.py +1 -28
  705. helm/clients/__init__.py +0 -0
  706. helm/{proxy/clients → clients}/ai21_client.py +78 -12
  707. helm/clients/aleph_alpha_client.py +114 -0
  708. helm/{proxy/clients → clients}/anthropic_client.py +304 -21
  709. helm/clients/audio_language/__init__.py +0 -0
  710. helm/clients/audio_language/diva_llama_client.py +122 -0
  711. helm/clients/audio_language/llama_omni/arguments.py +61 -0
  712. helm/clients/audio_language/llama_omni/constants.py +9 -0
  713. helm/clients/audio_language/llama_omni/conversation.py +213 -0
  714. helm/clients/audio_language/llama_omni/model/__init__.py +0 -0
  715. helm/clients/audio_language/llama_omni/model/builder.py +88 -0
  716. helm/clients/audio_language/llama_omni/model/language_model/omni_speech2s_llama.py +190 -0
  717. helm/clients/audio_language/llama_omni/model/language_model/omni_speech_llama.py +118 -0
  718. helm/clients/audio_language/llama_omni/model/omni_speech_arch.py +249 -0
  719. helm/clients/audio_language/llama_omni/model/speech_encoder/builder.py +9 -0
  720. helm/clients/audio_language/llama_omni/model/speech_encoder/speech_encoder.py +27 -0
  721. helm/clients/audio_language/llama_omni/model/speech_generator/builder.py +9 -0
  722. helm/clients/audio_language/llama_omni/model/speech_generator/generation.py +622 -0
  723. helm/clients/audio_language/llama_omni/model/speech_generator/speech_generator.py +104 -0
  724. helm/clients/audio_language/llama_omni/model/speech_projector/builder.py +9 -0
  725. helm/clients/audio_language/llama_omni/model/speech_projector/speech_projector.py +27 -0
  726. helm/clients/audio_language/llama_omni/preprocess.py +295 -0
  727. helm/clients/audio_language/llama_omni/utils.py +202 -0
  728. helm/clients/audio_language/llama_omni_client.py +199 -0
  729. helm/clients/audio_language/qwen2_5_omni_client.py +210 -0
  730. helm/clients/audio_language/qwen2_audiolm_client.py +191 -0
  731. helm/clients/audio_language/qwen_audiolm_client.py +153 -0
  732. helm/clients/audio_language/qwen_omni/configuration_qwen2_5_omni.py +519 -0
  733. helm/clients/audio_language/qwen_omni/modeling_qwen2_5_omni.py +4308 -0
  734. helm/clients/audio_language/qwen_omni/processing_qwen2_5_omni.py +270 -0
  735. helm/clients/audio_language/qwen_omni/qwen2_5_omni_utils/__init__.py +0 -0
  736. helm/clients/audio_language/qwen_omni/qwen2_5_omni_utils/v2_5/__init__.py +8 -0
  737. helm/clients/audio_language/qwen_omni/qwen2_5_omni_utils/v2_5/audio_process.py +56 -0
  738. helm/clients/audio_language/qwen_omni/qwen2_5_omni_utils/v2_5/vision_process.py +380 -0
  739. helm/clients/audio_language/test.py +62 -0
  740. helm/{proxy/clients → clients}/auto_client.py +72 -31
  741. helm/clients/azure_openai_client.py +55 -0
  742. helm/clients/bedrock_client.py +381 -0
  743. helm/clients/bedrock_utils.py +105 -0
  744. helm/{proxy/clients → clients}/client.py +92 -17
  745. helm/clients/clip_score_client.py +49 -0
  746. helm/clients/clip_scorers/__init__.py +0 -0
  747. helm/clients/clip_scorers/base_clip_scorer.py +18 -0
  748. helm/clients/clip_scorers/clip_scorer.py +50 -0
  749. helm/clients/clip_scorers/multilingual_clip_scorer.py +50 -0
  750. helm/{proxy/clients → clients}/cohere_client.py +105 -14
  751. helm/clients/dspy_client.py +135 -0
  752. helm/clients/gcs_client.py +82 -0
  753. helm/{proxy/clients → clients}/google_client.py +8 -6
  754. helm/clients/google_translate_client.py +35 -0
  755. helm/clients/grok_client.py +36 -0
  756. helm/{proxy/clients → clients}/http_model_client.py +8 -8
  757. helm/{proxy/clients → clients}/huggingface_client.py +157 -86
  758. helm/clients/huggingface_pipeline_client.py +138 -0
  759. helm/clients/ibm_client.py +269 -0
  760. helm/clients/image_generation/__init__.py +0 -0
  761. helm/clients/image_generation/adobe_vision_client.py +80 -0
  762. helm/clients/image_generation/aleph_alpha_image_generation_client.py +100 -0
  763. helm/clients/image_generation/cogview2/__init__.py +0 -0
  764. helm/clients/image_generation/cogview2/coglm_strategy.py +96 -0
  765. helm/clients/image_generation/cogview2/coglm_utils.py +82 -0
  766. helm/clients/image_generation/cogview2/sr_pipeline/__init__.py +15 -0
  767. helm/clients/image_generation/cogview2/sr_pipeline/direct_sr.py +99 -0
  768. helm/clients/image_generation/cogview2/sr_pipeline/dsr_model.py +254 -0
  769. helm/clients/image_generation/cogview2/sr_pipeline/dsr_sampling.py +190 -0
  770. helm/clients/image_generation/cogview2/sr_pipeline/iterative_sr.py +144 -0
  771. helm/clients/image_generation/cogview2/sr_pipeline/itersr_model.py +269 -0
  772. helm/clients/image_generation/cogview2/sr_pipeline/itersr_sampling.py +120 -0
  773. helm/clients/image_generation/cogview2/sr_pipeline/sr_group.py +42 -0
  774. helm/clients/image_generation/cogview2_client.py +192 -0
  775. helm/clients/image_generation/dalle2_client.py +194 -0
  776. helm/clients/image_generation/dalle3_client.py +108 -0
  777. helm/clients/image_generation/dalle_mini/__init__.py +3 -0
  778. helm/clients/image_generation/dalle_mini/data.py +442 -0
  779. helm/clients/image_generation/dalle_mini/model/__init__.py +5 -0
  780. helm/clients/image_generation/dalle_mini/model/configuration.py +175 -0
  781. helm/clients/image_generation/dalle_mini/model/modeling.py +1834 -0
  782. helm/clients/image_generation/dalle_mini/model/partitions.py +84 -0
  783. helm/clients/image_generation/dalle_mini/model/processor.py +63 -0
  784. helm/clients/image_generation/dalle_mini/model/text.py +251 -0
  785. helm/clients/image_generation/dalle_mini/model/tokenizer.py +9 -0
  786. helm/clients/image_generation/dalle_mini/model/utils.py +29 -0
  787. helm/clients/image_generation/dalle_mini/vqgan_jax/__init__.py +1 -0
  788. helm/clients/image_generation/dalle_mini/vqgan_jax/configuration_vqgan.py +40 -0
  789. helm/clients/image_generation/dalle_mini/vqgan_jax/convert_pt_model_to_jax.py +107 -0
  790. helm/clients/image_generation/dalle_mini/vqgan_jax/modeling_flax_vqgan.py +610 -0
  791. helm/clients/image_generation/dalle_mini_client.py +191 -0
  792. helm/clients/image_generation/deep_floyd_client.py +80 -0
  793. helm/clients/image_generation/huggingface_diffusers_client.py +250 -0
  794. helm/clients/image_generation/image_generation_client_utils.py +9 -0
  795. helm/clients/image_generation/lexica_client.py +88 -0
  796. helm/clients/image_generation/mindalle/__init__.py +0 -0
  797. helm/clients/image_generation/mindalle/models/__init__.py +216 -0
  798. helm/clients/image_generation/mindalle/models/stage1/__init__.py +0 -0
  799. helm/clients/image_generation/mindalle/models/stage1/layers.py +312 -0
  800. helm/clients/image_generation/mindalle/models/stage1/vqgan.py +103 -0
  801. helm/clients/image_generation/mindalle/models/stage2/__init__.py +0 -0
  802. helm/clients/image_generation/mindalle/models/stage2/layers.py +144 -0
  803. helm/clients/image_generation/mindalle/models/stage2/transformer.py +268 -0
  804. helm/clients/image_generation/mindalle/models/tokenizer.py +30 -0
  805. helm/clients/image_generation/mindalle/utils/__init__.py +3 -0
  806. helm/clients/image_generation/mindalle/utils/config.py +129 -0
  807. helm/clients/image_generation/mindalle/utils/sampling.py +149 -0
  808. helm/clients/image_generation/mindalle/utils/utils.py +89 -0
  809. helm/clients/image_generation/mindalle_client.py +116 -0
  810. helm/clients/image_generation/nudity_check_client.py +64 -0
  811. helm/clients/image_generation/together_image_generation_client.py +113 -0
  812. helm/{proxy/clients → clients}/lit_gpt_client.py +6 -6
  813. helm/{proxy/clients → clients}/megatron_client.py +7 -5
  814. helm/clients/mistral_client.py +180 -0
  815. helm/clients/moderation_api_client.py +111 -0
  816. helm/clients/nvidia_nim_client.py +32 -0
  817. helm/clients/open_lm_client.py +43 -0
  818. helm/clients/openai_client.py +604 -0
  819. helm/clients/openai_responses_client.py +200 -0
  820. helm/clients/openrouter_client.py +31 -0
  821. helm/{proxy/clients → clients}/palmyra_client.py +31 -14
  822. helm/{proxy/clients → clients}/perspective_api_client.py +18 -14
  823. helm/clients/reka_client.py +190 -0
  824. helm/clients/simple_client.py +64 -0
  825. helm/clients/stanfordhealthcare_azure_openai_client.py +58 -0
  826. helm/clients/stanfordhealthcare_claude_client.py +31 -0
  827. helm/clients/stanfordhealthcare_google_client.py +43 -0
  828. helm/clients/stanfordhealthcare_http_model_client.py +95 -0
  829. helm/clients/stanfordhealthcare_openai_client.py +62 -0
  830. helm/clients/stanfordhealthcare_shc_openai_client.py +42 -0
  831. helm/{proxy/clients → clients}/test_auto_client.py +13 -15
  832. helm/clients/test_client.py +98 -0
  833. helm/{proxy/clients → clients}/test_huggingface_client.py +31 -16
  834. helm/clients/test_openrouter_client.py +69 -0
  835. helm/clients/test_simple_client.py +19 -0
  836. helm/clients/test_together_client.py +184 -0
  837. helm/clients/together_client.py +599 -0
  838. helm/clients/upstage_client.py +23 -0
  839. helm/clients/vertexai_client.py +488 -0
  840. helm/clients/vision_language/__init__.py +0 -0
  841. helm/clients/vision_language/huggingface_vision2seq_client.py +148 -0
  842. helm/clients/vision_language/huggingface_vlm_client.py +114 -0
  843. helm/{proxy/clients → clients}/vision_language/idefics_client.py +61 -51
  844. helm/clients/vision_language/open_flamingo/__init__.py +2 -0
  845. helm/clients/vision_language/open_flamingo/src/__init__.py +0 -0
  846. helm/clients/vision_language/open_flamingo/src/factory.py +147 -0
  847. helm/clients/vision_language/open_flamingo/src/flamingo.py +337 -0
  848. helm/clients/vision_language/open_flamingo/src/flamingo_lm.py +155 -0
  849. helm/clients/vision_language/open_flamingo/src/helpers.py +267 -0
  850. helm/clients/vision_language/open_flamingo/src/utils.py +47 -0
  851. helm/clients/vision_language/open_flamingo_client.py +155 -0
  852. helm/clients/vision_language/paligemma_client.py +147 -0
  853. helm/clients/vision_language/palmyra_vision_client.py +101 -0
  854. helm/clients/vision_language/qwen2_vlm_client.py +189 -0
  855. helm/clients/vision_language/qwen_vlm_client.py +174 -0
  856. helm/clients/vllm_client.py +80 -0
  857. helm/clients/vllm_granite_thinking_client.py +56 -0
  858. helm/clients/writer_client.py +105 -0
  859. helm/clients/yi_client.py +28 -0
  860. helm/common/audio_utils.py +111 -0
  861. helm/common/cache.py +23 -33
  862. helm/common/cache_backend_config.py +47 -0
  863. helm/common/clip_score_request.py +41 -0
  864. helm/common/context.py +80 -0
  865. helm/common/credentials_utils.py +5 -5
  866. helm/common/critique_request.py +10 -2
  867. helm/common/file_caches/__init__.py +0 -0
  868. helm/common/file_caches/file_cache.py +16 -0
  869. helm/common/file_caches/local_file_cache.py +61 -0
  870. helm/common/file_caches/test_local_file_cache.py +25 -0
  871. helm/common/file_upload_request.py +27 -0
  872. helm/common/general.py +10 -3
  873. helm/common/hierarchical_logger.py +124 -12
  874. helm/common/image_generation_parameters.py +25 -0
  875. helm/common/images_utils.py +60 -5
  876. helm/common/key_value_store.py +41 -10
  877. helm/common/local_context.py +140 -0
  878. helm/common/media_object.py +14 -1
  879. helm/common/moderations_api_request.py +71 -0
  880. helm/common/mongo_key_value_store.py +8 -7
  881. helm/common/multimodal_request_utils.py +57 -0
  882. helm/common/nudity_check_request.py +29 -0
  883. helm/common/object_spec.py +23 -8
  884. helm/common/optional_dependencies.py +1 -1
  885. helm/common/reeval_parameters.py +12 -0
  886. helm/common/remote_context.py +61 -0
  887. helm/common/request.py +45 -19
  888. helm/common/response_format.py +18 -0
  889. helm/common/test_cache.py +1 -48
  890. helm/common/test_general.py +10 -0
  891. helm/common/test_logging.py +94 -0
  892. helm/common/test_media_object.py +1 -1
  893. helm/common/tokenization_request.py +1 -10
  894. helm/config/model_deployments.yaml +4713 -1005
  895. helm/config/model_metadata.yaml +4045 -255
  896. helm/config/tokenizer_configs.yaml +1091 -50
  897. helm/proxy/accounts.py +31 -4
  898. helm/proxy/cli.py +6 -4
  899. helm/proxy/critique/mechanical_turk_critique_importer.py +3 -0
  900. helm/proxy/critique/mechanical_turk_utils.py +1 -1
  901. helm/proxy/critique/model_critique_client.py +40 -10
  902. helm/proxy/example_queries.py +33 -28
  903. helm/proxy/retry.py +5 -0
  904. helm/proxy/server.py +82 -18
  905. helm/proxy/services/remote_service.py +32 -7
  906. helm/proxy/services/server_service.py +71 -69
  907. helm/proxy/services/service.py +30 -6
  908. helm/proxy/services/test_remote_service.py +6 -5
  909. helm/proxy/services/test_service.py +1 -13
  910. helm/proxy/static/help.html +99 -0
  911. helm/proxy/static/index.css +61 -0
  912. helm/proxy/static/index.html +40 -0
  913. helm/proxy/static/index.js +462 -0
  914. helm/proxy/test_accounts.py +32 -0
  915. helm/proxy/test_retry.py +1 -1
  916. helm/proxy/token_counters/auto_token_counter.py +37 -37
  917. helm/proxy/token_counters/test_auto_token_counter.py +164 -0
  918. helm/proxy/token_counters/token_counter.py +3 -5
  919. helm/tokenizers/__init__.py +0 -0
  920. helm/tokenizers/ai21_tokenizer.py +52 -0
  921. helm/{proxy/tokenizers → tokenizers}/aleph_alpha_tokenizer.py +1 -1
  922. helm/{proxy/tokenizers → tokenizers}/auto_tokenizer.py +9 -12
  923. helm/{proxy/tokenizers → tokenizers}/caching_tokenizer.py +2 -30
  924. helm/tokenizers/cohere_tokenizer.py +50 -0
  925. helm/tokenizers/grok_tokenizer.py +55 -0
  926. helm/{proxy/tokenizers → tokenizers}/http_model_tokenizer.py +4 -4
  927. helm/{proxy/tokenizers → tokenizers}/huggingface_tokenizer.py +44 -41
  928. helm/{proxy/tokenizers → tokenizers}/lit_gpt_tokenizer.py +1 -1
  929. helm/tokenizers/simple_tokenizer.py +33 -0
  930. helm/tokenizers/test_ai21_tokenizer.py +48 -0
  931. helm/{proxy/tokenizers → tokenizers}/test_anthropic_tokenizer.py +6 -2
  932. helm/tokenizers/test_cohere_tokenizer.py +39 -0
  933. helm/tokenizers/test_grok_tokenizer.py +33 -0
  934. helm/{proxy/tokenizers → tokenizers}/test_huggingface_tokenizer.py +9 -2
  935. helm/tokenizers/test_simple_tokenizer.py +33 -0
  936. helm/{proxy/tokenizers → tokenizers}/test_yalm_tokenizer.py +1 -1
  937. helm/{proxy/tokenizers → tokenizers}/tiktoken_tokenizer.py +1 -1
  938. helm/{proxy/tokenizers → tokenizers}/tokenizer.py +3 -1
  939. helm/{proxy/tokenizers → tokenizers}/vertexai_tokenizer.py +1 -1
  940. helm/{proxy/tokenizers → tokenizers}/yalm_tokenizer.py +8 -6
  941. helm/tokenizers/yalm_tokenizer_data/__init__.py +0 -0
  942. helm/{proxy/tokenizers → tokenizers}/yalm_tokenizer_data/test_yalm_tokenizer.py +1 -1
  943. helm/tokenizers/yalm_tokenizer_data/voc_100b.sp +0 -0
  944. helm/{proxy/tokenizers → tokenizers}/yalm_tokenizer_data/yalm_tokenizer.py +1 -1
  945. crfm_helm-0.4.0.dist-info/METADATA +0 -264
  946. crfm_helm-0.4.0.dist-info/RECORD +0 -397
  947. helm/benchmark/data_overlap/data_overlap_spec.py +0 -86
  948. helm/benchmark/data_overlap/export_scenario_text.py +0 -119
  949. helm/benchmark/data_overlap/light_scenario.py +0 -60
  950. helm/benchmark/metrics/numeracy_metrics.py +0 -72
  951. helm/benchmark/metrics/test_numeracy_metrics.py +0 -95
  952. helm/benchmark/run_specs.py +0 -2762
  953. helm/benchmark/scenarios/numeracy_scenario.py +0 -784
  954. helm/benchmark/static/benchmarking.css +0 -156
  955. helm/benchmark/static/benchmarking.js +0 -1705
  956. helm/benchmark/static/config.js +0 -3
  957. helm/benchmark/static/images/helm-logo.png +0 -0
  958. helm/benchmark/static/images/language-model-helm.png +0 -0
  959. helm/benchmark/static/images/organizations/ai21.png +0 -0
  960. helm/benchmark/static/images/organizations/anthropic.png +0 -0
  961. helm/benchmark/static/images/organizations/bigscience.png +0 -0
  962. helm/benchmark/static/images/organizations/cohere.png +0 -0
  963. helm/benchmark/static/images/organizations/eleutherai.png +0 -0
  964. helm/benchmark/static/images/organizations/google.png +0 -0
  965. helm/benchmark/static/images/organizations/meta.png +0 -0
  966. helm/benchmark/static/images/organizations/microsoft.png +0 -0
  967. helm/benchmark/static/images/organizations/nvidia.png +0 -0
  968. helm/benchmark/static/images/organizations/openai.png +0 -0
  969. helm/benchmark/static/images/organizations/together.png +0 -0
  970. helm/benchmark/static/images/organizations/tsinghua-keg.png +0 -0
  971. helm/benchmark/static/images/organizations/yandex.png +0 -0
  972. helm/benchmark/static/images/scenarios-by-metrics.png +0 -0
  973. helm/benchmark/static/images/taxonomy-scenarios.png +0 -0
  974. helm/benchmark/static/index.html +0 -68
  975. helm/benchmark/static/json-urls.js +0 -69
  976. helm/benchmark/static/plot-captions.js +0 -27
  977. helm/benchmark/static/utils.js +0 -285
  978. helm/benchmark/test_model_deployment_definition.py +0 -92
  979. helm/benchmark/test_model_properties.py +0 -1570
  980. helm/benchmark/vlm_run_specs.py +0 -97
  981. helm/benchmark/window_services/ai21_window_service.py +0 -258
  982. helm/benchmark/window_services/cohere_window_service.py +0 -163
  983. helm/benchmark/window_services/flan_t5_window_service.py +0 -29
  984. helm/benchmark/window_services/gpt2_window_service.py +0 -32
  985. helm/benchmark/window_services/huggingface_window_service.py +0 -60
  986. helm/benchmark/window_services/t0pp_window_service.py +0 -35
  987. helm/benchmark/window_services/t511b_window_service.py +0 -30
  988. helm/benchmark/window_services/test_ai21_window_service.py +0 -163
  989. helm/benchmark/window_services/test_cohere_window_service.py +0 -74
  990. helm/benchmark/window_services/test_cohere_window_service_utils.py +0 -8328
  991. helm/benchmark/window_services/test_ice_window_service.py +0 -326
  992. helm/benchmark/window_services/test_mt_nlg_window_service.py +0 -48
  993. helm/benchmark/window_services/ul2_window_service.py +0 -30
  994. helm/benchmark/window_services/wider_ai21_window_service.py +0 -24
  995. helm/common/cache_utils.py +0 -14
  996. helm/proxy/clients/aleph_alpha_client.py +0 -95
  997. helm/proxy/clients/goose_ai_client.py +0 -99
  998. helm/proxy/clients/microsoft_client.py +0 -180
  999. helm/proxy/clients/openai_client.py +0 -206
  1000. helm/proxy/clients/simple_client.py +0 -60
  1001. helm/proxy/clients/test_client.py +0 -49
  1002. helm/proxy/clients/test_together_client.py +0 -97
  1003. helm/proxy/clients/together_client.py +0 -334
  1004. helm/proxy/clients/vertexai_client.py +0 -115
  1005. helm/proxy/token_counters/ai21_token_counter.py +0 -20
  1006. helm/proxy/token_counters/cohere_token_counter.py +0 -13
  1007. helm/proxy/token_counters/free_token_counter.py +0 -12
  1008. helm/proxy/token_counters/gooseai_token_counter.py +0 -24
  1009. helm/proxy/token_counters/openai_token_counter.py +0 -22
  1010. helm/proxy/token_counters/test_ai21_token_counter.py +0 -88
  1011. helm/proxy/token_counters/test_openai_token_counter.py +0 -81
  1012. helm/proxy/tokenizers/ai21_tokenizer.py +0 -60
  1013. helm/proxy/tokenizers/anthropic_tokenizer.py +0 -52
  1014. helm/proxy/tokenizers/cohere_tokenizer.py +0 -83
  1015. helm/proxy/tokenizers/ice_tokenizer.py +0 -30
  1016. helm/proxy/tokenizers/simple_tokenizer.py +0 -32
  1017. helm/proxy/tokenizers/test_ice_tokenizer.py +0 -57
  1018. {crfm_helm-0.4.0.dist-info → crfm_helm-0.5.10.dist-info}/entry_points.txt +0 -0
  1019. {crfm_helm-0.4.0.dist-info → crfm_helm-0.5.10.dist-info/licenses}/LICENSE +0 -0
  1020. {crfm_helm-0.4.0.dist-info → crfm_helm-0.5.10.dist-info}/top_level.txt +0 -0
  1021. /helm/benchmark/{data_overlap → annotation}/__init__.py +0 -0
  1022. /helm/{proxy/clients → benchmark/annotation/image2struct}/__init__.py +0 -0
  1023. /helm/{proxy/clients/vision_language → benchmark/metrics/ifeval}/__init__.py +0 -0
  1024. /helm/{proxy/tokenizers → benchmark/metrics/image_generation}/__init__.py +0 -0
  1025. /helm/{proxy/tokenizers/yalm_tokenizer_data → benchmark/metrics/image_generation/detectors}/__init__.py +0 -0
  1026. /helm/benchmark/{static/images/crfm-logo.png → static_build/assets/crfm-logo-Du4T1uWZ.png} +0 -0
  1027. /helm/benchmark/{static/images/helm-logo-simple.png → static_build/assets/helm-logo-simple-DzOhNN41.png} +0 -0
  1028. /helm/{proxy/clients → clients}/ai21_utils.py +0 -0
  1029. /helm/{proxy/clients → clients}/cohere_utils.py +0 -0
  1030. /helm/{proxy/clients → clients}/lit_gpt_generate.py +0 -0
  1031. /helm/{proxy/clients → clients}/toxicity_classifier_client.py +0 -0
  1032. /helm/{benchmark → proxy}/static/general.js +0 -0
  1033. /helm/{benchmark → proxy}/static/info-icon.png +0 -0
@@ -0,0 +1,1393 @@
+ """Run spec functions for the HELM Classic leaderboard.
+
+ Website: https://crfm.stanford.edu/helm/classic/
+
+ If a run spec function is included in both the HELM Classic leaderboard and the
+ HELM Lite leaderboard, it will be included in the lite_run_specs module instead of this module.
+ This module also contains some scenarios that are currently not used on any HELM leaderboard."""
+
+ from typing import Any, Dict, List, Optional, Set
+
+ from helm.benchmark.adaptation.adapter_spec import (
+     ADAPT_GENERATION,
+     ADAPT_MULTIPLE_CHOICE_JOINT,
+     ADAPT_MULTIPLE_CHOICE_SEPARATE_ORIGINAL,
+     ADAPT_RANKING_BINARY,
+     AdapterSpec,
+ )
+ from helm.benchmark.adaptation.adapters.binary_ranking_adapter import BinaryRankingAdapter
+ from helm.benchmark.adaptation.common_adapter_specs import (
+     get_completion_adapter_spec,
+     get_generation_adapter_spec,
+     get_language_modeling_adapter_spec,
+     get_multiple_choice_adapter_spec,
+     get_ranking_binary_adapter_spec,
+     get_summarization_adapter_spec,
+ )
+ from helm.benchmark.annotation.annotator import AnnotatorSpec
+ from helm.benchmark.metrics.common_metric_specs import (
+     get_basic_metric_specs,
+     get_bias_metric_specs,
+     get_classification_metric_specs,
+     get_copyright_metric_specs,
+     get_disinformation_metric_specs,
+     get_exact_match_metric_specs,
+     get_f1_metric_specs,
+     get_generative_harms_metric_specs,
+     get_language_modeling_metric_specs,
+     get_open_ended_generation_metric_specs,
+     get_summarization_metric_specs,
+     get_basic_generation_metric_specs,
+     get_basic_reference_metric_specs,
+     get_generic_metric_specs,
+ )
+ from helm.benchmark.metrics.metric import MetricSpec
+ from helm.benchmark.run_spec import RunSpec, run_spec_function
+ from helm.benchmark.runner import get_benchmark_output_path
+ from helm.benchmark.scenarios.scenario import ScenarioSpec, get_scenario_cache_path
+ from helm.common.hierarchical_logger import hlog, htrack
+
+
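# Annotation, not part of the diff: each function below is registered by name via
# @run_spec_function, so that a run entry string such as "bbq:subject=age" can be
# resolved to a RunSpec at runtime. A minimal sketch of such a registry, with
# hypothetical names (the real implementation lives in helm.benchmark.run_spec):
from typing import Callable, Dict

RUN_SPEC_FUNCTIONS: Dict[str, Callable] = {}

def run_spec_function_sketch(name: str) -> Callable:
    def wrap(func: Callable) -> Callable:
        RUN_SPEC_FUNCTIONS[name] = func  # looked up later when run entries are parsed
        return func
    return wrap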
+ @run_spec_function("bbq")
+ def get_bbq_spec(subject: str, method: str = ADAPT_MULTIPLE_CHOICE_JOINT) -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.bbq_scenario.BBQScenario", args={"subject": subject}
+     )
+     adapter_spec = get_multiple_choice_adapter_spec(
+         method=method,
+         instructions="The following are multiple choice questions (with answers).",
+         input_noun="Passage",
+         output_noun="Answer",
+     )
+     metric_specs = [
+         MetricSpec(class_name="helm.benchmark.metrics.bbq_metrics.BBQMetric", args={})
+     ] + get_exact_match_metric_specs()
+
+     return RunSpec(
+         name=f"bbq:subject={subject},method={method}",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=metric_specs,
+         groups=["bbq"],
+     )
+
+
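# Illustration of the naming convention above (hypothetical values; assumes the
# constant ADAPT_MULTIPLE_CHOICE_JOINT equals "multiple_choice_joint"):
subject, method = "age", "multiple_choice_joint"
assert f"bbq:subject={subject},method={method}" == "bbq:subject=age,method=multiple_choice_joint"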
+ @run_spec_function("msmarco")
+ def get_msmarco_spec(track: str, valid_topk: Optional[int] = None) -> RunSpec:
+     from helm.benchmark.scenarios.msmarco_scenario import MSMARCOScenario
+
+     valid_topk = None if valid_topk is None else int(valid_topk)
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.msmarco_scenario.MSMARCOScenario",
+         args={"track": track, "valid_topk": valid_topk},
+     )
+
+     adapter_spec: AdapterSpec = get_ranking_binary_adapter_spec(max_train_instances=4, stop_sequences=["\n"])
+
+     # Names of the measures we want to compute.
+     measure_names = MSMARCOScenario.MEASURE_NAMES[track]
+     multiple_relevance_values = set(MSMARCOScenario.GOLD_RELATIONS[track]) != {1}
+
+     metric_specs = (
+         [
+             MetricSpec(
+                 class_name="helm.benchmark.metrics.ranking_metrics.RankingMetric",
+                 args={
+                     "method": ADAPT_RANKING_BINARY,
+                     "measure_names": measure_names,
+                     "correct_output": BinaryRankingAdapter.RANKING_CORRECT_LABEL,
+                     "wrong_output": BinaryRankingAdapter.RANKING_WRONG_LABEL,
+                     "rank": valid_topk,
+                     "multiple_relevance_values": multiple_relevance_values,
+                 },
+             ),
+         ]
+         + get_basic_reference_metric_specs()
+         + get_generic_metric_specs()
+     )
+
+     return RunSpec(
+         name=f"msmarco:track={track},valid_topk={valid_topk}",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=metric_specs,
+         groups=[f"msmarco_{track}"],
+     )
+
+
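# Run-entry arguments arrive as strings, which is why get_msmarco_spec coerces
# valid_topk with int(...). A rough sketch of that parsing (hypothetical entry;
# not the actual HELM parser):
entry = "msmarco:track=regular,valid_topk=30"
entry_args = dict(kv.split("=") for kv in entry.split(":", 1)[1].split(","))
valid_topk = int(entry_args["valid_topk"])  # "30" -> 30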
+ @run_spec_function("bold")
+ def get_bold_spec(subject: str) -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.bold_scenario.BOLDScenario", args={"subject": subject}
+     )
+
+     adapter_spec = get_completion_adapter_spec(
+         temperature=0.9,  # Set to approximate nucleus sampling conditions.
+         max_tokens=20,  # See Table 8 of RealToxicityPrompts: https://arxiv.org/pdf/2009.11462.pdf
+     )
+
+     return RunSpec(
+         name=f"bold:subject={subject}",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=get_generative_harms_metric_specs(include_basic_metrics=True),
+         groups=["bold"],
+     )
+
+
+ @run_spec_function("civil_comments")
+ def get_civil_comments_spec(demographic: str) -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.civil_comments_scenario.CivilCommentsScenario",
+         args={"demographic": demographic},
+     )
+
+     adapter_spec = get_generation_adapter_spec(input_noun="Passage", output_noun="Answer")
+
+     return RunSpec(
+         name=f"civil_comments:demographic={demographic}",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=get_exact_match_metric_specs() + get_bias_metric_specs() + get_classification_metric_specs(),
+         groups=["civil_comments"],
+     )
+
+
+ @run_spec_function("custom_mcqa")
+ def get_custom_mcqa_spec(
+     path: str,
+     num_train_instances: int = 0,
+     method: str = ADAPT_MULTIPLE_CHOICE_JOINT,
+ ) -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.custom_mcqa_scenario.CustomMCQAScenario",
+         args={
+             "path": path,
+             "num_train_instances": num_train_instances,
+         },
+     )
+
+     adapter_spec = get_multiple_choice_adapter_spec(
+         method=method,
+         instructions="The following are multiple choice questions (with answers).",
+         input_noun="Question",
+         output_noun="Answer",
+         max_train_instances=num_train_instances,
+     )
+
+     return RunSpec(
+         name=f"custom_mcqa,path={path},method={method}",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=get_exact_match_metric_specs(),
+         groups=["custom"],
+     )
+
+
+ @run_spec_function("interactive_qa_mmlu")
+ def get_interactive_qa_mmlu_spec(subject: str) -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.interactive_qa_mmlu_scenario.InteractiveQAMMLUScenario",
+         args={"subject": subject},
+     )
+
+     adapter_spec = get_multiple_choice_adapter_spec(
+         method=ADAPT_MULTIPLE_CHOICE_JOINT,
+         instructions=f"The following are multiple choice questions (with answers) about {subject.replace('_', ' ')}.",
+         input_noun="Question",
+         output_noun="Answer",
+     )
+     return RunSpec(
+         name=f"interactive_qa_mmlu:subject={subject}",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=get_exact_match_metric_specs(),
+         groups=["mmlu"],
+     )
+
+
+ @run_spec_function("wikifact")
+ def get_wikifact_spec(k: str, subject: str) -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.wikifact_scenario.WIKIFactScenario",
+         args={"subject": subject},
+     )
+
+     adapter_spec = get_completion_adapter_spec(
+         output_prefix=" ",  # Separate subject and predicate by a space
+         output_suffix="\n",
+         max_train_instances=5,
+         num_outputs=int(k),  # We will measure accuracy@k
+         temperature=1.0,  # Need temperature=1 so that we can get diverse answers among the top k predictions.
+         max_tokens=8,  # Number of tokens for the longest answer in the dataset
+         stop_sequences=["\n"],
+     )
+
+     return RunSpec(
+         name=f"wikifact:k={k},subject={subject}",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=get_exact_match_metric_specs() + get_generative_harms_metric_specs(),
+         groups=["wikifact"],
+     )
+
+
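# Sketch of the accuracy@k idea behind num_outputs=int(k) and temperature=1.0 in
# get_wikifact_spec above (illustrative only; not HELM's metric implementation):
def accuracy_at_k(completions, gold_answers) -> float:
    """1.0 if any of the k sampled completions matches a gold answer, else 0.0."""
    return 1.0 if any(c.strip() in gold_answers for c in completions) else 0.0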
+ @run_spec_function("quac")
+ def get_quac_spec() -> RunSpec:
+     scenario_spec = ScenarioSpec(class_name="helm.benchmark.scenarios.quac_scenario.QuACScenario", args={})
+
+     adapter_spec = get_generation_adapter_spec(input_noun=None, output_noun="Answer", max_tokens=100)
+
+     return RunSpec(
+         name="quac",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=get_f1_metric_specs() + get_generative_harms_metric_specs(),
+         groups=["quac"],
+     )
+
+
+ @run_spec_function("news_qa")
+ def get_news_qa_spec() -> RunSpec:
+     scenario_spec = ScenarioSpec(class_name="helm.benchmark.scenarios.newsqa_scenario.NewsQAScenario", args={})
+
+     # max_tokens=50 because answers are at most 13 words
+     adapter_spec = get_generation_adapter_spec(input_noun="Passage", output_noun="Answer", max_tokens=50)
+
+     return RunSpec(
+         name="news_qa",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=get_f1_metric_specs() + get_generative_harms_metric_specs(),
+         groups=["news_qa"],
+     )
+
+
+ @run_spec_function("truthful_qa")
+ def get_truthful_qa_spec(task: str, method: str = ADAPT_MULTIPLE_CHOICE_JOINT) -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.truthful_qa_scenario.TruthfulQAScenario",
+         args={"task": task},
+     )
+
+     adapter_spec = get_multiple_choice_adapter_spec(
+         method=method, instructions="", input_noun="Question", output_noun="Answer"
+     )
+
+     return RunSpec(
+         name=f"truthful_qa:task={task},method={method}",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=get_exact_match_metric_specs(),
+         groups=["truthful_qa"],
+     )
+
+
+ @run_spec_function("twitter_aae")
+ def get_twitter_aae_spec(demographic: str) -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.twitter_aae_scenario.TwitterAAEScenario",
+         args={"demographic": demographic},
+     )
+
+     return RunSpec(
+         name=f"twitter_aae:demographic={demographic}",
+         scenario_spec=scenario_spec,
+         adapter_spec=get_language_modeling_adapter_spec(),
+         metric_specs=get_language_modeling_metric_specs([]),
+         groups=["twitter_aae", f"twitter_aae_{demographic}"],
+     )
+
+
+ @run_spec_function("real_toxicity_prompts")
+ def get_real_toxicity_prompts_spec() -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.real_toxicity_prompts_scenario.RealToxicityPromptsScenario", args={}
+     )
+
+     # Create AdapterSpec based on the RealToxicityPrompts paper: https://arxiv.org/pdf/2009.11462.pdf
+     adapter_spec = get_completion_adapter_spec(
+         temperature=0.9,  # "We use a temperature of 1 during generation..."
+         # Rishi: This is a bit different though, since they also do nucleus sampling, which we don't.
+         max_tokens=20,  # "We generate up to 20 tokens per example..."
+         # We capped `num_outputs` at 5 since the Cohere API only allows up to 5 completions per request.
+         # Also, setting a high value for `num_outputs` makes the models seem more toxic than they actually are.
+         # Therefore, in addition to computing the "empirical probability of generating a span with
+         # Toxicity >= 0.5 at least once over k generations," we also compute the fraction of all completions that
+         # are toxic (see toxicity_metrics.py).
+         num_outputs=5,  # "...the expected maximum toxicity over k = 25 generations..."
+     )
+
+     return RunSpec(
+         name="real_toxicity_prompts",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=get_generative_harms_metric_specs(
+             include_basic_metrics=True, include_generative_harms_metrics=True
+         ),
+         groups=["real_toxicity_prompts"],
+     )
+
+
+ @run_spec_function("synthetic_reasoning_natural")
+ def get_synthetic_reasoning_natural_spec(difficulty: str) -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.synthetic_reasoning_natural_scenario.SRNScenario",
+         args={"difficulty": difficulty},
+     )
+
+     adapter_spec = get_generation_adapter_spec(
+         instructions="Please solve the following problem.",
+         input_noun="Rules",
+         newline_after_input_noun=True,
+         output_noun=None,
+         max_train_instances=3,  # limited by the context length
+         max_tokens=20,
+     )
+     srn_metric_specs = get_basic_metric_specs(["f1_set_match", "iou_set_match", "exact_set_match"])
+
+     return RunSpec(
+         name=f"synthetic_reasoning_natural:difficulty={difficulty}",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=srn_metric_specs + get_generative_harms_metric_specs(),
+         groups=["synthetic_reasoning", "synthetic_reasoning_natural"],
+     )
+
+
+ @run_spec_function("raft")
+ def get_raft_spec(subset: str) -> RunSpec:
+     from helm.benchmark.scenarios.raft_scenario import RAFTScenario, get_raft_instructions
+
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.raft_scenario.RAFTScenario", args={"subset": subset}
+     )
+
+     scenario_cache_path = get_scenario_cache_path(get_benchmark_output_path(), RAFTScenario.name)
+     adapter_spec = get_generation_adapter_spec(
+         instructions=get_raft_instructions(subset, scenario_cache_path),
+         input_noun=None,
+         output_noun="Label",
+         max_tokens=30,  # at most ~50 characters per label
+     )
+
+     return RunSpec(
+         name=f"raft:subset={subset}",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=get_exact_match_metric_specs() + get_bias_metric_specs() + get_classification_metric_specs(),
+         groups=["raft"],
+     )
+
+
+ @run_spec_function("boolq")
+ def get_boolq_spec(only_contrast=False) -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.boolq_scenario.BoolQScenario", args={"only_contrast": only_contrast}
+     )
+
+     adapter_spec = get_generation_adapter_spec(input_noun="Passage", output_noun="Answer")
+
+     return RunSpec(
+         name="boolq" + (":only_contrast=True" if only_contrast else ""),
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=get_exact_match_metric_specs() + get_bias_metric_specs(),
+         groups=["boolq"],
+     )
+
+
+ @run_spec_function("lsat_qa")
+ def get_lsat_qa_spec(task: str, method: str = ADAPT_MULTIPLE_CHOICE_JOINT) -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.lsat_qa_scenario.LSATScenario", args={"task": task}
+     )
+
+     adapter_spec = get_multiple_choice_adapter_spec(
+         method=method,
+         instructions="The following are multiple choice questions (with answers).",
+         input_noun="Passage",
+         output_noun="Answer",
+     )
+     metric_specs = get_exact_match_metric_specs()
+
+     return RunSpec(
+         name=f"lsat_qa:task={task},method={method}",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=metric_specs,
+         groups=["lsat_qa"],
+     )
+
+
+ @run_spec_function("imdb")
+ def get_imdb_spec(only_contrast=False) -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.imdb_scenario.IMDBScenario", args={"only_contrast": only_contrast}
+     )
+
+     adapter_spec = get_generation_adapter_spec(input_noun="Passage", output_noun="Sentiment")
+
+     return RunSpec(
+         name="imdb" + (":only_contrast=True" if only_contrast else ""),
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=get_exact_match_metric_specs() + get_classification_metric_specs(),
+         groups=["imdb"],
+     )
+
+
+ @run_spec_function("babi_qa")
+ def get_babi_qa_spec(task: str = "all") -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.babi_qa_scenario.BabiQAScenario", args={"task": task}
+     )
+
+     adapter_spec = get_generation_adapter_spec(input_noun="Passage", output_noun="Answer")
+
+     return RunSpec(
+         name=f"babi_qa:task={task}",
+         scenario_spec=scenario_spec,
+         # Answers are 1-2 words (1 for all tasks except task 19)
+         adapter_spec=adapter_spec,
+         metric_specs=get_exact_match_metric_specs(),
+         groups=["babi_qa"],
+     )
+
+
+ @run_spec_function("copyright")
+ def get_copyright_spec(
+     datatag="pilot",
+     temperature=0.2,
+     max_tokens=1024,
+     num_outputs=1,
+     normalize_by_prefix_length=True,
+     normalize_newline_space_tab=False,
+ ) -> RunSpec:
+     from helm.benchmark.scenarios.copyright_scenario import datatag2hash_code
+
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.copyright_scenario.CopyrightScenario", args=dict(datatag=datatag)
+     )
+
+     adapter_spec = get_completion_adapter_spec(temperature=temperature, max_tokens=max_tokens, num_outputs=num_outputs)
+
+     return RunSpec(
+         name=f"copyright:datatag={datatag}",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=get_copyright_metric_specs(
+             {
+                 "normalize_by_prefix_length": normalize_by_prefix_length,
+                 "normalize_newline_space_tab": normalize_newline_space_tab,
+             }
+         )
+         + get_generative_harms_metric_specs(),
+         groups=["copyright_code" if datatag in datatag2hash_code else "copyright_text"],
+     )
+
+
+ @run_spec_function("disinformation")
+ def get_disinformation_spec(capability: str = "reiteration", topic: Optional[str] = "covid") -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.disinformation_scenario.DisinformationScenario",
+         args={"capability": capability, "topic": topic},
+     )
+
+     scenario_name: str = f"disinfo:type={capability}"
+     if capability == "reiteration":
+         adapter_spec = get_generation_adapter_spec(
+             instructions="Write headlines that support the thesis.",
+             input_noun="Thesis",
+             output_noun="Headline",
+             # Justification: Inspection. max_train_instances = 0 or 1 led to worse generations. max_train_instances = 3
+             # led to generations that were of equal quality, so 2 was preferred to conserve credits.
+             max_train_instances=2,
+             # Justification: The CSET paper uses temperature=0.7 in the equivalent setting in the
+             # Pull_Climate_Skepticism.ipynb notebook located at
+             # https://github.com/georgetown-cset/GPT3-Disinformation/blob/main/Narrative_Amplification/
+             temperature=0.7,
+             num_outputs=5,
+             max_tokens=100,
+         )
+         metric_specs = get_generative_harms_metric_specs() + get_disinformation_metric_specs(
+             args={"name": "reiteration"}
+         )
+         scenario_name += f",topic={topic}"
+     elif capability == "wedging":
+         adapter_spec = get_completion_adapter_spec(
+             # Justification: The CSET paper uses temperature=0.7 in the equivalent setting in all notebooks at
+             # https://github.com/georgetown-cset/GPT3-Disinformation/blob/main/Narrative_Wedging/
+             temperature=0.7,
+             num_outputs=5,
+             # Justification: Inspection. Subsequent generations begin with "Tweet" or "Reason" after a newline
+             stop_sequences=["\nTweet", "\nReason"],
+             # Justification: The maximum number of tokens in the training prompts is 87
+             max_tokens=90,
+         )
+         metric_specs = get_generative_harms_metric_specs() + get_disinformation_metric_specs(args={"name": "wedging"})
+
+     else:
+         raise ValueError(
+             f"Unsupported evaluation for disinformation capability '{capability}'. "
+             f"Please choose one of 'reiteration' or 'wedging'."
+         )
+
+     # Self-BLEU isn't defined for a single sequence.
+     if adapter_spec.num_outputs <= 1 and "self_bleu" in {metric_spec.args.get("name") for metric_spec in metric_specs}:
+         raise ValueError(
+             "Self-BLEU is not defined for a single sequence. The list of metrics includes 'self_bleu', but "
+             "`num_outputs` in the adapter spec is 1 or fewer. You should probably either remove 'self_bleu' from the "
+             "metrics list or increase `num_outputs`."
+         )
+
+     return RunSpec(
+         name=scenario_name,
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=metric_specs,
+         groups=["disinformation", f"disinformation_{capability}"],
+     )
+
+
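# Why the guard above requires num_outputs >= 2 when 'self_bleu' is among the
# metrics: each completion is scored against the remaining completions as
# references, so a single completion leaves nothing to compare against. A rough
# sketch of self-BLEU (not HELM's implementation; assumes nltk is installed):
from nltk.translate.bleu_score import sentence_bleu

def self_bleu(completions):
    scores = []
    for i, hypothesis in enumerate(completions):
        references = [c.split() for j, c in enumerate(completions) if j != i]
        scores.append(sentence_bleu(references, hypothesis.split()))
    return sum(scores) / len(scores)  # needs at least two completions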
+ @run_spec_function("code")
+ def get_code_spec(dataset: str, timeout=3) -> RunSpec:
+     # `timeout` trades accuracy for time. Used exclusively for APPS. Default from original APPS codebase.
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.code_scenario.CodeScenario", args={"dataset": dataset}
+     )
+
+     if dataset == "humaneval":
+         adapter_spec = get_completion_adapter_spec(
+             temperature=0.2,
+             # Taken from the original OpenAI paper to prevent the further generation of irrelevant classes/functions
+             stop_sequences=["\nclass", "\ndef", "\nif", "\nprint"],
+             max_tokens=600,
+         )
+     else:  # apps.
+         # Different in `stop_sequences`.
+         adapter_spec = get_completion_adapter_spec(
+             max_train_instances=2,  # Follows the original paper https://arxiv.org/pdf/2105.09938.pdf Appendix D.
+             temperature=0.2,
+             stop_sequences=[
+                 "'''",
+                 "---",
+                 '"""',
+                 "\n\n\n",
+             ],  # Manually selected by @lxuechen to prevent the further generation of irrelevant classes/functions
+             max_tokens=600,
+         )
+
+     if dataset == "humaneval":
+         code_metric_specs = get_basic_metric_specs(["code_eval_acc", "pass"])
+     else:  # APPS.
+         args: Dict[str, Any] = {"names": ["test_avg", "strict_acc"], "timeout": timeout}
+         code_metric_specs = [MetricSpec(class_name="helm.benchmark.metrics.code_metrics.APPSMetric", args=args)]
+
+     return RunSpec(
+         name=f"code:dataset={dataset}",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=code_metric_specs + get_generative_harms_metric_specs(),
+         groups=[f"code_{dataset}"],
+     )
+
+
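# Sketch of how the HumanEval stop sequences above bound a completion at the
# first irrelevant class/function (illustrative only; not code from this diff):
def truncate_at_stop(completion: str, stop_sequences) -> str:
    cut = len(completion)
    for stop in stop_sequences:
        index = completion.find(stop)
        if index != -1:
            cut = min(cut, index)  # truncate at the earliest stop sequence
    return completion[:cut]

assert truncate_at_stop("    return x\ndef g():", ["\nclass", "\ndef"]) == "    return x"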
+ @run_spec_function("the_pile")
+ def get_the_pile_spec(subset: str) -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.the_pile_scenario.ThePileScenario", args={"subset": subset}
+     )
+
+     return RunSpec(
+         name=f"the_pile:subset={subset}",
+         scenario_spec=scenario_spec,
+         adapter_spec=get_language_modeling_adapter_spec(),
+         metric_specs=get_language_modeling_metric_specs([]),
+         groups=["the_pile"],
+     )
+
+
+ @run_spec_function("ice")
+ def get_ice_spec(**kwargs) -> RunSpec:
+     scenario_spec = ScenarioSpec(class_name="helm.benchmark.scenarios.ice_scenario.ICEScenario", args=kwargs)
+
+     return RunSpec(
+         name="ice" + (":" if len(kwargs) > 0 else "") + ",".join(f"{k}={v}" for k, v in sorted(kwargs.items())),
+         scenario_spec=scenario_spec,
+         adapter_spec=get_language_modeling_adapter_spec(),
+         metric_specs=get_language_modeling_metric_specs([]),
+         groups=["ice"],
+     )
+
+
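# The name-building expression in get_ice_spec above, evaluated on sample kwargs
# (hypothetical values):
kwargs = {"subset": "ea", "gender": "male"}
name = "ice" + (":" if len(kwargs) > 0 else "") + ",".join(f"{k}={v}" for k, v in sorted(kwargs.items()))
assert name == "ice:gender=male,subset=ea"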
+ @run_spec_function("synthetic_efficiency")
+ def get_synthetic_efficiency_spec(
+     num_prompt_tokens: Optional[int] = None,
+     num_output_tokens: Optional[int] = None,
+     tokenizer: Optional[str] = None,
+     random: Optional[str] = None,
+ ) -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.synthetic_efficiency_scenario.SyntheticEfficiencyScenario",
+         args={"num_prompt_tokens": num_prompt_tokens, "num_instances": 10, "tokenizer": tokenizer},
+     )
+
+     if num_output_tokens is not None:
+         adapter_spec = get_completion_adapter_spec(max_tokens=num_output_tokens, random=random)
+     else:
+         adapter_spec = get_completion_adapter_spec(random=random)
+
+     return RunSpec(
+         name=f"synthetic_efficiency:random={random}",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=get_basic_generation_metric_specs(["exact_match"])
+         + get_generic_metric_specs()
+         + get_generative_harms_metric_specs(),
+         groups=["synthetic_efficiency"],
+     )
+
+
+ @run_spec_function("synthetic_reasoning")
+ def get_synthetic_reasoning_spec(mode: str) -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.synthetic_reasoning_scenario.SyntheticReasoningScenario",
+         args={"mode": mode},
+     )
+
+     adapter_spec = get_generation_adapter_spec(
+         instructions="Please solve the following problem.",
+         output_noun="Target",
+         max_train_instances=5,
+         stop_sequences=["\n"],
+         max_tokens=50,  # answer upperbounded by 50 tokens
+     )
+
+     return RunSpec(
+         name=f"synthetic_reasoning:mode={mode}",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=get_exact_match_metric_specs() + get_generative_harms_metric_specs(),
+         groups=["synthetic_reasoning", f"synthetic_reasoning_{mode}"],
+     )
+
+
+ @run_spec_function("wikitext_103")
+ def get_wikitext_103_spec() -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.wikitext_103_scenario.Wikitext103Scenario", args={}
+     )
+
+     return RunSpec(
+         name="wikitext_103",
+         scenario_spec=scenario_spec,
+         adapter_spec=get_language_modeling_adapter_spec(),
+         metric_specs=get_language_modeling_metric_specs([]),
+         groups=["wikitext_103"],
+     )
+
+
+ @run_spec_function("blimp")
+ def get_blimp_spec(phenomenon: str, method: str = ADAPT_MULTIPLE_CHOICE_SEPARATE_ORIGINAL) -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.blimp_scenario.BLiMPScenario", args={"phenomenon": phenomenon}
+     )
+     adapter_spec = get_multiple_choice_adapter_spec(
+         method=method,
+         instructions="Please select the grammatical sentence.",
+         input_noun=None,
+         output_noun="Answer",
+         empty_input=True,
+     )
+     metric_specs = get_exact_match_metric_specs()
+
+     return RunSpec(
+         name=f"blimp:phenomenon={phenomenon},method={method}",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=metric_specs,
+         groups=["blimp"],
+     )
+
+
+ @run_spec_function("summarization_xsum")
+ def get_xsum_summarization_spec(temperature: float = 0.3, device: str = "cpu") -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.summarization_scenario.SummarizationScenario",
+         args={"dataset_name": "xsum", "sampling_min_length": 50, "sampling_max_length": 150, "doc_max_length": 512},
+     )
+
+     adapter_spec = get_summarization_adapter_spec(
+         num_sents=1,
+         max_tokens=64,  # From Zhang et al. 2020 (https://arxiv.org/pdf/1912.08777.pdf)
+         temperature=temperature,  # The default of 0.3 was determined in initial pilots, comparing to 0.7 and 1.0
+     )
+
+     return RunSpec(
+         name=f"summarization_xsum:temperature={temperature},device={device}",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=get_summarization_metric_specs({"task": "summarization_xsum", "device": device})
+         + get_generative_harms_metric_specs(),
+         groups=["summarization_xsum"],
+     )
+
+
+ @run_spec_function("summarization_xsum_sampled")
+ def get_xsum_sampled_summarization_spec(temperature: float = 0.3, device: str = "cpu") -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.summarization_scenario.SummarizationScenario",
+         args={
+             "dataset_name": "xsum-sampled",
+             "sampling_min_length": 50,
+             "sampling_max_length": 150,
+             "doc_max_length": 512,
+         },
+     )
+
+     adapter_spec = get_summarization_adapter_spec(
+         num_sents=1,
+         max_tokens=64,  # From Zhang et al. 2020 (https://arxiv.org/pdf/1912.08777.pdf)
+         temperature=temperature,  # The default of 0.3 was determined in initial pilots, comparing to 0.7 and 1.0
+     )
+
+     return RunSpec(
+         name=f"summarization_xsum_sampled:temperature={temperature},device={device}",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=get_summarization_metric_specs({"task": "summarization_xsum_sampled", "device": device})
+         + get_generative_harms_metric_specs(),
+         groups=["summarization_xsum_sampled"],
+     )
+
+
+ @run_spec_function("summarization_cnndm")
+ def get_cnndm_summarization_spec(temperature: float = 0.3, device: str = "cpu") -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.summarization_scenario.SummarizationScenario",
+         args={"dataset_name": "cnn-dm", "sampling_min_length": 50, "sampling_max_length": 150, "doc_max_length": 512},
+     )
+
+     adapter_spec = get_summarization_adapter_spec(
+         num_sents=3,
+         max_tokens=128,  # From Zhang et al. 2020 (https://arxiv.org/pdf/1912.08777.pdf)
+         temperature=temperature,  # From Wu et al. 2021 (https://arxiv.org/pdf/2109.10862.pdf)
+     )
+
+     return RunSpec(
+         name=f"summarization_cnndm:temperature={temperature},device={device}",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=get_summarization_metric_specs({"task": "summarization_cnndm", "device": device})
+         + get_generative_harms_metric_specs(),
+         groups=["summarization_cnndm"],
+     )
+
+
+ @run_spec_function("empatheticdialogues")
+ def get_empatheticdialogues_spec() -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.dialogue_scenarios.EmpatheticDialoguesScenario", args={}
+     )
+
+     adapter_spec = AdapterSpec(
+         method=ADAPT_GENERATION,
+         input_prefix="",
+         output_prefix="BEGIN DIALOGUE\n",
+         max_train_instances=5,
+         num_outputs=1,
+         max_tokens=50,  # TODO: Justify
+         temperature=0.9,  # TODO: Justify
+         # TODO: Add stop sequences
+     )
+
+     return RunSpec(
+         name="empatheticdialogues",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=get_exact_match_metric_specs() + get_generative_harms_metric_specs(),
+         groups=[],
+     )
+
+
+ @run_spec_function("dyck_language")
+ def get_dyck_language_spec(num_parenthesis_pairs: int) -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.dyck_language_scenario.DyckLanguageScenario",
+         args={"num_parenthesis_pairs": int(num_parenthesis_pairs)},
+     )
+
+     adapter_spec = get_completion_adapter_spec(
+         instructions="Please complete the rest of the following Dyck sequences, "
+         "making sure that the parentheses are closed properly.",
+         input_prefix="Input: ",
+         max_tokens=5,
+         max_train_instances=3,  # Determined by looking at average length of examples to see what fits
+         stop_sequences=["\n"],
+     )
+
+     return RunSpec(
+         name=f"dyck_language_np={int(num_parenthesis_pairs)}",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=get_basic_generation_metric_specs(["exact_match_indicator"])
+         + get_generic_metric_specs()
+         + get_generative_harms_metric_specs(),
+         groups=["dyck_language"],
+     )
+
+
+ @run_spec_function("legal_support")
+ def get_legal_support_spec(method: str = ADAPT_MULTIPLE_CHOICE_JOINT) -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.legal_support_scenario.LegalSupportScenario", args={}
+     )
+
+     adapter_spec = get_multiple_choice_adapter_spec(
+         method=method,
+         instructions="Which statement best supports the passage?",
+         input_noun="Passage",
+         output_noun="Answer",
+         max_train_instances=3,  # We use 3 because these samples tend to be a bit longer
+     )
+     metric_specs = get_exact_match_metric_specs()
+
+     return RunSpec(
+         name=f"legal_support,method={method}",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=metric_specs,
+         groups=["legal_support"],
+     )
+
+
+ @run_spec_function("entity_matching")
+ def get_entity_matching_spec(dataset: str) -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.entity_matching_scenario.EntityMatchingScenario", args={"dataset": dataset}
+     )
+
+     adapter_spec = get_generation_adapter_spec(
+         instructions="Are Product A and Product B the same? Yes or No?",
+         output_noun="Answer",
+     )
+
+     return RunSpec(
+         name=f"entity_matching:dataset={dataset}",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=get_exact_match_metric_specs() + get_generative_harms_metric_specs(),
+         groups=["entity_matching"],
+     )
+
+
+ @run_spec_function("entity_data_imputation")
+ def get_entity_data_imputation_spec(dataset: str) -> RunSpec:
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.entity_data_imputation_scenario.EntityDataImputationScenario",
+         args={"dataset": dataset},
+     )
+
+     adapter_spec = get_generation_adapter_spec(instructions="What is the missing value?", output_noun="Answer")
+
+     return RunSpec(
+         name=f"entity_data_imputation:dataset={dataset}",
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=get_exact_match_metric_specs() + get_generative_harms_metric_specs(),
+         groups=["entity_data_imputation"],
+     )
+
+
+ @htrack("Extracting adaptation parameters from the BIG-bench task definition and building the RunSpec")
+ @run_spec_function("big_bench")
+ def get_big_bench_spec(task: str, subtask: str) -> RunSpec:
+     from helm.benchmark.scenarios.big_bench_scenario import BIGBenchScenario
+
+     def get_adaptation_method(big_bench_metrics: List[str]) -> str:
+         """
+         From BIG-bench, "there are three types of BIG-bench JSON tasks - generative and scoring
+         (e.g. simple_arithmetic_json), and multiple-choice (e.g. simple_arithmetic_json_multiple_choice)."
+
+         There might be a better way to determine the adaptation method from task.json, but for now, we
+         just check if "multiple_choice_grade" is in the list of metrics. If it is, we assume the
+         adaption method should be `ADAPT_MULTIPLE_CHOICE_JOINT`. Otherwise, the adaptation method is
+         `ADAPT_GENERATION`.
+         """
+         return ADAPT_MULTIPLE_CHOICE_JOINT if "multiple_choice_grade" in big_bench_metrics else ADAPT_GENERATION
+
+     def get_metric_specs(big_bench_metrics: List[str]) -> List[MetricSpec]:
+         """
+         Gets the corresponding `BasicMetric` metric names for the name of the metrics
+         provided by BIG-bench and constructs the `MetricSpec`.
+
+         The list of metrics that BIG-bench supports can be found here:
+         https://github.com/google/BIG-bench/blob/main/docs/doc.md#available-metrics.
+         """
+         metric_names: Set[str] = set()
+
+         for big_bench_metric_name in big_bench_metrics:
+             if big_bench_metric_name == "multiple_choice_grade":
+                 # `exact_match` and `quasi_exact_match` is all we need for multiple choice tasks
+                 return get_exact_match_metric_specs()
+             elif big_bench_metric_name == "exact_str_match":
+                 metric_names.update(["exact_match", "quasi_exact_match"])
+             elif big_bench_metric_name == "bleu":
+                 metric_names.update(["bleu_1", "bleu_4"])
+             elif big_bench_metric_name == "rouge":
+                 metric_names.update(["rouge_1", "rouge_2", "rouge_l"])
+             else:
+                 hlog(f"Unhandled BIG-bench metric: {big_bench_metric_name}")
+                 continue
+
+         return get_basic_metric_specs(list(metric_names))
+
+     scenario_spec = ScenarioSpec(
+         class_name="helm.benchmark.scenarios.big_bench_scenario.BIGBenchScenario",
+         args={"task": task, "subtask": subtask},
+     )
+
+     # Get BIG-bench task definition.
+     scenario_cache_path = get_scenario_cache_path(get_benchmark_output_path(), BIGBenchScenario.name)
+     big_bench_task: Dict = BIGBenchScenario.download_and_get_task(scenario_cache_path, task, subtask)
+
+     # The JSON schema for BIG-bench can be found here:
+     # https://github.com/google/BIG-bench/blob/main/docs/doc.md#json-schema.
+     # "metrics" is a required field. The default values were populated using the link above.
+     adapter_spec = AdapterSpec(
+         method=get_adaptation_method(big_bench_task["metrics"]),
+         max_train_instances=5,  # Can override with the `MaxTrainInstancesRunExpander`.
+         num_outputs=1,  # Can override with the `NumOutputsRunExpander`.
+         # From "Beyond the Imitation Game: Quantifying and extrapolating the capabilities of language models",
+         # for the BIG-G models tested on BIG-bench, "we use an input context length of 1,024 tokens
+         # and an output length of 64 tokens. We evaluate on up to 1,000 examples per task".
+         max_tokens=64,
+         # "all model outputs were sampled greedily (with zero temperature), unless otherwise noted."
+         temperature=0,
+         instructions=big_bench_task.get("task_prefix", ""),
+         # BIG-bench's default value for "example_input_prefix" and "example_output_prefix" was "\nQ: " and "\nA: ".
+         # Instead, use our defaults for multiple choice tasks: "Question: " and "\nAnswer: ".
+         input_prefix=big_bench_task.get("example_input_prefix", "Question: "),
+         output_prefix=big_bench_task.get("example_output_prefix", "Answer: "),
+         # Use our default for multiple choice: A., B., C., D.,...
+         # reference_prefix=big_bench_task.get("choice_prefix", "\n choice: "),
+         # The default value for "stop_string" in BIG-bench is None.
+         stop_sequences=[str(big_bench_task.get("stop_string"))] if big_bench_task.get("stop_string", None) else [],
+     )
+
+     run_spec_name: str = f"big_bench:task={task}"
+     if subtask:
+         run_spec_name += f",subtask={subtask}"
+     return RunSpec(
+         name=run_spec_name,
+         scenario_spec=scenario_spec,
+         adapter_spec=adapter_spec,
+         metric_specs=get_metric_specs(big_bench_task["metrics"]),
+         groups=[f"big_bench_{task}"],
+     )
+
+
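# The adaptation-method dispatch used by get_big_bench_spec above, restated as a
# standalone sketch (hypothetical helper mirroring the nested function; assumes
# the adaptation constants equal these strings):
def choose_adaptation_method(big_bench_metrics) -> str:
    return "multiple_choice_joint" if "multiple_choice_grade" in big_bench_metrics else "generation"

assert choose_adaptation_method(["multiple_choice_grade"]) == "multiple_choice_joint"
assert choose_adaptation_method(["bleu", "rouge"]) == "generation"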
+ @run_spec_function("covid_dialog")
992
+ def get_covid_dialog_spec() -> RunSpec:
993
+ scenario_spec = ScenarioSpec(
994
+ class_name="helm.benchmark.scenarios.covid_dialog_scenario.COVIDDialogScenario", args={}
995
+ )
996
+
997
+ adapter_spec = get_generation_adapter_spec(
998
+ instructions="Generate a response given a patient's questions and concerns.",
999
+ input_noun="Patient",
1000
+ output_noun="Doctor",
1001
+ max_tokens=128,
1002
+ )
1003
+
1004
+ return RunSpec(
1005
+ name="covid_dialog",
1006
+ scenario_spec=scenario_spec,
1007
+ adapter_spec=adapter_spec,
1008
+ metric_specs=get_open_ended_generation_metric_specs() + get_generative_harms_metric_specs(),
1009
+ groups=["COVIDDialog"],
1010
+ )
1011
+
1012
+
1013
+ @run_spec_function("me_q_sum")
1014
+ def get_me_q_sum_spec() -> RunSpec:
1015
+ scenario_spec = ScenarioSpec(class_name="helm.benchmark.scenarios.me_q_sum_scenario.MeQSumScenario", args={})
1016
+
1017
+ adapter_spec = get_summarization_adapter_spec(
1018
+ num_sents=1,
1019
+ max_tokens=128,
1020
+ temperature=0.3,
1021
+ )
1022
+
1023
+ return RunSpec(
1024
+ name="me_q_sum",
1025
+ scenario_spec=scenario_spec,
1026
+ adapter_spec=adapter_spec,
1027
+ metric_specs=get_open_ended_generation_metric_specs() + get_generative_harms_metric_specs(),
1028
+ groups=["MeQSum"],
1029
+ )
1030
+
1031
+
1032
+ @run_spec_function("med_mcqa")
1033
+ def get_med_mcqa_spec() -> RunSpec:
1034
+ scenario_spec = ScenarioSpec(class_name="helm.benchmark.scenarios.med_mcqa_scenario.MedMCQAScenario", args={})
1035
+
1036
+ adapter_spec = get_multiple_choice_adapter_spec(
1037
+ method=ADAPT_MULTIPLE_CHOICE_JOINT,
1038
+ instructions="Give a letter answer among A, B, C or D.",
1039
+ input_noun="Question",
1040
+ output_noun="Answer",
1041
+ )
1042
+
1043
+ return RunSpec(
1044
+ name="med_mcqa",
1045
+ scenario_spec=scenario_spec,
1046
+ adapter_spec=adapter_spec,
1047
+ metric_specs=get_exact_match_metric_specs(),
1048
+ groups=["med_mcqa"],
1049
+ )
1050
+
1051
+
1052
+ @run_spec_function("med_paragraph_simplification")
1053
+ def get_med_paragraph_simplification_spec() -> RunSpec:
1054
+ scenario_spec = ScenarioSpec(
1055
+ class_name="helm.benchmark.scenarios.med_paragraph_simplification_scenario.MedParagraphSimplificationScenario",
1056
+ args={},
1057
+ )
1058
+
1059
+ adapter_spec = get_summarization_adapter_spec(
1060
+ num_sents=10,
1061
+ max_tokens=512,
1062
+ temperature=0.3,
1063
+ )
1064
+
1065
+ return RunSpec(
1066
+ name="med_paragraph_simplification",
1067
+ scenario_spec=scenario_spec,
1068
+ adapter_spec=adapter_spec,
1069
+ metric_specs=get_open_ended_generation_metric_specs() + get_generative_harms_metric_specs(),
1070
+ groups=["MedParagraphSimplification"],
1071
+ )
1072
+
1073
+
1074
+ @run_spec_function("live_qa")
1075
+ def get_live_qa_spec() -> RunSpec:
1076
+ scenario_spec = ScenarioSpec(class_name="helm.benchmark.scenarios.live_qa_scenario.LiveQAScenario")
1077
+
1078
+ adapter_spec = get_generation_adapter_spec(
1079
+ instructions="Please answer the following consumer health question.",
1080
+ input_noun="Question",
1081
+ output_noun="Answer",
1082
+ max_train_instances=0,
1083
+ max_tokens=512,
1084
+ )
1085
+ annotator_specs = [AnnotatorSpec(class_name="helm.benchmark.annotation.live_qa_annotator.LiveQAAnnotator")]
1086
+ metric_specs = get_open_ended_generation_metric_specs() + [
1087
+ MetricSpec(class_name="helm.benchmark.metrics.live_qa_metrics.LiveQAScoreMetric")
1088
+ ]
1089
+
1090
+ return RunSpec(
1091
+ name="live_qa",
1092
+ scenario_spec=scenario_spec,
1093
+ adapter_spec=adapter_spec,
1094
+ annotators=annotator_specs,
1095
+ metric_specs=metric_specs,
1096
+ groups=["live_qa"],
1097
+ )
1098
+
1099
+
1100
+ @run_spec_function("lextreme")
1101
+ def get_lextreme_spec(subset: str) -> RunSpec:
1102
+ from helm.benchmark.scenarios.lextreme_scenario import (
1103
+ get_lextreme_instructions,
1104
+ get_lextreme_max_train_instances,
1105
+ get_lextreme_max_tokens,
1106
+ TaskType,
1107
+ get_lextreme_task_type,
1108
+ )
1109
+
1110
+ task_type = get_lextreme_task_type(subset)
1111
+
1112
+ scenario_spec = ScenarioSpec(
1113
+ class_name="helm.benchmark.scenarios.lextreme_scenario.LEXTREMEScenario",
1114
+ args={"subset": subset},
1115
+ )
1116
+
1117
+ adapter_spec = get_generation_adapter_spec(
1118
+ instructions=get_lextreme_instructions(subset),
1119
+ input_noun="Passage",
1120
+ output_noun="Answer",
1121
+ max_tokens=get_lextreme_max_tokens(subset),
1122
+ max_train_instances=get_lextreme_max_train_instances(subset), # in some subsets the input is very long
1123
+ multi_label=(task_type == TaskType.MLTC),
1124
+ )
1125
+
1126
+ metric_specs = get_basic_generation_metric_specs([]) + get_generic_metric_specs()
1127
+ if task_type == TaskType.MLTC:
1128
+ metric_specs += get_classification_metric_specs(delimiter=", ")
1129
+ elif task_type == TaskType.SLTC:
1130
+ metric_specs += get_classification_metric_specs()
1131
+
1132
+ return RunSpec(
1133
+ name=f"lextreme:subset={subset}",
1134
+ scenario_spec=scenario_spec,
1135
+ adapter_spec=adapter_spec,
1136
+ metric_specs=metric_specs,
1137
+ groups=["lextreme"],
1138
+ )
+
+
+@run_spec_function("lex_glue")
+def get_lex_glue_spec(subset: str) -> RunSpec:
+    from helm.benchmark.scenarios.lex_glue_scenario import (
+        get_lex_glue_instructions,
+        get_lex_glue_max_tokens,
+        get_lex_glue_max_train_instances,
+        get_lex_glue_task_type,
+    )
+    from helm.benchmark.scenarios.lextreme_scenario import TaskType
+
+    task_type = get_lex_glue_task_type(subset)
+
+    scenario_spec = ScenarioSpec(
+        class_name="helm.benchmark.scenarios.lex_glue_scenario.LexGLUEScenario",
+        args={"subset": subset},
+    )
+
+    adapter_spec = get_generation_adapter_spec(
+        instructions=get_lex_glue_instructions(subset),
+        input_noun="Passage",
+        output_noun="Answer",
+        max_tokens=get_lex_glue_max_tokens(subset),
+        max_train_instances=get_lex_glue_max_train_instances(subset),  # in some subsets the input is very long
+        multi_label=(task_type == TaskType.MLTC),
+    )
+
+    metric_specs = get_basic_generation_metric_specs([]) + get_generic_metric_specs()
+    if task_type == TaskType.MLTC:
+        metric_specs += get_classification_metric_specs(delimiter=", ")
+    elif task_type == TaskType.SLTC:
+        metric_specs += get_classification_metric_specs()
+
+    return RunSpec(
+        name=f"lex_glue:subset={subset}",
+        scenario_spec=scenario_spec,
+        adapter_spec=adapter_spec,
+        metric_specs=metric_specs,
+        groups=["lex_glue"],
+    )
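
lex_glue mirrors the lextreme pattern: a multi-label subset (TaskType.MLTC) gets classification metrics with a ", " delimiter, while a single-label one (TaskType.SLTC) gets the default. A sketch, where "unfair_tos" is assumed to be a valid multi-label subset:

    spec = get_lex_glue_spec(subset="unfair_tos")
    print(spec.name)  # "lex_glue:subset=unfair_tos"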
+
+
+@run_spec_function("billsum_legal_summarization")
+def get_billsum_legal_summarization_spec(temperature: float = 0.3, device: str = "cpu") -> RunSpec:
+    scenario_spec = ScenarioSpec(
+        class_name="helm.benchmark.scenarios.legal_summarization_scenario.LegalSummarizationScenario",
+        args={
+            "dataset_name": "BillSum",
+            "sampling_min_length": 200,
+            "sampling_max_length": 800,  # 2000 would be ideal, but for economic reasons set it lower
+            "doc_max_length": 2048,  # 4096 would be ideal, but for economic reasons set it lower
+        },
+    )
+
+    adapter_spec = get_summarization_adapter_spec(
+        num_sents=None,
+        max_tokens=1024,  # From Kornilova & Eidelman, 2019 (https://arxiv.org/pdf/1910.00523.pdf)
+        temperature=temperature,  # similar to other summarization tasks
+    )
+
+    return RunSpec(
+        name=f"legal_summarization:temperature={temperature},device={device}",
+        scenario_spec=scenario_spec,
+        adapter_spec=adapter_spec,
+        metric_specs=get_summarization_metric_specs({"task": "billsum_legal_summarization", "device": device})
+        + get_generative_harms_metric_specs(),
+        groups=["legal_summarization", "summarization"],
+    )
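
Both temperature and device are exposed as run-spec arguments, so they can be overridden when the spec is built; a sketch:

    spec = get_billsum_legal_summarization_spec(temperature=0.5, device="cuda")
    print(spec.name)  # "legal_summarization:temperature=0.5,device=cuda"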
+
+
+@run_spec_function("multilexsum_legal_summarization")
+def get_multilexsum_legal_summarization_spec(temperature: float = 0.3, device: str = "cpu") -> RunSpec:
+    scenario_spec = ScenarioSpec(
+        class_name="helm.benchmark.scenarios.legal_summarization_scenario.LegalSummarizationScenario",
+        args={
+            "dataset_name": "MultiLexSum",
+            "sampling_min_length": 100,
+            "sampling_max_length": 400,  # 1000 would be ideal, but for economic reasons set it lower
+            "doc_max_length": 1024,  # 2048 would be ideal, but for economic reasons set it lower
+        },
+    )
+
+    adapter_spec = get_summarization_adapter_spec(
+        num_sents=2,
+        max_tokens=256,  # From Shen et al., 2022 (https://arxiv.org/pdf/2206.10883.pdf)
+        temperature=temperature,  # similar to other summarization tasks
+    )
+
+    return RunSpec(
+        name=f"legal_summarization:temperature={temperature},device={device}",
+        scenario_spec=scenario_spec,
+        adapter_spec=adapter_spec,
+        metric_specs=get_summarization_metric_specs({"task": "multilexsum_legal_summarization", "device": device})
+        + get_generative_harms_metric_specs(),
+        groups=["legal_summarization", "summarization"],
+    )
+
+
+@run_spec_function("eurlexsum_legal_summarization")
+def get_eurlexsum_legal_summarization_spec(temperature: float = 0.3, device: str = "cpu") -> RunSpec:
+    scenario_spec = ScenarioSpec(
+        class_name="helm.benchmark.scenarios.legal_summarization_scenario.LegalSummarizationScenario",
+        args={
+            "dataset_name": "EurLexSum",
+            "sampling_min_length": 400,
+            "sampling_max_length": 1600,  # 4000 would be ideal, but for economic reasons set it lower
+            "doc_max_length": 2048,  # 8192 would be ideal, but for economic reasons set it lower
+        },
+    )
+
+    adapter_spec = get_summarization_adapter_spec(
+        num_sents=None,
+        max_tokens=2048,  # From Aumiller et al., 2022 (https://arxiv.org/pdf/2210.13448.pdf)
+        temperature=temperature,  # similar to other summarization tasks
+    )
+
+    return RunSpec(
+        name=f"legal_summarization:temperature={temperature},device={device}",
+        scenario_spec=scenario_spec,
+        adapter_spec=adapter_spec,
+        metric_specs=get_summarization_metric_specs({"task": "eurlexsum_legal_summarization", "device": device})
+        + get_generative_harms_metric_specs(),
+        groups=["legal_summarization", "summarization"],
+    )
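
Note that all three legal-summarization functions format the RunSpec name identically; only the scenario args and the metric "task" label differ. With default arguments the names therefore collide, as this sketch shows:

    b = get_billsum_legal_summarization_spec()
    e = get_eurlexsum_legal_summarization_spec()
    assert b.name == e.name == "legal_summarization:temperature=0.3,device=cpu"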
+
+
+@run_spec_function("verifiability_judgment")
+def get_verifiability_judgment_spec() -> RunSpec:
+    scenario_spec = ScenarioSpec(
+        class_name="helm.benchmark.scenarios.verifiability_judgment_scenario.VerifiabilityJudgementScenario", args={}
+    )
+
+    adapter_spec = get_generation_adapter_spec(
+        instructions=(
+            'Given the statement and its source, judge whether the source "fully supports", '
+            '"partially supports" or "does not support" the statement.'
+        ),
+        input_noun="Statement",
+        # Add another new line before the output noun, since the source might have
+        # newlines embedded in it.
+        output_noun="\nJudgment",
+        max_tokens=10,
+    )
+
+    return RunSpec(
+        name="verifiability_judgment",
+        scenario_spec=scenario_spec,
+        adapter_spec=adapter_spec,
+        metric_specs=get_basic_metric_specs(["exact_match", "quasi_exact_match"]),
+        groups=["verifiability_judgment"],
+    )
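
With max_tokens=10 the completion is just long enough for one of the three judgment labels; a quick inspection sketch:

    spec = get_verifiability_judgment_spec()
    print(spec.name)                     # "verifiability_judgment"
    print(spec.adapter_spec.max_tokens)  # 10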
+
+
+@run_spec_function("opinions_qa")
+def get_opinions_qa_spec(
+    survey_type: str,
+    num_logprobs: str,
+    context: str = "None",
+    num_train_trials: str = "1",
+    method: str = ADAPT_MULTIPLE_CHOICE_JOINT,
+) -> RunSpec:
+    scenario_spec = ScenarioSpec(
+        class_name="helm.benchmark.scenarios.opinions_qa_scenario.OpinionsQAScenario",
+        args={"survey_type": survey_type, "context": context},
+    )
+
+    adapter_spec = get_multiple_choice_adapter_spec(
+        method=method,
+        instructions="",
+        input_noun="Question",
+        output_noun="Answer",
+        max_train_instances=1 if "steer" in context else 0,
+        max_tokens=1,
+        num_outputs=int(num_logprobs),
+        num_train_trials=1 if context != "steer-qa" else int(num_train_trials),
+        sample_train=False,
+    )
+
+    return RunSpec(
+        name=f"opinions_qa:survey={survey_type},num_logprobs={num_logprobs}"
+        + f",context={context},num_train_trials={num_train_trials}",
+        scenario_spec=scenario_spec,
+        adapter_spec=adapter_spec,
+        metric_specs=[],
+        groups=["opinions_qa"],
+    )
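
num_logprobs and num_train_trials arrive as strings (run-entry arguments are text) and are converted with int() inside; a sketch, where "example_survey" is a hypothetical survey_type:

    spec = get_opinions_qa_spec(
        survey_type="example_survey",  # placeholder, not a real survey name
        num_logprobs="10",
        context="steer-qa",
        num_train_trials="3",
    )
    print(spec.adapter_spec.num_outputs)       # 10
    print(spec.adapter_spec.num_train_trials)  # 3, since context == "steer-qa"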
+
+
+@run_spec_function("lm_entry")
+def get_lm_entry_spec(task: str, method: str = ADAPT_GENERATION) -> RunSpec:
+    scenario_spec = ScenarioSpec(
+        class_name="helm.benchmark.scenarios.lm_entry_scenario.LMEntryScenario",
+        args={"task": task},
+    )
+    adapter_spec: AdapterSpec
+    metric_specs: List[MetricSpec]
+
+    if method == ADAPT_MULTIPLE_CHOICE_JOINT:
+        if task in ["first_letter", "last_letter", "first_word", "last_word", "word_before", "word_after"]:
+            raise ValueError(f"Task {task} cannot be cast to multiple choice.")
+
+        adapter_spec = get_multiple_choice_adapter_spec(
+            method=method,
+            instructions="Answer the following multiple choice question with a single letter",
+            input_noun="Question",
+            output_noun="\nAnswer",
+        )
+        metric_specs = get_exact_match_metric_specs()
+    elif method == ADAPT_GENERATION:
+        adapter_spec = get_generation_adapter_spec(
+            instructions="Answer the following question in one word.",
+            input_noun="Q",
+            output_noun="\nA",
+            # Don't use any stop sequences: the task is zero-shot, so we can't
+            # expect the model to figure out the output format on its own.
+            stop_sequences=[],
+            # Cap max_tokens to save tokens; the answer is a single word, so 10 tokens should suffice.
+            max_tokens=10,
+        )
+        # Non-quasi exact-match metrics make no sense for this task.
+        metric_specs = get_basic_metric_specs(["quasi_exact_match", "quasi_prefix_exact_match", "f1_score"])
+    else:
+        raise ValueError(f"Unknown method: {method}")
+
+    return RunSpec(
+        name=f"lm_entry:task={task},method={method}",
+        scenario_spec=scenario_spec,
+        adapter_spec=adapter_spec,
+        metric_specs=metric_specs,
+        groups=["lm_entry"],
+    )
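
The letter- and word-position tasks only make sense as open-ended generation, hence the guard in the multiple-choice branch; a sketch (assuming ADAPT_GENERATION resolves to the string "generation"):

    spec = get_lm_entry_spec(task="first_letter")  # defaults to generation
    print(spec.name)  # "lm_entry:task=first_letter,method=generation"

    # The same task cast to multiple choice is rejected:
    # get_lm_entry_spec(task="first_letter", method=ADAPT_MULTIPLE_CHOICE_JOINT)
    # -> ValueError: Task first_letter cannot be cast to multiple choice.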
+
+
+@run_spec_function("thai_exam")
+def get_thai_exam_spec(exam: str = "onet", method: str = ADAPT_MULTIPLE_CHOICE_JOINT) -> RunSpec:
+    scenario_spec = ScenarioSpec(
+        class_name="helm.benchmark.scenarios.thai_exam_scenario.ThaiExamScenario", args={"exam": exam}
+    )
+
+    adapter_spec = get_multiple_choice_adapter_spec(
+        method=method,
+        instructions="The following are multiple choice questions (with answers).",
+        input_noun="Question",
+        output_noun="Answer",
+        max_train_instances=5,
+    )
+
+    return RunSpec(
+        name=f"thai_exam:exam={exam},method={method}",
+        scenario_spec=scenario_spec,
+        adapter_spec=adapter_spec,
+        metric_specs=get_exact_match_metric_specs(),
+        groups=["thai_exam", f"thai_exam_{exam}"],
+    )
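
With the defaults this builds the ONET exam under multiple-choice joint adaptation; a sketch (assuming ADAPT_MULTIPLE_CHOICE_JOINT resolves to the string "multiple_choice_joint"):

    spec = get_thai_exam_spec()
    print(spec.name)    # "thai_exam:exam=onet,method=multiple_choice_joint"
    print(spec.groups)  # ["thai_exam", "thai_exam_onet"]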