crfm-helm 0.5.4__py3-none-any.whl → 0.5.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crfm-helm might be problematic.

Files changed (580)
  1. crfm_helm-0.5.5.dist-info/METADATA +413 -0
  2. crfm_helm-0.5.5.dist-info/RECORD +894 -0
  3. {crfm_helm-0.5.4.dist-info → crfm_helm-0.5.5.dist-info}/WHEEL +1 -1
  4. helm/benchmark/adaptation/adapter_spec.py +13 -1
  5. helm/benchmark/adaptation/adapters/adapter_factory.py +15 -1
  6. helm/benchmark/adaptation/adapters/binary_ranking_adapter.py +1 -1
  7. helm/benchmark/adaptation/adapters/chat_adapter.py +49 -0
  8. helm/benchmark/adaptation/adapters/ehr_instruction_adapter.py +108 -0
  9. helm/benchmark/adaptation/adapters/generation_adapter.py +1 -1
  10. helm/benchmark/adaptation/adapters/in_context_learning_adapter.py +1 -1
  11. helm/benchmark/adaptation/adapters/language_modeling_adapter.py +1 -1
  12. helm/benchmark/adaptation/adapters/multimodal/generation_multimodal_adapter.py +4 -2
  13. helm/benchmark/adaptation/adapters/multimodal/in_context_learning_multimodal_adapter.py +1 -1
  14. helm/benchmark/adaptation/adapters/multimodal/multiple_choice_joint_multimodal_adapter.py +1 -1
  15. helm/benchmark/adaptation/adapters/multimodal/test_in_context_learning_multimodal_adapter.py +4 -2
  16. helm/benchmark/adaptation/adapters/multimodal/test_multimodal_prompt.py +1 -1
  17. helm/benchmark/adaptation/adapters/multiple_choice_calibrated_adapter.py +1 -1
  18. helm/benchmark/adaptation/adapters/multiple_choice_joint_adapter.py +2 -2
  19. helm/benchmark/adaptation/adapters/multiple_choice_joint_chain_of_thought_adapter.py +87 -0
  20. helm/benchmark/adaptation/adapters/multiple_choice_separate_adapter.py +1 -1
  21. helm/benchmark/adaptation/adapters/test_generation_adapter.py +3 -3
  22. helm/benchmark/adaptation/adapters/test_language_modeling_adapter.py +2 -2
  23. helm/benchmark/adaptation/adapters/test_multiple_choice_joint_adapter.py +2 -2
  24. helm/benchmark/adaptation/common_adapter_specs.py +69 -4
  25. helm/benchmark/adaptation/prompt.py +1 -1
  26. helm/benchmark/annotation/aci_bench_annotator.py +95 -0
  27. helm/benchmark/annotation/air_bench_annotator.py +20 -5
  28. helm/benchmark/annotation/annotator.py +5 -0
  29. helm/benchmark/annotation/annotator_factory.py +3 -20
  30. helm/benchmark/annotation/autobencher_capabilities_annotator.py +107 -0
  31. helm/benchmark/annotation/autobencher_safety_annotator.py +98 -0
  32. helm/benchmark/annotation/bigcodebench_annotator.py +108 -0
  33. helm/benchmark/annotation/bird_sql_annotator.py +58 -0
  34. helm/benchmark/annotation/chw_care_plan_annotator.py +98 -0
  35. helm/benchmark/annotation/czech_bank_qa_annotator.py +78 -0
  36. helm/benchmark/annotation/dischargeme_annotator.py +107 -0
  37. helm/benchmark/annotation/ehr_sql_annotator.py +87 -0
  38. helm/benchmark/annotation/helpdesk_call_summarization_annotator.py +131 -0
  39. helm/benchmark/annotation/image2struct/image_compiler_annotator.py +6 -1
  40. helm/benchmark/annotation/live_qa_annotator.py +1 -1
  41. helm/benchmark/annotation/med_dialog_annotator.py +99 -0
  42. helm/benchmark/annotation/medalign_annotator.py +100 -0
  43. helm/benchmark/annotation/medi_qa_annotator.py +98 -0
  44. helm/benchmark/annotation/medication_qa_annotator.py +87 -63
  45. helm/benchmark/annotation/mental_health_annotator.py +98 -0
  46. helm/benchmark/annotation/mimic_rrs_annotator.py +100 -0
  47. helm/benchmark/annotation/model_as_judge.py +218 -6
  48. helm/benchmark/annotation/mtsamples_procedures_annotator.py +98 -0
  49. helm/benchmark/annotation/mtsamples_replicate_annotator.py +101 -0
  50. helm/benchmark/annotation/omni_math/gpt_evaluation_template.txt +152 -0
  51. helm/benchmark/annotation/omni_math/gpt_evaluation_zero_shot_template.txt +36 -0
  52. helm/benchmark/annotation/omni_math_annotator.py +132 -0
  53. helm/benchmark/annotation/spider_annotator.py +18 -0
  54. helm/benchmark/annotation/starr_patient_instructions_annotator.py +98 -0
  55. helm/benchmark/annotation/wildbench/eval_template.pairwise.v2.md +75 -0
  56. helm/benchmark/annotation/wildbench/eval_template.score.v2.md +66 -0
  57. helm/benchmark/annotation/wildbench_annotator.py +119 -0
  58. helm/benchmark/annotation_executor.py +35 -15
  59. helm/benchmark/augmentations/cleva_perturbation.py +9 -8
  60. helm/benchmark/augmentations/contraction_expansion_perturbation.py +2 -2
  61. helm/benchmark/augmentations/contrast_sets_perturbation.py +2 -2
  62. helm/benchmark/augmentations/dialect_perturbation.py +4 -5
  63. helm/benchmark/augmentations/extra_space_perturbation.py +2 -2
  64. helm/benchmark/augmentations/filler_words_perturbation.py +2 -2
  65. helm/benchmark/augmentations/gender_perturbation.py +2 -2
  66. helm/benchmark/augmentations/lowercase_perturbation.py +2 -2
  67. helm/benchmark/augmentations/mild_mix_perturbation.py +6 -6
  68. helm/benchmark/augmentations/misspelling_perturbation.py +2 -2
  69. helm/benchmark/augmentations/person_name_perturbation.py +4 -5
  70. helm/benchmark/augmentations/perturbation.py +1 -1
  71. helm/benchmark/augmentations/space_perturbation.py +2 -2
  72. helm/benchmark/augmentations/suffix_perturbation.py +2 -2
  73. helm/benchmark/augmentations/synonym_perturbation.py +4 -3
  74. helm/benchmark/augmentations/test_perturbation.py +16 -13
  75. helm/benchmark/augmentations/translate_perturbation.py +2 -2
  76. helm/benchmark/augmentations/typos_perturbation.py +2 -2
  77. helm/benchmark/data_preprocessor.py +2 -2
  78. helm/benchmark/huggingface_registration.py +2 -7
  79. helm/benchmark/metrics/aci_bench_metrics.py +34 -0
  80. helm/benchmark/metrics/basic_metrics.py +6 -6
  81. helm/benchmark/metrics/bbq_metrics.py +2 -2
  82. helm/benchmark/metrics/bias_metrics.py +12 -3
  83. helm/benchmark/metrics/bigcodebench_metrics.py +25 -0
  84. helm/benchmark/metrics/bird_sql_metrics.py +28 -0
  85. helm/benchmark/metrics/chw_care_plan_metrics.py +34 -0
  86. helm/benchmark/metrics/classification_metrics.py +76 -12
  87. helm/benchmark/metrics/cleva_harms_metrics.py +8 -7
  88. helm/benchmark/metrics/code_metrics.py +5 -5
  89. helm/benchmark/metrics/comet_metric.py +125 -0
  90. helm/benchmark/metrics/common_metric_specs.py +9 -2
  91. helm/benchmark/metrics/conv_fin_qa_calc_metrics.py +72 -0
  92. helm/benchmark/metrics/copyright_metrics.py +4 -4
  93. helm/benchmark/metrics/czech_bank_qa_metrics.py +29 -0
  94. helm/benchmark/metrics/decodingtrust_fairness_metrics.py +2 -2
  95. helm/benchmark/metrics/decodingtrust_privacy_metrics.py +2 -2
  96. helm/benchmark/metrics/decodingtrust_stereotype_bias_metrics.py +2 -2
  97. helm/benchmark/metrics/dischargeme_metrics.py +34 -0
  98. helm/benchmark/metrics/disinformation_metrics.py +4 -4
  99. helm/benchmark/metrics/dry_run_metrics.py +5 -5
  100. helm/benchmark/metrics/efficiency_metrics.py +3 -3
  101. helm/benchmark/metrics/ehr_sql_metrics.py +103 -0
  102. helm/benchmark/metrics/evaluate_instances_metric.py +3 -3
  103. helm/benchmark/metrics/evaluate_reference_metrics.py +144 -16
  104. helm/benchmark/metrics/gpqa_chain_of_thought_metric.py +103 -0
  105. helm/benchmark/metrics/gpt4_audio_critique_metrics.py +167 -0
  106. helm/benchmark/metrics/helpdesk_call_summarization_metrics.py +36 -0
  107. helm/benchmark/metrics/ifeval/__init__.py +0 -0
  108. helm/benchmark/metrics/ifeval/instructions.py +1574 -0
  109. helm/benchmark/metrics/ifeval/instructions_registry.py +182 -0
  110. helm/benchmark/metrics/ifeval/instructions_registry.pyi +3 -0
  111. helm/benchmark/metrics/ifeval/instructions_util.py +153 -0
  112. helm/benchmark/metrics/ifeval_metrics.py +55 -0
  113. helm/benchmark/metrics/image_generation/aesthetics_metrics.py +1 -1
  114. helm/benchmark/metrics/image_generation/detection_metrics.py +1 -1
  115. helm/benchmark/metrics/image_generation/detectors/vitdet.py +1 -1
  116. helm/benchmark/metrics/image_generation/fractal_dimension/test_fractal_dimension_util.py +1 -1
  117. helm/benchmark/metrics/image_generation/fractal_dimension_metric.py +1 -1
  118. helm/benchmark/metrics/image_generation/nsfw_metrics.py +1 -1
  119. helm/benchmark/metrics/image_generation/q16/test_q16.py +3 -1
  120. helm/benchmark/metrics/image_generation/q16_toxicity_metrics.py +1 -1
  121. helm/benchmark/metrics/image_generation/skin_tone_metrics.py +2 -2
  122. helm/benchmark/metrics/image_generation/watermark/test_watermark_detector.py +1 -1
  123. helm/benchmark/metrics/image_generation/watermark_metrics.py +1 -1
  124. helm/benchmark/metrics/instruction_following_critique_metrics.py +4 -4
  125. helm/benchmark/metrics/language_modeling_metrics.py +4 -4
  126. helm/benchmark/metrics/machine_translation_metrics.py +2 -2
  127. helm/benchmark/metrics/med_dialog_metrics.py +34 -0
  128. helm/benchmark/metrics/medalign_metrics.py +34 -0
  129. helm/benchmark/metrics/medcalc_bench_metrics.py +124 -0
  130. helm/benchmark/metrics/medec_metrics.py +101 -0
  131. helm/benchmark/metrics/medi_qa_metrics.py +34 -0
  132. helm/benchmark/metrics/medication_qa_metrics.py +15 -4
  133. helm/benchmark/metrics/mental_health_metrics.py +34 -0
  134. helm/benchmark/metrics/metric.py +3 -3
  135. helm/benchmark/metrics/mimic_rrs_metrics.py +34 -0
  136. helm/benchmark/metrics/mimiciv_billing_code_metrics.py +96 -0
  137. helm/benchmark/metrics/mtsamples_procedures_metrics.py +34 -0
  138. helm/benchmark/metrics/mtsamples_replicate_metrics.py +34 -0
  139. helm/benchmark/metrics/nltk_helper.py +32 -0
  140. helm/benchmark/metrics/numeracy_metrics.py +4 -4
  141. helm/benchmark/metrics/omni_math_metrics.py +32 -0
  142. helm/benchmark/metrics/output_processing_metric.py +60 -0
  143. helm/benchmark/metrics/output_processors.py +15 -0
  144. helm/benchmark/metrics/paraphrase_generation_metrics.py +2 -2
  145. helm/benchmark/metrics/ranking_metrics.py +3 -3
  146. helm/benchmark/metrics/reference_metric.py +3 -3
  147. helm/benchmark/metrics/{bhasa_metrics.py → seahelm_metrics.py} +3 -3
  148. helm/benchmark/metrics/seahelm_metrics_specs.py +10 -0
  149. helm/benchmark/metrics/spider_metrics.py +7 -0
  150. helm/benchmark/metrics/starr_patient_instructions_metrics.py +34 -0
  151. helm/benchmark/metrics/statistic.py +1 -1
  152. helm/benchmark/metrics/summac/model_summac.py +1 -1
  153. helm/benchmark/metrics/summarization_critique_metrics.py +4 -4
  154. helm/benchmark/metrics/summarization_metrics.py +19 -9
  155. helm/benchmark/metrics/test_bias_metrics.py +5 -1
  156. helm/benchmark/metrics/test_classification_metrics.py +140 -68
  157. helm/benchmark/metrics/test_evaluate_reference_metrics.py +15 -0
  158. helm/benchmark/metrics/test_metric.py +1 -1
  159. helm/benchmark/metrics/test_statistic.py +2 -2
  160. helm/benchmark/metrics/tokens/ai21_token_cost_estimator.py +1 -1
  161. helm/benchmark/metrics/tokens/auto_token_cost_estimator.py +6 -6
  162. helm/benchmark/metrics/tokens/cohere_token_cost_estimator.py +1 -1
  163. helm/benchmark/metrics/tokens/free_token_cost_estimator.py +1 -1
  164. helm/benchmark/metrics/tokens/gooseai_token_cost_estimator.py +1 -1
  165. helm/benchmark/metrics/tokens/openai_token_cost_estimator.py +1 -1
  166. helm/benchmark/metrics/tokens/test_ai21_token_cost_estimator.py +1 -1
  167. helm/benchmark/metrics/tokens/test_openai_token_cost_estimator.py +1 -1
  168. helm/benchmark/metrics/toxicity_metrics.py +4 -4
  169. helm/benchmark/metrics/unitxt_metrics.py +4 -1
  170. helm/benchmark/metrics/vision_language/image_metrics.py +1 -1
  171. helm/benchmark/metrics/wildbench_metrics.py +34 -0
  172. helm/benchmark/model_metadata_registry.py +16 -0
  173. helm/benchmark/presentation/summarize.py +23 -10
  174. helm/benchmark/presentation/torr_robustness_summarizer.py +178 -0
  175. helm/benchmark/reeval_run.py +203 -0
  176. helm/benchmark/reeval_runner.py +355 -0
  177. helm/benchmark/run.py +8 -17
  178. helm/benchmark/run_expander.py +78 -8
  179. helm/benchmark/run_spec_factory.py +12 -0
  180. helm/benchmark/run_specs/air_bench_run_specs.py +21 -3
  181. helm/benchmark/run_specs/audio_run_specs.py +613 -0
  182. helm/benchmark/run_specs/call_center_run_specs.py +49 -0
  183. helm/benchmark/run_specs/capabilities_run_specs.py +308 -0
  184. helm/benchmark/run_specs/classic_run_specs.py +1 -69
  185. helm/benchmark/run_specs/enem_challenge_specs.py +31 -0
  186. helm/benchmark/run_specs/enterprise_run_specs.py +260 -0
  187. helm/benchmark/run_specs/experimental_run_specs.py +112 -3
  188. helm/benchmark/run_specs/imdb_ptbr_run_specs.py +30 -0
  189. helm/benchmark/run_specs/lite_run_specs.py +2 -2
  190. helm/benchmark/run_specs/long_context_run_specs.py +89 -0
  191. helm/benchmark/run_specs/medhelm_run_specs.py +1155 -0
  192. helm/benchmark/run_specs/mmlu_clinical_afr_run_specs.py +49 -0
  193. helm/benchmark/run_specs/oab_exams_specs.py +32 -0
  194. helm/benchmark/run_specs/safety_run_specs.py +37 -0
  195. helm/benchmark/run_specs/{bhasa_run_specs.py → seahelm_run_specs.py} +44 -44
  196. helm/benchmark/run_specs/sql_run_specs.py +54 -0
  197. helm/benchmark/run_specs/tweetsentbr_run_specs.py +32 -0
  198. helm/benchmark/run_specs/unitxt_run_specs.py +14 -5
  199. helm/benchmark/run_specs/vlm_run_specs.py +75 -2
  200. helm/benchmark/run_specs/winogrande_afr_run_specs.py +47 -0
  201. helm/benchmark/scenarios/aci_bench_scenario.py +120 -0
  202. helm/benchmark/scenarios/air_bench_scenario.py +6 -1
  203. helm/benchmark/scenarios/anthropic_hh_rlhf_scenario.py +5 -3
  204. helm/benchmark/scenarios/anthropic_red_team_scenario.py +1 -1
  205. helm/benchmark/scenarios/audio_language/__init__.py +0 -0
  206. helm/benchmark/scenarios/audio_language/air_bench_chat_scenario.py +128 -0
  207. helm/benchmark/scenarios/audio_language/air_bench_foundation_scenario.py +154 -0
  208. helm/benchmark/scenarios/audio_language/ami_scenario.py +96 -0
  209. helm/benchmark/scenarios/audio_language/audio_mnist_scenario.py +62 -0
  210. helm/benchmark/scenarios/audio_language/audio_pairs_scenario.py +62 -0
  211. helm/benchmark/scenarios/audio_language/audiocaps_scenario.py +59 -0
  212. helm/benchmark/scenarios/audio_language/casual_conversations2_scenario.py +152 -0
  213. helm/benchmark/scenarios/audio_language/common_voice_15_scenario.py +99 -0
  214. helm/benchmark/scenarios/audio_language/covost2_scenario.py +163 -0
  215. helm/benchmark/scenarios/audio_language/fleurs_fairness_scenario.py +83 -0
  216. helm/benchmark/scenarios/audio_language/fleurs_scenario.py +312 -0
  217. helm/benchmark/scenarios/audio_language/iemocap_audio_scenario.py +83 -0
  218. helm/benchmark/scenarios/audio_language/librispeech_fairness_scenario.py +96 -0
  219. helm/benchmark/scenarios/audio_language/librispeech_scenario.py +80 -0
  220. helm/benchmark/scenarios/audio_language/meld_audio_scenario.py +113 -0
  221. helm/benchmark/scenarios/audio_language/multilingual_librispeech_scenario.py +80 -0
  222. helm/benchmark/scenarios/audio_language/mustard_scenario.py +142 -0
  223. helm/benchmark/scenarios/audio_language/mutox_scenario.py +254 -0
  224. helm/benchmark/scenarios/audio_language/parade_scenario.py +97 -0
  225. helm/benchmark/scenarios/audio_language/speech_robust_bench_scenario.py +124 -0
  226. helm/benchmark/scenarios/audio_language/vocal_sound_scenario.py +69 -0
  227. helm/benchmark/scenarios/audio_language/voice_jailbreak_attacks_scenario.py +87 -0
  228. helm/benchmark/scenarios/audio_language/voxceleb2_scenario.py +106 -0
  229. helm/benchmark/scenarios/autobencher_capabilities_scenario.py +68 -0
  230. helm/benchmark/scenarios/autobencher_safety_scenario.py +51 -0
  231. helm/benchmark/scenarios/babi_qa_scenario.py +1 -1
  232. helm/benchmark/scenarios/banking77_scenario.py +6 -1
  233. helm/benchmark/scenarios/bbq_scenario.py +1 -1
  234. helm/benchmark/scenarios/big_bench_scenario.py +11 -1
  235. helm/benchmark/scenarios/bigcodebench_scenario.py +58 -0
  236. helm/benchmark/scenarios/bird_sql_scenario.py +94 -0
  237. helm/benchmark/scenarios/bird_sql_scenario_helper.py +118 -0
  238. helm/benchmark/scenarios/blimp_scenario.py +1 -1
  239. helm/benchmark/scenarios/bold_scenario.py +1 -1
  240. helm/benchmark/scenarios/boolq_scenario.py +1 -1
  241. helm/benchmark/scenarios/casehold_scenario.py +79 -0
  242. helm/benchmark/scenarios/chw_care_plan_scenario.py +105 -0
  243. helm/benchmark/scenarios/civil_comments_scenario.py +1 -1
  244. helm/benchmark/scenarios/clear_scenario.py +153 -0
  245. helm/benchmark/scenarios/cleva_scenario.py +2 -2
  246. helm/benchmark/scenarios/code_scenario.py +17 -4
  247. helm/benchmark/scenarios/commonsense_scenario.py +1 -1
  248. helm/benchmark/scenarios/conv_fin_qa_calc_scenario.py +97 -0
  249. helm/benchmark/scenarios/copyright_scenario.py +1 -1
  250. helm/benchmark/scenarios/covid_dialog_scenario.py +10 -1
  251. helm/benchmark/scenarios/cti_to_mitre_scenario.py +240 -0
  252. helm/benchmark/scenarios/custom_mcqa_scenario.py +1 -1
  253. helm/benchmark/scenarios/czech_bank_qa_scenario.py +130 -0
  254. helm/benchmark/scenarios/decodingtrust_adv_demonstration_scenario.py +1 -1
  255. helm/benchmark/scenarios/decodingtrust_privacy_scenario.py +1 -1
  256. helm/benchmark/scenarios/decodingtrust_stereotype_bias_scenario.py +1 -1
  257. helm/benchmark/scenarios/decodingtrust_toxicity_prompts_scenario.py +1 -1
  258. helm/benchmark/scenarios/dialogue_scenarios.py +13 -2
  259. helm/benchmark/scenarios/dischargeme_scenario.py +157 -0
  260. helm/benchmark/scenarios/disinformation_scenario.py +10 -1
  261. helm/benchmark/scenarios/dyck_language_scenario.py +10 -1
  262. helm/benchmark/scenarios/echr_judgment_classification_scenario.py +113 -0
  263. helm/benchmark/scenarios/ehr_sql_scenario.py +131 -0
  264. helm/benchmark/scenarios/ehrshot_scenario.py +1546 -0
  265. helm/benchmark/scenarios/enem_challenge_scenario.py +58 -0
  266. helm/benchmark/scenarios/entity_data_imputation_scenario.py +11 -1
  267. helm/benchmark/scenarios/entity_matching_scenario.py +12 -2
  268. helm/benchmark/scenarios/financial_phrasebank_scenario.py +94 -0
  269. helm/benchmark/scenarios/gold_commodity_news_scenario.py +124 -0
  270. helm/benchmark/scenarios/gpqa_scenario.py +80 -0
  271. helm/benchmark/scenarios/grammar_scenario.py +2 -2
  272. helm/benchmark/scenarios/gsm_scenario.py +10 -1
  273. helm/benchmark/scenarios/harm_bench_gcg_transfer_scenario.py +50 -0
  274. helm/benchmark/scenarios/harm_bench_scenario.py +1 -1
  275. helm/benchmark/scenarios/headqa_scenario.py +131 -0
  276. helm/benchmark/scenarios/helpdesk_call_summarization_scenario.py +37 -0
  277. helm/benchmark/scenarios/ice_scenario.py +8 -4
  278. helm/benchmark/scenarios/ifeval_scenario.py +53 -0
  279. helm/benchmark/scenarios/imdb_ptbr_scenario.py +60 -0
  280. helm/benchmark/scenarios/imdb_scenario.py +11 -2
  281. helm/benchmark/scenarios/infinite_bench_sum_scenario.py +82 -0
  282. helm/benchmark/scenarios/interactive_qa_mmlu_scenario.py +2 -2
  283. helm/benchmark/scenarios/koala_scenario.py +1 -1
  284. helm/benchmark/scenarios/legal_contract_summarization_scenario.py +129 -0
  285. helm/benchmark/scenarios/legal_opinion_sentiment_classification_scenario.py +77 -0
  286. helm/benchmark/scenarios/legal_summarization_scenario.py +11 -1
  287. helm/benchmark/scenarios/legal_support_scenario.py +11 -1
  288. helm/benchmark/scenarios/legalbench_scenario.py +22 -3
  289. helm/benchmark/scenarios/lex_glue_scenario.py +12 -2
  290. helm/benchmark/scenarios/lextreme_scenario.py +11 -1
  291. helm/benchmark/scenarios/live_qa_scenario.py +1 -1
  292. helm/benchmark/scenarios/lm_entry_scenario.py +1 -1
  293. helm/benchmark/scenarios/lsat_qa_scenario.py +1 -1
  294. helm/benchmark/scenarios/math_scenario.py +9 -1
  295. helm/benchmark/scenarios/me_q_sum_scenario.py +10 -1
  296. helm/benchmark/scenarios/med_dialog_scenario.py +22 -24
  297. helm/benchmark/scenarios/med_mcqa_scenario.py +10 -1
  298. helm/benchmark/scenarios/med_paragraph_simplification_scenario.py +10 -1
  299. helm/benchmark/scenarios/med_qa_scenario.py +10 -1
  300. helm/benchmark/scenarios/medalign_scenario.py +88 -0
  301. helm/benchmark/scenarios/medalign_scenario_helper.py +429 -0
  302. helm/benchmark/scenarios/medbullets_scenario.py +140 -0
  303. helm/benchmark/scenarios/medcalc_bench_scenario.py +125 -0
  304. helm/benchmark/scenarios/medec_scenario.py +120 -0
  305. helm/benchmark/scenarios/medhallu_scenario.py +66 -0
  306. helm/benchmark/scenarios/medi_qa_scenario.py +105 -0
  307. helm/benchmark/scenarios/medication_qa_scenario.py +2 -2
  308. helm/benchmark/scenarios/mental_health_scenario.py +112 -0
  309. helm/benchmark/scenarios/mimic_bhc_scenario.py +98 -0
  310. helm/benchmark/scenarios/mimic_rrs_scenario.py +89 -0
  311. helm/benchmark/scenarios/mimiciv_billing_code_scenario.py +71 -0
  312. helm/benchmark/scenarios/mmlu_clinical_afr_scenario.py +74 -0
  313. helm/benchmark/scenarios/mmlu_pro_scenario.py +95 -0
  314. helm/benchmark/scenarios/mmlu_scenario.py +11 -1
  315. helm/benchmark/scenarios/msmarco_scenario.py +1 -1
  316. helm/benchmark/scenarios/mtsamples_procedures_scenario.py +141 -0
  317. helm/benchmark/scenarios/mtsamples_replicate_scenario.py +141 -0
  318. helm/benchmark/scenarios/n2c2_ct_matching_scenario.py +271 -0
  319. helm/benchmark/scenarios/narrativeqa_scenario.py +1 -1
  320. helm/benchmark/scenarios/natural_qa_scenario.py +1 -1
  321. helm/benchmark/scenarios/newsqa_scenario.py +1 -1
  322. helm/benchmark/scenarios/numeracy_scenario.py +10 -1
  323. helm/benchmark/scenarios/oab_exams_scenario.py +57 -0
  324. helm/benchmark/scenarios/omni_math_scenario.py +53 -0
  325. helm/benchmark/scenarios/open_assistant_scenario.py +11 -2
  326. helm/benchmark/scenarios/opinions_qa_scenario.py +1 -1
  327. helm/benchmark/scenarios/pubmed_qa_scenario.py +54 -43
  328. helm/benchmark/scenarios/quac_scenario.py +10 -1
  329. helm/benchmark/scenarios/race_based_med_scenario.py +142 -0
  330. helm/benchmark/scenarios/raft_scenario.py +17 -2
  331. helm/benchmark/scenarios/real_toxicity_prompts_scenario.py +1 -1
  332. helm/benchmark/scenarios/ruler_qa_scenario_helper.py +171 -0
  333. helm/benchmark/scenarios/ruler_qa_scenarios.py +88 -0
  334. helm/benchmark/scenarios/scenario.py +9 -1
  335. helm/benchmark/scenarios/{bhasa_scenario.py → seahelm_scenario.py} +7 -2
  336. helm/benchmark/scenarios/self_instruct_scenario.py +1 -1
  337. helm/benchmark/scenarios/shc_bmt_scenario.py +69 -0
  338. helm/benchmark/scenarios/shc_cdi_scenario.py +70 -0
  339. helm/benchmark/scenarios/shc_conf_scenario.py +70 -0
  340. helm/benchmark/scenarios/shc_ent_scenario.py +72 -0
  341. helm/benchmark/scenarios/shc_gip_scenario.py +66 -0
  342. helm/benchmark/scenarios/shc_ptbm_scenario.py +76 -0
  343. helm/benchmark/scenarios/shc_sei_scenario.py +89 -0
  344. helm/benchmark/scenarios/shc_sequoia_scenario.py +69 -0
  345. helm/benchmark/scenarios/simple_safety_tests_scenario.py +1 -1
  346. helm/benchmark/scenarios/spider_scenario.py +91 -0
  347. helm/benchmark/scenarios/starr_patient_instructions_scenario.py +90 -0
  348. helm/benchmark/scenarios/summarization_scenario.py +11 -1
  349. helm/benchmark/scenarios/sumosum_scenario.py +157 -0
  350. helm/benchmark/scenarios/synthetic_efficiency_scenario.py +1 -1
  351. helm/benchmark/scenarios/synthetic_reasoning_natural_scenario.py +11 -1
  352. helm/benchmark/scenarios/synthetic_reasoning_scenario.py +11 -1
  353. helm/benchmark/scenarios/test_bigcodebench_scenario.py +26 -0
  354. helm/benchmark/scenarios/test_czech_bank_qa_scenario.py +18 -0
  355. helm/benchmark/scenarios/test_enem_challenge_scenario.py +53 -0
  356. helm/benchmark/scenarios/test_ewok_scenario.py +6 -2
  357. helm/benchmark/scenarios/test_gold_commodity_news_scenario.py +18 -0
  358. helm/benchmark/scenarios/test_gpqa_scenario.py +44 -0
  359. helm/benchmark/scenarios/test_ifeval_scenario.py +36 -0
  360. helm/benchmark/scenarios/test_imdb_ptbr_scenario.py +27 -0
  361. helm/benchmark/scenarios/test_infinite_bench_sum_scenario.py +46 -0
  362. helm/benchmark/scenarios/test_math_scenario.py +1 -0
  363. helm/benchmark/scenarios/test_mmlu_clinical_afr_scenario.py +21 -0
  364. helm/benchmark/scenarios/test_mmlu_pro_scenario.py +53 -0
  365. helm/benchmark/scenarios/test_oab_exams_scenario.py +51 -0
  366. helm/benchmark/scenarios/test_omni_math_scenario.py +27 -0
  367. helm/benchmark/scenarios/test_tweetsentbr_scenario.py +24 -0
  368. helm/benchmark/scenarios/test_wildbench_scenario.py +15 -0
  369. helm/benchmark/scenarios/test_winogrande_afr_scenario.py +19 -0
  370. helm/benchmark/scenarios/thai_exam_scenario.py +10 -1
  371. helm/benchmark/scenarios/the_pile_scenario.py +1 -1
  372. helm/benchmark/scenarios/truthful_qa_scenario.py +10 -1
  373. helm/benchmark/scenarios/tweetsentbr_scenario.py +66 -0
  374. helm/benchmark/scenarios/twitter_aae_scenario.py +1 -1
  375. helm/benchmark/scenarios/unitxt_scenario.py +8 -2
  376. helm/benchmark/scenarios/verifiability_judgment_scenario.py +1 -1
  377. helm/benchmark/scenarios/vicuna_scenario.py +1 -1
  378. helm/benchmark/scenarios/vision_language/blink_scenario.py +140 -0
  379. helm/benchmark/scenarios/vision_language/mm_star_scenario.py +95 -0
  380. helm/benchmark/scenarios/vision_language/vqa_rad_scenario.py +88 -0
  381. helm/benchmark/scenarios/wikifact_scenario.py +11 -1
  382. helm/benchmark/scenarios/wikitext_103_scenario.py +1 -1
  383. helm/benchmark/scenarios/wildbench_scenario.py +83 -0
  384. helm/benchmark/scenarios/winogrande_afr_scenario.py +78 -0
  385. helm/benchmark/scenarios/wmt_14_scenario.py +14 -2
  386. helm/benchmark/scenarios/xstest_scenario.py +1 -1
  387. helm/benchmark/server.py +11 -0
  388. helm/benchmark/slurm_runner.py +1 -1
  389. helm/benchmark/static/schema_audio.yaml +752 -0
  390. helm/benchmark/static/schema_autobencher.yaml +150 -0
  391. helm/benchmark/static/schema_call_center.yaml +97 -60
  392. helm/benchmark/static/schema_capabilities.yaml +254 -0
  393. helm/benchmark/static/schema_czech_bank.yaml +148 -0
  394. helm/benchmark/static/schema_enem_challenge.yaml +146 -0
  395. helm/benchmark/static/schema_enterprise.yaml +298 -0
  396. helm/benchmark/static/schema_finance.yaml +14 -12
  397. helm/benchmark/static/schema_heim.yaml +1389 -0
  398. helm/benchmark/static/{schema_medical.yaml → schema_long_context.yaml} +67 -82
  399. helm/benchmark/static/schema_medhelm.yaml +1081 -0
  400. helm/benchmark/static/schema_mmlu_winogrande_afr.yaml +1045 -0
  401. helm/benchmark/static/schema_safety.yaml +18 -1
  402. helm/benchmark/static/{schema_bhasa.yaml → schema_seahelm.yaml} +30 -16
  403. helm/benchmark/static/schema_social_audio.yaml +224 -0
  404. helm/benchmark/static/schema_sql.yaml +171 -0
  405. helm/benchmark/static/{schema_tables.yaml → schema_torr.yaml} +169 -36
  406. helm/benchmark/static/schema_tweetsentbr.yaml +146 -0
  407. helm/benchmark/static/schema_vhelm.yaml +109 -36
  408. helm/benchmark/static_build/assets/helm-safety-2907a7b6.png +0 -0
  409. helm/benchmark/static_build/assets/index-262903c1.js +10 -0
  410. helm/benchmark/static_build/assets/index-42060d71.css +1 -0
  411. helm/benchmark/static_build/assets/medhelm-overview-3ddfcd65.png +0 -0
  412. helm/benchmark/static_build/assets/{react-d4a0b69b.js → react-f82877fd.js} +1 -1
  413. helm/benchmark/static_build/assets/{recharts-6d337683.js → recharts-4037aff0.js} +1 -1
  414. helm/benchmark/static_build/assets/{tremor-54a99cc4.js → tremor-9cefc3c5.js} +1 -1
  415. helm/benchmark/static_build/config.js +1 -1
  416. helm/benchmark/static_build/index.html +5 -5
  417. helm/benchmark/window_services/default_window_service.py +1 -1
  418. helm/benchmark/window_services/encoder_decoder_window_service.py +1 -1
  419. helm/benchmark/window_services/ice_window_service.py +1 -1
  420. helm/benchmark/window_services/image_generation/lexica_search_window_service.py +1 -1
  421. helm/benchmark/window_services/image_generation/openai_dalle_window_service.py +1 -1
  422. helm/benchmark/window_services/local_window_service.py +2 -2
  423. helm/benchmark/window_services/test_anthropic_window_service.py +3 -3
  424. helm/benchmark/window_services/test_bloom_window_service.py +3 -3
  425. helm/benchmark/window_services/test_gpt2_window_service.py +7 -2
  426. helm/benchmark/window_services/test_gpt4_window_service.py +8 -3
  427. helm/benchmark/window_services/test_gptj_window_service.py +8 -3
  428. helm/benchmark/window_services/test_gptneox_window_service.py +3 -3
  429. helm/benchmark/window_services/test_openai_window_service.py +8 -3
  430. helm/benchmark/window_services/test_opt_window_service.py +3 -3
  431. helm/benchmark/window_services/test_palmyra_window_service.py +3 -3
  432. helm/benchmark/window_services/test_t0pp_window_service.py +3 -3
  433. helm/benchmark/window_services/test_t511b_window_service.py +3 -3
  434. helm/benchmark/window_services/test_ul2_window_service.py +3 -3
  435. helm/benchmark/window_services/test_utils.py +1 -1
  436. helm/benchmark/window_services/test_yalm_window_service.py +3 -3
  437. helm/benchmark/window_services/yalm_window_service.py +1 -1
  438. helm/clients/ai21_client.py +3 -3
  439. helm/clients/aleph_alpha_client.py +1 -1
  440. helm/clients/audio_language/__init__.py +0 -0
  441. helm/clients/audio_language/diva_llama_client.py +118 -0
  442. helm/clients/audio_language/llama_omni_client.py +198 -0
  443. helm/clients/audio_language/qwen2_audiolm_client.py +188 -0
  444. helm/clients/audio_language/qwen_audiolm_client.py +150 -0
  445. helm/clients/auto_client.py +4 -2
  446. helm/clients/azure_openai_client.py +55 -0
  447. helm/clients/bedrock_client.py +201 -7
  448. helm/clients/bedrock_utils.py +33 -0
  449. helm/clients/clip_scorers/clip_scorer.py +1 -1
  450. helm/clients/clip_scorers/multilingual_clip_scorer.py +1 -1
  451. helm/clients/cohere_client.py +3 -3
  452. helm/clients/google_client.py +1 -1
  453. helm/clients/http_model_client.py +1 -1
  454. helm/clients/huggingface_client.py +10 -18
  455. helm/clients/ibm_client.py +267 -0
  456. helm/clients/image_generation/adobe_vision_client.py +1 -1
  457. helm/clients/image_generation/aleph_alpha_image_generation_client.py +1 -1
  458. helm/clients/image_generation/cogview2/sr_pipeline/__init__.py +3 -3
  459. helm/clients/image_generation/cogview2/sr_pipeline/direct_sr.py +5 -2
  460. helm/clients/image_generation/cogview2/sr_pipeline/iterative_sr.py +5 -2
  461. helm/clients/image_generation/cogview2/sr_pipeline/sr_group.py +2 -2
  462. helm/clients/image_generation/cogview2_client.py +1 -1
  463. helm/clients/image_generation/dalle2_client.py +1 -1
  464. helm/clients/image_generation/dalle3_client.py +2 -2
  465. helm/clients/image_generation/dalle_mini/__init__.py +1 -1
  466. helm/clients/image_generation/dalle_mini/data.py +1 -1
  467. helm/clients/image_generation/dalle_mini/model/__init__.py +5 -5
  468. helm/clients/image_generation/dalle_mini/model/configuration.py +1 -1
  469. helm/clients/image_generation/dalle_mini/model/modeling.py +2 -2
  470. helm/clients/image_generation/dalle_mini/model/processor.py +4 -4
  471. helm/clients/image_generation/dalle_mini/model/tokenizer.py +1 -1
  472. helm/clients/image_generation/dalle_mini/vqgan_jax/__init__.py +1 -1
  473. helm/clients/image_generation/dalle_mini/vqgan_jax/convert_pt_model_to_jax.py +2 -2
  474. helm/clients/image_generation/dalle_mini/vqgan_jax/modeling_flax_vqgan.py +1 -1
  475. helm/clients/image_generation/dalle_mini_client.py +1 -1
  476. helm/clients/image_generation/deep_floyd_client.py +1 -1
  477. helm/clients/image_generation/huggingface_diffusers_client.py +1 -1
  478. helm/clients/image_generation/lexica_client.py +1 -1
  479. helm/clients/image_generation/mindalle/models/__init__.py +6 -6
  480. helm/clients/image_generation/mindalle/models/stage1/vqgan.py +1 -1
  481. helm/clients/image_generation/mindalle/models/stage2/transformer.py +1 -1
  482. helm/clients/image_generation/mindalle/utils/__init__.py +3 -3
  483. helm/clients/image_generation/mindalle_client.py +1 -1
  484. helm/clients/image_generation/together_image_generation_client.py +1 -1
  485. helm/clients/lit_gpt_client.py +2 -2
  486. helm/clients/mistral_client.py +62 -18
  487. helm/clients/nvidia_nim_client.py +0 -3
  488. helm/clients/openai_client.py +241 -22
  489. helm/clients/palmyra_client.py +1 -4
  490. helm/clients/reka_client.py +1 -1
  491. helm/clients/stanfordhealthcare_azure_openai_client.py +58 -0
  492. helm/clients/stanfordhealthcare_claude_client.py +31 -0
  493. helm/clients/stanfordhealthcare_google_client.py +43 -0
  494. helm/clients/stanfordhealthcare_http_model_client.py +93 -0
  495. helm/clients/stanfordhealthcare_openai_client.py +62 -0
  496. helm/clients/stanfordhealthcare_shc_openai_client.py +42 -0
  497. helm/clients/test_client.py +1 -1
  498. helm/clients/test_together_client.py +6 -1
  499. helm/clients/together_client.py +47 -7
  500. helm/clients/upstage_client.py +23 -0
  501. helm/clients/vertexai_client.py +39 -13
  502. helm/clients/vision_language/open_flamingo/__init__.py +2 -2
  503. helm/clients/vision_language/open_flamingo/src/factory.py +3 -3
  504. helm/clients/vision_language/open_flamingo/src/flamingo.py +2 -2
  505. helm/clients/vision_language/open_flamingo/src/flamingo_lm.py +2 -2
  506. helm/clients/vision_language/qwen2_vlm_client.py +175 -0
  507. helm/clients/vllm_client.py +4 -6
  508. helm/clients/yi_client.py +0 -3
  509. helm/common/audio_utils.py +111 -0
  510. helm/common/file_caches/local_file_cache.py +1 -1
  511. helm/common/file_caches/test_local_file_cache.py +1 -1
  512. helm/common/images_utils.py +2 -2
  513. helm/common/media_object.py +2 -2
  514. helm/common/multimodal_request_utils.py +26 -0
  515. helm/common/reeval_parameters.py +12 -0
  516. helm/common/request.py +6 -2
  517. helm/common/response_format.py +18 -0
  518. helm/common/test_media_object.py +1 -1
  519. helm/config/model_deployments.yaml +1112 -19
  520. helm/config/model_metadata.yaml +985 -44
  521. helm/config/tokenizer_configs.yaml +379 -3
  522. helm/proxy/cli.py +2 -2
  523. helm/proxy/example_queries.py +1 -1
  524. helm/proxy/server.py +11 -4
  525. helm/proxy/services/remote_service.py +1 -1
  526. helm/proxy/services/server_service.py +1 -1
  527. helm/proxy/services/test_remote_service.py +2 -2
  528. helm/proxy/services/test_service.py +1 -1
  529. helm/proxy/static/general.js +122 -0
  530. helm/proxy/static/help.html +99 -0
  531. helm/proxy/static/index.css +57 -0
  532. helm/proxy/static/index.html +40 -0
  533. helm/proxy/static/index.js +456 -0
  534. helm/proxy/static/info-icon.png +0 -0
  535. helm/proxy/test_retry.py +1 -1
  536. helm/proxy/token_counters/auto_token_counter.py +1 -1
  537. helm/tokenizers/aleph_alpha_tokenizer.py +1 -1
  538. helm/tokenizers/caching_tokenizer.py +2 -30
  539. helm/tokenizers/http_model_tokenizer.py +1 -1
  540. helm/tokenizers/huggingface_tokenizer.py +2 -2
  541. helm/tokenizers/lit_gpt_tokenizer.py +1 -1
  542. helm/tokenizers/test_anthropic_tokenizer.py +6 -2
  543. helm/tokenizers/test_huggingface_tokenizer.py +1 -1
  544. helm/tokenizers/test_yalm_tokenizer.py +1 -1
  545. helm/tokenizers/tiktoken_tokenizer.py +1 -1
  546. helm/tokenizers/tokenizer.py +3 -1
  547. helm/tokenizers/yalm_tokenizer.py +3 -3
  548. helm/tokenizers/yalm_tokenizer_data/test_yalm_tokenizer.py +1 -1
  549. crfm_helm-0.5.4.dist-info/METADATA +0 -350
  550. crfm_helm-0.5.4.dist-info/RECORD +0 -697
  551. helm/benchmark/metrics/bhasa_metrics_specs.py +0 -10
  552. helm/benchmark/static_build/assets/01-694cb9b7.png +0 -0
  553. helm/benchmark/static_build/assets/accenture-6f97eeda.png +0 -0
  554. helm/benchmark/static_build/assets/ai21-0eb91ec3.png +0 -0
  555. helm/benchmark/static_build/assets/aisingapore-6dfc9acf.png +0 -0
  556. helm/benchmark/static_build/assets/aleph-alpha-7ce10034.png +0 -0
  557. helm/benchmark/static_build/assets/anthropic-70d8bc39.png +0 -0
  558. helm/benchmark/static_build/assets/bigscience-7f0400c0.png +0 -0
  559. helm/benchmark/static_build/assets/cohere-3550c6cb.png +0 -0
  560. helm/benchmark/static_build/assets/cresta-9e22b983.png +0 -0
  561. helm/benchmark/static_build/assets/cuhk-8c5631e9.png +0 -0
  562. helm/benchmark/static_build/assets/eleutherai-b9451114.png +0 -0
  563. helm/benchmark/static_build/assets/google-06d997ad.png +0 -0
  564. helm/benchmark/static_build/assets/index-05c76bb1.css +0 -1
  565. helm/benchmark/static_build/assets/index-3ee38b3d.js +0 -10
  566. helm/benchmark/static_build/assets/meta-5580e9f1.png +0 -0
  567. helm/benchmark/static_build/assets/microsoft-f5ee5016.png +0 -0
  568. helm/benchmark/static_build/assets/mistral-18e1be23.png +0 -0
  569. helm/benchmark/static_build/assets/nvidia-86fa75c1.png +0 -0
  570. helm/benchmark/static_build/assets/openai-3f8653e4.png +0 -0
  571. helm/benchmark/static_build/assets/scb10x-204bd786.png +0 -0
  572. helm/benchmark/static_build/assets/tii-24de195c.png +0 -0
  573. helm/benchmark/static_build/assets/together-a665a35b.png +0 -0
  574. helm/benchmark/static_build/assets/tsinghua-keg-97d4b395.png +0 -0
  575. helm/benchmark/static_build/assets/wellsfargo-a86a6c4a.png +0 -0
  576. helm/benchmark/static_build/assets/yandex-38e09d70.png +0 -0
  577. helm/tokenizers/anthropic_tokenizer.py +0 -52
  578. {crfm_helm-0.5.4.dist-info → crfm_helm-0.5.5.dist-info}/entry_points.txt +0 -0
  579. {crfm_helm-0.5.4.dist-info → crfm_helm-0.5.5.dist-info/licenses}/LICENSE +0 -0
  580. {crfm_helm-0.5.4.dist-info → crfm_helm-0.5.5.dist-info}/top_level.txt +0 -0
helm/benchmark/augmentations/contrast_sets_perturbation.py
@@ -4,8 +4,8 @@ from random import Random
 from typing import List, Optional
 
 from helm.benchmark.scenarios.scenario import Instance, Reference, Input
-from .perturbation_description import PerturbationDescription
-from .perturbation import Perturbation
+from helm.benchmark.augmentations.perturbation_description import PerturbationDescription
+from helm.benchmark.augmentations.perturbation import Perturbation
 
 
 class ContrastSetsPerturbation(Perturbation):
helm/benchmark/augmentations/dialect_perturbation.py
@@ -7,8 +7,9 @@ from pathlib import Path
 from typing import Dict, Optional, List
 
 from helm.common.general import match_case, ensure_file_downloaded
-from .perturbation_description import PerturbationDescription
-from .perturbation import TextPerturbation
+from helm.benchmark.augmentations.perturbation_description import PerturbationDescription
+from helm.benchmark.augmentations.perturbation import TextPerturbation
+from helm.benchmark.runner import get_benchmark_output_path
 
 
 class DialectPerturbation(TextPerturbation):
@@ -20,7 +21,7 @@ class DialectPerturbation(TextPerturbation):
     should_perturb_references: bool = True
 
     """ Output path to store external files and folders """
-    OUTPUT_PATH = os.path.join("benchmark_output", "perturbations", name)
+    OUTPUT_PATH = os.path.join(get_benchmark_output_path(), "perturbations", name)
 
     """ Dictionary mapping dialects to one another """
     SAE = "SAE"
@@ -74,8 +75,6 @@ class DialectPerturbation(TextPerturbation):
         self.MAPPING_DICTS for the provided source and target classes
         will be used, if available.
         """
-        # TODO: Update path so it is not hard-coded to benchmark_output
-        # https://github.com/stanford-crfm/benchmarking/issues/493
         self.output_path: str = self.OUTPUT_PATH
         Path(self.output_path).mkdir(parents=True, exist_ok=True)
 
helm/benchmark/augmentations/extra_space_perturbation.py
@@ -1,8 +1,8 @@
 from dataclasses import dataclass
 from random import Random
 
-from .perturbation import TextPerturbation
-from .perturbation_description import PerturbationDescription
+from helm.benchmark.augmentations.perturbation import TextPerturbation
+from helm.benchmark.augmentations.perturbation_description import PerturbationDescription
 
 
 class ExtraSpacePerturbation(TextPerturbation):
helm/benchmark/augmentations/filler_words_perturbation.py
@@ -1,7 +1,7 @@
 from dataclasses import dataclass
 
-from .perturbation import TextPerturbation
-from .perturbation_description import PerturbationDescription
+from helm.benchmark.augmentations.perturbation import TextPerturbation
+from helm.benchmark.augmentations.perturbation_description import PerturbationDescription
 
 
 from random import Random
helm/benchmark/augmentations/gender_perturbation.py
@@ -5,8 +5,8 @@ import re
 from typing import Dict, List, Optional, Tuple
 
 from helm.common.general import match_case
-from .perturbation_description import PerturbationDescription
-from .perturbation import TextPerturbation
+from helm.benchmark.augmentations.perturbation_description import PerturbationDescription
+from helm.benchmark.augmentations.perturbation import TextPerturbation
 
 
 """ Gender term mappings """
helm/benchmark/augmentations/lowercase_perturbation.py
@@ -1,7 +1,7 @@
 from random import Random
 
-from .perturbation import TextPerturbation
-from .perturbation_description import PerturbationDescription
+from helm.benchmark.augmentations.perturbation import TextPerturbation
+from helm.benchmark.augmentations.perturbation_description import PerturbationDescription
 
 
 class LowerCasePerturbation(TextPerturbation):
helm/benchmark/augmentations/mild_mix_perturbation.py
@@ -1,11 +1,11 @@
 from random import Random
 
-from .perturbation_description import PerturbationDescription
-from .perturbation import TextPerturbation
-from .lowercase_perturbation import LowerCasePerturbation
-from .contraction_expansion_perturbation import ContractionPerturbation
-from .space_perturbation import SpacePerturbation
-from .misspelling_perturbation import MisspellingPerturbation
+from helm.benchmark.augmentations.perturbation_description import PerturbationDescription
+from helm.benchmark.augmentations.perturbation import TextPerturbation
+from helm.benchmark.augmentations.lowercase_perturbation import LowerCasePerturbation
+from helm.benchmark.augmentations.contraction_expansion_perturbation import ContractionPerturbation
+from helm.benchmark.augmentations.space_perturbation import SpacePerturbation
+from helm.benchmark.augmentations.misspelling_perturbation import MisspellingPerturbation
 
 
 class MildMixPerturbation(TextPerturbation):
helm/benchmark/augmentations/misspelling_perturbation.py
@@ -6,8 +6,8 @@ from random import Random
 from typing import Dict, List
 
 from helm.common.general import match_case
-from .perturbation import TextPerturbation
-from .perturbation_description import PerturbationDescription
+from helm.benchmark.augmentations.perturbation import TextPerturbation
+from helm.benchmark.augmentations.perturbation_description import PerturbationDescription
 
 
 # The implementation below is based on the following list of common misspellings:
helm/benchmark/augmentations/person_name_perturbation.py
@@ -9,8 +9,9 @@ from typing import Dict, List, Optional, Set
 
 from helm.benchmark.scenarios.scenario import Input, Instance, Reference, Output
 from helm.common.general import ensure_file_downloaded, ensure_directory_exists, match_case
-from .perturbation_description import PerturbationDescription
-from .perturbation import Perturbation
+from helm.benchmark.augmentations.perturbation_description import PerturbationDescription
+from helm.benchmark.augmentations.perturbation import Perturbation
+from helm.benchmark.runner import get_benchmark_output_path
 
 
 # Pull this out so serialization works for multiprocessing
@@ -35,7 +36,7 @@ class PersonNamePerturbation(Perturbation):
         "https://storage.googleapis.com/crfm-helm-public/source_datasets/"
         "augmentations/person_name_perturbation/person_names.txt"
     )
-    OUTPUT_PATH = os.path.join("benchmark_output", "perturbations", name)
+    OUTPUT_PATH = os.path.join(get_benchmark_output_path(), "perturbations", name)
 
     """ Name types """
     FIRST_NAME = "first_name"
@@ -153,8 +154,6 @@ class PersonNamePerturbation(Perturbation):
         find the gender association for a source_word, we randomly
         pick from one of the target names.
         """
-        # TODO: Update path so it is not hard-coded to benchmark_output
-        # https://github.com/stanford-crfm/benchmarking/issues/493
         self.output_path: str = self.OUTPUT_PATH
         Path(self.output_path).mkdir(parents=True, exist_ok=True)
 
helm/benchmark/augmentations/perturbation.py
@@ -4,7 +4,7 @@ from random import Random
 from typing import List, Optional
 
 
-from .perturbation_description import PerturbationDescription
+from helm.benchmark.augmentations.perturbation_description import PerturbationDescription
 from helm.benchmark.scenarios.scenario import Input, Instance, Reference, Output
 from helm.common.object_spec import ObjectSpec, create_object
 
helm/benchmark/augmentations/space_perturbation.py
@@ -2,8 +2,8 @@ from dataclasses import dataclass
 from random import Random
 import re
 
-from .perturbation import TextPerturbation
-from .perturbation_description import PerturbationDescription
+from helm.benchmark.augmentations.perturbation import TextPerturbation
+from helm.benchmark.augmentations.perturbation_description import PerturbationDescription
 
 
 class SpacePerturbation(TextPerturbation):
helm/benchmark/augmentations/suffix_perturbation.py
@@ -1,8 +1,8 @@
 from dataclasses import dataclass
 from random import Random
 
-from .perturbation import TextPerturbation
-from .perturbation_description import PerturbationDescription
+from helm.benchmark.augmentations.perturbation import TextPerturbation
+from helm.benchmark.augmentations.perturbation_description import PerturbationDescription
 
 
 class SuffixPerturbation(TextPerturbation):
helm/benchmark/augmentations/synonym_perturbation.py
@@ -10,8 +10,9 @@ from nltk.corpus import wordnet
 import spacy
 
 from helm.common.general import match_case, ensure_file_downloaded
-from .perturbation_description import PerturbationDescription
-from .perturbation import TextPerturbation
+from helm.benchmark.augmentations.perturbation_description import PerturbationDescription
+from helm.benchmark.augmentations.perturbation import TextPerturbation
+from helm.benchmark.runner import get_benchmark_output_path
 
 
 class SynonymPerturbation(TextPerturbation):
@@ -57,7 +58,7 @@ class SynonymPerturbation(TextPerturbation):
             spacy.cli.download("en_core_web_sm")  # type: ignore
         self.spacy_model = spacy.load("en_core_web_sm")
 
-        output_dir = os.path.join("benchmark_output", "perturbations", self.name)
+        output_dir = os.path.join(get_benchmark_output_path(), "perturbations", self.name)
         Path(output_dir).mkdir(parents=True, exist_ok=True)
         nltk.data.path.append(output_dir)
         try:
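The perturbation hunks above all make the same two changes: relative imports are rewritten as absolute helm.benchmark.augmentations imports, and output directories are derived from get_benchmark_output_path() instead of a hard-coded "benchmark_output" literal. A minimal sketch of the resulting path construction, assuming get_benchmark_output_path() returns the configured output root; the "dialect" perturbation name below is illustrative:

import os
from pathlib import Path

from helm.benchmark.runner import get_benchmark_output_path

# Resolve the perturbation's working directory under the configured
# benchmark output root rather than a hard-coded "benchmark_output".
output_path = os.path.join(get_benchmark_output_path(), "perturbations", "dialect")
Path(output_path).mkdir(parents=True, exist_ok=True)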
helm/benchmark/augmentations/test_perturbation.py
@@ -4,19 +4,22 @@ import unittest
 
 from helm.common.media_object import MediaObject, MultimediaObject
 from helm.benchmark.scenarios.scenario import Input, Instance, Output, Reference
-from .data_augmenter import DataAugmenter
-from .extra_space_perturbation import ExtraSpacePerturbation
-from .misspelling_perturbation import MisspellingPerturbation
-from .contraction_expansion_perturbation import ContractionPerturbation, ExpansionPerturbation
-from .typos_perturbation import TyposPerturbation
-from .filler_words_perturbation import FillerWordsPerturbation
-from .synonym_perturbation import SynonymPerturbation
-from .lowercase_perturbation import LowerCasePerturbation
-from .space_perturbation import SpacePerturbation
-from .dialect_perturbation import DialectPerturbation
-from .person_name_perturbation import PersonNamePerturbation
-from .gender_perturbation import GenderPerturbation
-from .suffix_perturbation import SuffixPerturbation
+from helm.benchmark.augmentations.data_augmenter import DataAugmenter
+from helm.benchmark.augmentations.extra_space_perturbation import ExtraSpacePerturbation
+from helm.benchmark.augmentations.misspelling_perturbation import MisspellingPerturbation
+from helm.benchmark.augmentations.contraction_expansion_perturbation import (
+    ContractionPerturbation,
+    ExpansionPerturbation,
+)
+from helm.benchmark.augmentations.typos_perturbation import TyposPerturbation
+from helm.benchmark.augmentations.filler_words_perturbation import FillerWordsPerturbation
+from helm.benchmark.augmentations.synonym_perturbation import SynonymPerturbation
+from helm.benchmark.augmentations.lowercase_perturbation import LowerCasePerturbation
+from helm.benchmark.augmentations.space_perturbation import SpacePerturbation
+from helm.benchmark.augmentations.dialect_perturbation import DialectPerturbation
+from helm.benchmark.augmentations.person_name_perturbation import PersonNamePerturbation
+from helm.benchmark.augmentations.gender_perturbation import GenderPerturbation
+from helm.benchmark.augmentations.suffix_perturbation import SuffixPerturbation
 
 
 def test_extra_space_perturbation():
helm/benchmark/augmentations/translate_perturbation.py
@@ -2,8 +2,8 @@ from dataclasses import dataclass
 from random import Random
 
 from helm.clients.google_translate_client import GoogleTranslateClient
-from .perturbation import TextPerturbation
-from .perturbation_description import PerturbationDescription
+from helm.benchmark.augmentations.perturbation import TextPerturbation
+from helm.benchmark.augmentations.perturbation_description import PerturbationDescription
 
 
 class TranslatePerturbation(TextPerturbation):
helm/benchmark/augmentations/typos_perturbation.py
@@ -1,8 +1,8 @@
 from dataclasses import dataclass
 from random import Random
 
-from .perturbation_description import PerturbationDescription
-from .perturbation import TextPerturbation
+from helm.benchmark.augmentations.perturbation_description import PerturbationDescription
+from helm.benchmark.augmentations.perturbation import TextPerturbation
 
 
 class TyposPerturbation(TextPerturbation):
helm/benchmark/data_preprocessor.py
@@ -1,7 +1,7 @@
 from typing import List
 from helm.common.hierarchical_logger import htrack
-from .augmentations.data_augmenter import create_data_augmenter, DataAugmenterSpec, DataAugmenter
-from .scenarios.scenario import Instance, TRAIN_SPLIT, EVAL_SPLITS
+from helm.benchmark.augmentations.data_augmenter import create_data_augmenter, DataAugmenterSpec, DataAugmenter
+from helm.benchmark.scenarios.scenario import Instance, TRAIN_SPLIT, EVAL_SPLITS
 
 
 class DataPreprocessor:
helm/benchmark/huggingface_registration.py
@@ -20,13 +20,10 @@ def register_huggingface_model(
     helm_model_name: str,
     pretrained_model_name_or_path: str,
     revision: Optional[str] = None,
-    openvino: Optional[bool] = False,
 ) -> None:
     object_spec_args: Dict[str, Union[str, bool]] = {"pretrained_model_name_or_path": pretrained_model_name_or_path}
     if revision:
         object_spec_args["revision"] = revision
-    if openvino:
-        object_spec_args["openvino"] = openvino
 
     # Auto-infer model properties from the tokenizer.
     create_tokenizer_args: Dict[str, str] = {"pretrained_model_name_or_path": pretrained_model_name_or_path}
@@ -79,7 +76,7 @@ def register_huggingface_model(
     register_tokenizer_config(tokenizer_config)
 
 
-def register_huggingface_hub_model_from_flag_value(raw_model_string: str, openvino=False) -> None:
+def register_huggingface_hub_model_from_flag_value(raw_model_string: str) -> None:
     raw_model_string_parts = raw_model_string.split("@")
     pretrained_model_name_or_path: str
     revision: Optional[str]
@@ -96,11 +93,10 @@ def register_huggingface_hub_model_from_flag_value(raw_model_string: str, openvi
         helm_model_name=raw_model_string,
         pretrained_model_name_or_path=pretrained_model_name_or_path,
         revision=revision,
-        openvino=openvino,
     )
 
 
-def register_huggingface_local_model_from_flag_value(path: str, openvino=False) -> None:
+def register_huggingface_local_model_from_flag_value(path: str) -> None:
     if not path:
         raise ValueError("Path to Hugging Face model must be non-empty")
     path_parts = os.path.split(path)
@@ -108,5 +104,4 @@ def register_huggingface_local_model_from_flag_value(path: str, openvino=False)
     register_huggingface_model(
         helm_model_name=helm_model_name,
         pretrained_model_name_or_path=path,
-        openvino=openvino,
    )
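These hunks drop the openvino option from the Hugging Face registration helpers. For reference, a hedged usage sketch of the updated entry points; the model strings below are illustrative, not taken from this diff:

from helm.benchmark.huggingface_registration import (
    register_huggingface_hub_model_from_flag_value,
    register_huggingface_local_model_from_flag_value,
)

# Hub model, optionally pinned to a revision via the "@" separator that
# register_huggingface_hub_model_from_flag_value splits on.
register_huggingface_hub_model_from_flag_value("bigscience/bloom-560m@main")

# Local checkout; the final path component becomes the HELM model name.
register_huggingface_local_model_from_flag_value("/models/my-local-model")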
helm/benchmark/metrics/aci_bench_metrics.py
@@ -0,0 +1,34 @@
+from typing import Any, Dict, List
+
+from helm.benchmark.adaptation.adapter_spec import AdapterSpec
+from helm.benchmark.adaptation.request_state import RequestState
+from helm.benchmark.annotation.aci_bench_annotator import ANNOTATOR_MODELS
+from helm.benchmark.metrics.metric import Metric
+from helm.benchmark.metrics.metric_name import MetricName
+from helm.benchmark.metrics.metric_service import MetricService
+from helm.benchmark.metrics.statistic import Stat
+
+
+class ACIBenchMetric(Metric):
+    """Score metrics for ACIBench."""
+
+    def evaluate_generation(
+        self,
+        adapter_spec: AdapterSpec,
+        request_state: RequestState,
+        metric_service: MetricService,
+        eval_cache_path: str,
+    ) -> List[Stat]:
+        assert request_state.annotations
+        annotations: Dict[str, Any] = request_state.annotations["aci_bench"]
+        scores: List[int] = []
+        score = 0.0
+        for annotation_key, annotation_dict in annotations.items():
+            if annotation_key in ANNOTATOR_MODELS.keys() and annotation_dict is not None:
+                for val in annotation_dict.values():
+                    scores.append(int(val["score"]))
+        if scores:
+            score = sum(scores) / len(scores)
+        return [
+            Stat(MetricName("aci_bench_accuracy")).add(score),
+        ]
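The new ACIBenchMetric averages integer judge scores across every annotator model present in the annotations. A toy illustration of that aggregation; the annotator keys and score payloads below are hypothetical:

# Stand-in for the ANNOTATOR_MODELS registry; the keys are made up.
ANNOTATOR_MODELS = {"judge_a": None, "judge_b": None}

annotations = {
    "judge_a": {"criterion_1": {"score": 4}, "criterion_2": {"score": 5}},
    "judge_b": {"criterion_1": {"score": 3}},
    "prompt_text": None,  # non-annotator entries are skipped
}

scores = []
for key, annotation_dict in annotations.items():
    if key in ANNOTATOR_MODELS and annotation_dict is not None:
        for val in annotation_dict.values():
            scores.append(int(val["score"]))

score = sum(scores) / len(scores) if scores else 0.0
print(score)  # (4 + 5 + 3) / 3 = 4.0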
helm/benchmark/metrics/basic_metrics.py
@@ -5,8 +5,8 @@ from typing import List, Dict, Set
 from urllib.parse import unquote
 
 import numpy as np
-import scipy
-import calibration as cal
+import scipy  # type: ignore
+import calibration as cal  # type: ignore
 from helm.benchmark.adaptation.scenario_state import ScenarioState
 from helm.benchmark.metrics.evaluate_reference_metrics import compute_reference_metrics
 from helm.benchmark.metrics.efficiency_metrics import EfficiencyMetric
@@ -25,10 +25,10 @@ from helm.benchmark.window_services.window_service import WindowService
 from helm.benchmark.window_services.window_service_factory import WindowServiceFactory
 from helm.benchmark.window_services.tokenizer_service import TokenizerService
 from helm.benchmark.scenarios.scenario import CORRECT_TAG, Instance
-from .metric import Metric, MetricInterface, MetricResult, add_context, get_unique_stat_by_name
-from .metric_name import MetricContext, MetricName
-from .metric_service import MetricService
-from .statistic import Stat, merge_stat
+from helm.benchmark.metrics.metric import Metric, MetricInterface, MetricResult, add_context, get_unique_stat_by_name
+from helm.benchmark.metrics.metric_name import MetricContext, MetricName
+from helm.benchmark.metrics.metric_service import MetricService
+from helm.benchmark.metrics.statistic import Stat, merge_stat
 
 
 def get_num_bytes(tokens: List[Token]) -> int:
helm/benchmark/metrics/bbq_metrics.py
@@ -3,8 +3,8 @@ from helm.benchmark.metrics.evaluate_instances_metric import EvaluateInstancesMe
 
 from helm.common.request import RequestResult
 from helm.benchmark.adaptation.request_state import RequestState
-from .metric_name import MetricName
-from .statistic import Stat
+from helm.benchmark.metrics.metric_name import MetricName
+from helm.benchmark.metrics.statistic import Stat
 
 AMBIGUOUS_TAG = "ambiguous"
 NON_AMBIGUOUS_TAG = "non-ambiguous"
@@ -8,9 +8,18 @@ from helm.benchmark.metrics.evaluate_instances_metric import EvaluateInstancesMetric

  from helm.common.request import RequestResult, GeneratedOutput
  from helm.benchmark.adaptation.request_state import RequestState
- from .statistic import Stat
- from .metric_name import MetricName
- from .bias_word_lists import GENDER_TO_WORD_LISTS, RACE_TO_NAME_LISTS, ADJECTIVE_LIST, PROFESSION_LIST
+ from helm.benchmark.metrics.nltk_helper import install_nltk_resources
+ from helm.benchmark.metrics.statistic import Stat
+ from helm.benchmark.metrics.metric_name import MetricName
+ from helm.benchmark.metrics.bias_word_lists import (
+     GENDER_TO_WORD_LISTS,
+     RACE_TO_NAME_LISTS,
+     ADJECTIVE_LIST,
+     PROFESSION_LIST,
+ )
+
+
+ install_nltk_resources()


  class BiasMetric(EvaluateInstancesMetric):
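Note: `install_nltk_resources()` now runs at import time so the NLTK data that the bias metric's tokenization relies on is present before any instance is scored. The helper's body is not shown in this diff; a typical implementation looks roughly like the sketch below (the resource names are assumptions):

```python
import nltk

def install_nltk_resources_sketch() -> None:
    # Download each corpus only if it is not already available locally.
    for path, resource in [("tokenizers/punkt", "punkt")]:
        try:
            nltk.data.find(path)
        except LookupError:
            nltk.download(resource, quiet=True)
```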
@@ -0,0 +1,25 @@
+ from typing import List
+
+ from helm.benchmark.adaptation.adapter_spec import AdapterSpec
+ from helm.benchmark.adaptation.request_state import RequestState
+ from helm.benchmark.metrics.metric import Metric
+ from helm.benchmark.metrics.metric_name import MetricName
+ from helm.benchmark.metrics.metric_service import MetricService
+ from helm.benchmark.metrics.statistic import Stat
+
+
+ class BigCodeBenchMetric(Metric):
+     """Score metrics for BigCodeBench."""
+
+     def evaluate_generation(
+         self,
+         adapter_spec: AdapterSpec,
+         request_state: RequestState,
+         metric_service: MetricService,
+         eval_cache_path: str,
+     ) -> List[Stat]:
+         assert request_state.annotations
+         score = request_state.annotations["bigcodebench"]["pass_at_one"]
+         return [
+             Stat(MetricName("bigcodebench_p@1")).add(score),
+         ]
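Note: this metric simply forwards the annotator's precomputed `pass_at_one` value. For reference, the standard unbiased pass@k estimator (Chen et al., 2021) reduces to the plain pass rate c/n at k=1; whether the BigCodeBench annotator uses this exact estimator is not shown in this diff:

```python
from math import comb

def pass_at_k(n: int, c: int, k: int) -> float:
    """Unbiased pass@k: 1 - C(n - c, k) / C(n, k), given n samples with c passing."""
    if n - c < k:
        return 1.0
    return 1.0 - comb(n - c, k) / comb(n, k)

# With one sample per problem (k = 1), this is just c / n:
assert pass_at_k(n=1, c=1, k=1) == 1.0
assert pass_at_k(n=1, c=0, k=1) == 0.0
assert pass_at_k(n=8, c=4, k=1) == 0.5
```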
@@ -0,0 +1,28 @@
+ from typing import List
+
+ from helm.benchmark.adaptation.adapter_spec import AdapterSpec
+ from helm.benchmark.adaptation.request_state import RequestState
+ from helm.benchmark.metrics.metric import Metric
+ from helm.benchmark.metrics.metric_name import MetricName
+ from helm.benchmark.metrics.metric_service import MetricService
+ from helm.benchmark.metrics.statistic import Stat
+
+
+ class BirdSQLMetric(Metric):
+     """Score metrics for Bird-SQL."""
+
+     ANNOTATOR_NAME = "bird_sql"
+
+     def evaluate_generation(
+         self,
+         adapter_spec: AdapterSpec,
+         request_state: RequestState,
+         metric_service: MetricService,
+         eval_cache_path: str,
+     ) -> List[Stat]:
+         if not request_state.annotations:
+             raise Exception("Request state did not have annotations.")
+         predicted_result = request_state.annotations[self.ANNOTATOR_NAME]["predicted_result"]
+         ground_truth_result = request_state.annotations[self.ANNOTATOR_NAME]["ground_truth_result"]
+         execution_accuracy = int(set(predicted_result) == set(ground_truth_result))
+         return [Stat(MetricName("execution_accuracy")).add(execution_accuracy)]
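Note: execution accuracy compares the two query results as Python sets, which makes the check order-insensitive but also collapses duplicate rows; both properties are worth keeping in mind when reading scores:

```python
# Row order does not matter:
predicted = [("alice", 30), ("bob", 25)]
ground_truth = [("bob", 25), ("alice", 30)]
assert set(predicted) == set(ground_truth)

# Caveat: multiplicity is ignored, so a duplicated row still matches
# a ground truth that contains the row only once.
assert set([("a",), ("a",)]) == set([("a",)])
```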
@@ -0,0 +1,34 @@
+ from typing import Any, Dict, List
+
+ from helm.benchmark.adaptation.adapter_spec import AdapterSpec
+ from helm.benchmark.adaptation.request_state import RequestState
+ from helm.benchmark.annotation.chw_care_plan_annotator import ANNOTATOR_MODELS
+ from helm.benchmark.metrics.metric import Metric
+ from helm.benchmark.metrics.metric_name import MetricName
+ from helm.benchmark.metrics.metric_service import MetricService
+ from helm.benchmark.metrics.statistic import Stat
+
+
+ class CHWCarePlanMetric(Metric):
+     """Score metrics for CHWCarePlan."""
+
+     def evaluate_generation(
+         self,
+         adapter_spec: AdapterSpec,
+         request_state: RequestState,
+         metric_service: MetricService,
+         eval_cache_path: str,
+     ) -> List[Stat]:
+         assert request_state.annotations
+         annotations: Dict[str, Any] = request_state.annotations["chw_care_plan"]
+         scores: List[int] = []
+         score = 0.0
+         for annotation_key, annotation_dict in annotations.items():
+             if annotation_key in ANNOTATOR_MODELS.keys() and annotation_dict is not None:
+                 for val in annotation_dict.values():
+                     scores.append(int(val["score"]))
+         if scores:
+             score = sum(scores) / len(scores)
+         return [
+             Stat(MetricName("chw_care_plan_accuracy")).add(score),
+         ]
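Note: `CHWCarePlanMetric` mirrors the ACIBench pattern above: integer judge scores are averaged across the models in its annotator's `ANNOTATOR_MODELS`. For orientation, a hedged sketch of how such a metric is typically attached to a run spec in HELM; the module path below is an assumption, not confirmed by this diff:

```python
from helm.benchmark.metrics.metric import MetricSpec

# Hypothetical module path; consult the scenario's run spec for the real one.
metric_specs = [
    MetricSpec(
        class_name="helm.benchmark.metrics.chw_care_plan_metrics.CHWCarePlanMetric",
        args={},
    )
]
```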
@@ -1,6 +1,6 @@
  from typing import List, Optional

- from sklearn.metrics import f1_score
+ from sklearn.metrics import f1_score, precision_score, recall_score
  from sklearn.preprocessing import MultiLabelBinarizer

  from helm.benchmark.adaptation.request_state import RequestState
@@ -9,9 +9,14 @@ from helm.benchmark.metrics.evaluate_reference_metrics import normalize_text
  from helm.benchmark.metrics.metric import MetricName
  from helm.benchmark.metrics.statistic import Stat
  from helm.benchmark.scenarios.scenario import Reference
+ from helm.common.hierarchical_logger import hlog
  from helm.common.request import GeneratedOutput


+ def _normalize_label_text(text: str) -> str:
+     return normalize_text(text, should_remove_articles=False)
+
+
  class ClassificationMetric(EvaluateInstancesMetric):
      """Defines metrics for multi-class classification using the generation adapter.

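Note: the new `_normalize_label_text` helper disables article removal so that class labels differing only by "a"/"an"/"the" remain distinct after normalization. A stand-in illustration, assuming `normalize_text` performs the usual SQuAD-style cleanup (lowercasing, punctuation stripping, optional article removal):

```python
import re
import string

def normalize_text_sketch(text: str, should_remove_articles: bool = True) -> str:
    # Assumed behavior of helm's normalize_text, reimplemented for illustration.
    text = "".join(ch for ch in text.lower() if ch not in string.punctuation)
    if should_remove_articles:
        text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())

print(normalize_text_sketch("A fan."))         # "fan"   (would collide with the label "fan")
print(normalize_text_sketch("A fan.", False))  # "a fan" (label stays distinct)
```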
@@ -23,8 +28,12 @@ class ClassificationMetric(EvaluateInstancesMetric):
      reference. The predicted class for each instance is the normalized text of the generation.

      Note:
-     - The set of classes is derived from the correct references from all the instances.
-       This means that classes may be omitted if they are never used as a correct reference.
+     - It is highly recommended to specify the set of classes using the
+       `labels` parameter. Otherwise, the set of classes is derived from the correct references
+       from all the instances. This means that classes may be incorrectly omitted if they are never
+       used as a correct reference.
+     - The `averages` parameter is a list of averaging methods to be used.
+       It has the same meaning as `average` in scikit-learn.
      - Generations that are not in any of the known classes are counted as a
        negative prediction for every class.
      - Perturbed classes are considered different classes from unperturbed
@@ -32,8 +41,45 @@ class ClassificationMetric(EvaluateInstancesMetric):
      - Currently, multi-label classification is not supported.
      """

-     def __init__(self, delimiter: Optional[str] = None):
+     AVERAGE_OPTIONS = ["micro", "macro", "weighted", None]
+     SCORE_OPTIONS = ["f1", "precision", "recall"]
+
+     def __init__(
+         self,
+         averages: Optional[List[Optional[str]]] = None,
+         labels: Optional[List[str]] = None,
+         scores: Optional[List[str]] = None,
+         delimiter: Optional[str] = None,
+     ) -> None:
+         """Creates metrics for multi-class classification.
+
+         :param delimiter: For multi-label classification, the string delimiter between classes in the model's output.
+         :param scores: The list of scores to compute (e.g. "f1", "precision", "recall").
+             Defaults to ["f1"].
+         :param averages: The averaging methods (e.g. "micro", "macro", "weighted") to be used.
+             Each has the same meaning as `average` in scikit-learn.
+             Defaults to ["macro", "micro"].
+         :param labels: The set of labels.
+         """
+         self.averages = averages or ["macro", "micro"]
+         for average in self.averages:
+             if average not in ClassificationMetric.AVERAGE_OPTIONS:
+                 raise ValueError(
+                     f"Each value in `averages` must be set to one of {ClassificationMetric.AVERAGE_OPTIONS}."
+                 )
+         self.scores = scores or ["f1"]
+         for score_name in self.scores:
+             if score_name not in ClassificationMetric.SCORE_OPTIONS:
+                 raise ValueError(f"Each value in `scores` must be set to one of {ClassificationMetric.SCORE_OPTIONS}.")
          self.delimiter = delimiter
+         self.labels = labels
+         if not self.labels:
+             hlog(
+                 "WARNING: `labels` were not set on `ClassificationMetric`, "
+                 "so they will be inferred from target references. "
+                 "It is recommended to explicitly set `labels` on `ClassificationMetric`."
+             )

      def is_multi_label(self) -> bool:
          return bool(self.delimiter)
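Note: a minimal usage sketch of the expanded constructor; the import path is assumed from the package layout:

```python
from helm.benchmark.metrics.classification_metrics import ClassificationMetric

# Explicit labels avoid the inference warning above; including None in
# `averages` additionally requests one stat per class.
metric = ClassificationMetric(
    averages=["macro", None],
    labels=["positive", "negative", "neutral"],
    scores=["f1", "precision"],
)
```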
@@ -57,20 +103,38 @@ class ClassificationMetric(EvaluateInstancesMetric):
              references = request_state.instance.all_correct_references
              if not self.is_multi_label():
                  assert len(references) == 1
-             correct_ref_texts = [normalize_text(ref.output.text) for ref in references if ref.output.text]
+             correct_ref_texts = [_normalize_label_text(ref.output.text) for ref in references if ref.output.text]
              y_true.append(correct_ref_texts)

              input_text = request_state.result.completions[0].text
              predictions = input_text.split(self.delimiter) if self.is_multi_label() else [input_text]
-             y_pred.append([normalize_text(pred) for pred in predictions if pred])
-         labels: List[str] = list(set(y for ys in y_true for y in ys))
-         mlb = MultiLabelBinarizer().fit([labels])
+             y_pred.append([_normalize_label_text(pred) for pred in predictions if pred])
+         mlb = MultiLabelBinarizer().fit(
+             [[_normalize_label_text(label) for label in self.labels]] if self.labels else y_true
+         )
          y_true = mlb.transform(y_true)
          y_pred = mlb.transform(y_pred)
-         return [
-             Stat(MetricName("classification_macro_f1")).add(f1_score(y_pred=y_pred, y_true=y_true, average="macro")),
-             Stat(MetricName("classification_micro_f1")).add(f1_score(y_pred=y_pred, y_true=y_true, average="micro")),
-         ]
+         stats: List[Stat] = []
+         for average in self.averages:
+             for score_name in self.scores:
+                 if score_name == "f1":
+                     score_value = f1_score(y_pred=y_pred, y_true=y_true, average=average)
+                 elif score_name == "precision":
+                     score_value = precision_score(y_pred=y_pred, y_true=y_true, average=average)
+                 elif score_name == "recall":
+                     score_value = recall_score(y_pred=y_pred, y_true=y_true, average=average)
+                 else:
+                     raise ValueError(
+                         f"Unknown score name: '{score_name}' - expected one of ['f1', 'precision', 'recall']"
+                     )
+                 if average is None:
+                     for mlb_class, class_score_value in zip(mlb.classes_, score_value):
+                         stats.append(
+                             Stat(MetricName(f"classification_{mlb_class}_{score_name}")).add(class_score_value)
+                         )
+                 else:
+                     stats.append(Stat(MetricName(f"classification_{average}_{score_name}")).add(score_value))
+         return stats


  class MultipleChoiceClassificationMetric(EvaluateInstancesMetric):
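Note: when `average is None`, scikit-learn returns one score per class, ordered like `mlb.classes_`, which the loop above turns into per-class stats such as `classification_<class>_f1`. A standalone illustration:

```python
from sklearn.metrics import f1_score
from sklearn.preprocessing import MultiLabelBinarizer

mlb = MultiLabelBinarizer().fit([["cat", "dog", "fish"]])
y_true = mlb.transform([["cat"], ["dog"], ["fish"]])
y_pred = mlb.transform([["cat"], ["dog"], ["dog"]])

# f1_score(..., average=None) yields an array aligned with mlb.classes_.
for cls, score in zip(mlb.classes_, f1_score(y_true=y_true, y_pred=y_pred, average=None)):
    print(f"classification_{cls}_f1 = {score:.2f}")
# classification_cat_f1 = 1.00
# classification_dog_f1 = 0.67
# classification_fish_f1 = 0.00
```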