crfm-helm 0.4.0__py3-none-any.whl → 0.5.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crfm-helm might be problematic; see the package registry's advisory page for more details.

Files changed (499)
  1. {crfm_helm-0.4.0.dist-info → crfm_helm-0.5.1.dist-info}/METADATA +138 -31
  2. crfm_helm-0.5.1.dist-info/RECORD +654 -0
  3. {crfm_helm-0.4.0.dist-info → crfm_helm-0.5.1.dist-info}/WHEEL +1 -1
  4. helm/benchmark/adaptation/adapter_spec.py +31 -3
  5. helm/benchmark/adaptation/adapters/adapter.py +2 -2
  6. helm/benchmark/adaptation/adapters/adapter_factory.py +24 -27
  7. helm/benchmark/adaptation/adapters/generation_adapter.py +1 -0
  8. helm/benchmark/adaptation/adapters/in_context_learning_adapter.py +20 -4
  9. helm/benchmark/adaptation/adapters/language_modeling_adapter.py +2 -3
  10. helm/benchmark/adaptation/adapters/multimodal/in_context_learning_multimodal_adapter.py +1 -0
  11. helm/benchmark/adaptation/adapters/multimodal/multimodal_prompt.py +7 -0
  12. helm/benchmark/adaptation/adapters/multimodal/multiple_choice_joint_multimodal_adapter.py +104 -0
  13. helm/benchmark/adaptation/adapters/multimodal/test_in_context_learning_multimodal_adapter.py +2 -1
  14. helm/benchmark/adaptation/adapters/multimodal/test_multimodal_prompt.py +2 -0
  15. helm/benchmark/adaptation/adapters/test_adapter.py +2 -1
  16. helm/benchmark/adaptation/adapters/test_generation_adapter.py +32 -8
  17. helm/benchmark/adaptation/adapters/test_language_modeling_adapter.py +7 -19
  18. helm/benchmark/adaptation/adapters/test_multiple_choice_joint_adapter.py +60 -6
  19. helm/benchmark/adaptation/common_adapter_specs.py +376 -0
  20. helm/benchmark/adaptation/request_state.py +6 -1
  21. helm/benchmark/adaptation/scenario_state.py +6 -2
  22. helm/benchmark/annotation/annotator.py +43 -0
  23. helm/benchmark/annotation/annotator_factory.py +61 -0
  24. helm/benchmark/annotation/image2structure/image_compiler_annotator.py +88 -0
  25. helm/benchmark/annotation/image2structure/latex_compiler_annotator.py +59 -0
  26. helm/benchmark/annotation/image2structure/lilypond_compiler_annotator.py +84 -0
  27. helm/benchmark/annotation/image2structure/webpage_compiler_annotator.py +132 -0
  28. helm/benchmark/annotation/test_annotator_factory.py +26 -0
  29. helm/benchmark/annotation/test_dummy_annotator.py +44 -0
  30. helm/benchmark/annotation_executor.py +124 -0
  31. helm/benchmark/augmentations/data_augmenter.py +0 -2
  32. helm/benchmark/augmentations/gender_perturbation.py +1 -1
  33. helm/benchmark/augmentations/perturbation.py +25 -3
  34. helm/benchmark/augmentations/perturbation_description.py +1 -1
  35. helm/benchmark/augmentations/suffix_perturbation.py +29 -0
  36. helm/benchmark/augmentations/test_perturbation.py +41 -7
  37. helm/benchmark/augmentations/translate_perturbation.py +30 -0
  38. helm/benchmark/config_registry.py +7 -1
  39. helm/benchmark/executor.py +46 -16
  40. helm/benchmark/huggingface_registration.py +20 -7
  41. helm/benchmark/metrics/basic_metrics.py +169 -664
  42. helm/benchmark/metrics/bbq_metrics.py +3 -4
  43. helm/benchmark/metrics/bias_metrics.py +6 -6
  44. helm/benchmark/metrics/classification_metrics.py +11 -8
  45. helm/benchmark/metrics/cleva_accuracy_metrics.py +8 -5
  46. helm/benchmark/metrics/cleva_harms_metrics.py +2 -2
  47. helm/benchmark/metrics/code_metrics_helper.py +0 -2
  48. helm/benchmark/metrics/common_metric_specs.py +167 -0
  49. helm/benchmark/metrics/decodingtrust_fairness_metrics.py +72 -0
  50. helm/benchmark/metrics/decodingtrust_ood_knowledge_metrics.py +66 -0
  51. helm/benchmark/metrics/decodingtrust_privacy_metrics.py +101 -0
  52. helm/benchmark/metrics/decodingtrust_stereotype_bias_metrics.py +202 -0
  53. helm/benchmark/metrics/disinformation_metrics.py +4 -110
  54. helm/benchmark/metrics/dry_run_metrics.py +2 -2
  55. helm/benchmark/metrics/efficiency_metrics.py +213 -0
  56. helm/benchmark/metrics/evaluate_instances_metric.py +59 -0
  57. helm/benchmark/metrics/evaluate_reference_metrics.py +392 -0
  58. helm/benchmark/metrics/image_generation/aesthetics_metrics.py +54 -0
  59. helm/benchmark/metrics/image_generation/aesthetics_scorer.py +66 -0
  60. helm/benchmark/metrics/image_generation/clip_score_metrics.py +73 -0
  61. helm/benchmark/metrics/image_generation/denoised_runtime_metric.py +42 -0
  62. helm/benchmark/metrics/image_generation/detection_metrics.py +57 -0
  63. helm/benchmark/metrics/image_generation/detectors/base_detector.py +8 -0
  64. helm/benchmark/metrics/image_generation/detectors/vitdet.py +178 -0
  65. helm/benchmark/metrics/image_generation/efficiency_metrics.py +41 -0
  66. helm/benchmark/metrics/image_generation/fidelity_metrics.py +168 -0
  67. helm/benchmark/metrics/image_generation/fractal_dimension/__init__.py +0 -0
  68. helm/benchmark/metrics/image_generation/fractal_dimension/fractal_dimension_util.py +63 -0
  69. helm/benchmark/metrics/image_generation/fractal_dimension/test_fractal_dimension_util.py +33 -0
  70. helm/benchmark/metrics/image_generation/fractal_dimension_metric.py +50 -0
  71. helm/benchmark/metrics/image_generation/gender_metrics.py +58 -0
  72. helm/benchmark/metrics/image_generation/image_critique_metrics.py +284 -0
  73. helm/benchmark/metrics/image_generation/lpips_metrics.py +82 -0
  74. helm/benchmark/metrics/image_generation/multi_scale_ssim_metrics.py +82 -0
  75. helm/benchmark/metrics/image_generation/nsfw_detector.py +96 -0
  76. helm/benchmark/metrics/image_generation/nsfw_metrics.py +103 -0
  77. helm/benchmark/metrics/image_generation/nudity_metrics.py +38 -0
  78. helm/benchmark/metrics/image_generation/photorealism_critique_metrics.py +153 -0
  79. helm/benchmark/metrics/image_generation/psnr_metrics.py +78 -0
  80. helm/benchmark/metrics/image_generation/q16/__init__.py +0 -0
  81. helm/benchmark/metrics/image_generation/q16/q16_toxicity_detector.py +90 -0
  82. helm/benchmark/metrics/image_generation/q16/test_q16.py +18 -0
  83. helm/benchmark/metrics/image_generation/q16_toxicity_metrics.py +48 -0
  84. helm/benchmark/metrics/image_generation/skin_tone_metrics.py +164 -0
  85. helm/benchmark/metrics/image_generation/uiqi_metrics.py +92 -0
  86. helm/benchmark/metrics/image_generation/watermark/__init__.py +0 -0
  87. helm/benchmark/metrics/image_generation/watermark/test_watermark_detector.py +16 -0
  88. helm/benchmark/metrics/image_generation/watermark/watermark_detector.py +87 -0
  89. helm/benchmark/metrics/image_generation/watermark_metrics.py +48 -0
  90. helm/benchmark/metrics/instruction_following_critique_metrics.py +3 -1
  91. helm/benchmark/metrics/language_modeling_metrics.py +99 -0
  92. helm/benchmark/metrics/machine_translation_metrics.py +89 -0
  93. helm/benchmark/metrics/metric.py +93 -172
  94. helm/benchmark/metrics/metric_name.py +0 -1
  95. helm/benchmark/metrics/metric_service.py +16 -0
  96. helm/benchmark/metrics/paraphrase_generation_metrics.py +3 -4
  97. helm/benchmark/metrics/ranking_metrics.py +2 -2
  98. helm/benchmark/metrics/reference_metric.py +148 -0
  99. helm/benchmark/metrics/summac/model_summac.py +0 -2
  100. helm/benchmark/metrics/summarization_metrics.py +2 -2
  101. helm/benchmark/metrics/test_classification_metrics.py +8 -5
  102. helm/benchmark/metrics/test_disinformation_metrics.py +78 -0
  103. helm/benchmark/metrics/{test_basic_metrics.py → test_evaluate_reference_metrics.py} +5 -1
  104. helm/benchmark/metrics/test_metric.py +2 -2
  105. helm/benchmark/metrics/tokens/gooseai_token_cost_estimator.py +10 -2
  106. helm/benchmark/metrics/toxicity_metrics.py +1 -1
  107. helm/benchmark/metrics/toxicity_utils.py +23 -0
  108. helm/benchmark/metrics/unitxt_metrics.py +81 -0
  109. helm/benchmark/metrics/vision_language/__init__.py +0 -0
  110. helm/benchmark/metrics/vision_language/emd_utils.py +341 -0
  111. helm/benchmark/metrics/vision_language/image_metrics.py +575 -0
  112. helm/benchmark/metrics/vision_language/image_utils.py +100 -0
  113. helm/benchmark/model_deployment_registry.py +74 -0
  114. helm/benchmark/model_metadata_registry.py +41 -1
  115. helm/benchmark/multi_gpu_runner.py +133 -0
  116. helm/benchmark/presentation/create_plots.py +8 -7
  117. helm/benchmark/presentation/run_display.py +26 -10
  118. helm/benchmark/presentation/schema.py +15 -40
  119. helm/benchmark/presentation/summarize.py +119 -79
  120. helm/benchmark/presentation/table.py +8 -8
  121. helm/benchmark/presentation/test_contamination.py +2 -2
  122. helm/benchmark/presentation/test_run_entry.py +1 -2
  123. helm/benchmark/presentation/test_summarize.py +3 -3
  124. helm/benchmark/run.py +54 -26
  125. helm/benchmark/run_expander.py +205 -35
  126. helm/benchmark/run_spec.py +93 -0
  127. helm/benchmark/run_spec_factory.py +163 -0
  128. helm/benchmark/run_specs/__init__.py +0 -0
  129. helm/benchmark/run_specs/classic_run_specs.py +1510 -0
  130. helm/benchmark/run_specs/cleva_run_specs.py +277 -0
  131. helm/benchmark/run_specs/decodingtrust_run_specs.py +314 -0
  132. helm/benchmark/run_specs/heim_run_specs.py +623 -0
  133. helm/benchmark/run_specs/instruction_following_run_specs.py +129 -0
  134. helm/benchmark/run_specs/lite_run_specs.py +307 -0
  135. helm/benchmark/run_specs/simple_run_specs.py +104 -0
  136. helm/benchmark/run_specs/unitxt_run_specs.py +42 -0
  137. helm/benchmark/run_specs/vlm_run_specs.py +757 -0
  138. helm/benchmark/runner.py +51 -57
  139. helm/benchmark/runner_config_registry.py +21 -0
  140. helm/benchmark/scenarios/bbq_scenario.py +1 -1
  141. helm/benchmark/scenarios/bold_scenario.py +2 -2
  142. helm/benchmark/scenarios/code_scenario.py +1 -0
  143. helm/benchmark/scenarios/decodingtrust_adv_demonstration_scenario.py +169 -0
  144. helm/benchmark/scenarios/decodingtrust_adv_robustness_scenario.py +121 -0
  145. helm/benchmark/scenarios/decodingtrust_fairness_scenario.py +77 -0
  146. helm/benchmark/scenarios/decodingtrust_machine_ethics_scenario.py +324 -0
  147. helm/benchmark/scenarios/decodingtrust_ood_robustness_scenario.py +204 -0
  148. helm/benchmark/scenarios/decodingtrust_privacy_scenario.py +559 -0
  149. helm/benchmark/scenarios/decodingtrust_stereotype_bias_scenario.py +67 -0
  150. helm/benchmark/scenarios/decodingtrust_toxicity_prompts_scenario.py +78 -0
  151. helm/benchmark/scenarios/dialogue_scenarios.py +0 -1
  152. helm/benchmark/scenarios/image_generation/__init__.py +0 -0
  153. helm/benchmark/scenarios/image_generation/common_syntactic_processes_scenario.py +105 -0
  154. helm/benchmark/scenarios/image_generation/cub200_scenario.py +95 -0
  155. helm/benchmark/scenarios/image_generation/daily_dalle_scenario.py +124 -0
  156. helm/benchmark/scenarios/image_generation/demographic_stereotypes_scenario.py +82 -0
  157. helm/benchmark/scenarios/image_generation/detection_scenario.py +83 -0
  158. helm/benchmark/scenarios/image_generation/draw_bench_scenario.py +74 -0
  159. helm/benchmark/scenarios/image_generation/i2p_scenario.py +57 -0
  160. helm/benchmark/scenarios/image_generation/landing_page_scenario.py +46 -0
  161. helm/benchmark/scenarios/image_generation/logos_scenario.py +223 -0
  162. helm/benchmark/scenarios/image_generation/magazine_cover_scenario.py +91 -0
  163. helm/benchmark/scenarios/image_generation/mental_disorders_scenario.py +46 -0
  164. helm/benchmark/scenarios/image_generation/mscoco_scenario.py +91 -0
  165. helm/benchmark/scenarios/image_generation/paint_skills_scenario.py +72 -0
  166. helm/benchmark/scenarios/image_generation/parti_prompts_scenario.py +94 -0
  167. helm/benchmark/scenarios/image_generation/radiology_scenario.py +42 -0
  168. helm/benchmark/scenarios/image_generation/relational_understanding_scenario.py +52 -0
  169. helm/benchmark/scenarios/image_generation/time_most_significant_historical_figures_scenario.py +124 -0
  170. helm/benchmark/scenarios/image_generation/winoground_scenario.py +62 -0
  171. helm/benchmark/scenarios/imdb_scenario.py +0 -1
  172. helm/benchmark/scenarios/legalbench_scenario.py +6 -2
  173. helm/benchmark/scenarios/live_qa_scenario.py +94 -0
  174. helm/benchmark/scenarios/lm_entry_scenario.py +185 -0
  175. helm/benchmark/scenarios/math_scenario.py +19 -2
  176. helm/benchmark/scenarios/medication_qa_scenario.py +60 -0
  177. helm/benchmark/scenarios/numeracy_scenario.py +1 -1
  178. helm/benchmark/scenarios/opinions_qa_scenario.py +0 -4
  179. helm/benchmark/scenarios/scenario.py +4 -0
  180. helm/benchmark/scenarios/simple_scenarios.py +122 -1
  181. helm/benchmark/scenarios/test_math_scenario.py +6 -0
  182. helm/benchmark/scenarios/test_scenario.py +6 -3
  183. helm/benchmark/scenarios/test_simple_scenarios.py +50 -0
  184. helm/benchmark/scenarios/thai_exam_scenario.py +135 -0
  185. helm/benchmark/scenarios/unitxt_scenario.py +56 -0
  186. helm/benchmark/scenarios/verifiability_judgment_scenario.py +3 -1
  187. helm/benchmark/scenarios/vicuna_scenario.py +1 -1
  188. helm/benchmark/scenarios/vision_language/a_okvqa_scenario.py +83 -0
  189. helm/benchmark/scenarios/vision_language/bingo_scenario.py +103 -0
  190. helm/benchmark/scenarios/vision_language/crossmodal_3600_scenario.py +134 -0
  191. helm/benchmark/scenarios/vision_language/flickr30k_scenario.py +74 -0
  192. helm/benchmark/scenarios/vision_language/gqa_scenario.py +91 -0
  193. helm/benchmark/scenarios/vision_language/hateful_memes_scenario.py +94 -0
  194. helm/benchmark/scenarios/vision_language/heim_human_eval_scenario.py +113 -0
  195. helm/benchmark/scenarios/vision_language/image2structure/__init__.py +0 -0
  196. helm/benchmark/scenarios/vision_language/image2structure/chart2csv_scenario.py +55 -0
  197. helm/benchmark/scenarios/vision_language/image2structure/image2structure_scenario.py +214 -0
  198. helm/benchmark/scenarios/vision_language/image2structure/latex_scenario.py +25 -0
  199. helm/benchmark/scenarios/vision_language/image2structure/musicsheet_scenario.py +20 -0
  200. helm/benchmark/scenarios/vision_language/image2structure/utils_latex.py +347 -0
  201. helm/benchmark/scenarios/vision_language/image2structure/webpage/__init__.py +0 -0
  202. helm/benchmark/scenarios/vision_language/image2structure/webpage/driver.py +84 -0
  203. helm/benchmark/scenarios/vision_language/image2structure/webpage/jekyll_server.py +182 -0
  204. helm/benchmark/scenarios/vision_language/image2structure/webpage/utils.py +31 -0
  205. helm/benchmark/scenarios/vision_language/image2structure/webpage_scenario.py +225 -0
  206. helm/benchmark/scenarios/vision_language/math_vista_scenario.py +117 -0
  207. helm/benchmark/scenarios/vision_language/mementos_scenario.py +124 -0
  208. helm/benchmark/scenarios/vision_language/mm_safety_bench_scenario.py +103 -0
  209. helm/benchmark/scenarios/vision_language/mme_scenario.py +145 -0
  210. helm/benchmark/scenarios/vision_language/mmmu_scenario.py +187 -0
  211. helm/benchmark/scenarios/vision_language/mscoco_captioning_scenario.py +92 -0
  212. helm/benchmark/scenarios/vision_language/mscoco_categorization_scenario.py +117 -0
  213. helm/benchmark/scenarios/vision_language/multipanelvqa_scenario.py +169 -0
  214. helm/benchmark/scenarios/vision_language/originality_scenario.py +35 -0
  215. helm/benchmark/scenarios/vision_language/pairs_scenario.py +246 -0
  216. helm/benchmark/scenarios/vision_language/pope_scenario.py +104 -0
  217. helm/benchmark/scenarios/vision_language/seed_bench_scenario.py +129 -0
  218. helm/benchmark/scenarios/vision_language/unicorn_scenario.py +108 -0
  219. helm/benchmark/scenarios/vision_language/viz_wiz_scenario.py +3 -4
  220. helm/benchmark/scenarios/vision_language/vqa_scenario.py +5 -3
  221. helm/benchmark/scenarios/wmt_14_scenario.py +1 -1
  222. helm/benchmark/server.py +24 -1
  223. helm/benchmark/slurm_runner.py +70 -49
  224. helm/benchmark/static/benchmarking.js +1 -1
  225. helm/benchmark/static/schema_classic.yaml +258 -1066
  226. helm/benchmark/static/schema_image2structure.yaml +304 -0
  227. helm/benchmark/static/schema_instruction_following.yaml +210 -0
  228. helm/benchmark/static/schema_lite.yaml +2 -227
  229. helm/benchmark/static/schema_mmlu.yaml +1507 -0
  230. helm/benchmark/static/schema_unitxt.yaml +428 -0
  231. helm/benchmark/static/schema_vhelm_lite.yaml +164 -0
  232. helm/benchmark/static/schema_vlm.yaml +823 -0
  233. helm/benchmark/static_build/assets/01-694cb9b7.png +0 -0
  234. helm/benchmark/static_build/assets/ai21-0eb91ec3.png +0 -0
  235. helm/benchmark/static_build/assets/aleph-alpha-7ce10034.png +0 -0
  236. helm/benchmark/static_build/assets/anthropic-70d8bc39.png +0 -0
  237. helm/benchmark/static_build/assets/bigscience-7f0400c0.png +0 -0
  238. helm/benchmark/static_build/assets/cohere-3550c6cb.png +0 -0
  239. helm/benchmark/static_build/assets/crfm-logo-74391ab8.png +0 -0
  240. helm/benchmark/static_build/assets/eleutherai-b9451114.png +0 -0
  241. helm/benchmark/static_build/assets/google-06d997ad.png +0 -0
  242. helm/benchmark/static_build/assets/heim-logo-3e5e3aa4.png +0 -0
  243. helm/benchmark/static_build/assets/helm-logo-simple-2ed5400b.png +0 -0
  244. helm/benchmark/static_build/assets/helmhero-28e90f4d.png +0 -0
  245. helm/benchmark/static_build/assets/index-737eef9e.js +10 -0
  246. helm/benchmark/static_build/assets/index-878a1094.css +1 -0
  247. helm/benchmark/static_build/assets/meta-5580e9f1.png +0 -0
  248. helm/benchmark/static_build/assets/microsoft-f5ee5016.png +0 -0
  249. helm/benchmark/static_build/assets/mistral-18e1be23.png +0 -0
  250. helm/benchmark/static_build/assets/nvidia-86fa75c1.png +0 -0
  251. helm/benchmark/static_build/assets/openai-3f8653e4.png +0 -0
  252. helm/benchmark/static_build/assets/react-d4a0b69b.js +85 -0
  253. helm/benchmark/static_build/assets/recharts-6d337683.js +97 -0
  254. helm/benchmark/static_build/assets/tii-24de195c.png +0 -0
  255. helm/benchmark/static_build/assets/together-a665a35b.png +0 -0
  256. helm/benchmark/static_build/assets/tremor-54a99cc4.js +10 -0
  257. helm/benchmark/static_build/assets/tsinghua-keg-97d4b395.png +0 -0
  258. helm/benchmark/static_build/assets/vhelm-framework-cde7618a.png +0 -0
  259. helm/benchmark/static_build/assets/vhelm-model-6d812526.png +0 -0
  260. helm/benchmark/static_build/assets/yandex-38e09d70.png +0 -0
  261. helm/benchmark/static_build/config.js +4 -0
  262. helm/benchmark/static_build/index.html +20 -0
  263. helm/benchmark/test_data_preprocessor.py +3 -3
  264. helm/benchmark/test_run_expander.py +1 -1
  265. helm/benchmark/window_services/ai21_window_service.py +22 -33
  266. helm/benchmark/window_services/cohere_window_service.py +1 -63
  267. helm/benchmark/window_services/default_window_service.py +2 -44
  268. helm/benchmark/window_services/encoder_decoder_window_service.py +0 -11
  269. helm/benchmark/window_services/ice_window_service.py +0 -34
  270. helm/benchmark/window_services/image_generation/__init__.py +0 -0
  271. helm/benchmark/window_services/image_generation/clip_window_service.py +15 -0
  272. helm/benchmark/window_services/image_generation/lexica_search_window_service.py +9 -0
  273. helm/benchmark/window_services/image_generation/openai_dalle_window_service.py +9 -0
  274. helm/benchmark/window_services/image_generation/test_clip_window_service.py +29 -0
  275. helm/benchmark/window_services/image_generation/test_openai_dalle_window_service.py +30 -0
  276. helm/benchmark/window_services/local_window_service.py +21 -4
  277. helm/benchmark/window_services/test_anthropic_window_service.py +2 -1
  278. helm/benchmark/window_services/test_bloom_window_service.py +2 -1
  279. helm/benchmark/window_services/test_cohere_window_service.py +2 -1
  280. helm/benchmark/window_services/test_flan_t5_window_service.py +2 -1
  281. helm/benchmark/window_services/test_gpt2_window_service.py +2 -2
  282. helm/benchmark/window_services/test_gpt4_window_service.py +2 -1
  283. helm/benchmark/window_services/test_gptj_window_service.py +3 -2
  284. helm/benchmark/window_services/test_gptneox_window_service.py +3 -2
  285. helm/benchmark/window_services/test_ice_window_service.py +2 -1
  286. helm/benchmark/window_services/test_openai_window_service.py +2 -1
  287. helm/benchmark/window_services/test_opt_window_service.py +3 -2
  288. helm/benchmark/window_services/test_palmyra_window_service.py +2 -1
  289. helm/benchmark/window_services/test_t0pp_window_service.py +2 -1
  290. helm/benchmark/window_services/test_t511b_window_service.py +2 -1
  291. helm/benchmark/window_services/test_ul2_window_service.py +2 -1
  292. helm/benchmark/window_services/test_utils.py +3 -2
  293. helm/benchmark/window_services/test_yalm_window_service.py +2 -1
  294. helm/benchmark/window_services/window_service.py +42 -0
  295. helm/benchmark/window_services/window_service_factory.py +4 -1
  296. helm/benchmark/window_services/yalm_window_service.py +0 -27
  297. helm/clients/__init__.py +0 -0
  298. helm/{proxy/clients → clients}/ai21_client.py +3 -9
  299. helm/clients/aleph_alpha_client.py +112 -0
  300. helm/{proxy/clients → clients}/anthropic_client.py +233 -18
  301. helm/{proxy/clients → clients}/auto_client.py +59 -31
  302. helm/clients/bedrock_client.py +128 -0
  303. helm/clients/bedrock_utils.py +72 -0
  304. helm/{proxy/clients → clients}/client.py +65 -7
  305. helm/clients/clip_score_client.py +49 -0
  306. helm/clients/clip_scorers/__init__.py +0 -0
  307. helm/clients/clip_scorers/base_clip_scorer.py +18 -0
  308. helm/clients/clip_scorers/clip_scorer.py +50 -0
  309. helm/clients/clip_scorers/multilingual_clip_scorer.py +50 -0
  310. helm/{proxy/clients → clients}/cohere_client.py +4 -11
  311. helm/clients/gcs_client.py +82 -0
  312. helm/{proxy/clients → clients}/google_client.py +5 -5
  313. helm/clients/google_translate_client.py +35 -0
  314. helm/{proxy/clients → clients}/http_model_client.py +5 -7
  315. helm/{proxy/clients → clients}/huggingface_client.py +43 -64
  316. helm/clients/image_generation/__init__.py +0 -0
  317. helm/clients/image_generation/adobe_vision_client.py +78 -0
  318. helm/clients/image_generation/aleph_alpha_image_generation_client.py +98 -0
  319. helm/clients/image_generation/cogview2/__init__.py +0 -0
  320. helm/clients/image_generation/cogview2/coglm_strategy.py +96 -0
  321. helm/clients/image_generation/cogview2/coglm_utils.py +82 -0
  322. helm/clients/image_generation/cogview2/sr_pipeline/__init__.py +15 -0
  323. helm/clients/image_generation/cogview2/sr_pipeline/direct_sr.py +96 -0
  324. helm/clients/image_generation/cogview2/sr_pipeline/dsr_model.py +254 -0
  325. helm/clients/image_generation/cogview2/sr_pipeline/dsr_sampling.py +190 -0
  326. helm/clients/image_generation/cogview2/sr_pipeline/iterative_sr.py +141 -0
  327. helm/clients/image_generation/cogview2/sr_pipeline/itersr_model.py +269 -0
  328. helm/clients/image_generation/cogview2/sr_pipeline/itersr_sampling.py +120 -0
  329. helm/clients/image_generation/cogview2/sr_pipeline/sr_group.py +42 -0
  330. helm/clients/image_generation/cogview2_client.py +191 -0
  331. helm/clients/image_generation/dalle2_client.py +192 -0
  332. helm/clients/image_generation/dalle3_client.py +108 -0
  333. helm/clients/image_generation/dalle_mini/__init__.py +3 -0
  334. helm/clients/image_generation/dalle_mini/data.py +442 -0
  335. helm/clients/image_generation/dalle_mini/model/__init__.py +5 -0
  336. helm/clients/image_generation/dalle_mini/model/configuration.py +175 -0
  337. helm/clients/image_generation/dalle_mini/model/modeling.py +1834 -0
  338. helm/clients/image_generation/dalle_mini/model/partitions.py +84 -0
  339. helm/clients/image_generation/dalle_mini/model/processor.py +63 -0
  340. helm/clients/image_generation/dalle_mini/model/text.py +251 -0
  341. helm/clients/image_generation/dalle_mini/model/tokenizer.py +9 -0
  342. helm/clients/image_generation/dalle_mini/model/utils.py +29 -0
  343. helm/clients/image_generation/dalle_mini/vqgan_jax/__init__.py +1 -0
  344. helm/clients/image_generation/dalle_mini/vqgan_jax/configuration_vqgan.py +40 -0
  345. helm/clients/image_generation/dalle_mini/vqgan_jax/convert_pt_model_to_jax.py +107 -0
  346. helm/clients/image_generation/dalle_mini/vqgan_jax/modeling_flax_vqgan.py +610 -0
  347. helm/clients/image_generation/dalle_mini_client.py +190 -0
  348. helm/clients/image_generation/deep_floyd_client.py +78 -0
  349. helm/clients/image_generation/huggingface_diffusers_client.py +249 -0
  350. helm/clients/image_generation/image_generation_client_utils.py +9 -0
  351. helm/clients/image_generation/lexica_client.py +86 -0
  352. helm/clients/image_generation/mindalle/__init__.py +0 -0
  353. helm/clients/image_generation/mindalle/models/__init__.py +216 -0
  354. helm/clients/image_generation/mindalle/models/stage1/__init__.py +0 -0
  355. helm/clients/image_generation/mindalle/models/stage1/layers.py +312 -0
  356. helm/clients/image_generation/mindalle/models/stage1/vqgan.py +103 -0
  357. helm/clients/image_generation/mindalle/models/stage2/__init__.py +0 -0
  358. helm/clients/image_generation/mindalle/models/stage2/layers.py +144 -0
  359. helm/clients/image_generation/mindalle/models/stage2/transformer.py +268 -0
  360. helm/clients/image_generation/mindalle/models/tokenizer.py +30 -0
  361. helm/clients/image_generation/mindalle/utils/__init__.py +3 -0
  362. helm/clients/image_generation/mindalle/utils/config.py +129 -0
  363. helm/clients/image_generation/mindalle/utils/sampling.py +149 -0
  364. helm/clients/image_generation/mindalle/utils/utils.py +89 -0
  365. helm/clients/image_generation/mindalle_client.py +115 -0
  366. helm/clients/image_generation/nudity_check_client.py +64 -0
  367. helm/clients/image_generation/together_image_generation_client.py +111 -0
  368. helm/{proxy/clients → clients}/lit_gpt_client.py +4 -4
  369. helm/{proxy/clients → clients}/megatron_client.py +5 -5
  370. helm/clients/mistral_client.py +134 -0
  371. helm/clients/moderation_api_client.py +109 -0
  372. helm/clients/open_lm_client.py +43 -0
  373. helm/clients/openai_client.py +301 -0
  374. helm/{proxy/clients → clients}/palmyra_client.py +6 -8
  375. helm/{proxy/clients → clients}/perspective_api_client.py +7 -8
  376. helm/clients/simple_client.py +64 -0
  377. helm/{proxy/clients → clients}/test_auto_client.py +13 -15
  378. helm/clients/test_client.py +100 -0
  379. helm/{proxy/clients → clients}/test_huggingface_client.py +15 -16
  380. helm/clients/test_simple_client.py +19 -0
  381. helm/{proxy/clients → clients}/test_together_client.py +20 -8
  382. helm/{proxy/clients → clients}/together_client.py +104 -73
  383. helm/clients/vertexai_client.py +400 -0
  384. helm/clients/vision_language/__init__.py +0 -0
  385. helm/clients/vision_language/huggingface_vision2seq_client.py +145 -0
  386. helm/clients/vision_language/huggingface_vlm_client.py +111 -0
  387. helm/{proxy/clients → clients}/vision_language/idefics_client.py +54 -49
  388. helm/clients/vision_language/open_flamingo/__init__.py +2 -0
  389. helm/clients/vision_language/open_flamingo/src/__init__.py +0 -0
  390. helm/clients/vision_language/open_flamingo/src/factory.py +147 -0
  391. helm/clients/vision_language/open_flamingo/src/flamingo.py +337 -0
  392. helm/clients/vision_language/open_flamingo/src/flamingo_lm.py +155 -0
  393. helm/clients/vision_language/open_flamingo/src/helpers.py +267 -0
  394. helm/clients/vision_language/open_flamingo/src/utils.py +47 -0
  395. helm/clients/vision_language/open_flamingo_client.py +155 -0
  396. helm/clients/vision_language/qwen_vlm_client.py +171 -0
  397. helm/clients/vllm_client.py +46 -0
  398. helm/common/cache.py +16 -4
  399. helm/common/cache_backend_config.py +47 -0
  400. helm/common/clip_score_request.py +41 -0
  401. helm/common/file_caches/__init__.py +0 -0
  402. helm/common/file_caches/file_cache.py +16 -0
  403. helm/common/file_caches/local_file_cache.py +61 -0
  404. helm/common/file_caches/test_local_file_cache.py +25 -0
  405. helm/common/file_upload_request.py +27 -0
  406. helm/common/general.py +1 -1
  407. helm/common/image_generation_parameters.py +25 -0
  408. helm/common/images_utils.py +33 -3
  409. helm/common/key_value_store.py +35 -4
  410. helm/common/media_object.py +13 -0
  411. helm/common/moderations_api_request.py +71 -0
  412. helm/common/mongo_key_value_store.py +3 -3
  413. helm/common/multimodal_request_utils.py +31 -0
  414. helm/common/nudity_check_request.py +29 -0
  415. helm/common/request.py +15 -17
  416. helm/common/test_general.py +6 -0
  417. helm/common/tokenization_request.py +1 -1
  418. helm/config/model_deployments.yaml +1159 -538
  419. helm/config/model_metadata.yaml +868 -41
  420. helm/config/tokenizer_configs.yaml +149 -43
  421. helm/proxy/accounts.py +31 -4
  422. helm/proxy/critique/mechanical_turk_critique_importer.py +3 -0
  423. helm/proxy/critique/model_critique_client.py +8 -6
  424. helm/proxy/example_queries.py +29 -17
  425. helm/proxy/server.py +70 -5
  426. helm/proxy/services/remote_service.py +31 -0
  427. helm/proxy/services/server_service.py +96 -16
  428. helm/proxy/services/service.py +30 -0
  429. helm/proxy/services/test_remote_service.py +4 -3
  430. helm/proxy/services/test_service.py +0 -12
  431. helm/proxy/test_accounts.py +32 -0
  432. helm/proxy/token_counters/auto_token_counter.py +37 -37
  433. helm/proxy/token_counters/test_auto_token_counter.py +164 -0
  434. helm/proxy/token_counters/token_counter.py +3 -5
  435. helm/tokenizers/__init__.py +0 -0
  436. helm/{proxy/tokenizers → tokenizers}/ai21_tokenizer.py +3 -3
  437. helm/{proxy/tokenizers → tokenizers}/anthropic_tokenizer.py +1 -1
  438. helm/{proxy/tokenizers → tokenizers}/auto_tokenizer.py +6 -9
  439. helm/{proxy/tokenizers → tokenizers}/cohere_tokenizer.py +1 -1
  440. helm/{proxy/tokenizers → tokenizers}/http_model_tokenizer.py +3 -3
  441. helm/{proxy/tokenizers → tokenizers}/huggingface_tokenizer.py +7 -26
  442. helm/tokenizers/simple_tokenizer.py +33 -0
  443. helm/{proxy/tokenizers → tokenizers}/test_anthropic_tokenizer.py +1 -1
  444. helm/{proxy/tokenizers → tokenizers}/test_huggingface_tokenizer.py +3 -0
  445. helm/tokenizers/test_simple_tokenizer.py +33 -0
  446. helm/{proxy/tokenizers → tokenizers}/vertexai_tokenizer.py +1 -1
  447. helm/{proxy/tokenizers → tokenizers}/yalm_tokenizer.py +5 -3
  448. helm/tokenizers/yalm_tokenizer_data/__init__.py +0 -0
  449. helm/tokenizers/yalm_tokenizer_data/voc_100b.sp +0 -0
  450. helm/{proxy/tokenizers → tokenizers}/yalm_tokenizer_data/yalm_tokenizer.py +1 -1
  451. crfm_helm-0.4.0.dist-info/RECORD +0 -397
  452. helm/benchmark/run_specs.py +0 -2762
  453. helm/benchmark/test_model_deployment_definition.py +0 -92
  454. helm/benchmark/test_model_properties.py +0 -1570
  455. helm/benchmark/vlm_run_specs.py +0 -97
  456. helm/benchmark/window_services/flan_t5_window_service.py +0 -29
  457. helm/benchmark/window_services/gpt2_window_service.py +0 -32
  458. helm/benchmark/window_services/huggingface_window_service.py +0 -60
  459. helm/benchmark/window_services/t0pp_window_service.py +0 -35
  460. helm/benchmark/window_services/t511b_window_service.py +0 -30
  461. helm/benchmark/window_services/test_mt_nlg_window_service.py +0 -48
  462. helm/benchmark/window_services/ul2_window_service.py +0 -30
  463. helm/benchmark/window_services/wider_ai21_window_service.py +0 -24
  464. helm/common/cache_utils.py +0 -14
  465. helm/proxy/clients/aleph_alpha_client.py +0 -95
  466. helm/proxy/clients/goose_ai_client.py +0 -99
  467. helm/proxy/clients/microsoft_client.py +0 -180
  468. helm/proxy/clients/openai_client.py +0 -206
  469. helm/proxy/clients/simple_client.py +0 -60
  470. helm/proxy/clients/test_client.py +0 -49
  471. helm/proxy/clients/vertexai_client.py +0 -115
  472. helm/proxy/token_counters/ai21_token_counter.py +0 -20
  473. helm/proxy/token_counters/cohere_token_counter.py +0 -13
  474. helm/proxy/token_counters/free_token_counter.py +0 -12
  475. helm/proxy/token_counters/gooseai_token_counter.py +0 -24
  476. helm/proxy/token_counters/openai_token_counter.py +0 -22
  477. helm/proxy/token_counters/test_ai21_token_counter.py +0 -88
  478. helm/proxy/token_counters/test_openai_token_counter.py +0 -81
  479. helm/proxy/tokenizers/simple_tokenizer.py +0 -32
  480. {crfm_helm-0.4.0.dist-info → crfm_helm-0.5.1.dist-info}/LICENSE +0 -0
  481. {crfm_helm-0.4.0.dist-info → crfm_helm-0.5.1.dist-info}/entry_points.txt +0 -0
  482. {crfm_helm-0.4.0.dist-info → crfm_helm-0.5.1.dist-info}/top_level.txt +0 -0
  483. /helm/{proxy/clients → benchmark/annotation}/__init__.py +0 -0
  484. /helm/{proxy/clients/vision_language → benchmark/annotation/image2structure}/__init__.py +0 -0
  485. /helm/{proxy/tokenizers → benchmark/metrics/image_generation}/__init__.py +0 -0
  486. /helm/{proxy/tokenizers/yalm_tokenizer_data → benchmark/metrics/image_generation/detectors}/__init__.py +0 -0
  487. /helm/{proxy/clients → clients}/ai21_utils.py +0 -0
  488. /helm/{proxy/clients → clients}/cohere_utils.py +0 -0
  489. /helm/{proxy/clients → clients}/lit_gpt_generate.py +0 -0
  490. /helm/{proxy/clients → clients}/toxicity_classifier_client.py +0 -0
  491. /helm/{proxy/tokenizers → tokenizers}/aleph_alpha_tokenizer.py +0 -0
  492. /helm/{proxy/tokenizers → tokenizers}/caching_tokenizer.py +0 -0
  493. /helm/{proxy/tokenizers → tokenizers}/ice_tokenizer.py +0 -0
  494. /helm/{proxy/tokenizers → tokenizers}/lit_gpt_tokenizer.py +0 -0
  495. /helm/{proxy/tokenizers → tokenizers}/test_ice_tokenizer.py +0 -0
  496. /helm/{proxy/tokenizers → tokenizers}/test_yalm_tokenizer.py +0 -0
  497. /helm/{proxy/tokenizers → tokenizers}/tiktoken_tokenizer.py +0 -0
  498. /helm/{proxy/tokenizers → tokenizers}/tokenizer.py +0 -0
  499. /helm/{proxy/tokenizers → tokenizers}/yalm_tokenizer_data/test_yalm_tokenizer.py +0 -0
@@ -0,0 +1,185 @@
1
+ import json
2
+ import os
3
+ from typing import List
4
+
5
+ from helm.common.general import ensure_file_downloaded
6
+ from .scenario import CORRECT_TAG, Reference, Scenario, Instance, Input, TEST_SPLIT, Output
7
+
8
+
9
class LMEntryScenario(Scenario):
    """
    The LMentry Benchmark
    https://arxiv.org/pdf/2211.02069.pdf

    The implementation is with reference to the original repo: https://github.com/aviaefrat/lmentry
    The data is also downloaded from the repo.

    LMentry evaluates a language model's ability to perform elementary language tasks,
    e.g. finding which word is shorter, or which word is the last in a sentence.
    """

    name = "lm_entry"
    description = "The LMentry benchmark for elementary language tasks"
    tags: List[str] = []
    # Each subtask's raw JSON lives in the LMentry repo under data/<subtask>.json.
    url_template = "https://raw.githubusercontent.com/aviaefrat/lmentry/main/data/{subtask}.json"
    # Maps each supported task to the subtask files that make it up.
    # Tasks whose scoring HELM's metrics can't currently express are commented out.
    task_to_subtasks = {
        "all_words_from_category": [
            "all_words_from_category",
            "all_words_from_category_0_distractors",
            "all_words_from_category_1_distractors",
            "all_words_from_category_2_distractors",
        ],
        "any_words_from_category": [
            "any_words_from_category",
            "any_words_from_category_3_distractors",
            "any_words_from_category_4_distractors",
            "any_words_from_category_5_distractors",
        ],
        "bigger_number": ["bigger_number"],
        # "ends_with_letter": ["ends_with_letter"],  # HELM's metrics currently don't support this
        # "ends_with_word": ["ends_with_word"],  # HELM's metrics currently don't support this
        "first_alphabetically": [
            "first_alphabetically",
            "first_alphabetically_consecutive_first_letter",
            "first_alphabetically_different_first_letter",
            "first_alphabetically_far_first_letter",
            "first_alphabetically_same_first_letter",
        ],
        "first_letter": ["first_letter"],
        "first_word": ["first_word"],
        "homophones": ["homophones"],
        "last_letter": ["last_letter"],
        "last_word": ["last_word"],
        "least_associated_word": ["least_associated_word"],
        "less_letters": ["less_letters", "less_letters_length_diff_1", "less_letters_length_diff_3plus"],
        "more_letters": ["more_letters", "more_letters_length_diff_1", "more_letters_length_diff_3plus"],
        "most_associated_word": ["most_associated_word"],
        "rhyming_word": [
            "rhyming_word",
            "rhyming_word_orthographically_different",
            "rhyming_word_orthographically_similar",
        ],
        # "sentence_containing": ["sentence_containing"],  # HELM's metrics currently don't support this
        # "sentence_not_containing": ["sentence_not_containing"],  # HELM's metrics currently don't support this
        "smaller_number": ["smaller_number"],
        # "starts_with_letter": ["starts_with_letter"],  # HELM's metrics currently don't support this
        # "starts_with_word": ["starts_with_word"],  # HELM's metrics currently don't support this
        "word_after": ["word_after"],
        "word_before": ["word_before"],
        # "word_containing": ["word_containing"],  # HELM's metrics currently don't support this
        # "word_not_containing": ["word_not_containing"],  # HELM's metrics currently don't support this
    }

    def __init__(self, task: str):
        """
        Args:
            task: One of the keys of `task_to_subtasks`.

        Raises:
            ValueError: If `task` is not a supported task.
        """
        super().__init__()
        # Raise instead of assert so validation survives `python -O`.
        if task not in self.task_to_subtasks:
            raise ValueError(f"Unsupported task: {task}")
        self.task: str = task

    def get_instances(self, output_path: str) -> List[Instance]:
        """Download the subtask files for `self.task` and convert every example to an Instance."""
        # Download the raw data
        data_paths: List[str] = []

        for subtask in self.task_to_subtasks[self.task]:
            data_path: str = os.path.join(output_path, f"{subtask}.json")
            ensure_file_downloaded(
                source_url=self.url_template.format(subtask=subtask),
                target_path=data_path,
                unpack=False,
            )
            data_paths.append(data_path)

        def generate_references_for_multiple_choice_question(
            options: List[str], correct_answer: str
        ) -> List[Reference]:
            """Build one Reference per option, tagging the one equal to `correct_answer`."""
            references: List[Reference] = []

            for option in options:
                if option == correct_answer:
                    references.append(Reference(Output(text=option), tags=[CORRECT_TAG]))
                else:
                    references.append(Reference(Output(text=option), tags=[]))

            return references

        def generate_references_for_generation_question(correct_answer: str) -> List[Reference]:
            """Generation-style questions get a single, correct reference."""
            return generate_references_for_multiple_choice_question(
                options=[correct_answer], correct_answer=correct_answer
            )

        instances: List[Instance] = []

        for data_path in data_paths:
            # Use a context manager so the file handle is closed promptly
            # (the previous `json.load(open(...))` leaked the handle).
            with open(data_path) as data_file:
                data: dict = json.load(data_file)

            for example in data["examples"].values():
                input_text: str = example["input"]

                # Normalize the input text to the same format:
                # strip the "Q: " prefix and either a trailing newline or a trailing "\nA:".
                if input_text.startswith("Q: "):
                    input_text = input_text[3:]

                if input_text.endswith("\n"):
                    input_text = input_text[:-1]
                elif input_text.endswith("\nA:"):
                    input_text = input_text[:-3]

                input = Input(text=input_text)
                references: List[Reference]

                # Each task stores its gold answer/distractors differently in `metadata`.
                if self.task == "all_words_from_category":
                    correct_answer = "yes" if example["metadata"]["num_distractors"] == 0 else "no"
                    references = generate_references_for_multiple_choice_question(
                        options=["yes", "no"], correct_answer=correct_answer
                    )
                elif self.task == "any_words_from_category":
                    correct_answer = "yes" if len(example["metadata"]["category_words"]) > 0 else "no"
                    references = generate_references_for_multiple_choice_question(
                        options=["yes", "no"], correct_answer=correct_answer
                    )
                elif self.task == "bigger_number" or self.task == "smaller_number":
                    references = generate_references_for_multiple_choice_question(
                        options=[str(example["metadata"]["n1"]), str(example["metadata"]["n2"])],
                        correct_answer=str(example["metadata"]["answer"]),
                    )
                elif self.task == "first_alphabetically":
                    references = generate_references_for_multiple_choice_question(
                        options=[example["metadata"]["word1"], example["metadata"]["word2"]],
                        correct_answer=example["metadata"]["answer"],
                    )
                elif self.task == "first_letter" or self.task == "last_letter":
                    references = generate_references_for_generation_question(example["metadata"]["answer"])
                elif self.task == "first_word" or self.task == "last_word":
                    references = generate_references_for_generation_question(example["metadata"]["answer"])
                elif self.task == "homophones":
                    references = generate_references_for_multiple_choice_question(
                        options=[example["metadata"]["answer"], example["metadata"]["distractor"]],
                        correct_answer=example["metadata"]["answer"],
                    )
                elif self.task == "least_associated_word" or self.task == "most_associated_word":
                    references = generate_references_for_multiple_choice_question(
                        options=[example["metadata"]["answer"]] + example["metadata"]["distractors"],
                        correct_answer=example["metadata"]["answer"],
                    )
                elif self.task == "less_letters" or self.task == "more_letters":
                    references = generate_references_for_multiple_choice_question(
                        options=[example["metadata"]["word1"], example["metadata"]["word2"]],
                        correct_answer=example["metadata"]["answer"],
                    )
                elif self.task == "rhyming_word":
                    references = generate_references_for_multiple_choice_question(
                        options=[example["metadata"]["answer"], example["metadata"]["distractor"]],
                        correct_answer=example["metadata"]["answer"],
                    )
                elif self.task == "word_before" or self.task == "word_after":
                    references = generate_references_for_generation_question(example["metadata"]["answer"])
                else:
                    raise ValueError(f"Unsupported task: {self.task}")

                instance = Instance(
                    input=input,
                    references=references,
                    split=TEST_SPLIT,
                )
                instances.append(instance)

        return instances
@@ -1,9 +1,20 @@
1
1
  import collections
2
+ import os
2
3
  import typing
3
4
  from typing import Dict, List, Optional
4
5
  from datasets import load_dataset, DatasetDict
5
6
 
6
- from .scenario import Scenario, Instance, Reference, TRAIN_SPLIT, TEST_SPLIT, CORRECT_TAG, Input, Output
7
+ from helm.common.general import ensure_directory_exists
8
+ from helm.benchmark.scenarios.scenario import (
9
+ Scenario,
10
+ Instance,
11
+ Reference,
12
+ TRAIN_SPLIT,
13
+ TEST_SPLIT,
14
+ CORRECT_TAG,
15
+ Input,
16
+ Output,
17
+ )
7
18
 
8
19
 
9
20
  def remove_boxed(string: str) -> Optional[str]:
@@ -354,7 +365,13 @@ class MATHScenario(Scenario):
354
365
 
355
366
  def get_instances(self, output_path: str) -> List[Instance]:
356
367
  dataset = {}
357
- data = typing.cast(DatasetDict, load_dataset("competition_math")).sort("problem").shuffle(seed=42)
368
+ cache_dir = os.path.join(output_path, "data")
369
+ ensure_directory_exists(cache_dir)
370
+ data = (
371
+ typing.cast(DatasetDict, load_dataset("competition_math", trust_remote_code=True, cache_dir=cache_dir))
372
+ .sort("problem")
373
+ .shuffle(seed=42)
374
+ )
358
375
 
359
376
  def group_by_key(dataset_list, key):
360
377
  dataset_per_key = collections.defaultdict(list)
@@ -0,0 +1,60 @@
1
+ import os
2
+ from typing import List
3
+
4
+ import pandas as pd
5
+
6
+ from helm.common.general import ensure_file_downloaded
7
+
8
+ from .scenario import CORRECT_TAG, TEST_SPLIT, Input, Instance, Output, Reference, Scenario
9
+
10
+
11
class MedicationQAScenario(Scenario):
    """
    The gold standard corpus for medication question answering introduced in the MedInfo 2019 paper
    "Bridging the Gap between Consumers’ Medication Questions and Trusted Answers":
    http://ebooks.iospress.nl/publication/51941

    This dataset has consumer questions, as opposed to very clinical questions.

    Paper citation:

        @inproceedings{BenAbacha:MEDINFO19,
            author = {Asma {Ben Abacha} and Yassine Mrabet and Mark Sharp and
                      Travis Goodwin and Sonya E. Shooshan and Dina Demner{-}Fushman},
            title = {Bridging the Gap between Consumers’ Medication Questions and Trusted Answers},
            booktitle = {MEDINFO 2019},
            year = {2019},
        }
    """

    SOURCE_REPO_URL = "https://github.com/abachaa/Medication_QA_MedInfo2019/raw/master/"
    FILENAME = "MedInfo2019-QA-Medications.xlsx"

    name = "medication_qa"
    description = "MedInfo 2019 MedicationQA medication question answering task"
    tags = ["knowledge", "generation", "question_answering", "biomedical"]

    def download_medication_qa(self, path: str):
        """Download the .xlsx spreadsheet containing the question-answer pairs into `path`."""
        # Build the URL with plain concatenation rather than os.path.join:
        # os.path.join uses the OS separator, which yields backslashes on Windows
        # and corrupts the URL. SOURCE_REPO_URL already ends with "/".
        ensure_file_downloaded(
            source_url=self.SOURCE_REPO_URL + self.FILENAME,
            target_path=os.path.join(path, self.FILENAME),
            unpack=False,
        )

    def get_instances(self, output_path: str) -> List[Instance]:
        """Return one TEST-split Instance per (question, answer) row of the spreadsheet."""
        self.download_medication_qa(output_path)
        data_path = os.path.join(output_path, self.FILENAME)

        data = pd.read_excel(data_path)
        data = data[~data.Answer.isna()]  # remove rows missing answers
        instances = [
            Instance(
                input=Input(row.Question),
                references=[Reference(Output(row.Answer), tags=[CORRECT_TAG])],
                split=TEST_SPLIT,
            )
            for _, row in data.iterrows()
        ]

        return instances
@@ -358,7 +358,7 @@ def distance_paraboloid(point: List[int], rel_str: str, TOL: float = 1e-10):
358
358
  sols = []
359
359
  # Try each possible combined solution for x, y, z, λ
360
360
  for sol_xyz, val_λs in zip(sols_xyz, vals_λ):
361
- val_λs = list(set(filter(lambda _: not _.is_symbol, val_λs))) # get distinct values for λ if there are any
361
+ val_λs = tuple(set(filter(lambda _: not _.is_symbol, val_λs))) # get distinct values for λ if there are any
362
362
  if len(val_λs) > 1: # there can be at most one distinct value for λ
363
363
  continue
364
364
  val_λ = val_λs[0] if val_λs else λ
@@ -108,7 +108,6 @@ class OpinionsQAScenario(Scenario):
108
108
  self.context: str = context
109
109
 
110
110
  def download_data(self, output_path: str):
111
-
112
111
  data_dir: str = os.path.join(output_path, "data")
113
112
  if not os.path.exists(data_dir):
114
113
  os.makedirs(data_dir)
@@ -150,14 +149,12 @@ class OpinionsQAScenario(Scenario):
150
149
  bios_df = pd.read_csv(bios_path, sep="\t")
151
150
 
152
151
  for split in all_splits:
153
-
154
152
  csv_path: str = csv_dict[split]
155
153
  assert os.path.exists(csv_path)
156
154
 
157
155
  question_df = self.read_survey_questions(csv_path)
158
156
 
159
157
  for qidx, (question, answers) in enumerate(zip(question_df["question"], question_df["options"])):
160
-
161
158
  # Opinions QA test questions have no correct answer and thus we set it to be None by default
162
159
  # for all test instances.
163
160
  # In the case where context = steer-qa, we add demographic information in the form of a
@@ -182,7 +179,6 @@ class OpinionsQAScenario(Scenario):
182
179
  else:
183
180
  # context = "steer-bio"or "steer-portray"
184
181
  for bio in bios_df["question"].values:
185
-
186
182
  context = PassageQuestionInput(passage=bio, question=question + "\n")
187
183
  instance = Instance(
188
184
  context,
@@ -25,6 +25,10 @@ DEFAULT_TEST_SIZE: int = 1000
25
25
  """ Reference tags """
26
26
  CORRECT_TAG: str = "correct"
27
27
 
28
+ """ Asset tags (used for compiled outputs such as image2structure)"""
29
+ ASSET_NAME_TAG: str = "asset_name"
30
+ ASSET_PATH_TAG: str = "asset_path"
31
+
28
32
  # Reference tag functions for ranking scenarios.
29
33
  # @TODO: (For future) Should there be a base RankingScenario class?
30
34
 
@@ -1,7 +1,128 @@
1
+ """Simple scenarios for debugging and for tutorials.
2
+
3
+ NOTE: Typically, each scenario should be in its own file,
4
+ but these scenarios are placed in the same module for
5
+ tutorial purposes."""
6
+
1
7
  import random
2
8
  from typing import List
3
9
 
4
- from .scenario import Scenario, Instance, Reference, TRAIN_SPLIT, TEST_SPLIT, CORRECT_TAG, Input, Output
10
+ from helm.benchmark.scenarios.scenario import (
11
+ Scenario,
12
+ Instance,
13
+ Reference,
14
+ TRAIN_SPLIT,
15
+ TEST_SPLIT,
16
+ CORRECT_TAG,
17
+ Input,
18
+ Output,
19
+ )
20
+
21
+
22
class SimpleMCQAScenario(Scenario):
    """Simple multiple-choice question answering scenario for tutorials and debugging.

    The task is to answer questions about whether two-digit numbers are even or odd.

    Example:

    Answer the following questions with a single letter only.

    Question: Is 24 even or odd?
    A. Even
    B. Odd
    Answer: A"""

    name = "simple_mcqa"
    description = "Answer if two-digit numbers are even or odd."
    tags = ["question answering"]

    def get_instances(self, output_path: str) -> List[Instance]:
        """Produce one instance per two-digit number; 10-20 are train, the rest test."""
        instances: List[Instance] = []
        for number in range(10, 100):
            # NOTE: For simplicity the question template and the two answer options
            # are the same for every instance; real QA scenarios typically vary both.
            is_even = number % 2 == 0
            references = [
                Reference(Output(text="Even"), tags=[CORRECT_TAG] if is_even else []),
                Reference(Output(text="Odd"), tags=[] if is_even else [CORRECT_TAG]),
            ]
            instances.append(
                Instance(
                    input=Input(text=f"Is {number} even or odd?"),
                    references=references,
                    split=TRAIN_SPLIT if number <= 20 else TEST_SPLIT,
                )
            )
        return instances
56
+
57
+
58
class SimpleShortAnswerQAScenario(Scenario):
    """Simple short answer question answering scenario for tutorials and debugging.

    The task is to answer questions about whether two-digit numbers are even or odd.

    Example:

    Answer the following questions with a single word only.

    Question: Is 24 even or odd?
    Answer: Even"""

    # NOTE(review): this duplicates SimpleMCQAScenario's name ("simple_mcqa");
    # it looks like a copy-paste slip — confirm upstream before renaming,
    # since the name is part of the scenario's external identifier.
    name = "simple_mcqa"
    description = "Answer if two-digit numbers are even or odd."
    tags = ["question answering"]

    def get_instances(self, output_path: str) -> List[Instance]:
        """Produce one instance per two-digit number; 10-20 are train, the rest test."""
        instances: List[Instance] = []
        for number in range(10, 100):
            # Unlike multiple-choice QA, short-answer QA only needs the
            # correct reference — no distractor options.
            answer = "Even" if number % 2 == 0 else "Odd"
            instances.append(
                Instance(
                    input=Input(text=f"Is {number} even or odd?"),
                    references=[Reference(Output(text=answer), tags=[CORRECT_TAG])],
                    split=TRAIN_SPLIT if number <= 20 else TEST_SPLIT,
                )
            )
        return instances
92
+
93
+
94
class SimpleClassificationScenario(Scenario):
    """Simple classification scenario for tutorials and debugging.

    The task is to classify two-digit numbers as even or odd.

    Example:

    Classify the following numbers by their parity. The classes are "Even" and "Odd".

    Number: 24
    Parity: Even"""

    name = "simple_classification"
    # Fixed typo: "pairity" -> "parity".
    description = "Classify numbers by parity."
    tags = ["classification"]

    def get_instances(self, output_path: str) -> List[Instance]:
        """Produce one instance per two-digit number; 10-20 are train, the rest test."""
        instances: List[Instance] = []
        for i in range(10, 100):
            input = Input(text=str(i))
            # NOTE: For classification scenarios, the reference outputs should be the same
            # for all instances, and should include both correct and incorrect classes.
            # HELM only supports single-label classification. Exactly one reference
            # should have the CORRECT_TAG tag.
            references = [
                Reference(Output(text="Even"), tags=[CORRECT_TAG] if i % 2 == 0 else []),
                Reference(Output(text="Odd"), tags=[CORRECT_TAG] if i % 2 == 1 else []),
            ]
            split = TRAIN_SPLIT if i <= 20 else TEST_SPLIT
            instance = Instance(input=input, references=references, split=split)
            instances.append(instance)
        return instances
5
126
 
6
127
 
7
128
  class Simple1Scenario(Scenario):
@@ -1,9 +1,15 @@
1
+ import pytest
1
2
  from tempfile import TemporaryDirectory
2
3
 
3
4
  from helm.benchmark.scenarios.math_scenario import MATHScenario
4
5
  from helm.benchmark.scenarios.scenario import Input, Output, Reference
5
6
 
6
7
 
8
+ # TODO: Fix the test for newer versions of diffusers: https://github.com/stanford-crfm/helm/issues/2168
9
+ @pytest.mark.skip(
10
+ reason="Incompatible with newer versions with diffusers>0.24.0. Fails with "
11
+ '"Loading a dataset cached in a LocalFileSystem is not supported"'
12
+ )
7
13
  def test_math_scenario_get_instances():
8
14
  math_scenario = MATHScenario(subject="number_theory", level="1")
9
15
  with TemporaryDirectory() as tmpdir:
@@ -1,10 +1,13 @@
1
- from helm.benchmark.run_specs import get_scenario_spec_tiny
2
- from helm.benchmark.scenarios.scenario import create_scenario, Scenario, Input, PassageQuestionInput
1
+ from helm.benchmark.scenarios.scenario import ScenarioSpec, create_scenario, Scenario, Input, PassageQuestionInput
3
2
 
4
3
 
5
4
  class TestScenario:
6
5
  def setup_method(self, method):
7
- self.scenario: Scenario = create_scenario(get_scenario_spec_tiny())
6
+ scenario_spec: ScenarioSpec = ScenarioSpec(
7
+ class_name="helm.benchmark.scenarios.simple_scenarios.Simple1Scenario",
8
+ args={"num_input_tokens": 5, "vocab_size": 20, "num_train_instances": 2, "num_test_instances": 2},
9
+ )
10
+ self.scenario: Scenario = create_scenario(scenario_spec)
8
11
 
9
12
  def test_render_lines(self):
10
13
  instances = self.scenario.get_instances(output_path="")
@@ -0,0 +1,50 @@
1
+ import pytest
2
+ from tempfile import TemporaryDirectory
3
+
4
+ from helm.benchmark.scenarios.simple_scenarios import (
5
+ SimpleMCQAScenario,
6
+ SimpleShortAnswerQAScenario,
7
+ SimpleClassificationScenario,
8
+ )
9
+ from helm.benchmark.scenarios.scenario import CORRECT_TAG, Input, Output, Reference
10
+
11
+
12
@pytest.mark.scenarios
def test_simple_mcqa_scenario():
    """SimpleMCQAScenario yields 90 instances with Even/Odd references."""
    with TemporaryDirectory() as tmpdir:
        instances = SimpleMCQAScenario().get_instances(tmpdir)
    assert len(instances) == 90
    first = instances[0]
    assert first.input == Input(text="Is 10 even or odd?")
    assert first.references == [
        Reference(output=Output(text="Even"), tags=[CORRECT_TAG]),
        Reference(output=Output(text="Odd"), tags=[]),
    ]
    assert first.split == "train"
24
+
25
+
26
@pytest.mark.scenarios
def test_simple_short_answer_qa_scenario():
    """SimpleShortAnswerQAScenario yields 90 instances with only the correct reference."""
    with TemporaryDirectory() as tmpdir:
        instances = SimpleShortAnswerQAScenario().get_instances(tmpdir)
    assert len(instances) == 90
    first = instances[0]
    assert first.input == Input(text="Is 10 even or odd?")
    assert first.references == [
        Reference(output=Output(text="Even"), tags=[CORRECT_TAG]),
    ]
    assert first.split == "train"
37
+
38
+
39
@pytest.mark.scenarios
def test_simple_classification_scenario():
    """SimpleClassificationScenario yields 90 instances with both class references."""
    with TemporaryDirectory() as tmpdir:
        instances = SimpleClassificationScenario().get_instances(tmpdir)
    assert len(instances) == 90
    first = instances[0]
    assert first.input == Input(text="10")
    assert first.references == [
        Reference(output=Output(text="Even"), tags=[CORRECT_TAG]),
        Reference(output=Output(text="Odd"), tags=[]),
    ]
    assert first.split == "train"