crfm-helm 0.5.6__py3-none-any.whl → 0.5.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of crfm-helm might be problematic.
- {crfm_helm-0.5.6.dist-info → crfm_helm-0.5.8.dist-info}/METADATA +60 -125
- {crfm_helm-0.5.6.dist-info → crfm_helm-0.5.8.dist-info}/RECORD +293 -229
- helm/benchmark/adaptation/adapter_spec.py +5 -0
- helm/benchmark/adaptation/adapters/multimodal/multiple_choice_joint_multimodal_adapter.py +11 -3
- helm/benchmark/adaptation/adapters/multiple_choice_joint_adapter.py +11 -8
- helm/benchmark/annotation/aci_bench_annotator.py +11 -22
- helm/benchmark/annotation/air_bench_annotator.py +1 -1
- helm/benchmark/annotation/alrage_annotator.py +90 -0
- helm/benchmark/annotation/chw_care_plan_annotator.py +10 -21
- helm/benchmark/annotation/dischargeme_annotator.py +11 -22
- helm/benchmark/annotation/live_qa_annotator.py +1 -1
- helm/benchmark/annotation/med_dialog_annotator.py +11 -22
- helm/benchmark/annotation/medalign_annotator.py +11 -22
- helm/benchmark/annotation/medi_qa_annotator.py +11 -22
- helm/benchmark/annotation/medication_qa_annotator.py +11 -22
- helm/benchmark/annotation/mental_health_annotator.py +11 -22
- helm/benchmark/annotation/mimic_bhc_annotator.py +11 -22
- helm/benchmark/annotation/mimic_rrs_annotator.py +11 -22
- helm/benchmark/annotation/model_as_judge.py +23 -18
- helm/benchmark/annotation/mtsamples_procedures_annotator.py +11 -22
- helm/benchmark/annotation/mtsamples_replicate_annotator.py +11 -22
- helm/benchmark/annotation/starr_patient_instructions_annotator.py +11 -22
- helm/benchmark/metrics/air_bench_metrics.py +3157 -1
- helm/benchmark/metrics/alrage_metric.py +35 -0
- helm/benchmark/metrics/basic_metrics.py +267 -2
- helm/benchmark/metrics/classification_metrics.py +19 -1
- helm/benchmark/metrics/codeinsights_code_efficiency_metrics.py +186 -0
- helm/benchmark/metrics/codeinsights_code_evaluation_metrics.py +477 -0
- helm/benchmark/metrics/codeinsights_correct_code_metrics.py +366 -0
- helm/benchmark/metrics/codeinsights_edge_case_metrics.py +92 -0
- helm/benchmark/metrics/codeinsights_metric_specs.py +51 -0
- helm/benchmark/metrics/comet_metric.py +1 -1
- helm/benchmark/metrics/conv_fin_qa_calc_metrics.py +12 -1
- helm/benchmark/metrics/copyright_metrics.py +1 -1
- helm/benchmark/metrics/decodingtrust_stereotype_bias_metrics.py +1 -1
- helm/benchmark/metrics/dry_run_metrics.py +30 -1
- helm/benchmark/metrics/efficiency_metrics.py +74 -0
- helm/benchmark/metrics/ehr_sql_metrics.py +57 -1
- helm/benchmark/metrics/evaluate_reference_metrics.py +300 -1
- helm/benchmark/metrics/gpqa_chain_of_thought_metric.py +13 -1
- helm/benchmark/metrics/helpdesk_call_summarization_metrics.py +13 -1
- helm/benchmark/metrics/ifeval_metrics.py +13 -1
- helm/benchmark/metrics/image_generation/clip_score_metrics.py +13 -2
- helm/benchmark/metrics/image_generation/fractal_dimension/fractal_dimension_util.py +1 -1
- helm/benchmark/metrics/instruction_following_critique_metrics.py +41 -1
- helm/benchmark/metrics/kpi_edgar_metrics.py +21 -0
- helm/benchmark/metrics/language_modeling_metrics.py +13 -1
- helm/benchmark/metrics/live_qa_metrics.py +13 -1
- helm/benchmark/metrics/llm_jury_metrics.py +13 -1
- helm/benchmark/metrics/lmkt_metric_specs.py +12 -0
- helm/benchmark/metrics/lmkt_metrics.py +47 -0
- helm/benchmark/metrics/medcalc_bench_metrics.py +14 -1
- helm/benchmark/metrics/medec_metrics.py +25 -2
- helm/benchmark/metrics/melt_toxicity_metric.py +1 -1
- helm/benchmark/metrics/metric.py +25 -0
- helm/benchmark/metrics/mimiciv_billing_code_metrics.py +32 -1
- helm/benchmark/metrics/omni_math_metrics.py +13 -1
- helm/benchmark/metrics/seahelm_metrics.py +14 -1
- helm/benchmark/metrics/summac/model_summac.py +3 -3
- helm/benchmark/metrics/summarization_metrics.py +129 -1
- helm/benchmark/metrics/toxicity_metrics.py +31 -1
- helm/benchmark/metrics/wildbench_metrics.py +21 -1
- helm/benchmark/model_deployment_registry.py +11 -19
- helm/benchmark/presentation/create_plots.py +11 -2
- helm/benchmark/presentation/schema.py +10 -22
- helm/benchmark/presentation/summarize.py +189 -14
- helm/benchmark/presentation/taxonomy_info.py +20 -0
- helm/benchmark/presentation/test_create_plots.py +4 -1
- helm/benchmark/run.py +7 -1
- helm/benchmark/run_expander.py +4 -0
- helm/benchmark/run_specs/arabic_run_specs.py +191 -0
- helm/benchmark/run_specs/bluex_run_specs.py +40 -0
- helm/benchmark/run_specs/classic_run_specs.py +2 -55
- helm/benchmark/run_specs/codeinsights_run_specs.py +192 -0
- helm/benchmark/run_specs/healthqa_br_run_specs.py +40 -0
- helm/benchmark/run_specs/heim_run_specs.py +3 -1
- helm/benchmark/run_specs/lmkt_run_specs.py +144 -0
- helm/benchmark/run_specs/long_context_run_specs.py +48 -1
- helm/benchmark/run_specs/medhelm/__init__.py +0 -0
- helm/benchmark/run_specs/medhelm/benchmark_config.py +219 -0
- helm/benchmark/run_specs/medhelm_run_specs.py +360 -50
- helm/benchmark/run_specs/multilingual_run_specs.py +50 -0
- helm/benchmark/run_specs/speech_disorder_audio_run_specs.py +5 -11
- helm/benchmark/scenarios/aci_bench_scenario.py +23 -0
- helm/benchmark/scenarios/air_bench_scenario.py +21 -0
- helm/benchmark/scenarios/alghafa_scenario.py +126 -0
- helm/benchmark/scenarios/alrage_scenario.py +54 -0
- helm/benchmark/scenarios/anthropic_hh_rlhf_scenario.py +23 -1
- helm/benchmark/scenarios/arabic_exams_scenario.py +114 -0
- helm/benchmark/scenarios/arabic_mmlu_scenario.py +82 -0
- helm/benchmark/scenarios/aratrust_scenario.py +95 -0
- helm/benchmark/scenarios/audio_language/casual_conversations2_scenario.py +1 -1
- helm/benchmark/scenarios/audio_language/mustard_scenario.py +1 -1
- helm/benchmark/scenarios/audio_language/{ultra_suite_asr_classification.py → ultra_suite_asr_classification_scenario.py} +9 -8
- helm/benchmark/scenarios/audio_language/ultra_suite_asr_transcription_scenario.py +99 -0
- helm/benchmark/scenarios/audio_language/ultra_suite_classification_scenario.py +13 -5
- helm/benchmark/scenarios/audio_language/ultra_suite_disorder_breakdown_scenario.py +13 -5
- helm/benchmark/scenarios/audio_language/ultra_suite_disorder_symptoms_scenario.py +13 -5
- helm/benchmark/scenarios/babi_qa_scenario.py +15 -0
- helm/benchmark/scenarios/bbq_scenario.py +15 -0
- helm/benchmark/scenarios/best_chatgpt_prompts.yaml +473 -0
- helm/benchmark/scenarios/bluex_scenario.py +70 -0
- helm/benchmark/scenarios/bold_scenario.py +15 -0
- helm/benchmark/scenarios/boolq_scenario.py +20 -0
- helm/benchmark/scenarios/chw_care_plan_scenario.py +23 -0
- helm/benchmark/scenarios/civil_comments_scenario.py +13 -0
- helm/benchmark/scenarios/clear_scenario.py +23 -0
- helm/benchmark/scenarios/cleva_scenario.py +480 -1
- helm/benchmark/scenarios/code_scenario.py +28 -0
- helm/benchmark/scenarios/codeinsights_code_efficiency_scenario.py +197 -0
- helm/benchmark/scenarios/codeinsights_correct_code_scenario.py +78 -0
- helm/benchmark/scenarios/codeinsights_edge_case_scenario.py +192 -0
- helm/benchmark/scenarios/codeinsights_student_coding_scenario.py +162 -0
- helm/benchmark/scenarios/codeinsights_student_mistake_scenario.py +188 -0
- helm/benchmark/scenarios/commonsense_scenario.py +26 -0
- helm/benchmark/scenarios/compositional_instructions.yaml +70 -0
- helm/benchmark/scenarios/conv_fin_qa_calc_scenario.py +21 -0
- helm/benchmark/scenarios/copyright_scenario.py +35 -1
- helm/benchmark/scenarios/cti_to_mitre_scenario.py +21 -0
- helm/benchmark/scenarios/decodingtrust_adv_demonstration_scenario.py +22 -1
- helm/benchmark/scenarios/decodingtrust_adv_robustness_scenario.py +23 -1
- helm/benchmark/scenarios/decodingtrust_fairness_scenario.py +22 -1
- helm/benchmark/scenarios/decodingtrust_machine_ethics_scenario.py +21 -1
- helm/benchmark/scenarios/decodingtrust_ood_robustness_scenario.py +13 -0
- helm/benchmark/scenarios/decodingtrust_privacy_scenario.py +13 -1
- helm/benchmark/scenarios/decodingtrust_stereotype_bias_scenario.py +13 -1
- helm/benchmark/scenarios/decodingtrust_toxicity_prompts_scenario.py +13 -1
- helm/benchmark/scenarios/dischargeme_scenario.py +24 -0
- helm/benchmark/scenarios/disinformation_scenario.py +22 -0
- helm/benchmark/scenarios/dyck_language_scenario.py +15 -0
- helm/benchmark/scenarios/ehrshot_scenario.py +22 -0
- helm/benchmark/scenarios/enem_challenge_scenario.py +19 -0
- helm/benchmark/scenarios/entity_data_imputation_scenario.py +14 -0
- helm/benchmark/scenarios/entity_matching_scenario.py +14 -0
- helm/benchmark/scenarios/exams_multilingual_scenario.py +115 -0
- helm/benchmark/scenarios/financial_phrasebank_scenario.py +21 -0
- helm/benchmark/scenarios/gold_commodity_news_scenario.py +21 -0
- helm/benchmark/scenarios/gpqa_scenario.py +18 -0
- helm/benchmark/scenarios/grammar_scenario.py +20 -1
- helm/benchmark/scenarios/gsm_scenario.py +15 -0
- helm/benchmark/scenarios/headqa_scenario.py +22 -0
- helm/benchmark/scenarios/healthqa_br_scenario.py +80 -0
- helm/benchmark/scenarios/helpdesk_call_summarization_scenario.py +13 -0
- helm/benchmark/scenarios/ice_scenario.py +21 -1
- helm/benchmark/scenarios/ifeval_scenario.py +18 -0
- helm/benchmark/scenarios/imdb_scenario.py +15 -0
- helm/benchmark/scenarios/infinite_bench_en_mc_scenario.py +90 -0
- helm/benchmark/scenarios/infinite_bench_en_qa_scenario.py +1 -1
- helm/benchmark/scenarios/koala_scenario.py +21 -1
- helm/benchmark/scenarios/kpi_edgar_scenario.py +21 -0
- helm/benchmark/scenarios/legal_contract_summarization_scenario.py +20 -0
- helm/benchmark/scenarios/legal_summarization_scenario.py +50 -0
- helm/benchmark/scenarios/legal_support_scenario.py +13 -0
- helm/benchmark/scenarios/legalbench_scenario.py +20 -0
- helm/benchmark/scenarios/lex_glue_scenario.py +11 -0
- helm/benchmark/scenarios/lextreme_scenario.py +11 -0
- helm/benchmark/scenarios/lmkt_scenarios.py +288 -0
- helm/benchmark/scenarios/lsat_qa_scenario.py +14 -0
- helm/benchmark/scenarios/madinah_qa_scenario.py +73 -0
- helm/benchmark/scenarios/math_scenario.py +47 -20
- helm/benchmark/scenarios/mbzuai_human_translated_arabic_mmlu.py +68 -0
- helm/benchmark/scenarios/med_dialog_scenario.py +32 -1
- helm/benchmark/scenarios/med_mcqa_scenario.py +14 -0
- helm/benchmark/scenarios/med_qa_scenario.py +14 -0
- helm/benchmark/scenarios/medalign_scenario.py +23 -0
- helm/benchmark/scenarios/medalign_scenario_helper.py +19 -125
- helm/benchmark/scenarios/medbullets_scenario.py +22 -0
- helm/benchmark/scenarios/medcalc_bench_scenario.py +22 -0
- helm/benchmark/scenarios/medec_scenario.py +23 -0
- helm/benchmark/scenarios/medhallu_scenario.py +23 -0
- helm/benchmark/scenarios/medhelm/__init__.py +0 -0
- helm/benchmark/scenarios/medhelm/judges.yaml +14 -0
- helm/benchmark/scenarios/medhelm_configurable_scenario.py +101 -0
- helm/benchmark/scenarios/medi_qa_scenario.py +23 -0
- helm/benchmark/scenarios/medication_qa_scenario.py +31 -1
- helm/benchmark/scenarios/melt_scenarios.py +2 -2
- helm/benchmark/scenarios/mental_health_scenario.py +23 -0
- helm/benchmark/scenarios/mimic_bhc_scenario.py +25 -1
- helm/benchmark/scenarios/mimic_rrs_scenario.py +23 -0
- helm/benchmark/scenarios/mimiciv_billing_code_scenario.py +22 -0
- helm/benchmark/scenarios/mmlu_pro_scenario.py +18 -0
- helm/benchmark/scenarios/mmlu_scenario.py +15 -0
- helm/benchmark/scenarios/mmmlu_scenario.py +85 -0
- helm/benchmark/scenarios/msmarco_scenario.py +30 -0
- helm/benchmark/scenarios/mtsamples_procedures_scenario.py +22 -0
- helm/benchmark/scenarios/mtsamples_replicate_scenario.py +22 -0
- helm/benchmark/scenarios/n2c2_ct_matching_scenario.py +20 -0
- helm/benchmark/scenarios/narrativeqa_scenario.py +20 -0
- helm/benchmark/scenarios/natural_qa_scenario.py +32 -0
- helm/benchmark/scenarios/omni_math_scenario.py +18 -0
- helm/benchmark/scenarios/open_assistant_scenario.py +22 -0
- helm/benchmark/scenarios/pubmed_qa_scenario.py +22 -0
- helm/benchmark/scenarios/quac_scenario.py +14 -0
- helm/benchmark/scenarios/race_based_med_scenario.py +23 -0
- helm/benchmark/scenarios/raft_scenario.py +15 -0
- helm/benchmark/scenarios/real_toxicity_prompts_scenario.py +14 -1
- helm/benchmark/scenarios/scenario.py +31 -0
- helm/benchmark/scenarios/seahelm_scenario.py +350 -2
- helm/benchmark/scenarios/self_instruct_scenario.py +29 -1
- helm/benchmark/scenarios/shc_bmt_scenario.py +22 -0
- helm/benchmark/scenarios/shc_cdi_scenario.py +20 -0
- helm/benchmark/scenarios/shc_conf_scenario.py +23 -0
- helm/benchmark/scenarios/shc_ent_scenario.py +21 -0
- helm/benchmark/scenarios/shc_gip_scenario.py +20 -0
- helm/benchmark/scenarios/shc_privacy_scenario.py +22 -0
- helm/benchmark/scenarios/shc_proxy_scenario.py +22 -0
- helm/benchmark/scenarios/shc_ptbm_scenario.py +23 -0
- helm/benchmark/scenarios/shc_sequoia_scenario.py +21 -0
- helm/benchmark/scenarios/situation_prompts.yaml +49 -0
- helm/benchmark/scenarios/starr_patient_instructions_scenario.py +22 -0
- helm/benchmark/scenarios/summarization_scenario.py +37 -0
- helm/benchmark/scenarios/synthetic_efficiency_scenario.py +22 -1
- helm/benchmark/scenarios/synthetic_reasoning_natural_scenario.py +13 -0
- helm/benchmark/scenarios/test_alghafa_scenario.py +29 -0
- helm/benchmark/scenarios/test_alrage_scenario.py +23 -0
- helm/benchmark/scenarios/test_arabic_exams_scenario.py +21 -0
- helm/benchmark/scenarios/test_aratrust_scenario.py +21 -0
- helm/benchmark/scenarios/test_bluex_scenario.py +59 -0
- helm/benchmark/scenarios/test_exams_multilingual_scenario.py +29 -0
- helm/benchmark/scenarios/test_healtha_br_scenario.py +57 -0
- helm/benchmark/scenarios/the_pile_scenario.py +13 -1
- helm/benchmark/scenarios/truthful_qa_scenario.py +14 -0
- helm/benchmark/scenarios/twitter_aae_scenario.py +20 -1
- helm/benchmark/scenarios/vicuna_scenario.py +21 -1
- helm/benchmark/scenarios/wikifact_scenario.py +20 -0
- helm/benchmark/scenarios/wildbench_scenario.py +18 -0
- helm/benchmark/scenarios/wmt_14_scenario.py +12 -0
- helm/benchmark/slurm_jobs.py +1 -2
- helm/benchmark/slurm_runner.py +8 -1
- helm/benchmark/static/schema_arabic.yaml +271 -0
- helm/benchmark/static/schema_classic.yaml +0 -17
- helm/benchmark/static/schema_long_context.yaml +24 -6
- helm/benchmark/static/schema_medhelm.yaml +36 -0
- helm/benchmark/static/schema_slp.yaml +219 -0
- helm/benchmark/static_build/assets/index-671a5e06.js +10 -0
- helm/benchmark/static_build/assets/index-9352595e.css +1 -0
- helm/benchmark/static_build/index.html +2 -2
- helm/benchmark/window_services/image_generation/clip_window_service.py +1 -3
- helm/clients/audio_language/llama_omni/arguments.py +61 -0
- helm/clients/audio_language/llama_omni/constants.py +9 -0
- helm/clients/audio_language/llama_omni/conversation.py +213 -0
- helm/clients/audio_language/llama_omni/model/__init__.py +0 -0
- helm/clients/audio_language/llama_omni/model/builder.py +88 -0
- helm/clients/audio_language/llama_omni/model/language_model/omni_speech2s_llama.py +190 -0
- helm/clients/audio_language/llama_omni/model/language_model/omni_speech_llama.py +118 -0
- helm/clients/audio_language/llama_omni/model/omni_speech_arch.py +249 -0
- helm/clients/audio_language/llama_omni/model/speech_encoder/builder.py +9 -0
- helm/clients/audio_language/llama_omni/model/speech_encoder/speech_encoder.py +27 -0
- helm/clients/audio_language/llama_omni/model/speech_generator/builder.py +9 -0
- helm/clients/audio_language/llama_omni/model/speech_generator/generation.py +622 -0
- helm/clients/audio_language/llama_omni/model/speech_generator/speech_generator.py +104 -0
- helm/clients/audio_language/llama_omni/model/speech_projector/builder.py +9 -0
- helm/clients/audio_language/llama_omni/model/speech_projector/speech_projector.py +27 -0
- helm/clients/audio_language/llama_omni/preprocess.py +295 -0
- helm/clients/audio_language/llama_omni/utils.py +202 -0
- helm/clients/audio_language/qwen2_5_omni_client.py +19 -7
- helm/clients/audio_language/qwen_omni/configuration_qwen2_5_omni.py +519 -0
- helm/clients/audio_language/qwen_omni/modeling_qwen2_5_omni.py +4308 -0
- helm/clients/audio_language/qwen_omni/processing_qwen2_5_omni.py +270 -0
- helm/clients/audio_language/qwen_omni/qwen2_5_omni_utils/__init__.py +0 -0
- helm/clients/audio_language/qwen_omni/qwen2_5_omni_utils/v2_5/__init__.py +8 -0
- helm/clients/audio_language/qwen_omni/qwen2_5_omni_utils/v2_5/audio_process.py +56 -0
- helm/clients/audio_language/qwen_omni/qwen2_5_omni_utils/v2_5/vision_process.py +380 -0
- helm/clients/huggingface_client.py +2 -2
- helm/clients/image_generation/cogview2/sr_pipeline/dsr_model.py +1 -1
- helm/clients/image_generation/mindalle/models/stage1/layers.py +2 -2
- helm/clients/openai_client.py +33 -20
- helm/clients/openai_responses_client.py +34 -8
- helm/clients/openrouter_client.py +31 -0
- helm/clients/test_huggingface_client.py +3 -3
- helm/clients/test_openrouter_client.py +69 -0
- helm/clients/together_client.py +48 -13
- helm/clients/vertexai_client.py +19 -11
- helm/clients/vllm_client.py +43 -7
- helm/clients/vllm_granite_thinking_client.py +56 -0
- helm/common/critique_request.py +0 -1
- helm/common/hierarchical_logger.py +83 -34
- helm/common/object_spec.py +23 -8
- helm/common/test_logging.py +94 -0
- helm/config/model_deployments.yaml +525 -172
- helm/config/model_metadata.yaml +185 -10
- helm/config/tokenizer_configs.yaml +100 -2
- helm/proxy/cli.py +1 -1
- helm/proxy/example_queries.py +8 -8
- helm/proxy/retry.py +5 -0
- helm/proxy/server.py +2 -1
- helm/proxy/static/index.css +4 -0
- helm/proxy/static/index.js +7 -1
- helm/tokenizers/grok_tokenizer.py +2 -0
- helm/benchmark/metrics/aci_bench_metrics.py +0 -14
- helm/benchmark/metrics/chw_care_plan_metrics.py +0 -14
- helm/benchmark/metrics/dischargeme_metrics.py +0 -14
- helm/benchmark/metrics/med_dialog_metrics.py +0 -14
- helm/benchmark/metrics/medalign_metrics.py +0 -14
- helm/benchmark/metrics/medi_qa_metrics.py +0 -14
- helm/benchmark/metrics/medication_qa_metrics.py +0 -14
- helm/benchmark/metrics/mental_health_metrics.py +0 -14
- helm/benchmark/metrics/mimic_bhc_metrics.py +0 -14
- helm/benchmark/metrics/mimic_rrs_metrics.py +0 -14
- helm/benchmark/metrics/mtsamples_procedures_metrics.py +0 -14
- helm/benchmark/metrics/mtsamples_replicate_metrics.py +0 -14
- helm/benchmark/metrics/numeracy_metrics.py +0 -72
- helm/benchmark/metrics/starr_patient_instructions_metrics.py +0 -14
- helm/benchmark/metrics/test_numeracy_metrics.py +0 -95
- helm/benchmark/scenarios/numeracy_scenario.py +0 -794
- helm/benchmark/static_build/assets/index-94295e78.js +0 -10
- helm/benchmark/static_build/assets/index-b9779128.css +0 -1
- {crfm_helm-0.5.6.dist-info → crfm_helm-0.5.8.dist-info}/WHEEL +0 -0
- {crfm_helm-0.5.6.dist-info → crfm_helm-0.5.8.dist-info}/entry_points.txt +0 -0
- {crfm_helm-0.5.6.dist-info → crfm_helm-0.5.8.dist-info}/licenses/LICENSE +0 -0
- {crfm_helm-0.5.6.dist-info → crfm_helm-0.5.8.dist-info}/top_level.txt +0 -0
helm/benchmark/presentation/create_plots.py
CHANGED
@@ -1,4 +1,7 @@
-#
+# type: ignore
+# flake8: noqa
+# fmt: off
+
 import argparse
 from collections import defaultdict
 from dataclasses import dataclass
@@ -637,8 +640,14 @@ def main():
         default="png",
         choices=["png", "pdf"],
     )
+    parser.add_argument(
+        "--log-config",
+        type=str,
+        default=None,
+        help="PATH to a YAML file to customize logging",
+    )
     args = parser.parse_args()
-    setup_default_logging()
+    setup_default_logging(args.log_config)
     create_plots(args)
helm/benchmark/presentation/schema.py
CHANGED
@@ -8,6 +8,7 @@ import mako.template
 import yaml
 import importlib_resources as resources
 
+from helm.benchmark.presentation.taxonomy_info import TaxonomyInfo
 from helm.common.general import hlog
 from helm.benchmark.metrics.metric_name import MetricName
 from helm.benchmark.augmentations.perturbation_description import PERTURBATION_WORST
@@ -131,24 +132,6 @@ THIS_GROUP_ONLY = "this_group_only"
 NO_GROUPS = "no_groups"
 
 
-@dataclass(frozen=True)
-class TaxonomyInfo:
-    # Task (e.g., question answering)
-    task: Optional[str] = None
-
-    # Domain - genre (e.g., Wikipedia)
-    what: Optional[str] = None
-
-    # Domain - when it was written (e.g., 2010s)
-    when: Optional[str] = None
-
-    # Domain - demographics (e.g., web users)
-    who: Optional[str] = None
-
-    # Language (e.g., English)
-    language: Optional[str] = None
-
-
 @dataclass(frozen=True)
 class RunGroup(Field):
     """
@@ -205,22 +188,27 @@ class RunGroup(Field):
     # TODO: remove when we don't want helm-summarize to support runs before November 2023 anymore.
     adapter_keys_shown: List[str] = field(default_factory=lambda: ["model_deployment", "model"])
 
+    # Optional short description of the run group.
+    # This description is used in some space-constrained places in frontend tables.
+    # If unset, the description field will be used instead.
+    short_description: Optional[str] = None
+
 
 @dataclass
 class Schema:
     """Specifies information about what to display on the frontend."""
 
     # Information about each field
-    metrics: List[Field]
+    metrics: List[Field] = field(default_factory=list)
 
     # Information about each perturbation
-    perturbations: List[Field]
+    perturbations: List[Field] = field(default_factory=list)
 
     # Group the metrics
-    metric_groups: List[MetricGroup]
+    metric_groups: List[MetricGroup] = field(default_factory=list)
 
     # Group the scenarios
-    run_groups: List[RunGroup]
+    run_groups: List[RunGroup] = field(default_factory=list)
 
     # Adapter fields (e.g., temperature)
     # Automatically populated from the docstrings in the AdapterSpec class definition.
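Since every Schema field now carries a default_factory, Schema() is constructible with no arguments and can be filled in piecewise with dataclasses.replace, which is what the auto-generation path in summarize.py (below) relies on. A minimal sketch; the field values are illustrative, not taken from the diff:

import dataclasses

from helm.benchmark.presentation.schema import Field, Schema

schema = Schema()  # before this change, all four list fields were required arguments
assert schema.metrics == [] and schema.run_groups == []

# Later, swap in auto-generated metrics (hypothetical values):
schema = dataclasses.replace(
    schema,
    metrics=[Field(name="exact_match", display_name="Exact match", lower_is_better=False)],
)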
helm/benchmark/presentation/summarize.py
CHANGED
@@ -9,6 +9,7 @@ Usage:
 """
 
 import argparse
+import dataclasses
 import os
 import datetime
 import urllib.parse
@@ -31,18 +32,26 @@ from helm.common.general import (
 )
 from helm.common.codec import from_json
 from helm.common.hierarchical_logger import hlog, htrack, htrack_block, hwarn, setup_default_logging
-from helm.benchmark.scenarios.scenario import ScenarioSpec
+from helm.benchmark.scenarios.scenario import Scenario, ScenarioMetadata, ScenarioSpec, create_scenario
 from helm.benchmark.adaptation.adapter_spec import AdapterSpec
 from helm.benchmark.metrics.metric_name import MetricName
-from helm.benchmark.metrics.metric import
+from helm.benchmark.metrics.metric import (
+    MetricInterface,
+    MetricMetadata,
+    MetricSpec,
+    create_metric,
+    get_all_stats_by_name,
+)
 from helm.benchmark.metrics.statistic import Stat, merge_stat
 from helm.benchmark.run_spec import RunSpec
 from helm.benchmark.runner import LATEST_SYMLINK
 from helm.benchmark.presentation.table import Cell, HeaderCell, Table, Hyperlink, table_to_latex
 from helm.benchmark.presentation.schema import (
+    MetricGroup,
     MetricNameMatcher,
     RunGroup,
     Field,
+    Schema,
     read_schema,
     get_default_schema_path,
     BY_GROUP,
@@ -294,7 +303,6 @@ def compute_aggregate_row_means(table: Table) -> List[Optional[float]]:
 
 
 class AggregationStrategy:
-    # TODO: Convert to StrEnum after upgrading to Python 3.11
     WIN_RATE = "win_rate"
     MEAN = "mean"
@@ -342,7 +350,7 @@ class Summarizer:
         release: Optional[str],
         suites: Optional[List[str]],
         suite: Optional[str],
-        schema_path: str,
+        schema_path: Optional[str],
         output_path: str,
         verbose: bool,
         num_threads: int,
@@ -377,10 +385,8 @@ class Summarizer:
         self.verbose: bool = verbose
         self.num_threads: int = num_threads
         self.allow_unknown_models: bool = allow_unknown_models
-
-
-
-        self.schema = read_schema(schema_path)
+        self.schema = read_schema(schema_path) if schema_path else Schema()
+        self.metric_metadata: List[MetricMetadata] = []
 
     def read_run(self, run_path: str) -> Run:
         """Load the `Run` object from `run_path`."""
@@ -427,6 +433,8 @@ class Summarizer:
 
     def read_runs_for_suite(self, suite, run_suite_path):
         """Load the runs in the run suite path."""
+        if not os.path.exists(run_suite_path):
+            raise Exception(f"Suite {suite} does not exist at {run_suite_path}")
         # run_suite_path can contain subdirectories that are not runs (e.g. eval_cache, groups)
         # so filter them out.
         run_dir_names = sorted(
@@ -510,6 +518,150 @@ class Summarizer:
             model_field_dicts.append(asdict_without_nones(model_field))
         return model_field_dicts
 
+    def get_metric_metadata(self) -> List[MetricMetadata]:
+        if self.metric_metadata:
+            return self.metric_metadata
+        metric_specs: List[MetricSpec] = []
+        for run in self.runs:
+            metric_specs.extend(run.run_spec.metric_specs)
+        metric_specs = list(set(metric_specs))
+        metric_name_to_metadata: Dict[str, MetricMetadata] = {}
+        for metric_spec in metric_specs:
+            try:
+                metric: MetricInterface = create_metric(metric_spec)
+                metric_metadata_list = metric.get_metadata()
+                for metric_metadata in metric_metadata_list:
+                    metric_name_to_metadata[metric_metadata.name] = metric_metadata
+            except NotImplementedError:
+                pass
+            except (ModuleNotFoundError, AttributeError, TypeError):
+                pass
+
+        run_stat_names: Set[str] = set()
+        for run in self.runs:
+            for stat in run.stats:
+                run_stat_names.add(stat.name.name)
+
+        metric_names_to_prune = set(metric_name_to_metadata.keys()) - run_stat_names
+        for metric_name_to_prune in metric_names_to_prune:
+            del metric_name_to_metadata[metric_name_to_prune]
+        self.metric_metadata = list(metric_name_to_metadata.values())
+        return self.metric_metadata
+
+    def metric_metadata_to_field(self, metric_metadata: MetricMetadata) -> Field:
+        return Field(
+            name=metric_metadata.name,
+            display_name=metric_metadata.display_name,
+            short_display_name=metric_metadata.short_display_name,
+            description=metric_metadata.description,
+            lower_is_better=metric_metadata.lower_is_better,
+        )
+
+    def auto_generate_metric_fields(self) -> List[Field]:
+        return [self.metric_metadata_to_field(metric_metadata) for metric_metadata in self.get_metric_metadata()]
+
+    def auto_generate_metric_groups(self) -> List[MetricGroup]:
+        metric_groups = [
+            MetricGroup(
+                name="main_metric",
+                display_name="Main Metric",
+                description="Main Metric",
+                metrics=[MetricNameMatcher(name="${main_name}", split="${main_split}")],
+            )
+        ]
+        metric_group_to_metrics: Dict[str, List[str]] = {}
+        for metric_metadata in self.metric_metadata:
+            if metric_metadata.group:
+                if metric_metadata.group not in metric_group_to_metrics:
+                    metric_group_to_metrics[metric_metadata.group] = []
+                metric_group_to_metrics[metric_metadata.group].append(metric_metadata.name)
+        for metric_group, metric_names in metric_group_to_metrics.items():
+            display_name = metric_group.replace("_", " ").capitalize()
+            metric_groups.append(
+                MetricGroup(
+                    name=metric_group,
+                    # TODO: Make display_name and description nicer
+                    display_name=display_name,
+                    description=display_name,
+                    aggregation_strategies=[],
+                    metrics=[
+                        MetricNameMatcher(name=metric_name, split="${main_split}") for metric_name in metric_names
+                    ],
+                )
+            )
+        return metric_groups
+
+    def get_scenario_metadata(self) -> List[ScenarioMetadata]:
+        scenario_specs = [run.run_spec.scenario_spec for run in self.runs]
+        scenario_specs = list(set(scenario_specs))
+        scenario_name_to_metadata: Dict[str, ScenarioMetadata] = {}
+        for scenario_spec in scenario_specs:
+            try:
+                scenario: Scenario = create_scenario(scenario_spec)
+                scenario_metadata = scenario.get_metadata()
+                scenario_name_to_metadata[scenario_metadata.name] = scenario_metadata
+            except NotImplementedError:
+                pass
+            except (ModuleNotFoundError, AttributeError, TypeError):
+                pass
+
+        run_groups: Set[str] = set()
+        for run in self.runs:
+            for run_group in run.run_spec.groups:
+                run_groups.add(run_group)
+
+        scenario_names_to_prune = set(scenario_name_to_metadata.keys()) - run_groups
+        for scenario_name_to_prune in scenario_names_to_prune:
+            del scenario_name_to_metadata[scenario_name_to_prune]
+        return list(scenario_name_to_metadata.values())
+
+    def scenario_metadata_to_run_group(self, scenario_metadata: ScenarioMetadata) -> RunGroup:
+        metric_group_names = [metric_group.name for metric_group in self.schema.metric_groups]
+        return RunGroup(
+            name=scenario_metadata.name,
+            display_name=scenario_metadata.display_name,
+            short_display_name=scenario_metadata.short_display_name,
+            description=scenario_metadata.description,
+            metric_groups=metric_group_names,
+            environment={
+                "main_name": scenario_metadata.main_metric,
+                "main_split": scenario_metadata.main_split,
+            },
+            taxonomy=scenario_metadata.taxonomy,
+        )
+
+    def auto_generate_all_scenarios_run_group(self) -> RunGroup:
+        return RunGroup(
+            name="all_scenarios",
+            display_name="All Scenarios",
+            description="All scenarios",
+            category="Scenario Groups",
+            subgroups=[run_group.name for run_group in self.schema.run_groups if len(run_group.subgroups) == 0],
+        )
+
+    def auto_generate_scenario_run_groups(self) -> List[RunGroup]:
+        return [
+            self.scenario_metadata_to_run_group(scenario_metadata) for scenario_metadata in self.get_scenario_metadata()
+        ]
+
+    def fix_up_schema(self) -> None:
+        # if not self.schema.run_groups:
+        if not self.schema.metrics:
+            self.schema = dataclasses.replace(self.schema, metrics=self.auto_generate_metric_fields())
+        # Can only auto-generate metric groups if metrics were also auto-generated
+        # because auto_generate_metric_groups() requires self.metric_metadata()
+        # which is populated by auto_generate_metric_fields()
+        if not self.schema.metric_groups:
+            self.schema = dataclasses.replace(self.schema, metric_groups=self.auto_generate_metric_groups())
+        if not any([len(run_group.subgroups) == 0 for run_group in self.schema.run_groups]):
+            self.schema = dataclasses.replace(
                self.schema, run_groups=self.schema.run_groups + self.auto_generate_scenario_run_groups()
+            )
+        if not any([len(run_group.subgroups) > 0 for run_group in self.schema.run_groups]):
+            self.schema = dataclasses.replace(
+                self.schema, run_groups=[self.auto_generate_all_scenarios_run_group()] + self.schema.run_groups
+            )
+
     def write_schema(self) -> None:
         """Write the schema file to benchmark_output so the frontend knows about it."""
         # Manually add the model metadata to the schema.json, where the frontend expects it.
@@ -839,7 +991,8 @@ class Summarizer:
                 }
 
                 header_name = header_field.get_short_display_name()
-                description = (
+                run_group_short_description = run_group.short_description or run_group.description or ""
+                description = (run_group_short_description + "\n\n" if run_group_short_description else "") + (
                     (header_field.display_name if header_field.display_name else header_field.name)
                     + ": "
                     + (header_field.description if header_field.description is not None else "")
@@ -1070,7 +1223,8 @@ class Summarizer:
             is_scenario_table=False,
             aggregation_strategies=aggregate_strategies,
         )
-        tables.append(table)
+        if len(table.header) > 1:
+            tables.append(table)
         return tables
 
     def create_group_tables_by_subgroup(self, group: RunGroup) -> List[Table]:
@@ -1213,14 +1367,16 @@ class Summarizer:
         """Run the entire summarization pipeline."""
         self.read_runs()
         self.group_runs()
-        self.check_metrics_defined()
 
-        self.write_run_display_json(skip_completed)
+        ensure_directory_exists(self.run_release_path)
 
         # Must happen after self.read_runs()
        # because it uses self.runs
+        self.fix_up_schema()
+        self.check_metrics_defined()
         self.write_schema()
 
+        self.write_run_display_json(skip_completed)
         self.write_executive_summary()
         self.write_runs()
         self.write_run_specs()
@@ -1254,7 +1410,15 @@ def summarize(args):
     else:
         raise ValueError("Exactly one of --release or --suite must be specified.")
 
-    schema_path
+    schema_path: Optional[str]
+    if args.auto_generate_schema:
+        if args.schema_path:
+            raise ValueError("--schema-path must be unset if --auto-generate-schema is set")
+        schema_path = None
+    elif args.schema_path:
+        schema_path = args.schema_path
+    else:
+        schema_path = get_default_schema_path()
 
     register_builtin_configs_from_helm_package()
     register_configs_from_directory(args.local_path)
@@ -1340,8 +1504,19 @@ def main():
         default=None,
         help="EXPERIMENTAL: Full class name of the Summarizer class to use. If unset, uses the default Summarizer.",
     )
+    parser.add_argument(
+        "--log-config",
+        type=str,
+        default=None,
+        help="PATH to a YAML file to customize logging",
+    )
+    parser.add_argument(
+        "--auto-generate-schema",
+        action="store_true",
+        help="EXPERIMENTAL: Auto-generate schema",
+    )
     args = parser.parse_args()
-    setup_default_logging()
+    setup_default_logging(args.log_config)
     summarize(args)
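The auto-generated schema only includes metrics whose implementations expose get_metadata(); classes that raise NotImplementedError (or fail to load) are silently skipped, as the try/except above shows. A hedged sketch of what opting in might look like; the exact MetricMetadata constructor signature is an assumption inferred from the fields that metric_metadata_to_field and auto_generate_metric_groups read:

from typing import List

from helm.benchmark.metrics.metric import MetricMetadata


class MyAccuracyMetric:  # stand-in for a real Metric subclass
    def get_metadata(self) -> List[MetricMetadata]:
        # Field names mirror what summarize.py reads; the values are illustrative.
        return [
            MetricMetadata(
                name="my_accuracy",
                display_name="My accuracy",
                short_display_name="Acc",
                description="Fraction of exactly correct answers.",
                lower_is_better=False,
                group="accuracy",  # feeds auto_generate_metric_groups()
            )
        ]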
helm/benchmark/presentation/taxonomy_info.py
ADDED
@@ -0,0 +1,20 @@
+from dataclasses import dataclass
+from typing import Optional
+
+
+@dataclass(frozen=True)
+class TaxonomyInfo:
+    # Task (e.g., question answering)
+    task: Optional[str] = None
+
+    # Domain - genre (e.g., Wikipedia)
+    what: Optional[str] = None
+
+    # Domain - when it was written (e.g., 2010s)
+    when: Optional[str] = None
+
+    # Domain - demographics (e.g., web users)
+    who: Optional[str] = None
+
+    # Language (e.g., English)
+    language: Optional[str] = None
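TaxonomyInfo moves out of schema.py into this new module (schema.py re-imports it in the hunk above), presumably so other modules can share it without pulling in all of schema.py. Construction is plain dataclass usage; the values below are just the examples from the field comments:

from helm.benchmark.presentation.taxonomy_info import TaxonomyInfo

taxonomy = TaxonomyInfo(
    task="question answering",
    what="Wikipedia",
    when="2010s",
    who="web users",
    language="English",
)
# The dataclass is frozen, so `taxonomy.task = "summarization"` would raise FrozenInstanceError.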
helm/benchmark/presentation/test_create_plots.py
CHANGED
@@ -1,4 +1,7 @@
-#
+# type: ignore
+# flake8: noqa
+# fmt: off
+
 from helm.common.general import asdict_without_nones
 from helm.benchmark.presentation.table import Table, Cell, HeaderCell
 from helm.benchmark.presentation.create_plots import parse_table
helm/benchmark/run.py
CHANGED
@@ -365,9 +365,15 @@ def main():
         default=None,
         help="Full class name of the Runner class to use. If unset, uses the default Runner.",
     )
+    parser.add_argument(
+        "--log-config",
+        type=str,
+        default=None,
+        help="PATH to a YAML file to customize logging",
+    )
     add_run_args(parser)
     args = parser.parse_args()
-    setup_default_logging()
+    setup_default_logging(args.log_config)
     return helm_run(args)
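helm-run, helm-summarize, and create_plots all gain the same --log-config flag wired into setup_default_logging. The diff does not show the YAML format that setup_default_logging expects; assuming it follows Python's standard logging.config.dictConfig schema, a config might look like this sketch (hypothetical, embedded as a string only to keep the example self-contained):

# Hypothetical sketch of a --log-config style YAML, loaded with dictConfig.
# The schema is an assumption; the diff only shows a YAML path being passed through.
import logging
import logging.config

import yaml

LOG_CONFIG_YAML = """
version: 1
formatters:
  brief:
    format: "%(asctime)s %(levelname)s %(message)s"
handlers:
  console:
    class: logging.StreamHandler
    formatter: brief
root:
  level: DEBUG
  handlers: [console]
"""

logging.config.dictConfig(yaml.safe_load(LOG_CONFIG_YAML))
logging.getLogger(__name__).debug("verbose logging enabled")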
helm/benchmark/run_expander.py
CHANGED
@@ -1484,6 +1484,8 @@ class OutputFormatInstructions(RunExpander):
                 instructions = "Answer with only a single letter. Do not include a period in your answer."
             elif self.scenario == "mcqa_only_last_question":
                 instructions = "Answer only the last question with only a single letter."
+            elif self.scenario == "arabic_mcqa":
+                instructions = "اكتب حرف الإجابة فقط، دون أي إضافات أخرى."
             else:
                 instructions = "Answer with only a single letter."
         elif run_spec.adapter_spec.method == ADAPT_GENERATION:
@@ -1525,6 +1527,8 @@ class OutputFormatInstructions(RunExpander):
                     "Answer only the last question with a short answer. "
                     "Avoid extra, unnecessary information in the answer."
                 )
+            elif self.scenario == "arabic_mcqa":
+                instructions = "اكتب حرف الإجابة فقط، دون أي إضافات أخرى."
             else:
                 raise ValueError(f"Unknown scenario {self.scenario}")
         elif run_spec.adapter_spec.method == ADAPT_MULTIPLE_CHOICE_JOINT_CHAIN_OF_THOUGHT:
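In both branches, the added Arabic instruction translates roughly to "Write only the answer letter, without any other additions."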
helm/benchmark/run_specs/arabic_run_specs.py
ADDED
@@ -0,0 +1,191 @@
+"""Run specs for Arabic leaderboard
+
+EXPERIMENTAL: Run specs here may have future reverse incompatible changes."""
+
+from helm.benchmark.adaptation.adapter_spec import ADAPT_MULTIPLE_CHOICE_JOINT
+from helm.benchmark.adaptation.common_adapter_specs import get_multiple_choice_adapter_spec, get_generation_adapter_spec
+from helm.benchmark.annotation.annotator import AnnotatorSpec
+from helm.benchmark.metrics.common_metric_specs import get_basic_metric_specs, get_exact_match_metric_specs
+from helm.benchmark.metrics.metric import MetricSpec
+from helm.benchmark.run_spec import RunSpec, run_spec_function
+from helm.benchmark.scenarios.scenario import ScenarioSpec
+
+
+_ARABIC_REFERENCE_PREFIX_CHARACTERS = ["أ", "ب", "ج", "د", "هـ"]
+
+
+@run_spec_function("arabic_mmlu")
+def get_arabic_mmlu_spec(subset: str) -> RunSpec:
+    """EXPERIMENTAL: This run spec here may have future reverse incompatible changes."""
+
+    scenario_spec = ScenarioSpec(
+        class_name="helm.benchmark.scenarios.arabic_mmlu_scenario.ArabicMMLUScenario", args={"subset": subset}
+    )
+
+    adapter_spec = get_multiple_choice_adapter_spec(
+        method=ADAPT_MULTIPLE_CHOICE_JOINT,
+        instructions="السؤال التالي هو سؤال متعدد الإختيارات. اختر الإجابة الصحيحة",  # noqa: E501
+        input_noun="السؤال",
+        output_noun="الإجابة",
+        max_tokens=100,
+        reference_prefix_characters=_ARABIC_REFERENCE_PREFIX_CHARACTERS,
+    )
+
+    return RunSpec(
+        name=f"arabic_mmlu:subset={subset}",
+        scenario_spec=scenario_spec,
+        adapter_spec=adapter_spec,
+        metric_specs=get_exact_match_metric_specs(),
+        groups=["arabic_mmlu"],
+    )
+
+
+@run_spec_function("alghafa")
+def get_alghafa_spec(subset: str) -> RunSpec:
+    """EXPERIMENTAL: This run spec here may have future reverse incompatible changes."""
+    scenario_spec = ScenarioSpec(
+        class_name="helm.benchmark.scenarios.alghafa_scenario.AlGhafaScenario", args={"subset": subset}
+    )
+
+    adapter_spec = get_multiple_choice_adapter_spec(
+        method=ADAPT_MULTIPLE_CHOICE_JOINT,
+        instructions="الأسئلة التالية هي أسئلة متعددة الإختيارات مع الجواب الصحيح",  # noqa: E501
+        input_noun="السؤال",
+        output_noun="الإجابة",
+        max_tokens=100,
+        reference_prefix_characters=_ARABIC_REFERENCE_PREFIX_CHARACTERS,
+    )
+
+    return RunSpec(
+        name=f"alghafa:subset={subset}",
+        scenario_spec=scenario_spec,
+        adapter_spec=adapter_spec,
+        metric_specs=get_exact_match_metric_specs(),
+        groups=["alghafa", f"alghafa_{subset}"],
+    )
+
+
+@run_spec_function("aratrust")
+def get_aratrust_spec(category: str) -> RunSpec:
+    """EXPERIMENTAL: This run spec here may have future reverse incompatible changes."""
+    scenario_spec = ScenarioSpec(
+        class_name="helm.benchmark.scenarios.aratrust_scenario.AraTrustScenario",
+        args={"category": category},
+    )
+
+    adapter_spec = get_generation_adapter_spec(
+        instructions="السؤال التالي هو سؤال متعدد الإختيارات. اختر الإجابة الصحيحة: أ، ب أو ج",  # noqa: E501
+        input_noun="السؤال",
+        output_noun="الإجابة",
+        max_tokens=100,
+    )
+
+    return RunSpec(
+        name=f"aratrust:category={category}",
+        scenario_spec=scenario_spec,
+        adapter_spec=adapter_spec,
+        metric_specs=get_exact_match_metric_specs(),
+        groups=["aratrust"],
+    )
+
+
+@run_spec_function("alrage")
+def get_alrage_spec() -> RunSpec:
+    """EXPERIMENTAL: This run spec here may have future reverse incompatible changes."""
+    scenario_spec = ScenarioSpec(class_name="helm.benchmark.scenarios.alrage_scenario.ALRAGEScenario")
+
+    adapter_spec = get_generation_adapter_spec(
+        instructions="بناءً على السياقات المقترحة التالية، اجب عن السؤال التالي",  # noqa: E501
+        input_noun="السؤال",
+        output_noun="الإجابة",
+        max_tokens=100,
+    )
+
+    annotator_specs = [AnnotatorSpec(class_name="helm.benchmark.annotation.alrage_annotator.ALRAGEAnnotator")]
+
+    metric_specs = [
+        MetricSpec(class_name="helm.benchmark.metrics.alrage_metric.ALRAGEMetric")
+    ] + get_basic_metric_specs([])
+
+    return RunSpec(
+        name="alrage",
+        scenario_spec=scenario_spec,
+        adapter_spec=adapter_spec,
+        annotators=annotator_specs,
+        metric_specs=metric_specs,
+        groups=["alrage"],
+    )
+
+
+@run_spec_function("madinah_qa")
+def get_madinah_qa_spec(subset: str) -> RunSpec:
+    scenario_spec = ScenarioSpec(
+        class_name="helm.benchmark.scenarios.madinah_qa_scenario.MadinahQAScenario", args={"subset": subset}
+    )
+
+    adapter_spec = get_multiple_choice_adapter_spec(
+        method=ADAPT_MULTIPLE_CHOICE_JOINT,
+        instructions="السؤال التالي هو سؤال متعدد الإختيارات. اختر الإجابة الصحيحة",  # noqa: E501
+        input_noun="السؤال",
+        output_noun="الإجابة",
+        max_tokens=100,
+        reference_prefix_characters=_ARABIC_REFERENCE_PREFIX_CHARACTERS,
+    )
+
+    return RunSpec(
+        name=f"madinah_qa:subset={subset}",
+        scenario_spec=scenario_spec,
+        adapter_spec=adapter_spec,
+        metric_specs=get_exact_match_metric_specs(),
+        groups=["madinah_qa"],
+    )
+
+
+@run_spec_function("mbzuai_human_translated_arabic_mmlu")
+def get_arabic_mmmlu_spec(subject: str) -> RunSpec:
+    scenario_spec = ScenarioSpec(
+        class_name="helm.benchmark.scenarios.mbzuai_human_translated_arabic_mmlu.MBZUAIHumanTranslatedArabicMMLUScenario",
+        args={"subject": subject},
+    )
+
+    adapter_spec = get_multiple_choice_adapter_spec(
+        method=ADAPT_MULTIPLE_CHOICE_JOINT,
+        instructions="السؤال التالي هو سؤال متعدد الإختيارات. اختر الإجابة الصحيحة",  # noqa: E501
+        input_noun="السؤال",
+        output_noun="الإجابة",
+        max_tokens=100,
+        reference_prefix_characters=_ARABIC_REFERENCE_PREFIX_CHARACTERS,
+    )
+
+    return RunSpec(
+        name=f"mbzuai_human_translated_arabic_mmlu:subject={subject}",
+        scenario_spec=scenario_spec,
+        adapter_spec=adapter_spec,
+        metric_specs=get_exact_match_metric_specs(),
+        groups=["mbzuai_human_translated_arabic_mmlu"],
+    )
+
+
+@run_spec_function("arabic_exams")
+def get_arabic_exams_spec(subject: str) -> RunSpec:
+    scenario_spec = ScenarioSpec(
+        class_name="helm.benchmark.scenarios.arabic_exams_scenario.ArabicEXAMSScenario",
+        args={"subject": subject},
+    )
+
+    adapter_spec = get_multiple_choice_adapter_spec(
+        method=ADAPT_MULTIPLE_CHOICE_JOINT,
+        instructions="السؤال التالي هو سؤال متعدد الإختيارات. اختر الإجابة الصحيحة",  # noqa: E501
+        input_noun="السؤال",
+        output_noun="الإجابة",
+        max_tokens=100,
+        reference_prefix_characters=_ARABIC_REFERENCE_PREFIX_CHARACTERS,
+    )
+
+    return RunSpec(
+        name=f"arabic_exams:subject={subject}",
+        scenario_spec=scenario_spec,
+        adapter_spec=adapter_spec,
+        metric_specs=get_exact_match_metric_specs(),
+        groups=["arabic_exams"],
+    )
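For reference, the recurring prompt strings translate roughly as follows: the multiple-choice instructions mean "The following question is a multiple-choice question. Choose the correct answer" (the AraTrust variant appends "أ، ب أو ج", i.e. "A, B, or C"; the AlGhafa instruction reads "The following questions are multiple-choice questions with the correct answer"; and the ALRAGE one reads "Based on the following proposed contexts, answer the following question"), while "السؤال" is "Question" and "الإجابة" is "Answer". Because each function is registered via run_spec_function, it can also be called directly; a small sketch (the subset value is a placeholder, not a name confirmed by the diff):

from helm.benchmark.run_specs.arabic_run_specs import get_arabic_mmlu_spec

# "Law" is a hypothetical subset; valid values come from the ArabicMMLU dataset.
run_spec = get_arabic_mmlu_spec(subset="Law")
print(run_spec.name)    # arabic_mmlu:subset=Law
print(run_spec.groups)  # ['arabic_mmlu']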