crfm-helm 0.5.4__py3-none-any.whl → 0.5.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- crfm_helm-0.5.6.dist-info/METADATA +427 -0
- crfm_helm-0.5.6.dist-info/RECORD +941 -0
- {crfm_helm-0.5.4.dist-info → crfm_helm-0.5.6.dist-info}/WHEEL +1 -1
- helm/benchmark/adaptation/adapter_spec.py +13 -1
- helm/benchmark/adaptation/adapters/adapter_factory.py +15 -1
- helm/benchmark/adaptation/adapters/binary_ranking_adapter.py +1 -1
- helm/benchmark/adaptation/adapters/chat_adapter.py +49 -0
- helm/benchmark/adaptation/adapters/ehr_instruction_adapter.py +108 -0
- helm/benchmark/adaptation/adapters/generation_adapter.py +1 -1
- helm/benchmark/adaptation/adapters/in_context_learning_adapter.py +4 -4
- helm/benchmark/adaptation/adapters/language_modeling_adapter.py +1 -1
- helm/benchmark/adaptation/adapters/multimodal/generation_multimodal_adapter.py +4 -2
- helm/benchmark/adaptation/adapters/multimodal/in_context_learning_multimodal_adapter.py +1 -1
- helm/benchmark/adaptation/adapters/multimodal/multiple_choice_joint_multimodal_adapter.py +1 -1
- helm/benchmark/adaptation/adapters/multimodal/test_in_context_learning_multimodal_adapter.py +4 -2
- helm/benchmark/adaptation/adapters/multimodal/test_multimodal_prompt.py +1 -1
- helm/benchmark/adaptation/adapters/multiple_choice_calibrated_adapter.py +1 -1
- helm/benchmark/adaptation/adapters/multiple_choice_joint_adapter.py +2 -2
- helm/benchmark/adaptation/adapters/multiple_choice_joint_chain_of_thought_adapter.py +87 -0
- helm/benchmark/adaptation/adapters/multiple_choice_separate_adapter.py +1 -1
- helm/benchmark/adaptation/adapters/test_adapter.py +4 -4
- helm/benchmark/adaptation/adapters/test_generation_adapter.py +3 -3
- helm/benchmark/adaptation/adapters/test_language_modeling_adapter.py +2 -2
- helm/benchmark/adaptation/adapters/test_multiple_choice_joint_adapter.py +2 -2
- helm/benchmark/adaptation/common_adapter_specs.py +69 -4
- helm/benchmark/adaptation/prompt.py +1 -1
- helm/benchmark/annotation/aci_bench_annotator.py +95 -0
- helm/benchmark/annotation/air_bench_annotator.py +21 -6
- helm/benchmark/annotation/annotator.py +5 -0
- helm/benchmark/annotation/annotator_factory.py +3 -20
- helm/benchmark/annotation/autobencher_capabilities_annotator.py +107 -0
- helm/benchmark/annotation/autobencher_safety_annotator.py +98 -0
- helm/benchmark/annotation/bigcodebench_annotator.py +108 -0
- helm/benchmark/annotation/bird_sql_annotator.py +58 -0
- helm/benchmark/annotation/chw_care_plan_annotator.py +93 -0
- helm/benchmark/annotation/czech_bank_qa_annotator.py +78 -0
- helm/benchmark/annotation/dischargeme_annotator.py +107 -0
- helm/benchmark/annotation/ehr_sql_annotator.py +87 -0
- helm/benchmark/annotation/helpdesk_call_summarization_annotator.py +131 -0
- helm/benchmark/annotation/image2struct/image_compiler_annotator.py +6 -1
- helm/benchmark/annotation/live_qa_annotator.py +1 -1
- helm/benchmark/annotation/med_dialog_annotator.py +99 -0
- helm/benchmark/annotation/medalign_annotator.py +100 -0
- helm/benchmark/annotation/medi_qa_annotator.py +98 -0
- helm/benchmark/annotation/medication_qa_annotator.py +87 -63
- helm/benchmark/annotation/mental_health_annotator.py +98 -0
- helm/benchmark/annotation/mimic_bhc_annotator.py +100 -0
- helm/benchmark/annotation/mimic_rrs_annotator.py +100 -0
- helm/benchmark/annotation/model_as_judge.py +214 -6
- helm/benchmark/annotation/mtsamples_procedures_annotator.py +98 -0
- helm/benchmark/annotation/mtsamples_replicate_annotator.py +101 -0
- helm/benchmark/annotation/omni_math/gpt_evaluation_template.txt +152 -0
- helm/benchmark/annotation/omni_math/gpt_evaluation_zero_shot_template.txt +36 -0
- helm/benchmark/annotation/omni_math_annotator.py +131 -0
- helm/benchmark/annotation/spider_annotator.py +18 -0
- helm/benchmark/annotation/starr_patient_instructions_annotator.py +98 -0
- helm/benchmark/annotation/wildbench/eval_template.pairwise.v2.md +75 -0
- helm/benchmark/annotation/wildbench/eval_template.score.v2.md +66 -0
- helm/benchmark/annotation/wildbench_annotator.py +119 -0
- helm/benchmark/annotation_executor.py +35 -15
- helm/benchmark/augmentations/cleva_perturbation.py +9 -8
- helm/benchmark/augmentations/contraction_expansion_perturbation.py +2 -2
- helm/benchmark/augmentations/contrast_sets_perturbation.py +2 -2
- helm/benchmark/augmentations/dialect_perturbation.py +4 -5
- helm/benchmark/augmentations/extra_space_perturbation.py +2 -2
- helm/benchmark/augmentations/filler_words_perturbation.py +2 -2
- helm/benchmark/augmentations/gender_perturbation.py +2 -2
- helm/benchmark/augmentations/lowercase_perturbation.py +2 -2
- helm/benchmark/augmentations/mild_mix_perturbation.py +6 -6
- helm/benchmark/augmentations/misspelling_perturbation.py +2 -2
- helm/benchmark/augmentations/person_name_perturbation.py +4 -5
- helm/benchmark/augmentations/perturbation.py +1 -1
- helm/benchmark/augmentations/space_perturbation.py +2 -2
- helm/benchmark/augmentations/suffix_perturbation.py +2 -2
- helm/benchmark/augmentations/synonym_perturbation.py +4 -3
- helm/benchmark/augmentations/test_perturbation.py +16 -13
- helm/benchmark/augmentations/translate_perturbation.py +2 -2
- helm/benchmark/augmentations/typos_perturbation.py +2 -2
- helm/benchmark/data_preprocessor.py +2 -2
- helm/benchmark/executor.py +11 -12
- helm/benchmark/huggingface_registration.py +2 -7
- helm/benchmark/metrics/aci_bench_metrics.py +14 -0
- helm/benchmark/metrics/basic_metrics.py +6 -6
- helm/benchmark/metrics/bbq_metrics.py +2 -2
- helm/benchmark/metrics/bias_metrics.py +12 -3
- helm/benchmark/metrics/bias_word_lists.py +1 -1
- helm/benchmark/metrics/bigcodebench_metrics.py +25 -0
- helm/benchmark/metrics/bird_sql_metrics.py +28 -0
- helm/benchmark/metrics/chw_care_plan_metrics.py +14 -0
- helm/benchmark/metrics/classification_metrics.py +76 -12
- helm/benchmark/metrics/cleva_harms_metrics.py +10 -9
- helm/benchmark/metrics/code_metrics.py +5 -5
- helm/benchmark/metrics/comet_metric.py +125 -0
- helm/benchmark/metrics/common_metric_specs.py +9 -2
- helm/benchmark/metrics/conv_fin_qa_calc_metrics.py +72 -0
- helm/benchmark/metrics/copyright_metrics.py +4 -4
- helm/benchmark/metrics/czech_bank_qa_metrics.py +29 -0
- helm/benchmark/metrics/decodingtrust_fairness_metrics.py +2 -2
- helm/benchmark/metrics/decodingtrust_privacy_metrics.py +2 -2
- helm/benchmark/metrics/decodingtrust_stereotype_bias_metrics.py +2 -2
- helm/benchmark/metrics/dischargeme_metrics.py +14 -0
- helm/benchmark/metrics/disinformation_metrics.py +4 -4
- helm/benchmark/metrics/dry_run_metrics.py +5 -5
- helm/benchmark/metrics/efficiency_metrics.py +6 -6
- helm/benchmark/metrics/ehr_sql_metrics.py +103 -0
- helm/benchmark/metrics/evaluate_instances_metric.py +3 -3
- helm/benchmark/metrics/evaluate_reference_metrics.py +144 -16
- helm/benchmark/metrics/gpqa_chain_of_thought_metric.py +103 -0
- helm/benchmark/metrics/gpt4_audio_critique_metrics.py +167 -0
- helm/benchmark/metrics/gpt4_audio_refusal_metrics.py +145 -0
- helm/benchmark/metrics/helpdesk_call_summarization_metrics.py +36 -0
- helm/benchmark/metrics/ifeval/__init__.py +0 -0
- helm/benchmark/metrics/ifeval/instructions.py +1574 -0
- helm/benchmark/metrics/ifeval/instructions_registry.py +182 -0
- helm/benchmark/metrics/ifeval/instructions_registry.pyi +3 -0
- helm/benchmark/metrics/ifeval/instructions_util.py +153 -0
- helm/benchmark/metrics/ifeval_metrics.py +55 -0
- helm/benchmark/metrics/image_generation/aesthetics_metrics.py +1 -1
- helm/benchmark/metrics/image_generation/detection_metrics.py +1 -1
- helm/benchmark/metrics/image_generation/detectors/vitdet.py +1 -1
- helm/benchmark/metrics/image_generation/fractal_dimension/test_fractal_dimension_util.py +1 -1
- helm/benchmark/metrics/image_generation/fractal_dimension_metric.py +1 -1
- helm/benchmark/metrics/image_generation/nsfw_metrics.py +1 -1
- helm/benchmark/metrics/image_generation/q16/test_q16.py +3 -1
- helm/benchmark/metrics/image_generation/q16_toxicity_metrics.py +1 -1
- helm/benchmark/metrics/image_generation/skin_tone_metrics.py +2 -2
- helm/benchmark/metrics/image_generation/watermark/test_watermark_detector.py +1 -1
- helm/benchmark/metrics/image_generation/watermark_metrics.py +1 -1
- helm/benchmark/metrics/instruction_following_critique_metrics.py +4 -4
- helm/benchmark/metrics/kpi_edgar_metrics.py +121 -0
- helm/benchmark/metrics/language_modeling_metrics.py +4 -4
- helm/benchmark/metrics/llm_jury_metrics.py +46 -0
- helm/benchmark/metrics/machine_translation_metrics.py +2 -2
- helm/benchmark/metrics/med_dialog_metrics.py +14 -0
- helm/benchmark/metrics/medalign_metrics.py +14 -0
- helm/benchmark/metrics/medcalc_bench_metrics.py +124 -0
- helm/benchmark/metrics/medec_metrics.py +101 -0
- helm/benchmark/metrics/medi_qa_metrics.py +14 -0
- helm/benchmark/metrics/medication_qa_metrics.py +10 -19
- helm/benchmark/metrics/melt_bias_metric.py +234 -0
- helm/benchmark/metrics/melt_bias_word_lists.py +1367 -0
- helm/benchmark/metrics/melt_metric_specs.py +43 -0
- helm/benchmark/metrics/melt_toxicity_metric.py +107 -0
- helm/benchmark/metrics/mental_health_metrics.py +14 -0
- helm/benchmark/metrics/metric.py +3 -3
- helm/benchmark/metrics/metric_service.py +11 -11
- helm/benchmark/metrics/mimic_bhc_metrics.py +14 -0
- helm/benchmark/metrics/mimic_rrs_metrics.py +14 -0
- helm/benchmark/metrics/mimiciv_billing_code_metrics.py +96 -0
- helm/benchmark/metrics/mtsamples_procedures_metrics.py +14 -0
- helm/benchmark/metrics/mtsamples_replicate_metrics.py +14 -0
- helm/benchmark/metrics/nltk_helper.py +32 -0
- helm/benchmark/metrics/numeracy_metrics.py +4 -4
- helm/benchmark/metrics/omni_math_metrics.py +32 -0
- helm/benchmark/metrics/openai_mrcr_metrics.py +52 -0
- helm/benchmark/metrics/output_processing_metric.py +60 -0
- helm/benchmark/metrics/output_processors.py +15 -0
- helm/benchmark/metrics/paraphrase_generation_metrics.py +2 -2
- helm/benchmark/metrics/ranking_metrics.py +3 -3
- helm/benchmark/metrics/reference_metric.py +3 -3
- helm/benchmark/metrics/ruler_qa_metrics.py +34 -0
- helm/benchmark/metrics/{bhasa_metrics.py → seahelm_metrics.py} +3 -3
- helm/benchmark/metrics/seahelm_metrics_specs.py +10 -0
- helm/benchmark/metrics/spider_metrics.py +7 -0
- helm/benchmark/metrics/starr_patient_instructions_metrics.py +14 -0
- helm/benchmark/metrics/statistic.py +1 -1
- helm/benchmark/metrics/summac/model_summac.py +2 -3
- helm/benchmark/metrics/summarization_critique_metrics.py +4 -4
- helm/benchmark/metrics/summarization_metrics.py +20 -9
- helm/benchmark/metrics/test_bias_metrics.py +5 -1
- helm/benchmark/metrics/test_classification_metrics.py +140 -68
- helm/benchmark/metrics/test_evaluate_reference_metrics.py +15 -0
- helm/benchmark/metrics/test_metric.py +1 -1
- helm/benchmark/metrics/test_statistic.py +2 -2
- helm/benchmark/metrics/tokens/ai21_token_cost_estimator.py +1 -1
- helm/benchmark/metrics/tokens/auto_token_cost_estimator.py +6 -6
- helm/benchmark/metrics/tokens/cohere_token_cost_estimator.py +1 -1
- helm/benchmark/metrics/tokens/free_token_cost_estimator.py +1 -1
- helm/benchmark/metrics/tokens/gooseai_token_cost_estimator.py +1 -1
- helm/benchmark/metrics/tokens/openai_token_cost_estimator.py +1 -1
- helm/benchmark/metrics/tokens/test_ai21_token_cost_estimator.py +1 -1
- helm/benchmark/metrics/tokens/test_openai_token_cost_estimator.py +3 -3
- helm/benchmark/metrics/toxicity_metrics.py +6 -6
- helm/benchmark/metrics/unitxt_metrics.py +7 -5
- helm/benchmark/metrics/vision_language/emd_utils.py +4 -2
- helm/benchmark/metrics/vision_language/image_metrics.py +1 -1
- helm/benchmark/metrics/vision_language/image_utils.py +2 -2
- helm/benchmark/metrics/wildbench_metrics.py +34 -0
- helm/benchmark/model_deployment_registry.py +6 -8
- helm/benchmark/model_metadata_registry.py +16 -0
- helm/benchmark/presentation/contamination.py +3 -3
- helm/benchmark/presentation/create_plots.py +33 -12
- helm/benchmark/presentation/run_display.py +13 -0
- helm/benchmark/presentation/schema.py +2 -1
- helm/benchmark/presentation/summarize.py +97 -67
- helm/benchmark/presentation/torr_robustness_summarizer.py +178 -0
- helm/benchmark/reeval_run.py +202 -0
- helm/benchmark/reeval_runner.py +355 -0
- helm/benchmark/run.py +86 -90
- helm/benchmark/run_expander.py +90 -9
- helm/benchmark/run_spec_factory.py +13 -0
- helm/benchmark/run_specs/air_bench_run_specs.py +21 -3
- helm/benchmark/run_specs/audio_run_specs.py +657 -0
- helm/benchmark/run_specs/call_center_run_specs.py +49 -0
- helm/benchmark/run_specs/capabilities_run_specs.py +308 -0
- helm/benchmark/run_specs/classic_run_specs.py +1 -69
- helm/benchmark/run_specs/enem_challenge_specs.py +31 -0
- helm/benchmark/run_specs/enterprise_run_specs.py +280 -0
- helm/benchmark/run_specs/experimental_run_specs.py +142 -3
- helm/benchmark/run_specs/imdb_ptbr_run_specs.py +30 -0
- helm/benchmark/run_specs/lite_run_specs.py +2 -2
- helm/benchmark/run_specs/long_context_run_specs.py +141 -0
- helm/benchmark/run_specs/medhelm_run_specs.py +1260 -0
- helm/benchmark/run_specs/melt_run_specs.py +783 -0
- helm/benchmark/run_specs/mmlu_clinical_afr_run_specs.py +49 -0
- helm/benchmark/run_specs/oab_exams_specs.py +32 -0
- helm/benchmark/run_specs/safety_run_specs.py +37 -0
- helm/benchmark/run_specs/{bhasa_run_specs.py → seahelm_run_specs.py} +44 -44
- helm/benchmark/run_specs/speech_disorder_audio_run_specs.py +169 -0
- helm/benchmark/run_specs/sql_run_specs.py +54 -0
- helm/benchmark/run_specs/tweetsentbr_run_specs.py +32 -0
- helm/benchmark/run_specs/unitxt_run_specs.py +14 -5
- helm/benchmark/run_specs/vlm_run_specs.py +103 -2
- helm/benchmark/run_specs/winogrande_afr_run_specs.py +47 -0
- helm/benchmark/runner.py +5 -5
- helm/benchmark/scenarios/aci_bench_scenario.py +126 -0
- helm/benchmark/scenarios/air_bench_scenario.py +6 -1
- helm/benchmark/scenarios/anthropic_hh_rlhf_scenario.py +5 -3
- helm/benchmark/scenarios/anthropic_red_team_scenario.py +1 -1
- helm/benchmark/scenarios/audio_language/__init__.py +0 -0
- helm/benchmark/scenarios/audio_language/air_bench_chat_scenario.py +130 -0
- helm/benchmark/scenarios/audio_language/air_bench_foundation_scenario.py +154 -0
- helm/benchmark/scenarios/audio_language/ami_scenario.py +96 -0
- helm/benchmark/scenarios/audio_language/audio_mnist_scenario.py +62 -0
- helm/benchmark/scenarios/audio_language/audio_pairs_scenario.py +62 -0
- helm/benchmark/scenarios/audio_language/audiocaps_scenario.py +59 -0
- helm/benchmark/scenarios/audio_language/casual_conversations2_scenario.py +152 -0
- helm/benchmark/scenarios/audio_language/common_voice_15_scenario.py +99 -0
- helm/benchmark/scenarios/audio_language/corebench_scenario.py +77 -0
- helm/benchmark/scenarios/audio_language/covost2_scenario.py +163 -0
- helm/benchmark/scenarios/audio_language/fleurs_fairness_scenario.py +83 -0
- helm/benchmark/scenarios/audio_language/fleurs_scenario.py +312 -0
- helm/benchmark/scenarios/audio_language/iemocap_audio_scenario.py +83 -0
- helm/benchmark/scenarios/audio_language/librispeech_fairness_scenario.py +96 -0
- helm/benchmark/scenarios/audio_language/librispeech_scenario.py +80 -0
- helm/benchmark/scenarios/audio_language/meld_audio_scenario.py +113 -0
- helm/benchmark/scenarios/audio_language/multilingual_librispeech_scenario.py +80 -0
- helm/benchmark/scenarios/audio_language/mustard_scenario.py +142 -0
- helm/benchmark/scenarios/audio_language/mutox_scenario.py +254 -0
- helm/benchmark/scenarios/audio_language/parade_scenario.py +97 -0
- helm/benchmark/scenarios/audio_language/speech_robust_bench_scenario.py +124 -0
- helm/benchmark/scenarios/audio_language/ultra_suite_asr_classification.py +103 -0
- helm/benchmark/scenarios/audio_language/ultra_suite_classification_scenario.py +110 -0
- helm/benchmark/scenarios/audio_language/ultra_suite_disorder_breakdown_scenario.py +78 -0
- helm/benchmark/scenarios/audio_language/ultra_suite_disorder_symptoms_scenario.py +109 -0
- helm/benchmark/scenarios/audio_language/vocal_sound_scenario.py +83 -0
- helm/benchmark/scenarios/audio_language/voice_jailbreak_attacks_scenario.py +87 -0
- helm/benchmark/scenarios/audio_language/voxceleb2_scenario.py +105 -0
- helm/benchmark/scenarios/autobencher_capabilities_scenario.py +68 -0
- helm/benchmark/scenarios/autobencher_safety_scenario.py +51 -0
- helm/benchmark/scenarios/babi_qa_scenario.py +1 -1
- helm/benchmark/scenarios/banking77_scenario.py +6 -1
- helm/benchmark/scenarios/bbq_scenario.py +1 -1
- helm/benchmark/scenarios/big_bench_scenario.py +11 -1
- helm/benchmark/scenarios/bigcodebench_scenario.py +58 -0
- helm/benchmark/scenarios/bird_sql_scenario.py +94 -0
- helm/benchmark/scenarios/bird_sql_scenario_helper.py +118 -0
- helm/benchmark/scenarios/blimp_scenario.py +1 -1
- helm/benchmark/scenarios/bold_scenario.py +1 -1
- helm/benchmark/scenarios/boolq_scenario.py +1 -1
- helm/benchmark/scenarios/casehold_scenario.py +79 -0
- helm/benchmark/scenarios/chw_care_plan_scenario.py +106 -0
- helm/benchmark/scenarios/civil_comments_scenario.py +1 -1
- helm/benchmark/scenarios/clear_scenario.py +157 -0
- helm/benchmark/scenarios/cleva_scenario.py +2 -2
- helm/benchmark/scenarios/code_scenario.py +17 -4
- helm/benchmark/scenarios/commonsense_scenario.py +1 -1
- helm/benchmark/scenarios/conv_fin_qa_calc_scenario.py +97 -0
- helm/benchmark/scenarios/copyright_scenario.py +1 -1
- helm/benchmark/scenarios/covid_dialog_scenario.py +10 -1
- helm/benchmark/scenarios/cti_to_mitre_scenario.py +240 -0
- helm/benchmark/scenarios/custom_mcqa_scenario.py +1 -1
- helm/benchmark/scenarios/czech_bank_qa_scenario.py +130 -0
- helm/benchmark/scenarios/decodingtrust_adv_demonstration_scenario.py +1 -1
- helm/benchmark/scenarios/decodingtrust_privacy_scenario.py +1 -1
- helm/benchmark/scenarios/decodingtrust_stereotype_bias_scenario.py +1 -1
- helm/benchmark/scenarios/decodingtrust_toxicity_prompts_scenario.py +1 -1
- helm/benchmark/scenarios/dialogue_scenarios.py +13 -2
- helm/benchmark/scenarios/dischargeme_scenario.py +172 -0
- helm/benchmark/scenarios/disinformation_scenario.py +10 -1
- helm/benchmark/scenarios/dyck_language_scenario.py +10 -1
- helm/benchmark/scenarios/echr_judgment_classification_scenario.py +113 -0
- helm/benchmark/scenarios/ehr_sql_scenario.py +137 -0
- helm/benchmark/scenarios/ehrshot_scenario.py +1519 -0
- helm/benchmark/scenarios/enem_challenge_scenario.py +58 -0
- helm/benchmark/scenarios/entity_data_imputation_scenario.py +11 -1
- helm/benchmark/scenarios/entity_matching_scenario.py +12 -2
- helm/benchmark/scenarios/financial_phrasebank_scenario.py +94 -0
- helm/benchmark/scenarios/gold_commodity_news_scenario.py +124 -0
- helm/benchmark/scenarios/gpqa_scenario.py +80 -0
- helm/benchmark/scenarios/grammar.py +2 -2
- helm/benchmark/scenarios/grammar_scenario.py +2 -2
- helm/benchmark/scenarios/gsm_scenario.py +10 -1
- helm/benchmark/scenarios/harm_bench_gcg_transfer_scenario.py +50 -0
- helm/benchmark/scenarios/harm_bench_scenario.py +1 -1
- helm/benchmark/scenarios/headqa_scenario.py +136 -0
- helm/benchmark/scenarios/helpdesk_call_summarization_scenario.py +37 -0
- helm/benchmark/scenarios/ice_scenario.py +8 -4
- helm/benchmark/scenarios/ifeval_scenario.py +53 -0
- helm/benchmark/scenarios/imdb_ptbr_scenario.py +60 -0
- helm/benchmark/scenarios/imdb_scenario.py +11 -2
- helm/benchmark/scenarios/infinite_bench_en_qa_scenario.py +85 -0
- helm/benchmark/scenarios/infinite_bench_en_sum_scenario.py +79 -0
- helm/benchmark/scenarios/interactive_qa_mmlu_scenario.py +2 -2
- helm/benchmark/scenarios/koala_scenario.py +1 -1
- helm/benchmark/scenarios/kpi_edgar_scenario.py +151 -0
- helm/benchmark/scenarios/legal_contract_summarization_scenario.py +129 -0
- helm/benchmark/scenarios/legal_opinion_sentiment_classification_scenario.py +77 -0
- helm/benchmark/scenarios/legal_summarization_scenario.py +11 -1
- helm/benchmark/scenarios/legal_support_scenario.py +11 -1
- helm/benchmark/scenarios/legalbench_scenario.py +22 -3
- helm/benchmark/scenarios/lex_glue_scenario.py +12 -2
- helm/benchmark/scenarios/lextreme_scenario.py +11 -1
- helm/benchmark/scenarios/live_qa_scenario.py +1 -1
- helm/benchmark/scenarios/lm_entry_scenario.py +1 -1
- helm/benchmark/scenarios/lsat_qa_scenario.py +1 -1
- helm/benchmark/scenarios/math_scenario.py +9 -1
- helm/benchmark/scenarios/me_q_sum_scenario.py +10 -1
- helm/benchmark/scenarios/med_dialog_scenario.py +25 -22
- helm/benchmark/scenarios/med_mcqa_scenario.py +10 -1
- helm/benchmark/scenarios/med_paragraph_simplification_scenario.py +10 -1
- helm/benchmark/scenarios/med_qa_scenario.py +10 -1
- helm/benchmark/scenarios/medalign_scenario.py +94 -0
- helm/benchmark/scenarios/medalign_scenario_helper.py +432 -0
- helm/benchmark/scenarios/medbullets_scenario.py +145 -0
- helm/benchmark/scenarios/medcalc_bench_scenario.py +127 -0
- helm/benchmark/scenarios/medec_scenario.py +125 -0
- helm/benchmark/scenarios/medhallu_scenario.py +72 -0
- helm/benchmark/scenarios/medi_qa_scenario.py +111 -0
- helm/benchmark/scenarios/medication_qa_scenario.py +8 -2
- helm/benchmark/scenarios/melt_ir_scenario.py +171 -0
- helm/benchmark/scenarios/melt_knowledge_scenario.py +246 -0
- helm/benchmark/scenarios/melt_lm_scenarios.py +252 -0
- helm/benchmark/scenarios/melt_scenarios.py +793 -0
- helm/benchmark/scenarios/melt_srn_scenario.py +342 -0
- helm/benchmark/scenarios/melt_synthetic_reasoning_scenario.py +222 -0
- helm/benchmark/scenarios/melt_translation_scenario.py +152 -0
- helm/benchmark/scenarios/mental_health_scenario.py +123 -0
- helm/benchmark/scenarios/mimic_bhc_scenario.py +103 -0
- helm/benchmark/scenarios/mimic_rrs_scenario.py +98 -0
- helm/benchmark/scenarios/mimiciv_billing_code_scenario.py +77 -0
- helm/benchmark/scenarios/mmlu_clinical_afr_scenario.py +74 -0
- helm/benchmark/scenarios/mmlu_pro_scenario.py +95 -0
- helm/benchmark/scenarios/mmlu_scenario.py +11 -1
- helm/benchmark/scenarios/msmarco_scenario.py +1 -1
- helm/benchmark/scenarios/mtsamples_procedures_scenario.py +144 -0
- helm/benchmark/scenarios/mtsamples_replicate_scenario.py +142 -0
- helm/benchmark/scenarios/n2c2_ct_matching_scenario.py +277 -0
- helm/benchmark/scenarios/narrativeqa_scenario.py +1 -1
- helm/benchmark/scenarios/natural_qa_scenario.py +1 -1
- helm/benchmark/scenarios/newsqa_scenario.py +1 -1
- helm/benchmark/scenarios/numeracy_scenario.py +12 -2
- helm/benchmark/scenarios/oab_exams_scenario.py +57 -0
- helm/benchmark/scenarios/omni_math_scenario.py +53 -0
- helm/benchmark/scenarios/open_assistant_scenario.py +11 -2
- helm/benchmark/scenarios/openai_mrcr_scenario.py +79 -0
- helm/benchmark/scenarios/opinions_qa_scenario.py +1 -1
- helm/benchmark/scenarios/pubmed_qa_scenario.py +59 -43
- helm/benchmark/scenarios/quac_scenario.py +10 -1
- helm/benchmark/scenarios/race_based_med_scenario.py +152 -0
- helm/benchmark/scenarios/raft_scenario.py +17 -2
- helm/benchmark/scenarios/real_toxicity_prompts_scenario.py +1 -1
- helm/benchmark/scenarios/ruler_qa_scenario_helper.py +171 -0
- helm/benchmark/scenarios/ruler_qa_scenarios.py +88 -0
- helm/benchmark/scenarios/scenario.py +9 -1
- helm/benchmark/scenarios/{bhasa_scenario.py → seahelm_scenario.py} +7 -2
- helm/benchmark/scenarios/self_instruct_scenario.py +1 -1
- helm/benchmark/scenarios/shc_bmt_scenario.py +75 -0
- helm/benchmark/scenarios/shc_cdi_scenario.py +75 -0
- helm/benchmark/scenarios/shc_conf_scenario.py +76 -0
- helm/benchmark/scenarios/shc_ent_scenario.py +77 -0
- helm/benchmark/scenarios/shc_gip_scenario.py +74 -0
- helm/benchmark/scenarios/shc_privacy_scenario.py +78 -0
- helm/benchmark/scenarios/shc_proxy_scenario.py +76 -0
- helm/benchmark/scenarios/shc_ptbm_scenario.py +81 -0
- helm/benchmark/scenarios/shc_sei_scenario.py +94 -0
- helm/benchmark/scenarios/shc_sequoia_scenario.py +77 -0
- helm/benchmark/scenarios/simple_safety_tests_scenario.py +1 -1
- helm/benchmark/scenarios/spider_scenario.py +91 -0
- helm/benchmark/scenarios/starr_patient_instructions_scenario.py +97 -0
- helm/benchmark/scenarios/summarization_scenario.py +11 -1
- helm/benchmark/scenarios/sumosum_scenario.py +157 -0
- helm/benchmark/scenarios/synthetic_efficiency_scenario.py +1 -1
- helm/benchmark/scenarios/synthetic_reasoning_natural_scenario.py +11 -1
- helm/benchmark/scenarios/synthetic_reasoning_scenario.py +11 -1
- helm/benchmark/scenarios/test_bigcodebench_scenario.py +26 -0
- helm/benchmark/scenarios/test_czech_bank_qa_scenario.py +18 -0
- helm/benchmark/scenarios/test_enem_challenge_scenario.py +53 -0
- helm/benchmark/scenarios/test_ewok_scenario.py +6 -2
- helm/benchmark/scenarios/test_gold_commodity_news_scenario.py +18 -0
- helm/benchmark/scenarios/test_gpqa_scenario.py +44 -0
- helm/benchmark/scenarios/test_ifeval_scenario.py +36 -0
- helm/benchmark/scenarios/test_imdb_ptbr_scenario.py +27 -0
- helm/benchmark/scenarios/test_infinite_bench_en_qa_scenario.py +18 -0
- helm/benchmark/scenarios/test_infinite_bench_en_sum_scenario.py +31 -0
- helm/benchmark/scenarios/test_math_scenario.py +1 -0
- helm/benchmark/scenarios/test_mmlu_clinical_afr_scenario.py +21 -0
- helm/benchmark/scenarios/test_mmlu_pro_scenario.py +53 -0
- helm/benchmark/scenarios/test_oab_exams_scenario.py +51 -0
- helm/benchmark/scenarios/test_omni_math_scenario.py +27 -0
- helm/benchmark/scenarios/test_tweetsentbr_scenario.py +24 -0
- helm/benchmark/scenarios/test_wildbench_scenario.py +15 -0
- helm/benchmark/scenarios/test_winogrande_afr_scenario.py +19 -0
- helm/benchmark/scenarios/thai_exam_scenario.py +10 -1
- helm/benchmark/scenarios/the_pile_scenario.py +1 -1
- helm/benchmark/scenarios/truthful_qa_scenario.py +12 -2
- helm/benchmark/scenarios/tweetsentbr_scenario.py +66 -0
- helm/benchmark/scenarios/twitter_aae_scenario.py +1 -1
- helm/benchmark/scenarios/unitxt_scenario.py +8 -2
- helm/benchmark/scenarios/verifiability_judgment_scenario.py +1 -1
- helm/benchmark/scenarios/vicuna_scenario.py +1 -1
- helm/benchmark/scenarios/vision_language/blink_scenario.py +140 -0
- helm/benchmark/scenarios/vision_language/mm_star_scenario.py +95 -0
- helm/benchmark/scenarios/vision_language/msr_vtt_scenario.py +75 -0
- helm/benchmark/scenarios/vision_language/vqa_rad_scenario.py +88 -0
- helm/benchmark/scenarios/wikifact_scenario.py +11 -1
- helm/benchmark/scenarios/wikitext_103_scenario.py +1 -1
- helm/benchmark/scenarios/wildbench_scenario.py +83 -0
- helm/benchmark/scenarios/winogrande_afr_scenario.py +78 -0
- helm/benchmark/scenarios/wmt_14_scenario.py +14 -2
- helm/benchmark/scenarios/xstest_scenario.py +1 -1
- helm/benchmark/server.py +13 -1
- helm/benchmark/slurm_runner.py +1 -1
- helm/benchmark/static/schema_audio.yaml +763 -0
- helm/benchmark/static/schema_autobencher.yaml +150 -0
- helm/benchmark/static/schema_call_center.yaml +97 -60
- helm/benchmark/static/{schema_medical.yaml → schema_capabilities.yaml} +100 -101
- helm/benchmark/static/schema_czech_bank.yaml +148 -0
- helm/benchmark/static/schema_enem_challenge.yaml +146 -0
- helm/benchmark/static/schema_enterprise.yaml +319 -0
- helm/benchmark/static/schema_finance.yaml +14 -12
- helm/benchmark/static/schema_heim.yaml +1389 -0
- helm/benchmark/static/schema_long_context.yaml +283 -0
- helm/benchmark/static/schema_medhelm.yaml +1140 -0
- helm/benchmark/static/schema_melt.yaml +1257 -0
- helm/benchmark/static/schema_mmlu_winogrande_afr.yaml +1045 -0
- helm/benchmark/static/schema_safety.yaml +18 -1
- helm/benchmark/static/{schema_bhasa.yaml → schema_seahelm.yaml} +30 -16
- helm/benchmark/static/schema_slphelm.yaml +162 -0
- helm/benchmark/static/schema_social_audio.yaml +224 -0
- helm/benchmark/static/schema_sql.yaml +171 -0
- helm/benchmark/static/{schema_tables.yaml → schema_torr.yaml} +169 -36
- helm/benchmark/static/schema_tweetsentbr.yaml +146 -0
- helm/benchmark/static/schema_vhelm.yaml +129 -56
- helm/benchmark/static/schema_video.yaml +219 -0
- helm/benchmark/static_build/assets/helm-safety-2907a7b6.png +0 -0
- helm/benchmark/static_build/assets/index-94295e78.js +10 -0
- helm/benchmark/static_build/assets/index-b9779128.css +1 -0
- helm/benchmark/static_build/assets/medhelm-overview-eac29843.png +0 -0
- helm/benchmark/static_build/assets/medhelm-v1-overview-3ddfcd65.png +0 -0
- helm/benchmark/static_build/assets/{react-d4a0b69b.js → react-f82877fd.js} +1 -1
- helm/benchmark/static_build/assets/{recharts-6d337683.js → recharts-4037aff0.js} +1 -1
- helm/benchmark/static_build/assets/{tremor-54a99cc4.js → tremor-38a10867.js} +2 -2
- helm/benchmark/static_build/config.js +1 -1
- helm/benchmark/static_build/index.html +6 -6
- helm/benchmark/window_services/default_window_service.py +1 -1
- helm/benchmark/window_services/encoder_decoder_window_service.py +4 -4
- helm/benchmark/window_services/ice_window_service.py +1 -1
- helm/benchmark/window_services/image_generation/lexica_search_window_service.py +1 -1
- helm/benchmark/window_services/image_generation/openai_dalle_window_service.py +1 -1
- helm/benchmark/window_services/local_window_service.py +2 -2
- helm/benchmark/window_services/test_anthropic_window_service.py +3 -3
- helm/benchmark/window_services/test_bloom_window_service.py +3 -3
- helm/benchmark/window_services/test_gpt2_window_service.py +7 -2
- helm/benchmark/window_services/test_gpt4_window_service.py +8 -3
- helm/benchmark/window_services/test_gptj_window_service.py +8 -3
- helm/benchmark/window_services/test_gptneox_window_service.py +3 -3
- helm/benchmark/window_services/test_openai_window_service.py +8 -3
- helm/benchmark/window_services/test_opt_window_service.py +3 -3
- helm/benchmark/window_services/test_palmyra_window_service.py +3 -3
- helm/benchmark/window_services/test_t0pp_window_service.py +3 -3
- helm/benchmark/window_services/test_t511b_window_service.py +3 -3
- helm/benchmark/window_services/test_ul2_window_service.py +3 -3
- helm/benchmark/window_services/test_utils.py +4 -5
- helm/benchmark/window_services/test_yalm_window_service.py +3 -3
- helm/benchmark/window_services/tokenizer_service.py +7 -8
- helm/benchmark/window_services/yalm_window_service.py +1 -1
- helm/clients/ai21_client.py +3 -3
- helm/clients/aleph_alpha_client.py +1 -1
- helm/clients/anthropic_client.py +69 -29
- helm/clients/audio_language/__init__.py +0 -0
- helm/clients/audio_language/diva_llama_client.py +120 -0
- helm/clients/audio_language/llama_omni_client.py +198 -0
- helm/clients/audio_language/qwen2_5_omni_client.py +197 -0
- helm/clients/audio_language/qwen2_audiolm_client.py +190 -0
- helm/clients/audio_language/qwen_audiolm_client.py +152 -0
- helm/clients/audio_language/test.py +62 -0
- helm/clients/auto_client.py +4 -2
- helm/clients/azure_openai_client.py +55 -0
- helm/clients/bedrock_client.py +203 -7
- helm/clients/bedrock_utils.py +33 -0
- helm/clients/client.py +7 -7
- helm/clients/clip_scorers/clip_scorer.py +1 -1
- helm/clients/clip_scorers/multilingual_clip_scorer.py +1 -1
- helm/clients/cohere_client.py +3 -3
- helm/clients/google_client.py +1 -1
- helm/clients/grok_client.py +36 -0
- helm/clients/http_model_client.py +1 -1
- helm/clients/huggingface_client.py +52 -21
- helm/clients/huggingface_pipeline_client.py +138 -0
- helm/clients/ibm_client.py +267 -0
- helm/clients/image_generation/adobe_vision_client.py +1 -1
- helm/clients/image_generation/aleph_alpha_image_generation_client.py +1 -1
- helm/clients/image_generation/cogview2/sr_pipeline/__init__.py +3 -3
- helm/clients/image_generation/cogview2/sr_pipeline/direct_sr.py +5 -2
- helm/clients/image_generation/cogview2/sr_pipeline/iterative_sr.py +5 -2
- helm/clients/image_generation/cogview2/sr_pipeline/sr_group.py +2 -2
- helm/clients/image_generation/cogview2_client.py +1 -1
- helm/clients/image_generation/dalle2_client.py +1 -1
- helm/clients/image_generation/dalle3_client.py +2 -2
- helm/clients/image_generation/dalle_mini/__init__.py +1 -1
- helm/clients/image_generation/dalle_mini/data.py +1 -1
- helm/clients/image_generation/dalle_mini/model/__init__.py +5 -5
- helm/clients/image_generation/dalle_mini/model/configuration.py +2 -2
- helm/clients/image_generation/dalle_mini/model/modeling.py +3 -3
- helm/clients/image_generation/dalle_mini/model/processor.py +5 -5
- helm/clients/image_generation/dalle_mini/model/tokenizer.py +2 -2
- helm/clients/image_generation/dalle_mini/vqgan_jax/__init__.py +1 -1
- helm/clients/image_generation/dalle_mini/vqgan_jax/convert_pt_model_to_jax.py +2 -2
- helm/clients/image_generation/dalle_mini/vqgan_jax/modeling_flax_vqgan.py +1 -1
- helm/clients/image_generation/dalle_mini_client.py +1 -1
- helm/clients/image_generation/deep_floyd_client.py +1 -1
- helm/clients/image_generation/huggingface_diffusers_client.py +1 -1
- helm/clients/image_generation/lexica_client.py +1 -1
- helm/clients/image_generation/mindalle/models/__init__.py +6 -6
- helm/clients/image_generation/mindalle/models/stage1/vqgan.py +1 -1
- helm/clients/image_generation/mindalle/models/stage2/transformer.py +1 -1
- helm/clients/image_generation/mindalle/utils/__init__.py +3 -3
- helm/clients/image_generation/mindalle_client.py +1 -1
- helm/clients/image_generation/together_image_generation_client.py +1 -1
- helm/clients/lit_gpt_client.py +2 -2
- helm/clients/mistral_client.py +62 -18
- helm/clients/nvidia_nim_client.py +0 -3
- helm/clients/openai_client.py +308 -43
- helm/clients/openai_responses_client.py +174 -0
- helm/clients/palmyra_client.py +3 -9
- helm/clients/reka_client.py +3 -3
- helm/clients/stanfordhealthcare_azure_openai_client.py +58 -0
- helm/clients/stanfordhealthcare_claude_client.py +31 -0
- helm/clients/stanfordhealthcare_google_client.py +43 -0
- helm/clients/stanfordhealthcare_http_model_client.py +93 -0
- helm/clients/stanfordhealthcare_openai_client.py +62 -0
- helm/clients/stanfordhealthcare_shc_openai_client.py +42 -0
- helm/clients/test_client.py +1 -1
- helm/clients/test_together_client.py +6 -1
- helm/clients/together_client.py +76 -9
- helm/clients/upstage_client.py +23 -0
- helm/clients/vertexai_client.py +45 -13
- helm/clients/vision_language/huggingface_vision2seq_client.py +6 -4
- helm/clients/vision_language/huggingface_vlm_client.py +2 -2
- helm/clients/vision_language/idefics_client.py +6 -2
- helm/clients/vision_language/open_flamingo/__init__.py +2 -2
- helm/clients/vision_language/open_flamingo/src/factory.py +3 -3
- helm/clients/vision_language/open_flamingo/src/flamingo.py +2 -2
- helm/clients/vision_language/open_flamingo/src/flamingo_lm.py +2 -2
- helm/clients/vision_language/paligemma_client.py +2 -2
- helm/clients/vision_language/qwen2_vlm_client.py +188 -0
- helm/clients/vision_language/qwen_vlm_client.py +7 -5
- helm/clients/vllm_client.py +4 -6
- helm/clients/writer_client.py +102 -0
- helm/clients/yi_client.py +0 -3
- helm/common/audio_utils.py +111 -0
- helm/common/context.py +80 -0
- helm/common/credentials_utils.py +5 -5
- helm/common/file_caches/local_file_cache.py +1 -1
- helm/common/file_caches/test_local_file_cache.py +1 -1
- helm/common/general.py +9 -2
- helm/common/hierarchical_logger.py +46 -3
- helm/common/images_utils.py +2 -2
- helm/common/local_context.py +140 -0
- helm/common/media_object.py +2 -2
- helm/common/multimodal_request_utils.py +26 -0
- helm/common/reeval_parameters.py +12 -0
- helm/common/remote_context.py +61 -0
- helm/common/request.py +14 -2
- helm/common/response_format.py +18 -0
- helm/common/test_media_object.py +1 -1
- helm/config/model_deployments.yaml +1792 -28
- helm/config/model_metadata.yaml +1606 -51
- helm/config/tokenizer_configs.yaml +521 -4
- helm/proxy/cli.py +5 -3
- helm/proxy/critique/mechanical_turk_utils.py +1 -1
- helm/proxy/example_queries.py +1 -1
- helm/proxy/server.py +11 -4
- helm/proxy/services/remote_service.py +1 -1
- helm/proxy/services/server_service.py +22 -86
- helm/proxy/services/test_remote_service.py +2 -2
- helm/proxy/services/test_service.py +1 -1
- helm/proxy/static/general.js +122 -0
- helm/proxy/static/help.html +99 -0
- helm/proxy/static/index.css +57 -0
- helm/proxy/static/index.html +40 -0
- helm/proxy/static/index.js +456 -0
- helm/proxy/static/info-icon.png +0 -0
- helm/proxy/test_retry.py +1 -1
- helm/proxy/token_counters/auto_token_counter.py +1 -1
- helm/tokenizers/aleph_alpha_tokenizer.py +1 -1
- helm/tokenizers/caching_tokenizer.py +2 -30
- helm/tokenizers/grok_tokenizer.py +53 -0
- helm/tokenizers/http_model_tokenizer.py +1 -1
- helm/tokenizers/huggingface_tokenizer.py +3 -3
- helm/tokenizers/lit_gpt_tokenizer.py +1 -1
- helm/tokenizers/test_anthropic_tokenizer.py +6 -2
- helm/tokenizers/test_grok_tokenizer.py +33 -0
- helm/tokenizers/test_huggingface_tokenizer.py +1 -1
- helm/tokenizers/test_yalm_tokenizer.py +1 -1
- helm/tokenizers/tiktoken_tokenizer.py +1 -1
- helm/tokenizers/tokenizer.py +3 -1
- helm/tokenizers/yalm_tokenizer.py +3 -3
- helm/tokenizers/yalm_tokenizer_data/test_yalm_tokenizer.py +1 -1
- crfm_helm-0.5.4.dist-info/METADATA +0 -350
- crfm_helm-0.5.4.dist-info/RECORD +0 -697
- helm/benchmark/metrics/bhasa_metrics_specs.py +0 -10
- helm/benchmark/static_build/assets/01-694cb9b7.png +0 -0
- helm/benchmark/static_build/assets/accenture-6f97eeda.png +0 -0
- helm/benchmark/static_build/assets/ai21-0eb91ec3.png +0 -0
- helm/benchmark/static_build/assets/aisingapore-6dfc9acf.png +0 -0
- helm/benchmark/static_build/assets/aleph-alpha-7ce10034.png +0 -0
- helm/benchmark/static_build/assets/anthropic-70d8bc39.png +0 -0
- helm/benchmark/static_build/assets/bigscience-7f0400c0.png +0 -0
- helm/benchmark/static_build/assets/cohere-3550c6cb.png +0 -0
- helm/benchmark/static_build/assets/cresta-9e22b983.png +0 -0
- helm/benchmark/static_build/assets/cuhk-8c5631e9.png +0 -0
- helm/benchmark/static_build/assets/eleutherai-b9451114.png +0 -0
- helm/benchmark/static_build/assets/google-06d997ad.png +0 -0
- helm/benchmark/static_build/assets/index-05c76bb1.css +0 -1
- helm/benchmark/static_build/assets/index-3ee38b3d.js +0 -10
- helm/benchmark/static_build/assets/meta-5580e9f1.png +0 -0
- helm/benchmark/static_build/assets/microsoft-f5ee5016.png +0 -0
- helm/benchmark/static_build/assets/mistral-18e1be23.png +0 -0
- helm/benchmark/static_build/assets/nvidia-86fa75c1.png +0 -0
- helm/benchmark/static_build/assets/openai-3f8653e4.png +0 -0
- helm/benchmark/static_build/assets/scb10x-204bd786.png +0 -0
- helm/benchmark/static_build/assets/tii-24de195c.png +0 -0
- helm/benchmark/static_build/assets/together-a665a35b.png +0 -0
- helm/benchmark/static_build/assets/tsinghua-keg-97d4b395.png +0 -0
- helm/benchmark/static_build/assets/wellsfargo-a86a6c4a.png +0 -0
- helm/benchmark/static_build/assets/yandex-38e09d70.png +0 -0
- helm/tokenizers/anthropic_tokenizer.py +0 -52
- {crfm_helm-0.5.4.dist-info → crfm_helm-0.5.6.dist-info}/entry_points.txt +0 -0
- {crfm_helm-0.5.4.dist-info → crfm_helm-0.5.6.dist-info/licenses}/LICENSE +0 -0
- {crfm_helm-0.5.4.dist-info → crfm_helm-0.5.6.dist-info}/top_level.txt +0 -0
helm/benchmark/static/schema_czech_bank.yaml
@@ -0,0 +1,148 @@
+---
+############################################################
+metrics:
+  # Infrastructure metrics:
+  - name: num_perplexity_tokens
+    display_name: '# tokens'
+    description: Average number of tokens in the predicted output (for language modeling, the input too).
+  - name: num_bytes
+    display_name: '# bytes'
+    description: Average number of bytes in the predicted output (for language modeling, the input too).
+
+  - name: num_references
+    display_name: '# ref'
+    description: Number of references.
+  - name: num_train_trials
+    display_name: '# trials'
+    description: Number of trials, where in each trial we choose an independent, random set of training instances.
+  - name: estimated_num_tokens_cost
+    display_name: 'cost'
+    description: An estimate of the number of tokens (including prompt and output completions) needed to perform the request.
+  - name: num_prompt_tokens
+    display_name: '# prompt tokens'
+    description: Number of tokens in the prompt.
+  - name: num_prompt_characters
+    display_name: '# prompt chars'
+    description: Number of characters in the prompt.
+  - name: num_completion_tokens
+    display_name: '# completion tokens'
+    description: Actual number of completion tokens (over all completions).
+  - name: num_output_tokens
+    display_name: '# output tokens'
+    description: Actual number of output tokens.
+  - name: max_num_output_tokens
+    display_name: 'Max output tokens'
+    description: Maximum number of output tokens (overestimate since we might stop earlier due to stop sequences).
+  - name: num_requests
+    display_name: '# requests'
+    description: Number of distinct API requests.
+  - name: num_instances
+    display_name: '# eval'
+    description: Number of evaluation instances.
+  - name: num_train_instances
+    display_name: '# train'
+    description: Number of training instances (e.g., in-context examples).
+  - name: prompt_truncated
+    display_name: truncated
+    description: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).
+  - name: finish_reason_length
+    display_name: finish b/c length
+    description: Fraction of instances where the output was terminated because of the max tokens limit.
+  - name: finish_reason_stop
+    display_name: finish b/c stop
+    description: Fraction of instances where the output was terminated because of the stop sequences.
+  - name: finish_reason_endoftext
+    display_name: finish b/c endoftext
+    description: Fraction of instances where the output was terminated because the end of text token was generated.
+  - name: finish_reason_unknown
+    display_name: finish b/c unknown
+    description: Fraction of instances where the output was terminated for unknown reasons.
+  - name: num_completions
+    display_name: '# completions'
+    description: Number of completions.
+  - name: predicted_index
+    display_name: Predicted index
+    description: Integer index of the reference (0, 1, ...) that was predicted by the model (for multiple-choice).
+
+  # Accuracy metrics:
+  - name: program_accuracy
+    display_name: Program Accuracy
+    description: Accuracy of the generated programs
+    lower_is_better: false
+  - name: execution_accuracy
+    display_name: Execution Accuracy
+    description: Accuracy of the final result of the generated program
+    lower_is_better: false
+  - name: annotation_financebench_label_correct_answer
+    display_name: Correct Answer
+    description: Whether the final result was correct, as judged by GPT-4o
+    lower_is_better: false
+  - name: quasi_exact_match
+    display_name: Quasi-exact match
+    short_display_name: EM
+    description: Fraction of instances that the predicted output matches a correct reference up to light processing.
+    lower_is_better: false
+  - name: error_rate
+    display_name: SQL Error Rate
+    short_display_name: SQL Error Rate
+    description: Fraction of generated queries that result in a SQL execution error
+    lower_is_better: true
+
+############################################################
+perturbations: []
+
+############################################################
+metric_groups:
+  - name: accuracy
+    display_name: Accuracy
+    hide_win_rates: true
+    metrics:
+      - name: ${main_name}
+        split: ${main_split}
+
+  - name: efficiency
+    display_name: Efficiency
+    metrics:
+      - name: inference_runtime
+        split: ${main_split}
+
+  - name: general_information
+    display_name: General information
+    hide_win_rates: true
+    metrics:
+      - name: num_instances
+        split: ${main_split}
+      - name: num_train_instances
+        split: ${main_split}
+      - name: prompt_truncated
+        split: ${main_split}
+      - name: num_prompt_tokens
+        split: ${main_split}
+      - name: num_output_tokens
+        split: ${main_split}
+
+############################################################
+run_groups:
+  - name: financial_scenarios
+    display_name: Financial Scenarios
+    description: Scenarios for the financial domain
+    category: All scenarios
+    subgroups:
+      - czech_bank_qa
+
+  - name: czech_bank_qa
+    display_name: CzechBankQA
+    description: The CzechBankQA
+    metric_groups:
+      - accuracy
+      - efficiency
+      - general_information
+    environment:
+      main_name: error_rate
+      main_split: test
+    taxonomy:
+      task: text-to-SQL
+      what: queries from financial experts
+      who: financial experts
+      when: "1999"
+      language: English
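The `${main_name}` and `${main_split}` placeholders in the metric groups above are resolved per run group from its `environment` block, so for `czech_bank_qa` the accuracy group's main metric becomes `error_rate` on the `test` split (with `lower_is_better: true`, since it counts SQL execution errors). A minimal Python sketch of this substitution semantics, assuming plain `${...}` string templating; the helper `resolve_metric_group` is hypothetical, not part of the HELM codebase:

```python
from string import Template

# Hypothetical helper (not HELM's actual API): substitute a run group's
# "environment" values into a metric group's "${...}" placeholders.
def resolve_metric_group(metric_group: dict, environment: dict) -> dict:
    resolved = [
        {key: Template(value).substitute(environment) for key, value in metric.items()}
        for metric in metric_group["metrics"]
    ]
    return {**metric_group, "metrics": resolved}

accuracy_group = {
    "name": "accuracy",
    "metrics": [{"name": "${main_name}", "split": "${main_split}"}],
}
czech_bank_qa_env = {"main_name": "error_rate", "main_split": "test"}

print(resolve_metric_group(accuracy_group, czech_bank_qa_env))
# -> {'name': 'accuracy', 'metrics': [{'name': 'error_rate', 'split': 'test'}]}
```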
helm/benchmark/static/schema_enem_challenge.yaml
@@ -0,0 +1,146 @@
+############################################################
+metrics:
+  # Infrastructure metrics:
+  - name: num_perplexity_tokens
+    display_name: '# tokens'
+    description: Average number of tokens in the predicted output (for language modeling, the input too).
+  - name: num_bytes
+    display_name: '# bytes'
+    description: Average number of bytes in the predicted output (for language modeling, the input too).
+
+  - name: num_references
+    display_name: '# ref'
+    description: Number of references.
+  - name: num_train_trials
+    display_name: '# trials'
+    description: Number of trials, where in each trial we choose an independent, random set of training instances.
+  - name: estimated_num_tokens_cost
+    display_name: 'cost'
+    description: An estimate of the number of tokens (including prompt and output completions) needed to perform the request.
+  - name: num_prompt_tokens
+    display_name: '# prompt tokens'
+    description: Number of tokens in the prompt.
+  - name: num_prompt_characters
+    display_name: '# prompt chars'
+    description: Number of characters in the prompt.
+  - name: num_completion_tokens
+    display_name: '# completion tokens'
+    description: Actual number of completion tokens (over all completions).
+  - name: num_output_tokens
+    display_name: '# output tokens'
+    description: Actual number of output tokens.
+  - name: max_num_output_tokens
+    display_name: 'Max output tokens'
+    description: Maximum number of output tokens (overestimate since we might stop earlier due to stop sequences).
+  - name: num_requests
+    display_name: '# requests'
+    description: Number of distinct API requests.
+  - name: num_instances
+    display_name: '# eval'
+    description: Number of evaluation instances.
+  - name: num_train_instances
+    display_name: '# train'
+    description: Number of training instances (e.g., in-context examples).
+  - name: prompt_truncated
+    display_name: truncated
+    description: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).
+  - name: finish_reason_length
+    display_name: finish b/c length
+    description: Fraction of instances where the output was terminated because of the max tokens limit.
+  - name: finish_reason_stop
+    display_name: finish b/c stop
+    description: Fraction of instances where the output was terminated because of the stop sequences.
+  - name: finish_reason_endoftext
+    display_name: finish b/c endoftext
+    description: Fraction of instances where the output was terminated because the end of text token was generated.
+  - name: finish_reason_unknown
+    display_name: finish b/c unknown
+    description: Fraction of instances where the output was terminated for unknown reasons.
+  - name: num_completions
+    display_name: '# completions'
+    description: Number of completions.
+  - name: predicted_index
+    display_name: Predicted index
+    description: Integer index of the reference (0, 1, ...) that was predicted by the model (for multiple-choice).
+
+  # Accuracy metrics:
+  - name: exact_match
+    display_name: Exact match
+    short_display_name: EM
+    description: Fraction of instances that the predicted output matches a correct reference exactly.
+    lower_is_better: false
+  - name: quasi_exact_match
+    display_name: Quasi-exact match
+    short_display_name: EM
+    description: Fraction of instances that the predicted output matches a correct reference up to light processing.
+    lower_is_better: false
+  - name: prefix_exact_match
+    display_name: Prefix exact match
+    short_display_name: PEM
+    description: Fraction of instances that the predicted output matches the prefix of a correct reference exactly.
+    lower_is_better: false
+  - name: quasi_prefix_exact_match
+    # TODO: should call this prefix_quasi_exact_match
+    display_name: Prefix quasi-exact match
+    short_display_name: PEM
+    description: Fraction of instances that the predicted output matches the prefix of a correct reference up to light processing.
+    lower_is_better: false
+
+
+############################################################
+perturbations: []
+
+############################################################
+metric_groups:
+  - name: accuracy
+    display_name: Accuracy
+    metrics:
+      - name: ${main_name}
+        split: ${main_split}
+
+  # - name: efficiency
+  #   display_name: Efficiency
+  #   metrics:
+  #     - name: inference_runtime
+  #       split: ${main_split}
+
+  - name: general_information
+    display_name: General information
+    hide_win_rates: true
+    metrics:
+      - name: num_instances
+        split: ${main_split}
+      - name: num_train_instances
+        split: ${main_split}
+      - name: prompt_truncated
+        split: ${main_split}
+      - name: num_prompt_tokens
+        split: ${main_split}
+      - name: num_output_tokens
+        split: ${main_split}
+
+############################################################
+run_groups:
+  - name: core_scenarios
+    display_name: Core Scenarios
+    description: Core Scenarios
+    category: All scenarios
+    subgroups:
+      - enem_challenge
+
+  - name: enem_challenge
+    display_name: ENEM Challenge
+    description: ENEM Challenge
+    metric_groups:
+      - accuracy
+      # - efficiency
+      - general_information
+    environment:
+      main_name: exact_match
+      main_split: test
+    taxonomy:
+      task: "multiple-choice question answering"
+      what: "general academic subjects"
+      who: "Brazilian Ministry of Education"
+      when: "between 2009 and 2023"
+      language: Portuguese
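These schemas distinguish `exact_match` from `quasi_exact_match`, which matches "up to light processing". As a rough illustration of the difference only: the normalization below (lowercasing, stripping punctuation, collapsing whitespace) is an assumption for this sketch, not HELM's exact preprocessing:

```python
import string

# Sketch of "light processing" for quasi-exact match; the precise
# normalization HELM applies is an assumption here.
def normalize(text: str) -> str:
    text = text.lower().translate(str.maketrans("", "", string.punctuation))
    return " ".join(text.split())

prediction, reference = "Paris.", "paris"
print(prediction == reference)                        # False -> exact_match fails
print(normalize(prediction) == normalize(reference))  # True  -> quasi-exact match succeeds
```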
@@ -0,0 +1,319 @@
|
|
|
1
|
+
---
|
|
2
|
+
############################################################
|
|
3
|
+
metrics:
|
|
4
|
+
# Infrastructure metrics:
|
|
5
|
+
- name: num_perplexity_tokens
|
|
6
|
+
display_name: '# tokens'
|
|
7
|
+
description: Average number of tokens in the predicted output (for language modeling, the input too).
|
|
8
|
+
- name: num_bytes
|
|
9
|
+
display_name: '# bytes'
|
|
10
|
+
description: Average number of bytes in the predicted output (for language modeling, the input too).
|
|
11
|
+
|
|
12
|
+
- name: num_references
|
|
13
|
+
display_name: '# ref'
|
|
14
|
+
description: Number of references.
|
|
15
|
+
- name: num_train_trials
|
|
16
|
+
display_name: '# trials'
|
|
17
|
+
description: Number of trials, where in each trial we choose an independent, random set of training instances.
|
|
18
|
+
- name: num_prompt_tokens
|
|
19
|
+
display_name: '# prompt tokens'
|
|
20
|
+
description: Number of tokens in the prompt.
|
|
21
|
+
- name: num_completion_tokens
|
|
22
|
+
display_name: '# completion tokens'
|
|
23
|
+
description: Actual number of completion tokens (over all completions).
|
|
24
|
+
- name: num_output_tokens
|
|
25
|
+
display_name: '# output tokens'
|
|
26
|
+
description: Actual number of output tokens.
|
|
27
|
+
- name: num_instances
|
|
28
|
+
display_name: '# eval'
|
|
29
|
+
description: Number of evaluation instances.
|
|
30
|
+
- name: num_train_instances
|
|
31
|
+
display_name: '# train'
|
|
32
|
+
description: Number of training instances (e.g., in-context examples).
|
|
33
|
+
- name: prompt_truncated
|
|
34
|
+
display_name: truncated
|
|
35
|
+
description: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).
|
|
36
|
+
- name: finish_reason_length
|
|
37
|
+
display_name: finish b/c length
|
|
38
|
+
description: Fraction of instances where the the output was terminated because of the max tokens limit.
|
|
39
|
+
- name: finish_reason_stop
|
|
40
|
+
display_name: finish b/c stop
|
|
41
|
+
description: Fraction of instances where the the output was terminated because of the stop sequences.
|
|
42
|
+
- name: finish_reason_endoftext
|
|
43
|
+
display_name: finish b/c endoftext
|
|
44
|
+
description: Fraction of instances where the the output was terminated because the end of text token was generated.
|
|
45
|
+
- name: finish_reason_unknown
|
|
46
|
+
display_name: finish b/c unknown
|
|
47
|
+
description: Fraction of instances where the the output was terminated for unknown reasons.
|
|
48
|
+
# Accuracy metrics:
|
|
49
|
+
- name: exact_match
|
|
50
|
+
display_name: Exact match
|
|
51
|
+
short_display_name: EM
|
|
52
|
+
description: Fraction of instances that the predicted output matches a correct reference exactly.
|
|
53
|
+
lower_is_better: false
|
|
54
|
+
- name: quasi_exact_match
|
|
55
|
+
display_name: Quasi-exact match
|
|
56
|
+
short_display_name: EM
|
|
57
|
+
description: Fraction of instances that the predicted output matches a correct reference up to light processing.
|
|
58
|
+
lower_is_better: false
|
|
59
|
+
- name: rouge_1
|
|
60
|
+
display_name: ROUGE-1
|
|
61
|
+
description: Average ROUGE score [(Lin, 2004)](https://aclanthology.org/W04-1013/) based on 1-gram overlap.
|
|
62
|
+
lower_is_better: false
|
|
63
|
+
- name: rouge_2
|
|
64
|
+
display_name: ROUGE-2
|
|
65
|
+
description: Average ROUGE score [(Lin, 2004)](https://aclanthology.org/W04-1013/) based on 2-gram overlap.
|
|
66
|
+
lower_is_better: false
|
|
67
|
+
- name: rouge_l
|
|
68
|
+
display_name: ROUGE-L
|
|
69
|
+
description: Average ROUGE score [(Lin, 2004)](https://aclanthology.org/W04-1013/) based on longest common subsequence overlap.
|
|
70
|
+
lower_is_better: false
|
|
71
|
+
- name: classification_weighted_f1
|
|
72
|
+
display_name: Weighted F1
|
|
73
|
+
description: Weighted F1 score
|
|
74
|
+
lower_is_better: false
|
|
75
|
+
- name: float_equiv
|
|
76
|
+
display_name: Float Equivalence
|
|
77
|
+
description: Float Equivalence
|
|
78
|
+
lower_is_better: false
|
|
79
|
+
- name: adjusted_macro_f1_score
|
|
80
|
+
display_name: Adjusted Macro F1 Score
|
|
81
|
+
short_display_name: Adjusted Macro F1 Score
|
|
82
|
+
description: Entity type classification F1 score, adjusted for partial matches following the KPI-Edgar paper, macro-averaged across entity types
|
|
83
|
+
+
+############################################################
+perturbations: []
+
+############################################################
+metric_groups:
+  - name: accuracy
+    display_name: Accuracy
+    metrics:
+      - name: ${main_name}
+        split: ${main_split}
+
+  - name: efficiency
+    display_name: Efficiency
+    metrics:
+      - name: inference_runtime
+        split: ${main_split}
+
+  - name: general_information
+    display_name: General information
+    hide_win_rates: true
+    metrics:
+      - name: num_instances
+        split: ${main_split}
+      - name: num_train_instances
+        split: ${main_split}
+      - name: prompt_truncated
+        split: ${main_split}
+      - name: num_prompt_tokens
+        split: ${main_split}
+      - name: num_output_tokens
+        split: ${main_split}
+
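The `${main_name}` and `${main_split}` placeholders are filled in per run group: each run group below declares an `environment` mapping with `main_name` and `main_split` keys, and those values are substituted into the metric group templates. A rough sketch of that substitution, assuming plain string templating (the real resolution happens inside helm's schema code):

```python
from string import Template

# Hypothetical illustration of how ${main_name}/${main_split} could be
# resolved against a run group's environment; not crfm-helm's actual code.
metric_group = {"name": "accuracy", "metrics": [{"name": "${main_name}", "split": "${main_split}"}]}
environment = {"main_name": "classification_weighted_f1", "main_split": "test"}

resolved = [
    {key: Template(value).substitute(environment) for key, value in metric.items()}
    for metric in metric_group["metrics"]
]
print(resolved)  # [{'name': 'classification_weighted_f1', 'split': 'test'}]
```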
+############################################################
+run_groups:
+  - name: financial_scenarios
+    display_name: Financial Scenarios
+    description: Scenarios for the financial domain
+    category: All scenarios
+    subgroups:
+      - gold_commodity_news
+      - financial_phrasebank
+      - conv_fin_qa_calc
+      - kpi_edgar
+
+  - name: legal_scenarios
+    display_name: Legal Scenarios
+    description: Scenarios for the legal domain
+    category: All scenarios
+    subgroups:
+      - legal_contract_summarization
+      - casehold
+      - echr_judgment_classification
+      - legal_opinion_sentiment_classification
+
+  - name: climate_scenarios
+    display_name: Climate Scenarios
+    description: Scenarios for the climate domain
+    category: All scenarios
+    subgroups:
+      - sumosum
+
+  - name: cyber_security_scenarios
+    display_name: Cyber Security Scenarios
+    description: Scenarios for the cyber security domain
+    category: All scenarios
+    subgroups:
+      - cti_to_mitre
+
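The four domain groups above are organizational only: each entry under `subgroups` must name a concrete run group defined later in this file. A quick consistency check one could run over the parsed YAML (a hypothetical snippet assuming PyYAML and a local copy of the schema file):

```python
import yaml  # assumes PyYAML is installed

# Hypothetical sanity check: every subgroup should reference a defined run group.
with open("schema.yaml") as f:  # placeholder path
    schema = yaml.safe_load(f)

defined = {group["name"] for group in schema["run_groups"]}
for group in schema["run_groups"]:
    for sub in group.get("subgroups", []):
        assert sub in defined, f"{group['name']} references undefined subgroup {sub}"
```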
+  - name: financial_phrasebank
+    display_name: Financial Phrasebank (Sentiment Classification)
+    description: A sentiment classification benchmark based on the dataset from Good Debt or Bad Debt - Detecting Semantic Orientations in Economic Texts [(Malo et al., 2013)](https://arxiv.org/abs/1307.5336).
+    metric_groups:
+      - accuracy
+      - efficiency
+      - general_information
+    environment:
+      main_name: classification_weighted_f1
+      main_split: test
+    taxonomy:
+      task: sentiment analysis
+      what: phrases from financial news texts and company press releases
+      who: annotators with adequate business education background
+      when: before 2013
+      language: English
+
+  - name: conv_fin_qa_calc
+    display_name: ConvFinQACalc
+    description: "A mathematical calculation benchmark based on ConvFinQA: Exploring the Chain of Numerical Reasoning in Conversational Finance Question Answering [(Chen et al., 2022)](https://arxiv.org/pdf/2210.03849.pdf)."
+    metric_groups:
+      - accuracy
+      - efficiency
+      - general_information
+    environment:
+      main_name: float_equiv
+      main_split: valid
+    taxonomy:
+      task: question answering with numeric reasoning
+      what: financial reports
+      who: financial experts
+      when: 1999 to 2019
+      language: English
+
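`float_equiv`, the headline metric for ConvFinQACalc, compares answers as numbers rather than strings, so "0.30" and "0.3" can match. One plausible reading, sketched as a tolerance-based comparison (illustrative only; the package's actual normalization may differ):

```python
import math

# Illustrative float-equivalence check; the actual float_equiv metric in
# crfm-helm may normalize differently (e.g., strip '%', '$', commas).
def float_equiv(predicted: str, reference: str, rel_tol: float = 1e-4) -> float:
    try:
        pred, ref = float(predicted.strip()), float(reference.strip())
    except ValueError:
        # Fall back to exact string match when either side is not a number.
        return float(predicted.strip() == reference.strip())
    return float(math.isclose(pred, ref, rel_tol=rel_tol))

print(float_equiv("0.30", "0.3"))  # 1.0
print(float_equiv("30%", "0.3"))   # 0.0 under this naive sketch
```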
+  - name: gold_commodity_news
+    display_name: Gold Commodity News
+    description: A classification benchmark based on a dataset of human-annotated gold commodity news headlines ([Sinha & Khandait, 2019](https://arxiv.org/abs/2009.04202)).
+    metric_groups:
+      - accuracy
+      - efficiency
+      - general_information
+    environment:
+      main_name: classification_weighted_f1
+      main_split: test
+    taxonomy:
+      task: text classification
+      what: gold commodity news headlines
+      who: financial journalists
+      when: 2000-2019
+      language: English
+
+  - name: kpi_edgar
+    display_name: KPI-EDGAR Financial Documents (Named Entity Recognition)
+    description: A named entity recognition benchmark based on the paper KPI-EDGAR - A Novel Dataset and Accompanying Metric for Relation Extraction from Financial Documents [(Deußer et al., 2022)](https://arxiv.org/pdf/2210.09163.pdf).
+    metric_groups:
+      - accuracy
+      - general_information
+    environment:
+      main_name: adjusted_macro_f1_score
+      main_split: test
+    taxonomy:
+      task: named entity recognition
+      what: financial reports
+      who: financial experts
+      when: before 2022
+      language: English
+
+  - name: legal_contract_summarization
+    display_name: Legal Contract Summarization
+    description: Plain English Summarization of Contracts [(Manor et al., 2019)](https://aclanthology.org/W19-2201.pdf).
+    metric_groups:
+      - accuracy
+      - efficiency
+      - general_information
+    environment:
+      main_name: rouge_l
+      main_split: test
+    taxonomy:
+      task: summarization
+      what: legal contracts (e.g. terms of service, license agreements)
+      who: lawyers
+      when: before 2019
+      language: English
+
+  - name: casehold
+    display_name: CaseHOLD
+    description: CaseHOLD (Case Holdings On Legal Decisions) is a multiple choice question answering scenario where the task is to identify the relevant holding of a cited case [(Zheng et al., 2021)](https://arxiv.org/pdf/2104.08671.pdf).
+    metric_groups:
+      - accuracy
+      - efficiency
+      - general_information
+    environment:
+      main_name: quasi_exact_match
+      main_split: test
+    taxonomy:
+      task: question answering
+      what: Harvard Law Library case law corpus
+      who: legal professionals
+      when: before 2021
+      language: English
+
+  - name: echr_judgment_classification
+    display_name: ECHR Judgment Classification
+    description: The "Binary Violation" Classification task from the paper Neural Legal Judgment Prediction in English [(Chalkidis et al., 2019)](https://arxiv.org/pdf/1906.02059.pdf). The task is to analyze the description of a legal case from the European Court of Human Rights (ECHR), and classify it as positive if any human rights article or protocol has been violated and negative otherwise.
+    metric_groups:
+      - accuracy
+      - general_information
+    environment:
+      main_name: classification_weighted_f1
+      main_split: test
+    taxonomy:
+      task: text classification
+      what: cases from the European Court of Human Rights
+      who: judiciary of the European Court of Human Rights
+      when: 2014-2018 (train) and 2014-2018 (test)
+      language: English
+
+  - name: legal_opinion_sentiment_classification
+    display_name: Legal Opinion Sentiment Classification
+    description: A legal opinion sentiment classification task based on the paper Effective Approach to Develop a Sentiment Annotator For Legal Domain in a Low Resource Setting [(Ratnayaka et al., 2020)](https://arxiv.org/pdf/2011.00318.pdf).
+    metric_groups:
+      - accuracy
+      - general_information
+    environment:
+      main_name: quasi_exact_match
+      main_split: test
+    taxonomy:
+      task: sentiment analysis
+      what: United States legal opinion texts
+      who: United States courts
+      when: Before 2020
+      language: English
+
+  - name: sumosum
+    display_name: SUMO Web Claims Summarization
+    description: A summarization benchmark based on the climate subset of the SUMO dataset ([Mishra et al., 2020](https://aclanthology.org/2020.wnut-1.12/)).
+    metric_groups:
+      - accuracy
+      - efficiency
+      - general_information
+    environment:
+      main_name: rouge_l
+      main_split: test
+    taxonomy:
+      task: summarization
+      what: Articles from climatefeedback.org
+      who: Writers of news articles and web documents
+      when: Before 2020
+      language: English
+
+  - name: cti_to_mitre
+    display_name: CTI-to-MITRE Cyber Threat Intelligence
+    description: A classification benchmark based on Automatic Mapping of Unstructured Cyber Threat Intelligence - An Experimental Study [(Orbinato et al., 2022)](https://arxiv.org/pdf/2208.12144.pdf).
+    metric_groups:
+      - accuracy
+      - efficiency
+      - general_information
+    environment:
+      main_name: quasi_exact_match
+      main_split: test
+    taxonomy:
+      task: text classification
+      what: Descriptions of malicious techniques
+      who: Security professionals
+      when: Before 2022
+      language: English
@@ -83,6 +83,14 @@ metrics:
     description: Fraction of instances that the predicted output matches a correct reference up to light processing.
     lower_is_better: false
 
+  # Efficiency metrics:
+  - name: inference_runtime
+    display_name: Observed inference runtime (s)
+    short_display_name: Observed inference time (s)
+    lower_is_better: true
+    description: Average observed time to process a request to the model (via an API, and thus depends on particular deployment).
+
+
 ############################################################
 perturbations: []
 
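The new `inference_runtime` metric is wall-clock time per request, so it measures the deployment (network latency, batching, server load) as much as the model itself. A minimal sketch of that kind of measurement, where `query_model` is a hypothetical stand-in for the API client:

```python
import time

def query_model(prompt: str) -> str:
    # Placeholder for a real API call.
    return "response"

# Observed inference runtime: wall-clock seconds around the request,
# so network latency and server load are included in the measurement.
start = time.perf_counter()
response = query_model("What is the capital of France?")
inference_runtime = time.perf_counter() - start
print(f"inference_runtime: {inference_runtime:.3f}s")
```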
@@ -90,12 +98,16 @@ perturbations: []
 metric_groups:
   - name: accuracy
     display_name: Accuracy
+    aggregation_strategies:
+      - mean
     metrics:
       - name: ${main_name}
         split: ${main_split}
 
   - name: efficiency
     display_name: Efficiency
+    aggregation_strategies:
+      - mean
     metrics:
       - name: inference_runtime
         split: ${main_split}
@@ -145,7 +157,7 @@ run_groups:
 
   - name: financebench
     display_name: FinanceBench
-    description: FinanceBench is a benchmark for open book financial question answering. It comprises 10,231 questions about publicly traded companies, with corresponding answers and evidence strings
+    description: FinanceBench is a benchmark for open book financial question answering. It comprises 10,231 questions about publicly traded companies, with corresponding answers and evidence strings [(Islam et al., 2023)](https://arxiv.org/abs/2311.11944/).
     metric_groups:
       - accuracy
       - efficiency
@@ -163,7 +175,7 @@ run_groups:
   - name: banking77
     display_name: BANKING77
     short_display_name: BANKING77
-    description: BANKING77 is a benchmark for intent classification of customer service queries in the banking domain [(Casanueva et al., 2020)](https://aclanthology.org/2020.nlp4convai-1.5/)
+    description: BANKING77 is a benchmark for intent classification of customer service queries in the banking domain [(Casanueva et al., 2020)](https://aclanthology.org/2020.nlp4convai-1.5/).
     metric_groups:
       - accuracy
       - efficiency
@@ -177,13 +189,3 @@ run_groups:
       who: banking customers
       when: During or before 2020
       language: English
-
-  # - name: financial_scenarios_ablations
-  #   display_name: Financial Scenarios Ablations
-  #   description: Scenarios for the financial domain with ablations
-  #   category: All scenarios
-  #   subgroups:
-  #     - fin_qa
-  #   adapter_keys_shown:
-  #     - model
-  #     - max_train_instances