crfm-helm 0.5.4__py3-none-any.whl → 0.5.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of crfm-helm might be problematic.
- crfm_helm-0.5.6.dist-info/METADATA +427 -0
- crfm_helm-0.5.6.dist-info/RECORD +941 -0
- {crfm_helm-0.5.4.dist-info → crfm_helm-0.5.6.dist-info}/WHEEL +1 -1
- helm/benchmark/adaptation/adapter_spec.py +13 -1
- helm/benchmark/adaptation/adapters/adapter_factory.py +15 -1
- helm/benchmark/adaptation/adapters/binary_ranking_adapter.py +1 -1
- helm/benchmark/adaptation/adapters/chat_adapter.py +49 -0
- helm/benchmark/adaptation/adapters/ehr_instruction_adapter.py +108 -0
- helm/benchmark/adaptation/adapters/generation_adapter.py +1 -1
- helm/benchmark/adaptation/adapters/in_context_learning_adapter.py +4 -4
- helm/benchmark/adaptation/adapters/language_modeling_adapter.py +1 -1
- helm/benchmark/adaptation/adapters/multimodal/generation_multimodal_adapter.py +4 -2
- helm/benchmark/adaptation/adapters/multimodal/in_context_learning_multimodal_adapter.py +1 -1
- helm/benchmark/adaptation/adapters/multimodal/multiple_choice_joint_multimodal_adapter.py +1 -1
- helm/benchmark/adaptation/adapters/multimodal/test_in_context_learning_multimodal_adapter.py +4 -2
- helm/benchmark/adaptation/adapters/multimodal/test_multimodal_prompt.py +1 -1
- helm/benchmark/adaptation/adapters/multiple_choice_calibrated_adapter.py +1 -1
- helm/benchmark/adaptation/adapters/multiple_choice_joint_adapter.py +2 -2
- helm/benchmark/adaptation/adapters/multiple_choice_joint_chain_of_thought_adapter.py +87 -0
- helm/benchmark/adaptation/adapters/multiple_choice_separate_adapter.py +1 -1
- helm/benchmark/adaptation/adapters/test_adapter.py +4 -4
- helm/benchmark/adaptation/adapters/test_generation_adapter.py +3 -3
- helm/benchmark/adaptation/adapters/test_language_modeling_adapter.py +2 -2
- helm/benchmark/adaptation/adapters/test_multiple_choice_joint_adapter.py +2 -2
- helm/benchmark/adaptation/common_adapter_specs.py +69 -4
- helm/benchmark/adaptation/prompt.py +1 -1
- helm/benchmark/annotation/aci_bench_annotator.py +95 -0
- helm/benchmark/annotation/air_bench_annotator.py +21 -6
- helm/benchmark/annotation/annotator.py +5 -0
- helm/benchmark/annotation/annotator_factory.py +3 -20
- helm/benchmark/annotation/autobencher_capabilities_annotator.py +107 -0
- helm/benchmark/annotation/autobencher_safety_annotator.py +98 -0
- helm/benchmark/annotation/bigcodebench_annotator.py +108 -0
- helm/benchmark/annotation/bird_sql_annotator.py +58 -0
- helm/benchmark/annotation/chw_care_plan_annotator.py +93 -0
- helm/benchmark/annotation/czech_bank_qa_annotator.py +78 -0
- helm/benchmark/annotation/dischargeme_annotator.py +107 -0
- helm/benchmark/annotation/ehr_sql_annotator.py +87 -0
- helm/benchmark/annotation/helpdesk_call_summarization_annotator.py +131 -0
- helm/benchmark/annotation/image2struct/image_compiler_annotator.py +6 -1
- helm/benchmark/annotation/live_qa_annotator.py +1 -1
- helm/benchmark/annotation/med_dialog_annotator.py +99 -0
- helm/benchmark/annotation/medalign_annotator.py +100 -0
- helm/benchmark/annotation/medi_qa_annotator.py +98 -0
- helm/benchmark/annotation/medication_qa_annotator.py +87 -63
- helm/benchmark/annotation/mental_health_annotator.py +98 -0
- helm/benchmark/annotation/mimic_bhc_annotator.py +100 -0
- helm/benchmark/annotation/mimic_rrs_annotator.py +100 -0
- helm/benchmark/annotation/model_as_judge.py +214 -6
- helm/benchmark/annotation/mtsamples_procedures_annotator.py +98 -0
- helm/benchmark/annotation/mtsamples_replicate_annotator.py +101 -0
- helm/benchmark/annotation/omni_math/gpt_evaluation_template.txt +152 -0
- helm/benchmark/annotation/omni_math/gpt_evaluation_zero_shot_template.txt +36 -0
- helm/benchmark/annotation/omni_math_annotator.py +131 -0
- helm/benchmark/annotation/spider_annotator.py +18 -0
- helm/benchmark/annotation/starr_patient_instructions_annotator.py +98 -0
- helm/benchmark/annotation/wildbench/eval_template.pairwise.v2.md +75 -0
- helm/benchmark/annotation/wildbench/eval_template.score.v2.md +66 -0
- helm/benchmark/annotation/wildbench_annotator.py +119 -0
- helm/benchmark/annotation_executor.py +35 -15
- helm/benchmark/augmentations/cleva_perturbation.py +9 -8
- helm/benchmark/augmentations/contraction_expansion_perturbation.py +2 -2
- helm/benchmark/augmentations/contrast_sets_perturbation.py +2 -2
- helm/benchmark/augmentations/dialect_perturbation.py +4 -5
- helm/benchmark/augmentations/extra_space_perturbation.py +2 -2
- helm/benchmark/augmentations/filler_words_perturbation.py +2 -2
- helm/benchmark/augmentations/gender_perturbation.py +2 -2
- helm/benchmark/augmentations/lowercase_perturbation.py +2 -2
- helm/benchmark/augmentations/mild_mix_perturbation.py +6 -6
- helm/benchmark/augmentations/misspelling_perturbation.py +2 -2
- helm/benchmark/augmentations/person_name_perturbation.py +4 -5
- helm/benchmark/augmentations/perturbation.py +1 -1
- helm/benchmark/augmentations/space_perturbation.py +2 -2
- helm/benchmark/augmentations/suffix_perturbation.py +2 -2
- helm/benchmark/augmentations/synonym_perturbation.py +4 -3
- helm/benchmark/augmentations/test_perturbation.py +16 -13
- helm/benchmark/augmentations/translate_perturbation.py +2 -2
- helm/benchmark/augmentations/typos_perturbation.py +2 -2
- helm/benchmark/data_preprocessor.py +2 -2
- helm/benchmark/executor.py +11 -12
- helm/benchmark/huggingface_registration.py +2 -7
- helm/benchmark/metrics/aci_bench_metrics.py +14 -0
- helm/benchmark/metrics/basic_metrics.py +6 -6
- helm/benchmark/metrics/bbq_metrics.py +2 -2
- helm/benchmark/metrics/bias_metrics.py +12 -3
- helm/benchmark/metrics/bias_word_lists.py +1 -1
- helm/benchmark/metrics/bigcodebench_metrics.py +25 -0
- helm/benchmark/metrics/bird_sql_metrics.py +28 -0
- helm/benchmark/metrics/chw_care_plan_metrics.py +14 -0
- helm/benchmark/metrics/classification_metrics.py +76 -12
- helm/benchmark/metrics/cleva_harms_metrics.py +10 -9
- helm/benchmark/metrics/code_metrics.py +5 -5
- helm/benchmark/metrics/comet_metric.py +125 -0
- helm/benchmark/metrics/common_metric_specs.py +9 -2
- helm/benchmark/metrics/conv_fin_qa_calc_metrics.py +72 -0
- helm/benchmark/metrics/copyright_metrics.py +4 -4
- helm/benchmark/metrics/czech_bank_qa_metrics.py +29 -0
- helm/benchmark/metrics/decodingtrust_fairness_metrics.py +2 -2
- helm/benchmark/metrics/decodingtrust_privacy_metrics.py +2 -2
- helm/benchmark/metrics/decodingtrust_stereotype_bias_metrics.py +2 -2
- helm/benchmark/metrics/dischargeme_metrics.py +14 -0
- helm/benchmark/metrics/disinformation_metrics.py +4 -4
- helm/benchmark/metrics/dry_run_metrics.py +5 -5
- helm/benchmark/metrics/efficiency_metrics.py +6 -6
- helm/benchmark/metrics/ehr_sql_metrics.py +103 -0
- helm/benchmark/metrics/evaluate_instances_metric.py +3 -3
- helm/benchmark/metrics/evaluate_reference_metrics.py +144 -16
- helm/benchmark/metrics/gpqa_chain_of_thought_metric.py +103 -0
- helm/benchmark/metrics/gpt4_audio_critique_metrics.py +167 -0
- helm/benchmark/metrics/gpt4_audio_refusal_metrics.py +145 -0
- helm/benchmark/metrics/helpdesk_call_summarization_metrics.py +36 -0
- helm/benchmark/metrics/ifeval/__init__.py +0 -0
- helm/benchmark/metrics/ifeval/instructions.py +1574 -0
- helm/benchmark/metrics/ifeval/instructions_registry.py +182 -0
- helm/benchmark/metrics/ifeval/instructions_registry.pyi +3 -0
- helm/benchmark/metrics/ifeval/instructions_util.py +153 -0
- helm/benchmark/metrics/ifeval_metrics.py +55 -0
- helm/benchmark/metrics/image_generation/aesthetics_metrics.py +1 -1
- helm/benchmark/metrics/image_generation/detection_metrics.py +1 -1
- helm/benchmark/metrics/image_generation/detectors/vitdet.py +1 -1
- helm/benchmark/metrics/image_generation/fractal_dimension/test_fractal_dimension_util.py +1 -1
- helm/benchmark/metrics/image_generation/fractal_dimension_metric.py +1 -1
- helm/benchmark/metrics/image_generation/nsfw_metrics.py +1 -1
- helm/benchmark/metrics/image_generation/q16/test_q16.py +3 -1
- helm/benchmark/metrics/image_generation/q16_toxicity_metrics.py +1 -1
- helm/benchmark/metrics/image_generation/skin_tone_metrics.py +2 -2
- helm/benchmark/metrics/image_generation/watermark/test_watermark_detector.py +1 -1
- helm/benchmark/metrics/image_generation/watermark_metrics.py +1 -1
- helm/benchmark/metrics/instruction_following_critique_metrics.py +4 -4
- helm/benchmark/metrics/kpi_edgar_metrics.py +121 -0
- helm/benchmark/metrics/language_modeling_metrics.py +4 -4
- helm/benchmark/metrics/llm_jury_metrics.py +46 -0
- helm/benchmark/metrics/machine_translation_metrics.py +2 -2
- helm/benchmark/metrics/med_dialog_metrics.py +14 -0
- helm/benchmark/metrics/medalign_metrics.py +14 -0
- helm/benchmark/metrics/medcalc_bench_metrics.py +124 -0
- helm/benchmark/metrics/medec_metrics.py +101 -0
- helm/benchmark/metrics/medi_qa_metrics.py +14 -0
- helm/benchmark/metrics/medication_qa_metrics.py +10 -19
- helm/benchmark/metrics/melt_bias_metric.py +234 -0
- helm/benchmark/metrics/melt_bias_word_lists.py +1367 -0
- helm/benchmark/metrics/melt_metric_specs.py +43 -0
- helm/benchmark/metrics/melt_toxicity_metric.py +107 -0
- helm/benchmark/metrics/mental_health_metrics.py +14 -0
- helm/benchmark/metrics/metric.py +3 -3
- helm/benchmark/metrics/metric_service.py +11 -11
- helm/benchmark/metrics/mimic_bhc_metrics.py +14 -0
- helm/benchmark/metrics/mimic_rrs_metrics.py +14 -0
- helm/benchmark/metrics/mimiciv_billing_code_metrics.py +96 -0
- helm/benchmark/metrics/mtsamples_procedures_metrics.py +14 -0
- helm/benchmark/metrics/mtsamples_replicate_metrics.py +14 -0
- helm/benchmark/metrics/nltk_helper.py +32 -0
- helm/benchmark/metrics/numeracy_metrics.py +4 -4
- helm/benchmark/metrics/omni_math_metrics.py +32 -0
- helm/benchmark/metrics/openai_mrcr_metrics.py +52 -0
- helm/benchmark/metrics/output_processing_metric.py +60 -0
- helm/benchmark/metrics/output_processors.py +15 -0
- helm/benchmark/metrics/paraphrase_generation_metrics.py +2 -2
- helm/benchmark/metrics/ranking_metrics.py +3 -3
- helm/benchmark/metrics/reference_metric.py +3 -3
- helm/benchmark/metrics/ruler_qa_metrics.py +34 -0
- helm/benchmark/metrics/{bhasa_metrics.py → seahelm_metrics.py} +3 -3
- helm/benchmark/metrics/seahelm_metrics_specs.py +10 -0
- helm/benchmark/metrics/spider_metrics.py +7 -0
- helm/benchmark/metrics/starr_patient_instructions_metrics.py +14 -0
- helm/benchmark/metrics/statistic.py +1 -1
- helm/benchmark/metrics/summac/model_summac.py +2 -3
- helm/benchmark/metrics/summarization_critique_metrics.py +4 -4
- helm/benchmark/metrics/summarization_metrics.py +20 -9
- helm/benchmark/metrics/test_bias_metrics.py +5 -1
- helm/benchmark/metrics/test_classification_metrics.py +140 -68
- helm/benchmark/metrics/test_evaluate_reference_metrics.py +15 -0
- helm/benchmark/metrics/test_metric.py +1 -1
- helm/benchmark/metrics/test_statistic.py +2 -2
- helm/benchmark/metrics/tokens/ai21_token_cost_estimator.py +1 -1
- helm/benchmark/metrics/tokens/auto_token_cost_estimator.py +6 -6
- helm/benchmark/metrics/tokens/cohere_token_cost_estimator.py +1 -1
- helm/benchmark/metrics/tokens/free_token_cost_estimator.py +1 -1
- helm/benchmark/metrics/tokens/gooseai_token_cost_estimator.py +1 -1
- helm/benchmark/metrics/tokens/openai_token_cost_estimator.py +1 -1
- helm/benchmark/metrics/tokens/test_ai21_token_cost_estimator.py +1 -1
- helm/benchmark/metrics/tokens/test_openai_token_cost_estimator.py +3 -3
- helm/benchmark/metrics/toxicity_metrics.py +6 -6
- helm/benchmark/metrics/unitxt_metrics.py +7 -5
- helm/benchmark/metrics/vision_language/emd_utils.py +4 -2
- helm/benchmark/metrics/vision_language/image_metrics.py +1 -1
- helm/benchmark/metrics/vision_language/image_utils.py +2 -2
- helm/benchmark/metrics/wildbench_metrics.py +34 -0
- helm/benchmark/model_deployment_registry.py +6 -8
- helm/benchmark/model_metadata_registry.py +16 -0
- helm/benchmark/presentation/contamination.py +3 -3
- helm/benchmark/presentation/create_plots.py +33 -12
- helm/benchmark/presentation/run_display.py +13 -0
- helm/benchmark/presentation/schema.py +2 -1
- helm/benchmark/presentation/summarize.py +97 -67
- helm/benchmark/presentation/torr_robustness_summarizer.py +178 -0
- helm/benchmark/reeval_run.py +202 -0
- helm/benchmark/reeval_runner.py +355 -0
- helm/benchmark/run.py +86 -90
- helm/benchmark/run_expander.py +90 -9
- helm/benchmark/run_spec_factory.py +13 -0
- helm/benchmark/run_specs/air_bench_run_specs.py +21 -3
- helm/benchmark/run_specs/audio_run_specs.py +657 -0
- helm/benchmark/run_specs/call_center_run_specs.py +49 -0
- helm/benchmark/run_specs/capabilities_run_specs.py +308 -0
- helm/benchmark/run_specs/classic_run_specs.py +1 -69
- helm/benchmark/run_specs/enem_challenge_specs.py +31 -0
- helm/benchmark/run_specs/enterprise_run_specs.py +280 -0
- helm/benchmark/run_specs/experimental_run_specs.py +142 -3
- helm/benchmark/run_specs/imdb_ptbr_run_specs.py +30 -0
- helm/benchmark/run_specs/lite_run_specs.py +2 -2
- helm/benchmark/run_specs/long_context_run_specs.py +141 -0
- helm/benchmark/run_specs/medhelm_run_specs.py +1260 -0
- helm/benchmark/run_specs/melt_run_specs.py +783 -0
- helm/benchmark/run_specs/mmlu_clinical_afr_run_specs.py +49 -0
- helm/benchmark/run_specs/oab_exams_specs.py +32 -0
- helm/benchmark/run_specs/safety_run_specs.py +37 -0
- helm/benchmark/run_specs/{bhasa_run_specs.py → seahelm_run_specs.py} +44 -44
- helm/benchmark/run_specs/speech_disorder_audio_run_specs.py +169 -0
- helm/benchmark/run_specs/sql_run_specs.py +54 -0
- helm/benchmark/run_specs/tweetsentbr_run_specs.py +32 -0
- helm/benchmark/run_specs/unitxt_run_specs.py +14 -5
- helm/benchmark/run_specs/vlm_run_specs.py +103 -2
- helm/benchmark/run_specs/winogrande_afr_run_specs.py +47 -0
- helm/benchmark/runner.py +5 -5
- helm/benchmark/scenarios/aci_bench_scenario.py +126 -0
- helm/benchmark/scenarios/air_bench_scenario.py +6 -1
- helm/benchmark/scenarios/anthropic_hh_rlhf_scenario.py +5 -3
- helm/benchmark/scenarios/anthropic_red_team_scenario.py +1 -1
- helm/benchmark/scenarios/audio_language/__init__.py +0 -0
- helm/benchmark/scenarios/audio_language/air_bench_chat_scenario.py +130 -0
- helm/benchmark/scenarios/audio_language/air_bench_foundation_scenario.py +154 -0
- helm/benchmark/scenarios/audio_language/ami_scenario.py +96 -0
- helm/benchmark/scenarios/audio_language/audio_mnist_scenario.py +62 -0
- helm/benchmark/scenarios/audio_language/audio_pairs_scenario.py +62 -0
- helm/benchmark/scenarios/audio_language/audiocaps_scenario.py +59 -0
- helm/benchmark/scenarios/audio_language/casual_conversations2_scenario.py +152 -0
- helm/benchmark/scenarios/audio_language/common_voice_15_scenario.py +99 -0
- helm/benchmark/scenarios/audio_language/corebench_scenario.py +77 -0
- helm/benchmark/scenarios/audio_language/covost2_scenario.py +163 -0
- helm/benchmark/scenarios/audio_language/fleurs_fairness_scenario.py +83 -0
- helm/benchmark/scenarios/audio_language/fleurs_scenario.py +312 -0
- helm/benchmark/scenarios/audio_language/iemocap_audio_scenario.py +83 -0
- helm/benchmark/scenarios/audio_language/librispeech_fairness_scenario.py +96 -0
- helm/benchmark/scenarios/audio_language/librispeech_scenario.py +80 -0
- helm/benchmark/scenarios/audio_language/meld_audio_scenario.py +113 -0
- helm/benchmark/scenarios/audio_language/multilingual_librispeech_scenario.py +80 -0
- helm/benchmark/scenarios/audio_language/mustard_scenario.py +142 -0
- helm/benchmark/scenarios/audio_language/mutox_scenario.py +254 -0
- helm/benchmark/scenarios/audio_language/parade_scenario.py +97 -0
- helm/benchmark/scenarios/audio_language/speech_robust_bench_scenario.py +124 -0
- helm/benchmark/scenarios/audio_language/ultra_suite_asr_classification.py +103 -0
- helm/benchmark/scenarios/audio_language/ultra_suite_classification_scenario.py +110 -0
- helm/benchmark/scenarios/audio_language/ultra_suite_disorder_breakdown_scenario.py +78 -0
- helm/benchmark/scenarios/audio_language/ultra_suite_disorder_symptoms_scenario.py +109 -0
- helm/benchmark/scenarios/audio_language/vocal_sound_scenario.py +83 -0
- helm/benchmark/scenarios/audio_language/voice_jailbreak_attacks_scenario.py +87 -0
- helm/benchmark/scenarios/audio_language/voxceleb2_scenario.py +105 -0
- helm/benchmark/scenarios/autobencher_capabilities_scenario.py +68 -0
- helm/benchmark/scenarios/autobencher_safety_scenario.py +51 -0
- helm/benchmark/scenarios/babi_qa_scenario.py +1 -1
- helm/benchmark/scenarios/banking77_scenario.py +6 -1
- helm/benchmark/scenarios/bbq_scenario.py +1 -1
- helm/benchmark/scenarios/big_bench_scenario.py +11 -1
- helm/benchmark/scenarios/bigcodebench_scenario.py +58 -0
- helm/benchmark/scenarios/bird_sql_scenario.py +94 -0
- helm/benchmark/scenarios/bird_sql_scenario_helper.py +118 -0
- helm/benchmark/scenarios/blimp_scenario.py +1 -1
- helm/benchmark/scenarios/bold_scenario.py +1 -1
- helm/benchmark/scenarios/boolq_scenario.py +1 -1
- helm/benchmark/scenarios/casehold_scenario.py +79 -0
- helm/benchmark/scenarios/chw_care_plan_scenario.py +106 -0
- helm/benchmark/scenarios/civil_comments_scenario.py +1 -1
- helm/benchmark/scenarios/clear_scenario.py +157 -0
- helm/benchmark/scenarios/cleva_scenario.py +2 -2
- helm/benchmark/scenarios/code_scenario.py +17 -4
- helm/benchmark/scenarios/commonsense_scenario.py +1 -1
- helm/benchmark/scenarios/conv_fin_qa_calc_scenario.py +97 -0
- helm/benchmark/scenarios/copyright_scenario.py +1 -1
- helm/benchmark/scenarios/covid_dialog_scenario.py +10 -1
- helm/benchmark/scenarios/cti_to_mitre_scenario.py +240 -0
- helm/benchmark/scenarios/custom_mcqa_scenario.py +1 -1
- helm/benchmark/scenarios/czech_bank_qa_scenario.py +130 -0
- helm/benchmark/scenarios/decodingtrust_adv_demonstration_scenario.py +1 -1
- helm/benchmark/scenarios/decodingtrust_privacy_scenario.py +1 -1
- helm/benchmark/scenarios/decodingtrust_stereotype_bias_scenario.py +1 -1
- helm/benchmark/scenarios/decodingtrust_toxicity_prompts_scenario.py +1 -1
- helm/benchmark/scenarios/dialogue_scenarios.py +13 -2
- helm/benchmark/scenarios/dischargeme_scenario.py +172 -0
- helm/benchmark/scenarios/disinformation_scenario.py +10 -1
- helm/benchmark/scenarios/dyck_language_scenario.py +10 -1
- helm/benchmark/scenarios/echr_judgment_classification_scenario.py +113 -0
- helm/benchmark/scenarios/ehr_sql_scenario.py +137 -0
- helm/benchmark/scenarios/ehrshot_scenario.py +1519 -0
- helm/benchmark/scenarios/enem_challenge_scenario.py +58 -0
- helm/benchmark/scenarios/entity_data_imputation_scenario.py +11 -1
- helm/benchmark/scenarios/entity_matching_scenario.py +12 -2
- helm/benchmark/scenarios/financial_phrasebank_scenario.py +94 -0
- helm/benchmark/scenarios/gold_commodity_news_scenario.py +124 -0
- helm/benchmark/scenarios/gpqa_scenario.py +80 -0
- helm/benchmark/scenarios/grammar.py +2 -2
- helm/benchmark/scenarios/grammar_scenario.py +2 -2
- helm/benchmark/scenarios/gsm_scenario.py +10 -1
- helm/benchmark/scenarios/harm_bench_gcg_transfer_scenario.py +50 -0
- helm/benchmark/scenarios/harm_bench_scenario.py +1 -1
- helm/benchmark/scenarios/headqa_scenario.py +136 -0
- helm/benchmark/scenarios/helpdesk_call_summarization_scenario.py +37 -0
- helm/benchmark/scenarios/ice_scenario.py +8 -4
- helm/benchmark/scenarios/ifeval_scenario.py +53 -0
- helm/benchmark/scenarios/imdb_ptbr_scenario.py +60 -0
- helm/benchmark/scenarios/imdb_scenario.py +11 -2
- helm/benchmark/scenarios/infinite_bench_en_qa_scenario.py +85 -0
- helm/benchmark/scenarios/infinite_bench_en_sum_scenario.py +79 -0
- helm/benchmark/scenarios/interactive_qa_mmlu_scenario.py +2 -2
- helm/benchmark/scenarios/koala_scenario.py +1 -1
- helm/benchmark/scenarios/kpi_edgar_scenario.py +151 -0
- helm/benchmark/scenarios/legal_contract_summarization_scenario.py +129 -0
- helm/benchmark/scenarios/legal_opinion_sentiment_classification_scenario.py +77 -0
- helm/benchmark/scenarios/legal_summarization_scenario.py +11 -1
- helm/benchmark/scenarios/legal_support_scenario.py +11 -1
- helm/benchmark/scenarios/legalbench_scenario.py +22 -3
- helm/benchmark/scenarios/lex_glue_scenario.py +12 -2
- helm/benchmark/scenarios/lextreme_scenario.py +11 -1
- helm/benchmark/scenarios/live_qa_scenario.py +1 -1
- helm/benchmark/scenarios/lm_entry_scenario.py +1 -1
- helm/benchmark/scenarios/lsat_qa_scenario.py +1 -1
- helm/benchmark/scenarios/math_scenario.py +9 -1
- helm/benchmark/scenarios/me_q_sum_scenario.py +10 -1
- helm/benchmark/scenarios/med_dialog_scenario.py +25 -22
- helm/benchmark/scenarios/med_mcqa_scenario.py +10 -1
- helm/benchmark/scenarios/med_paragraph_simplification_scenario.py +10 -1
- helm/benchmark/scenarios/med_qa_scenario.py +10 -1
- helm/benchmark/scenarios/medalign_scenario.py +94 -0
- helm/benchmark/scenarios/medalign_scenario_helper.py +432 -0
- helm/benchmark/scenarios/medbullets_scenario.py +145 -0
- helm/benchmark/scenarios/medcalc_bench_scenario.py +127 -0
- helm/benchmark/scenarios/medec_scenario.py +125 -0
- helm/benchmark/scenarios/medhallu_scenario.py +72 -0
- helm/benchmark/scenarios/medi_qa_scenario.py +111 -0
- helm/benchmark/scenarios/medication_qa_scenario.py +8 -2
- helm/benchmark/scenarios/melt_ir_scenario.py +171 -0
- helm/benchmark/scenarios/melt_knowledge_scenario.py +246 -0
- helm/benchmark/scenarios/melt_lm_scenarios.py +252 -0
- helm/benchmark/scenarios/melt_scenarios.py +793 -0
- helm/benchmark/scenarios/melt_srn_scenario.py +342 -0
- helm/benchmark/scenarios/melt_synthetic_reasoning_scenario.py +222 -0
- helm/benchmark/scenarios/melt_translation_scenario.py +152 -0
- helm/benchmark/scenarios/mental_health_scenario.py +123 -0
- helm/benchmark/scenarios/mimic_bhc_scenario.py +103 -0
- helm/benchmark/scenarios/mimic_rrs_scenario.py +98 -0
- helm/benchmark/scenarios/mimiciv_billing_code_scenario.py +77 -0
- helm/benchmark/scenarios/mmlu_clinical_afr_scenario.py +74 -0
- helm/benchmark/scenarios/mmlu_pro_scenario.py +95 -0
- helm/benchmark/scenarios/mmlu_scenario.py +11 -1
- helm/benchmark/scenarios/msmarco_scenario.py +1 -1
- helm/benchmark/scenarios/mtsamples_procedures_scenario.py +144 -0
- helm/benchmark/scenarios/mtsamples_replicate_scenario.py +142 -0
- helm/benchmark/scenarios/n2c2_ct_matching_scenario.py +277 -0
- helm/benchmark/scenarios/narrativeqa_scenario.py +1 -1
- helm/benchmark/scenarios/natural_qa_scenario.py +1 -1
- helm/benchmark/scenarios/newsqa_scenario.py +1 -1
- helm/benchmark/scenarios/numeracy_scenario.py +12 -2
- helm/benchmark/scenarios/oab_exams_scenario.py +57 -0
- helm/benchmark/scenarios/omni_math_scenario.py +53 -0
- helm/benchmark/scenarios/open_assistant_scenario.py +11 -2
- helm/benchmark/scenarios/openai_mrcr_scenario.py +79 -0
- helm/benchmark/scenarios/opinions_qa_scenario.py +1 -1
- helm/benchmark/scenarios/pubmed_qa_scenario.py +59 -43
- helm/benchmark/scenarios/quac_scenario.py +10 -1
- helm/benchmark/scenarios/race_based_med_scenario.py +152 -0
- helm/benchmark/scenarios/raft_scenario.py +17 -2
- helm/benchmark/scenarios/real_toxicity_prompts_scenario.py +1 -1
- helm/benchmark/scenarios/ruler_qa_scenario_helper.py +171 -0
- helm/benchmark/scenarios/ruler_qa_scenarios.py +88 -0
- helm/benchmark/scenarios/scenario.py +9 -1
- helm/benchmark/scenarios/{bhasa_scenario.py → seahelm_scenario.py} +7 -2
- helm/benchmark/scenarios/self_instruct_scenario.py +1 -1
- helm/benchmark/scenarios/shc_bmt_scenario.py +75 -0
- helm/benchmark/scenarios/shc_cdi_scenario.py +75 -0
- helm/benchmark/scenarios/shc_conf_scenario.py +76 -0
- helm/benchmark/scenarios/shc_ent_scenario.py +77 -0
- helm/benchmark/scenarios/shc_gip_scenario.py +74 -0
- helm/benchmark/scenarios/shc_privacy_scenario.py +78 -0
- helm/benchmark/scenarios/shc_proxy_scenario.py +76 -0
- helm/benchmark/scenarios/shc_ptbm_scenario.py +81 -0
- helm/benchmark/scenarios/shc_sei_scenario.py +94 -0
- helm/benchmark/scenarios/shc_sequoia_scenario.py +77 -0
- helm/benchmark/scenarios/simple_safety_tests_scenario.py +1 -1
- helm/benchmark/scenarios/spider_scenario.py +91 -0
- helm/benchmark/scenarios/starr_patient_instructions_scenario.py +97 -0
- helm/benchmark/scenarios/summarization_scenario.py +11 -1
- helm/benchmark/scenarios/sumosum_scenario.py +157 -0
- helm/benchmark/scenarios/synthetic_efficiency_scenario.py +1 -1
- helm/benchmark/scenarios/synthetic_reasoning_natural_scenario.py +11 -1
- helm/benchmark/scenarios/synthetic_reasoning_scenario.py +11 -1
- helm/benchmark/scenarios/test_bigcodebench_scenario.py +26 -0
- helm/benchmark/scenarios/test_czech_bank_qa_scenario.py +18 -0
- helm/benchmark/scenarios/test_enem_challenge_scenario.py +53 -0
- helm/benchmark/scenarios/test_ewok_scenario.py +6 -2
- helm/benchmark/scenarios/test_gold_commodity_news_scenario.py +18 -0
- helm/benchmark/scenarios/test_gpqa_scenario.py +44 -0
- helm/benchmark/scenarios/test_ifeval_scenario.py +36 -0
- helm/benchmark/scenarios/test_imdb_ptbr_scenario.py +27 -0
- helm/benchmark/scenarios/test_infinite_bench_en_qa_scenario.py +18 -0
- helm/benchmark/scenarios/test_infinite_bench_en_sum_scenario.py +31 -0
- helm/benchmark/scenarios/test_math_scenario.py +1 -0
- helm/benchmark/scenarios/test_mmlu_clinical_afr_scenario.py +21 -0
- helm/benchmark/scenarios/test_mmlu_pro_scenario.py +53 -0
- helm/benchmark/scenarios/test_oab_exams_scenario.py +51 -0
- helm/benchmark/scenarios/test_omni_math_scenario.py +27 -0
- helm/benchmark/scenarios/test_tweetsentbr_scenario.py +24 -0
- helm/benchmark/scenarios/test_wildbench_scenario.py +15 -0
- helm/benchmark/scenarios/test_winogrande_afr_scenario.py +19 -0
- helm/benchmark/scenarios/thai_exam_scenario.py +10 -1
- helm/benchmark/scenarios/the_pile_scenario.py +1 -1
- helm/benchmark/scenarios/truthful_qa_scenario.py +12 -2
- helm/benchmark/scenarios/tweetsentbr_scenario.py +66 -0
- helm/benchmark/scenarios/twitter_aae_scenario.py +1 -1
- helm/benchmark/scenarios/unitxt_scenario.py +8 -2
- helm/benchmark/scenarios/verifiability_judgment_scenario.py +1 -1
- helm/benchmark/scenarios/vicuna_scenario.py +1 -1
- helm/benchmark/scenarios/vision_language/blink_scenario.py +140 -0
- helm/benchmark/scenarios/vision_language/mm_star_scenario.py +95 -0
- helm/benchmark/scenarios/vision_language/msr_vtt_scenario.py +75 -0
- helm/benchmark/scenarios/vision_language/vqa_rad_scenario.py +88 -0
- helm/benchmark/scenarios/wikifact_scenario.py +11 -1
- helm/benchmark/scenarios/wikitext_103_scenario.py +1 -1
- helm/benchmark/scenarios/wildbench_scenario.py +83 -0
- helm/benchmark/scenarios/winogrande_afr_scenario.py +78 -0
- helm/benchmark/scenarios/wmt_14_scenario.py +14 -2
- helm/benchmark/scenarios/xstest_scenario.py +1 -1
- helm/benchmark/server.py +13 -1
- helm/benchmark/slurm_runner.py +1 -1
- helm/benchmark/static/schema_audio.yaml +763 -0
- helm/benchmark/static/schema_autobencher.yaml +150 -0
- helm/benchmark/static/schema_call_center.yaml +97 -60
- helm/benchmark/static/{schema_medical.yaml → schema_capabilities.yaml} +100 -101
- helm/benchmark/static/schema_czech_bank.yaml +148 -0
- helm/benchmark/static/schema_enem_challenge.yaml +146 -0
- helm/benchmark/static/schema_enterprise.yaml +319 -0
- helm/benchmark/static/schema_finance.yaml +14 -12
- helm/benchmark/static/schema_heim.yaml +1389 -0
- helm/benchmark/static/schema_long_context.yaml +283 -0
- helm/benchmark/static/schema_medhelm.yaml +1140 -0
- helm/benchmark/static/schema_melt.yaml +1257 -0
- helm/benchmark/static/schema_mmlu_winogrande_afr.yaml +1045 -0
- helm/benchmark/static/schema_safety.yaml +18 -1
- helm/benchmark/static/{schema_bhasa.yaml → schema_seahelm.yaml} +30 -16
- helm/benchmark/static/schema_slphelm.yaml +162 -0
- helm/benchmark/static/schema_social_audio.yaml +224 -0
- helm/benchmark/static/schema_sql.yaml +171 -0
- helm/benchmark/static/{schema_tables.yaml → schema_torr.yaml} +169 -36
- helm/benchmark/static/schema_tweetsentbr.yaml +146 -0
- helm/benchmark/static/schema_vhelm.yaml +129 -56
- helm/benchmark/static/schema_video.yaml +219 -0
- helm/benchmark/static_build/assets/helm-safety-2907a7b6.png +0 -0
- helm/benchmark/static_build/assets/index-94295e78.js +10 -0
- helm/benchmark/static_build/assets/index-b9779128.css +1 -0
- helm/benchmark/static_build/assets/medhelm-overview-eac29843.png +0 -0
- helm/benchmark/static_build/assets/medhelm-v1-overview-3ddfcd65.png +0 -0
- helm/benchmark/static_build/assets/{react-d4a0b69b.js → react-f82877fd.js} +1 -1
- helm/benchmark/static_build/assets/{recharts-6d337683.js → recharts-4037aff0.js} +1 -1
- helm/benchmark/static_build/assets/{tremor-54a99cc4.js → tremor-38a10867.js} +2 -2
- helm/benchmark/static_build/config.js +1 -1
- helm/benchmark/static_build/index.html +6 -6
- helm/benchmark/window_services/default_window_service.py +1 -1
- helm/benchmark/window_services/encoder_decoder_window_service.py +4 -4
- helm/benchmark/window_services/ice_window_service.py +1 -1
- helm/benchmark/window_services/image_generation/lexica_search_window_service.py +1 -1
- helm/benchmark/window_services/image_generation/openai_dalle_window_service.py +1 -1
- helm/benchmark/window_services/local_window_service.py +2 -2
- helm/benchmark/window_services/test_anthropic_window_service.py +3 -3
- helm/benchmark/window_services/test_bloom_window_service.py +3 -3
- helm/benchmark/window_services/test_gpt2_window_service.py +7 -2
- helm/benchmark/window_services/test_gpt4_window_service.py +8 -3
- helm/benchmark/window_services/test_gptj_window_service.py +8 -3
- helm/benchmark/window_services/test_gptneox_window_service.py +3 -3
- helm/benchmark/window_services/test_openai_window_service.py +8 -3
- helm/benchmark/window_services/test_opt_window_service.py +3 -3
- helm/benchmark/window_services/test_palmyra_window_service.py +3 -3
- helm/benchmark/window_services/test_t0pp_window_service.py +3 -3
- helm/benchmark/window_services/test_t511b_window_service.py +3 -3
- helm/benchmark/window_services/test_ul2_window_service.py +3 -3
- helm/benchmark/window_services/test_utils.py +4 -5
- helm/benchmark/window_services/test_yalm_window_service.py +3 -3
- helm/benchmark/window_services/tokenizer_service.py +7 -8
- helm/benchmark/window_services/yalm_window_service.py +1 -1
- helm/clients/ai21_client.py +3 -3
- helm/clients/aleph_alpha_client.py +1 -1
- helm/clients/anthropic_client.py +69 -29
- helm/clients/audio_language/__init__.py +0 -0
- helm/clients/audio_language/diva_llama_client.py +120 -0
- helm/clients/audio_language/llama_omni_client.py +198 -0
- helm/clients/audio_language/qwen2_5_omni_client.py +197 -0
- helm/clients/audio_language/qwen2_audiolm_client.py +190 -0
- helm/clients/audio_language/qwen_audiolm_client.py +152 -0
- helm/clients/audio_language/test.py +62 -0
- helm/clients/auto_client.py +4 -2
- helm/clients/azure_openai_client.py +55 -0
- helm/clients/bedrock_client.py +203 -7
- helm/clients/bedrock_utils.py +33 -0
- helm/clients/client.py +7 -7
- helm/clients/clip_scorers/clip_scorer.py +1 -1
- helm/clients/clip_scorers/multilingual_clip_scorer.py +1 -1
- helm/clients/cohere_client.py +3 -3
- helm/clients/google_client.py +1 -1
- helm/clients/grok_client.py +36 -0
- helm/clients/http_model_client.py +1 -1
- helm/clients/huggingface_client.py +52 -21
- helm/clients/huggingface_pipeline_client.py +138 -0
- helm/clients/ibm_client.py +267 -0
- helm/clients/image_generation/adobe_vision_client.py +1 -1
- helm/clients/image_generation/aleph_alpha_image_generation_client.py +1 -1
- helm/clients/image_generation/cogview2/sr_pipeline/__init__.py +3 -3
- helm/clients/image_generation/cogview2/sr_pipeline/direct_sr.py +5 -2
- helm/clients/image_generation/cogview2/sr_pipeline/iterative_sr.py +5 -2
- helm/clients/image_generation/cogview2/sr_pipeline/sr_group.py +2 -2
- helm/clients/image_generation/cogview2_client.py +1 -1
- helm/clients/image_generation/dalle2_client.py +1 -1
- helm/clients/image_generation/dalle3_client.py +2 -2
- helm/clients/image_generation/dalle_mini/__init__.py +1 -1
- helm/clients/image_generation/dalle_mini/data.py +1 -1
- helm/clients/image_generation/dalle_mini/model/__init__.py +5 -5
- helm/clients/image_generation/dalle_mini/model/configuration.py +2 -2
- helm/clients/image_generation/dalle_mini/model/modeling.py +3 -3
- helm/clients/image_generation/dalle_mini/model/processor.py +5 -5
- helm/clients/image_generation/dalle_mini/model/tokenizer.py +2 -2
- helm/clients/image_generation/dalle_mini/vqgan_jax/__init__.py +1 -1
- helm/clients/image_generation/dalle_mini/vqgan_jax/convert_pt_model_to_jax.py +2 -2
- helm/clients/image_generation/dalle_mini/vqgan_jax/modeling_flax_vqgan.py +1 -1
- helm/clients/image_generation/dalle_mini_client.py +1 -1
- helm/clients/image_generation/deep_floyd_client.py +1 -1
- helm/clients/image_generation/huggingface_diffusers_client.py +1 -1
- helm/clients/image_generation/lexica_client.py +1 -1
- helm/clients/image_generation/mindalle/models/__init__.py +6 -6
- helm/clients/image_generation/mindalle/models/stage1/vqgan.py +1 -1
- helm/clients/image_generation/mindalle/models/stage2/transformer.py +1 -1
- helm/clients/image_generation/mindalle/utils/__init__.py +3 -3
- helm/clients/image_generation/mindalle_client.py +1 -1
- helm/clients/image_generation/together_image_generation_client.py +1 -1
- helm/clients/lit_gpt_client.py +2 -2
- helm/clients/mistral_client.py +62 -18
- helm/clients/nvidia_nim_client.py +0 -3
- helm/clients/openai_client.py +308 -43
- helm/clients/openai_responses_client.py +174 -0
- helm/clients/palmyra_client.py +3 -9
- helm/clients/reka_client.py +3 -3
- helm/clients/stanfordhealthcare_azure_openai_client.py +58 -0
- helm/clients/stanfordhealthcare_claude_client.py +31 -0
- helm/clients/stanfordhealthcare_google_client.py +43 -0
- helm/clients/stanfordhealthcare_http_model_client.py +93 -0
- helm/clients/stanfordhealthcare_openai_client.py +62 -0
- helm/clients/stanfordhealthcare_shc_openai_client.py +42 -0
- helm/clients/test_client.py +1 -1
- helm/clients/test_together_client.py +6 -1
- helm/clients/together_client.py +76 -9
- helm/clients/upstage_client.py +23 -0
- helm/clients/vertexai_client.py +45 -13
- helm/clients/vision_language/huggingface_vision2seq_client.py +6 -4
- helm/clients/vision_language/huggingface_vlm_client.py +2 -2
- helm/clients/vision_language/idefics_client.py +6 -2
- helm/clients/vision_language/open_flamingo/__init__.py +2 -2
- helm/clients/vision_language/open_flamingo/src/factory.py +3 -3
- helm/clients/vision_language/open_flamingo/src/flamingo.py +2 -2
- helm/clients/vision_language/open_flamingo/src/flamingo_lm.py +2 -2
- helm/clients/vision_language/paligemma_client.py +2 -2
- helm/clients/vision_language/qwen2_vlm_client.py +188 -0
- helm/clients/vision_language/qwen_vlm_client.py +7 -5
- helm/clients/vllm_client.py +4 -6
- helm/clients/writer_client.py +102 -0
- helm/clients/yi_client.py +0 -3
- helm/common/audio_utils.py +111 -0
- helm/common/context.py +80 -0
- helm/common/credentials_utils.py +5 -5
- helm/common/file_caches/local_file_cache.py +1 -1
- helm/common/file_caches/test_local_file_cache.py +1 -1
- helm/common/general.py +9 -2
- helm/common/hierarchical_logger.py +46 -3
- helm/common/images_utils.py +2 -2
- helm/common/local_context.py +140 -0
- helm/common/media_object.py +2 -2
- helm/common/multimodal_request_utils.py +26 -0
- helm/common/reeval_parameters.py +12 -0
- helm/common/remote_context.py +61 -0
- helm/common/request.py +14 -2
- helm/common/response_format.py +18 -0
- helm/common/test_media_object.py +1 -1
- helm/config/model_deployments.yaml +1792 -28
- helm/config/model_metadata.yaml +1606 -51
- helm/config/tokenizer_configs.yaml +521 -4
- helm/proxy/cli.py +5 -3
- helm/proxy/critique/mechanical_turk_utils.py +1 -1
- helm/proxy/example_queries.py +1 -1
- helm/proxy/server.py +11 -4
- helm/proxy/services/remote_service.py +1 -1
- helm/proxy/services/server_service.py +22 -86
- helm/proxy/services/test_remote_service.py +2 -2
- helm/proxy/services/test_service.py +1 -1
- helm/proxy/static/general.js +122 -0
- helm/proxy/static/help.html +99 -0
- helm/proxy/static/index.css +57 -0
- helm/proxy/static/index.html +40 -0
- helm/proxy/static/index.js +456 -0
- helm/proxy/static/info-icon.png +0 -0
- helm/proxy/test_retry.py +1 -1
- helm/proxy/token_counters/auto_token_counter.py +1 -1
- helm/tokenizers/aleph_alpha_tokenizer.py +1 -1
- helm/tokenizers/caching_tokenizer.py +2 -30
- helm/tokenizers/grok_tokenizer.py +53 -0
- helm/tokenizers/http_model_tokenizer.py +1 -1
- helm/tokenizers/huggingface_tokenizer.py +3 -3
- helm/tokenizers/lit_gpt_tokenizer.py +1 -1
- helm/tokenizers/test_anthropic_tokenizer.py +6 -2
- helm/tokenizers/test_grok_tokenizer.py +33 -0
- helm/tokenizers/test_huggingface_tokenizer.py +1 -1
- helm/tokenizers/test_yalm_tokenizer.py +1 -1
- helm/tokenizers/tiktoken_tokenizer.py +1 -1
- helm/tokenizers/tokenizer.py +3 -1
- helm/tokenizers/yalm_tokenizer.py +3 -3
- helm/tokenizers/yalm_tokenizer_data/test_yalm_tokenizer.py +1 -1
- crfm_helm-0.5.4.dist-info/METADATA +0 -350
- crfm_helm-0.5.4.dist-info/RECORD +0 -697
- helm/benchmark/metrics/bhasa_metrics_specs.py +0 -10
- helm/benchmark/static_build/assets/01-694cb9b7.png +0 -0
- helm/benchmark/static_build/assets/accenture-6f97eeda.png +0 -0
- helm/benchmark/static_build/assets/ai21-0eb91ec3.png +0 -0
- helm/benchmark/static_build/assets/aisingapore-6dfc9acf.png +0 -0
- helm/benchmark/static_build/assets/aleph-alpha-7ce10034.png +0 -0
- helm/benchmark/static_build/assets/anthropic-70d8bc39.png +0 -0
- helm/benchmark/static_build/assets/bigscience-7f0400c0.png +0 -0
- helm/benchmark/static_build/assets/cohere-3550c6cb.png +0 -0
- helm/benchmark/static_build/assets/cresta-9e22b983.png +0 -0
- helm/benchmark/static_build/assets/cuhk-8c5631e9.png +0 -0
- helm/benchmark/static_build/assets/eleutherai-b9451114.png +0 -0
- helm/benchmark/static_build/assets/google-06d997ad.png +0 -0
- helm/benchmark/static_build/assets/index-05c76bb1.css +0 -1
- helm/benchmark/static_build/assets/index-3ee38b3d.js +0 -10
- helm/benchmark/static_build/assets/meta-5580e9f1.png +0 -0
- helm/benchmark/static_build/assets/microsoft-f5ee5016.png +0 -0
- helm/benchmark/static_build/assets/mistral-18e1be23.png +0 -0
- helm/benchmark/static_build/assets/nvidia-86fa75c1.png +0 -0
- helm/benchmark/static_build/assets/openai-3f8653e4.png +0 -0
- helm/benchmark/static_build/assets/scb10x-204bd786.png +0 -0
- helm/benchmark/static_build/assets/tii-24de195c.png +0 -0
- helm/benchmark/static_build/assets/together-a665a35b.png +0 -0
- helm/benchmark/static_build/assets/tsinghua-keg-97d4b395.png +0 -0
- helm/benchmark/static_build/assets/wellsfargo-a86a6c4a.png +0 -0
- helm/benchmark/static_build/assets/yandex-38e09d70.png +0 -0
- helm/tokenizers/anthropic_tokenizer.py +0 -52
- {crfm_helm-0.5.4.dist-info → crfm_helm-0.5.6.dist-info}/entry_points.txt +0 -0
- {crfm_helm-0.5.4.dist-info → crfm_helm-0.5.6.dist-info/licenses}/LICENSE +0 -0
- {crfm_helm-0.5.4.dist-info → crfm_helm-0.5.6.dist-info}/top_level.txt +0 -0
helm/benchmark/annotation/model_as_judge.py (+214 -6; the contents of the removed lines were truncated in the source capture and appear below as captured):

```diff
@@ -1,8 +1,12 @@
-
+import json
 import re
-from
+from dataclasses import dataclass
+from typing import Dict, Optional, TypedDict, Union, Callable, Any, Set
 
+from helm.benchmark.adaptation.request_state import RequestState
+from helm.benchmark.annotation.annotator import Annotator
 from helm.clients.auto_client import AutoClient
+from helm.common.hierarchical_logger import hlog, hwarn
 from helm.common.request import Request
 
 
@@ -13,7 +17,7 @@ class AnnotatorResponseParseFailure(Exception):
 
 
 @dataclass
-class
+class AnnotatorModelInfo:
     model_name: str
     model_deployment: str
 
@@ -26,9 +30,9 @@ def score_with_reasoning_with_gpt_and_llama(
 
     Score using GPT-4o and Llama 3.1 for safety scenarios in HELM Safety."""
     # TODO: Make this configurable
-    SHORT_NAME_TO_MODEL_INFO: Dict[str,
-    "gpt":
-    "llama":
+    SHORT_NAME_TO_MODEL_INFO: Dict[str, AnnotatorModelInfo] = {
+        "gpt": AnnotatorModelInfo(model_name="openai/gpt-4o-2024-05-13", model_deployment="openai/gpt-4o-2024-05-13"),
+        "llama": AnnotatorModelInfo(
             model_name="meta/llama-3.1-405b-instruct-turbo", model_deployment="together/llama-3.1-405b-instruct-turbo"
         ),
     }
@@ -94,3 +98,207 @@ def score_with_reasoning(
     )
 
     return {"reasoning": reasoning, "score": score}
+
+
+class LLMAsJuryAnnotator(Annotator):
+    """
+    A flexible LLM-based annotator that can be configured for different annotation scenarios.
+
+    This annotator supports:
+    - Custom prompt templates
+    - Multiple evaluation models
+    - Configurable evaluation criteria
+    - Robust error handling
+    """
+
+    def __init__(
+        self,
+        auto_client: AutoClient,
+        prompt_template: str,
+        annotation_criteria: Dict[str, Set[str]],
+        annotator_models: Dict[str, AnnotatorModelInfo],
+        preprocessor: Optional[Callable[[str], str]] = None,
+    ):
+        """
+        Initialize the LLMAsJuryAnnotator.
+
+        :param auto_client: Client for making API requests
+        :param prompt_template: Template for generating prompts
+        :param annotation_criteria: Dictionary defining expected annotation structure
+        :param annotator_models: Dictionary of models to use for annotation
+        :param preprocessor: Optional function to preprocess model responses
+        """
+        self._auto_client = auto_client
+        self._prompt_template = prompt_template
+        self._annotation_criteria = annotation_criteria
+        self._annotator_models = annotator_models
+        self._preprocessor = preprocessor or self._sanitize_model_response
+
+    def _sanitize_model_response(self, model_response: str) -> str:
+        """
+        Sanitize the model response to extract JSON.
+
+        :param model_response: Raw model response
+        :return: Extracted JSON string
+        """
+        json_match = re.search(r"\{.*\}", model_response, re.DOTALL)
+        return json_match.group(0) if json_match else model_response
+
+    def _interpolate_prompt(
+        self, request_state: RequestState, custom_replacements: Optional[Dict[str, str]] = None
+    ) -> str:
+        """
+        Interpolate prompt template with request state information.
+
+        :param request_state: The current request state
+        :param custom_replacements: Optional dictionary of additional replacements
+        :return: Interpolated prompt
+        """
+        base_replacements = {
+            "{{QUESTION}}": request_state.instance.input.text,
+            "{{RESPONSE}}": (
+                request_state.result.completions[0].text
+                if request_state.result and request_state.result.completions
+                else ""
+            ),
+            "{{GOLD_RESPONSE}}": request_state.instance.references[0].output.text,
+        }
+
+        # Allow custom replacements to override base replacements
+        if custom_replacements:
+            base_replacements.update(custom_replacements)
+
+        prompt = self._prompt_template
+        for key, value in base_replacements.items():
+            prompt = prompt.replace(key, str(value))
+
+        return prompt
+
+    def _validate_annotation(self, annotator_criteria: Dict[str, Any], annotator_name: str) -> bool:
+        """
+        Validate the annotation meets expected criteria.
+
+        :param annotator_criteria: Annotation dictionary to validate
+        :param annotator_name: Name of the annotator model
+        :return: Whether the annotation is valid
+        """
+        for key, value in self._annotation_criteria.items():
+            if key not in annotator_criteria:
+                hwarn(f"Annotator did not find the expected key " f"'{key}' in the response from {annotator_name}.")
+                return False
+
+            for subkey in value:
+                if subkey not in annotator_criteria[key]:
+                    hwarn(
+                        f"Annotator did not find the expected subkey "
+                        f"'{subkey}' in the response from {annotator_name}."
+                    )
+                    return False
+        return True
+
+    def annotate(self, request_state: RequestState) -> Dict[str, Any]:
+        """
+        Annotate the request state using multiple LLM models.
+
+        :param request_state: The request state to annotate
+        :return: Dictionary of annotations from different models
+        """
+        assert request_state.result
+        assert len(request_state.result.completions) == 1
+
+        # Check for empty model output
+        model_output_text = request_state.result.completions[0].text
+        if not model_output_text.strip():
+            hwarn("Annotator skipped sending requests because the model response was empty")
+            return {
+                "prompt_text": None,
+                "empty_output_equivalence_judgement": False,
+            }
+
+        # Prepare prompt
+        annotator_prompt = self._interpolate_prompt(request_state)
+        annotations: Dict[str, Union[Optional[str], Optional[bool], Dict[str, Any]]] = {"prompt_text": annotator_prompt}
+
+        # Track failed annotations for each model
+        failed_counts: Dict[str, int] = {name: 0 for name in self._annotator_models}
+
+        # Annotate using multiple models
+        for annotator_name, annotator_model_info in self._annotator_models.items():
+            try:
+                annotator_criteria = self._annotate_with_model(annotator_prompt, annotator_model_info, annotator_name)
+
+                if annotator_criteria is not None:
+                    annotations[annotator_name] = annotator_criteria
+                else:
+                    failed_counts[annotator_name] += 1
+
+            except Exception as e:
+                hlog(f"ERROR annotating with {annotator_name}: {e}")
+                failed_counts[annotator_name] += 1
+
+        hlog(f"Failed model annotations: {failed_counts}")
+        return annotations
+
+    def _annotate_with_model(
+        self, prompt: str, model_info: AnnotatorModelInfo, annotator_name: str
+    ) -> Optional[Dict[str, Any]]:
+        """
+        Annotate using a specific model with enhanced JSON parsing.
+
+        :param prompt: Interpolated prompt
+        :param model_info: Model information
+        :param annotator_name: Name of the annotator
+        :return: Annotation criteria or None if failed
+        """
+        annotator_request = Request(
+            model=model_info.model_name,
+            model_deployment=model_info.model_deployment,
+            prompt=prompt,
+            temperature=0.0,
+            max_tokens=4096,
+        )
+
+        annotator_response = self._auto_client.make_request(annotator_request)
+
+        if not annotator_response.success:
+            hwarn(f"Got an error response from {model_info.model_name}: " f"{annotator_response.error}")
+            return None
+
+        try:
+            annotator_output = annotator_response.completions[0].text
+            annotator_output = self._preprocessor(annotator_output)
+
+            try:
+                annotator_criteria = json.loads(annotator_output)
+            except json.JSONDecodeError as e:
+                if e.msg == "Expecting ',' delimiter":
+                    # Attempt to fix incomplete JSON by adding a closing brace
+                    annotator_output = annotator_output + "}"
+                    try:
+                        annotator_criteria = json.loads(annotator_output)
+                    except Exception as ex:
+                        hwarn(
+                            f"Error parsing response from {model_info.model_name} "
+                            f"after adding closing brace: {ex}. "
+                            f"Model output: {annotator_output}"
+                        )
+                        return None
+                else:
+                    # For other JSON decoding errors
+                    hwarn(
+                        f"JSON decoding error from {model_info.model_name}: {e}. " f"Model output: {annotator_output}"
+                    )
+                    return None
+
+            # Validate annotation structure
+            if not self._validate_annotation(annotator_criteria, annotator_name):
+                return None
+
+            return annotator_criteria
+
+        except Exception as e:
+            hwarn(
+                f"Unexpected error processing response from {model_info.model_name}: {e}. "
+                f"Model output: {annotator_output}"
+            )
+            return None
```
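The `_sanitize_model_response` step above is what makes the jury outputs machine-readable: it strips any prose a judge wraps around its JSON verdict before `json.loads` runs. A minimal standalone sketch of that extraction behavior; the `extract_json` name and the sample reply are hypothetical, written only for illustration:

```python
import json
import re


def extract_json(model_response: str) -> str:
    # Same regex as LLMAsJuryAnnotator._sanitize_model_response above:
    # grab everything from the first "{" to the last "}" in the reply,
    # falling back to the raw text if no braces are found.
    match = re.search(r"\{.*\}", model_response, re.DOTALL)
    return match.group(0) if match else model_response


# Hypothetical judge reply that wraps its JSON verdict in chatty prose:
raw = 'Sure! {"accuracy": {"score": 4, "explanation": "Mostly correct."}} Hope this helps.'
print(json.loads(extract_json(raw)))
# -> {'accuracy': {'score': 4, 'explanation': 'Mostly correct.'}}
```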
helm/benchmark/annotation/mtsamples_procedures_annotator.py (new file, +98):

```python
from typing import Dict, Optional, Set

from helm.benchmark.annotation.model_as_judge import AnnotatorModelInfo, LLMAsJuryAnnotator
from helm.clients.auto_client import AutoClient


PROMPT_TEMPLATE = """You are a medical expert tasked with evaluating the quality of a
generated response of a clinical scenario.
Your goal is to assess how well the response addresses the patient case, follows clinical best practices,
and compares to the gold response in terms of accuracy, completeness, and clarity.

The user's request will be provided in these tags:
<user_request>
{{QUESTION}}
</user_request>

The response will be provided in these tags:
<response>
{{RESPONSE}}
</response>

Some potential correct responses will be provided in these tags:
<gold_response>
{{GOLD_RESPONSE}}
</gold_response>

Carefully analyze the <response>.
For each of the following categories, rate the Response on a scale of 1 to 5 (1 = very poor, 5 = excellent),
and provide a short justification for your score.

Evaluation Criteria:
Accuracy (1-5)
- Does the response provide correct medical advice based on clinical guidelines and the patient case?

Completeness (1-5)
- Does the response cover all important aspects of care for this clinical scenario?

Clarity (1-5)
- Is the response written clearly and organized in a way that clinicians can easily follow?

Output Format:
Output the evaluation as a single valid JSON object matching the following structure:
{
    "accuracy": {
        "score": 0,
        "explanation": "Explain why this score was given."
    },
    "completeness": {
        "score": 0,
        "explanation": "Explain why this score was given."
    },
    "clarity": {
        "score": 0,
        "explanation": "Explain why this score was given."
    }
}

Ensure the output is valid JSON:
- Use **double quotes** (") for all keys and string values.
- When quoting text or sections inside the explanations, use escaped double quotes (\") to
maintain valid JSON formatting.
- Do not include any additional information in the output.
"""

ANNOTATION_CRITERIA: Dict[str, Set[str]] = {
    "accuracy": {"score", "explanation"},
    "completeness": {"score", "explanation"},
    "clarity": {"score", "explanation"},
}

ANNOTATOR_MODELS: Dict[str, AnnotatorModelInfo] = {
    "gpt": AnnotatorModelInfo(
        model_name="openai/gpt-4o-2024-05-13",
        model_deployment="stanfordhealthcare/gpt-4o-2024-05-13",
    ),
    "llama": AnnotatorModelInfo(
        model_name="meta/llama-3.3-70b-instruct",
        model_deployment="stanfordhealthcare/llama-3.3-70b-instruct",
    ),
    "claude": AnnotatorModelInfo(
        model_name="anthropic/claude-3-7-sonnet-20250219",
        model_deployment="stanfordhealthcare/claude-3-7-sonnet-20250219",
    ),
}


class MTSamplesProceduresAnnotator(LLMAsJuryAnnotator):
    """The MTSamplesProcedures autograder."""

    name = "mtsamples_procedures"

    def __init__(self, auto_client: AutoClient, template_name: Optional[str] = None):
        super().__init__(
            auto_client=auto_client,
            prompt_template=PROMPT_TEMPLATE,
            annotation_criteria=ANNOTATION_CRITERIA,
            annotator_models=ANNOTATOR_MODELS,
        )
```
@@ -0,0 +1,101 @@
+from typing import Dict, Optional, Set
+
+from helm.benchmark.annotation.model_as_judge import AnnotatorModelInfo, LLMAsJuryAnnotator
+from helm.clients.auto_client import AutoClient
+
+
+PROMPT_TEMPLATE = """You are a medical expert responsible for evaluating a proposed treatment plan
+based on provided patient information.
+
+Your goal is to assess whether the treatment plan uses the patient's medical history,
+medications, symptoms, and other relevant details appropriately, follows clinical best practices,
+and aligns with the gold standard response in accuracy, completeness, and clarity.
+
+The patient's information will be provided in these tags:
+<patient_information>
+{{QUESTION}}
+</patient_information>
+
+The proposed treatment plan will be provided in these tags:
+<response>
+{{RESPONSE}}
+</response>
+
+The gold standard treatment plan will be provided in these tags:
+<gold_response>
+{{GOLD_RESPONSE}}
+</gold_response>
+
+Carefully review the <response> based on the <patient_information> and compare it to the <gold_response> when needed.
+
+For each category below, rate the response on a scale of 1 to 5 (1 = very poor, 5 = excellent) and
+provide a brief explanation for your score:
+
+Evaluation Criteria:
+Accuracy (1-5)
+- Does the treatment plan provide correct medical advice based on clinical guidelines?
+
+Completeness (1-5)
+- Does the treatment plan include all important medical details?
+
+Clarity (1-5)
+- Is the treatment plan written clearly so clinicians can easily understand it?
+
+Output Format:
+Generate a valid JSON object with the following structure:
+{
+  "accuracy": {
+    "score": 0,
+    "explanation": "Explain why this score was given."
+  },
+  "completeness": {
+    "score": 0,
+    "explanation": "Explain why this score was given."
+  },
+  "clarity": {
+    "score": 0,
+    "explanation": "Explain why this score was given."
+  }
+}
+
+Ensure the output is valid JSON:
+- Use **double quotes** (") for all keys and string values.
+- When quoting text or sections inside the explanations, use escaped double quotes (\") to
+maintain valid JSON formatting.
+- Do not include any additional information in the output.
+"""
+
+ANNOTATION_CRITERIA: Dict[str, Set[str]] = {
+    "accuracy": {"score", "explanation"},
+    "completeness": {"score", "explanation"},
+    "clarity": {"score", "explanation"},
+}
+
+ANNOTATOR_MODELS: Dict[str, AnnotatorModelInfo] = {
+    "gpt": AnnotatorModelInfo(
+        model_name="openai/gpt-4o-2024-05-13",
+        model_deployment="stanfordhealthcare/gpt-4o-2024-05-13",
+    ),
+    "llama": AnnotatorModelInfo(
+        model_name="meta/llama-3.3-70b-instruct",
+        model_deployment="stanfordhealthcare/llama-3.3-70b-instruct",
+    ),
+    "claude": AnnotatorModelInfo(
+        model_name="anthropic/claude-3-7-sonnet-20250219",
+        model_deployment="stanfordhealthcare/claude-3-7-sonnet-20250219",
+    ),
+}
+
+
+class MTSamplesReplicateAnnotator(LLMAsJuryAnnotator):
+    """The MTSamplesReplicate autograder."""
+
+    name = "mtsamples_replicate"
+
+    def __init__(self, auto_client: AutoClient, template_name: Optional[str] = None):
+        super().__init__(
+            auto_client=auto_client,
+            prompt_template=PROMPT_TEMPLATE,
+            annotation_criteria=ANNOTATION_CRITERIA,
+            annotator_models=ANNOTATOR_MODELS,
+        )
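For orientation: the `LLMAsJuryAnnotator` base class presumably sends the filled-in prompt to each model in `ANNOTATOR_MODELS` and parses the JSON verdicts back. A minimal sketch of how such a reply could be checked against `ANNOTATION_CRITERIA` follows; the helper name is illustrative and not part of crfm-helm.

import json
from typing import Any, Dict, Mapping, Set

def validate_jury_output(raw: str, criteria: Mapping[str, Set[str]]) -> Dict[str, Any]:
    """Illustrative helper (not in crfm-helm): check a judge's JSON reply
    against a criteria mapping such as ANNOTATION_CRITERIA above."""
    parsed = json.loads(raw)  # raises json.JSONDecodeError on malformed output
    for criterion, required_fields in criteria.items():
        if criterion not in parsed:
            raise ValueError(f"missing criterion: {criterion!r}")
        missing = required_fields - set(parsed[criterion])
        if missing:
            raise ValueError(f"criterion {criterion!r} lacks fields: {sorted(missing)}")
    return parsed

With the criteria defined above, a reply missing an "explanation" under "clarity" would raise rather than silently score.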
@@ -0,0 +1,152 @@
+# CONTEXT #
+I am a teacher, and I have some high-level math problems. I am tasked with evaluating the correctness of a student's answer.
+Below, I am provided with a problem and a reference answer. Additionally, a student's answer is provided. My job is to assess whether the student's answer captures the same meaning as the reference answer, even when expressed with different wording or format.
+
+# OBJECTIVE #
+I need you to judge whether the student's answer is correct given the ground truth answer.
+
+Your tasks include:
+A. Identify Mathematical or Notational Equivalence: Pay special attention to any LaTeX expressions in both answers. Confirm that the mathematical relationships, variables, and operations conveyed are equivalent.
+B. Provide a Justification: Conclude with a brief explanation as to why you believe the student's output is correct or incorrect, highlighting any key differences in meaning or content.
+
+# STYLE #
+Teaching report.
+
+# TONE #
+Professional, scientific.
+
+# AUDIENCE #
+Students. Enable them to better understand whether the answers they produce are correct.
+
+# RESPONSE: MARKDOWN REPORT #
+## Student Final Answer
+[Extract the student's final answer, which is enclosed in "\\boxed{}".]
+## Equivalence Judgement
+[Whether the student's answer shares the same meaning as the reference answer. (TRUE or FALSE)]
+## Justification
+[Conclude with a brief explanation as to why you believe the student's answer is correct or incorrect.]
+
+
+# ATTENTION #
+- The reference answer is ALWAYS correct. You should carefully judge whether the student gives the same answer as the reference answer.
+- The Equivalence Judgement is only TRUE or FALSE. The answer is FALSE even if the student's final answer is almost correct but contains a minor mistake.
+- The answer is contained within the "boxed" section, so you can focus solely on comparing the content in the student's answer box with the reference answer, without needing to consider the intermediate steps.
+- Add "=== report over ===" at the end of the report.
+
+<example math solution>
+**Question**:
+The numbers $1447$, $1005$ and $1231$ have something in common: each is a $4$-digit number beginning with $1$ that has exactly two identical digits. How many such numbers are there?
+
+**Reference Answer**
+432
+
+**Student Solution**:
+To determine how many 4-digit numbers beginning with 1 have exactly two identical digits, we need to consider the structure of these numbers. A number of this form can be represented as \(1abc\), where \(a\), \(b\), and \(c\) are digits from 0 to 9, and exactly one of the digits \(a\), \(b\), or \(c\) is 1, while the other two are different from each other and different from 1.
+
+We can break down the problem into the following steps:
+
+1. **Choose the digit that is 1**: There are 3 positions (a, b, c) where the digit 1 can be placed. So, there are 3 choices for the position of the digit 1.
+
+2. **Choose the other two digits**: The remaining two digits must be different from each other and different from 1. Therefore, we have 9 choices for the first of these two digits (0-9 except 1) and 8 choices for the second digit (0-9 except 1 and the first digit). This gives us \(9 \times 8 = 72\) choices for the other two digits.
+
+Combining these choices, the total number of 4-digit numbers beginning with 1 that have exactly two identical digits is:
+\[3 \times 72 = 216.\]
+
+Thus, the number of such numbers is \(\boxed{216}\).
+
+</example math solution>
+
+## Student Final Answer
+216
+
+## Equivalence Judgement
+FALSE
+
+## Justification
+The student's answer of 216 is incorrect in the context of the problem, which asks for the total count of 4-digit numbers beginning with 1 that have exactly two identical digits. The reference answer is 432.
+In the student's solution, they consider only cases where the digit '1' is one of the identical digits. However, the problem also includes the scenario where the identical digits could be different from '1'. Thus, the student's calculation does not account for all valid configurations. The discrepancy in figures indicates that the student's answer does not share the same meaning as the reference answer.
+
+=== report over ===
+
+
+<example math solution>
+**Question**:
+Susana and Brenda play a game writing polynomials on the board. Susana starts, and they take turns.
+1) On the preparatory turn (turn 0), Susana chooses a positive integer $n_0$ and writes the polynomial $P_0(x)=n_0$.
+2) On turn 1, Brenda chooses a positive integer $n_1$, different from $n_0$, and writes either the polynomial
+
+$$P_1(x)=n_1x+P_0(x) \textup{ or } P_1(x)=n_1x-P_0(x)$$
+
+3) In general, on turn $k$, the respective player chooses an integer $n_k$, different from $n_0, n_1, \ldots, n_{k-1}$, and writes either the polynomial
+$$P_k(x)=n_kx^k+P_{k-1}(x) \textup{ or } P_k(x)=n_kx^k-P_{k-1}(x)$$
+The first player to write a polynomial with at least one whole number root wins. Find and describe a winning strategy.
+
+**Reference Answer**
+\\text{No winning strategy; the game can last indefinitely.}
+
+**Student Solution**:
+To determine a winning strategy for Susana, we need to analyze the game and the conditions under which a player can win. The goal is to write a polynomial that has at least one integer root.
+
+Let's start by examining the polynomials that are written on the board after each turn.
+
+1. On turn 0, Susana writes the polynomial \( P_0(x) = n_0 \), where \( n_0 \) is a positive integer. This polynomial is a constant polynomial with no integer roots unless \( n_0 = 0 \), which is not allowed since \( n_0 \) is a positive integer.
+
+2. On turn 1, Brenda chooses a positive integer \( n_1 \) different from \( n_0 \) and writes either \( P_1(x) = n_1 x + P_0(x) = n_1 x + n_0 \) or \( P_1(x) = n_1 x - P_0(x) = n_1 x - n_0 \). In both cases, \( P_1(x) \) is a linear polynomial. A linear polynomial \( ax + b \) has an integer root if and only if \( b/a \) is an integer. Therefore, Brenda can choose \( n_1 \) and \( n_0 \) such that \( n_1 n_0 \) is divisible by \( n_1 \), which is always possible since \( n_1 \) and \( n_0 \) are different positive integers. For example, if \( n_1 = 2 \) and \( n_0 = 2 \), then \( P_1(x) = 2x - 2 \) has the integer root \( x = 1 \). However, since \( n_1 \) must be different from \( n_0 \), we need to consider the next turn.
+
+3. On turn 2, Susana chooses a positive integer \( n_2 \) different from \( n_0 \) and \( n_1 \) and writes either \( P_2(x) = n_2 x^2 + P_1(x) \) or \( P_2(x) = n_2 x^2 - P_1(x) \). To ensure a win, Susana should choose \( n_2 \) such that the resulting polynomial has an integer root. Notice that if \( P_1(x) = n_1 x + n_0 \) or \( P_1(x) = n_1 x - n_0 \), then \( P_2(x) = n_2 x^2 + P_1(x) \) or \( P_2(x) = n_2 x^2 - P_1(x) \) will have an integer root if \( n_2 \) is chosen appropriately. For example, if \( P_1(x) = 2x - 2 \), then \( P_2(x) = n_2 x^2 + 2x - 2 \) or \( P_2(x) = n_2 x^2 - 2x + 2 \) will have an integer root if \( n_2 \) is chosen such that the polynomial can be factored to have an integer root.
+
+By following this strategy, Susana can always choose \( n_2 \) such that the resulting polynomial has an integer root. Therefore, Susana has a winning strategy.
+
+The winning strategy for Susana is to choose \( n_2 \) such that the resulting polynomial has an integer root. Specifically, if \( P_1(x) = n_1 x + n_0 \) or \( P_1(x) = n_1 x - n_0 \), then Susana should choose \( n_2 \) such that \( P_2(x) = n_2 x^2 + P_1(x) \) or \( P_2(x) = n_2 x^2 - P_1(x) \) has an integer root.
+
+The answer is: \(\boxed{\text{Susana has a winning strategy.}}\)
+
+</example math solution>
+
+
+## Student Final Answer
+\text{Susana has a winning strategy.}
+
+## Equivalence Judgement
+FALSE
+
+## Justification
+The student's answer states that "Susana has a winning strategy," which is not in agreement with the reference answer indicating "No winning strategy; the game can last indefinitely." The essence of the problem is that neither player can guarantee a win because the game can go on indefinitely without either player being able to write a polynomial with an integer root. The student's conclusion contradicts this, suggesting that a specific winning strategy exists for Susana, which is fundamentally incorrect according to the problem's conditions. Thus, the student's answer does not capture the same meaning as the reference answer.
+
+
+=== report over ===
+
+<example math solution>
+**Question**:
+Determine if there exists a (three-variable) polynomial $P(x,y,z)$ with integer coefficients satisfying the following property: a positive integer $n$ is *not* a perfect square if and only if there is a triple $(x,y,z)$ of positive integers such that $P(x,y,z) = n$.
+
+**Reference Answer**
+P(x,y,z) = x^2 + y^2 + z^2 + 2xyz
+
+**Student Solution**:
+**Claim:** There exists a three-variable polynomial $P(x, y
+
+</example math solution>
+
+## Student Final Answer
+None
+
+## Equivalence Judgement
+FALSE
+
+## Justification
+The student's solution is truncated and provides no final boxed answer, so there is no conclusion to compare against the reference answer.
+
+=== report over ===
+
+<math solution>
+**Question**:
+{{Problem}}
+
+**Reference Answer**
+{{Reference Answer}}
+
+**Student Solution**:
+{{Solution}}
+
+</math solution>
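The template above fixes the judge's output as a markdown skeleton ending in "=== report over ===", with the verdict on its own line under "## Equivalence Judgement". A minimal sketch of recovering that verdict; the function name is hypothetical, not taken from the package.

import re
from typing import Optional

def parse_equivalence_judgement(report: str) -> Optional[bool]:
    """Hypothetical parser for the markdown report format defined above.
    Returns True/False for the judgement, or None if the section is absent."""
    match = re.search(r"## Equivalence Judgement\s+(TRUE|FALSE)", report)
    if match is None:
        return None
    return match.group(1) == "TRUE"

For instance, parse_equivalence_judgement("## Equivalence Judgement\nFALSE\n") returns False, and a report with no judgement section maps to None rather than a guessed verdict.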
@@ -0,0 +1,36 @@
+# CONTEXT #
+I am a teacher, and I have some high-level math problems. I am tasked with evaluating the correctness of a student's answer.
+Below, I am provided with a problem and a reference answer. Additionally, a student's answer is provided. My job is to assess whether the student's answer captures the same meaning as the reference answer, even when expressed with different wording or format.
+
+# OBJECTIVE #
+I need you to judge whether the student's answer is correct given the ground truth answer.
+
+Your tasks include:
+A. Identify Mathematical or Notational Equivalence: Pay special attention to any LaTeX expressions in both answers. Confirm that the mathematical relationships, variables, and operations conveyed are equivalent.
+B. Provide a Justification: Conclude with a brief explanation as to why you believe the student's output is correct or incorrect, highlighting any key differences in meaning or content.
+
+# ATTENTION #
+- The reference answer is ALWAYS correct. You should carefully judge whether the student gives the same answer as the reference answer.
+- The Equivalence Judgement is only TRUE or FALSE. The answer is FALSE even if the student's final answer is almost correct but contains a minor mistake.
+- The answer is contained within the "boxed" section, so you can focus solely on comparing the content in the student's answer box with the reference answer, without needing to consider the intermediate steps.
+
+# QUESTION #
+{{Problem}}
+
+# REFERENCE ANSWER #
+{{Reference Answer}}
+
+# STUDENT'S ANSWER #
+{{Solution}}
+
+# RESPONSE: MARKDOWN REPORT #
+Respond only with a report in the following Markdown format, without any extra text:
+
+## Student Final Answer
+[Extract the student's final answer, which is enclosed in "\\boxed{}", or output None if the student did not provide a final answer.]
+
+## Justification
+[A brief one-sentence explanation as to why you believe the student's answer is correct or incorrect.]
+
+## Equivalence Judgement
+[Whether the student's answer shares the same meaning as the reference answer. (TRUE or FALSE)]