crfm-helm 0.4.0__py3-none-any.whl → 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {crfm_helm-0.4.0.dist-info → crfm_helm-0.5.0.dist-info}/METADATA +134 -31
- crfm_helm-0.5.0.dist-info/RECORD +642 -0
- {crfm_helm-0.4.0.dist-info → crfm_helm-0.5.0.dist-info}/WHEEL +1 -1
- helm/benchmark/adaptation/adapter_spec.py +31 -3
- helm/benchmark/adaptation/adapters/adapter.py +2 -2
- helm/benchmark/adaptation/adapters/adapter_factory.py +24 -27
- helm/benchmark/adaptation/adapters/generation_adapter.py +1 -0
- helm/benchmark/adaptation/adapters/in_context_learning_adapter.py +20 -4
- helm/benchmark/adaptation/adapters/language_modeling_adapter.py +2 -3
- helm/benchmark/adaptation/adapters/multimodal/multiple_choice_joint_multimodal_adapter.py +104 -0
- helm/benchmark/adaptation/adapters/multimodal/test_in_context_learning_multimodal_adapter.py +2 -1
- helm/benchmark/adaptation/adapters/test_adapter.py +2 -1
- helm/benchmark/adaptation/adapters/test_generation_adapter.py +32 -8
- helm/benchmark/adaptation/adapters/test_language_modeling_adapter.py +7 -19
- helm/benchmark/adaptation/adapters/test_multiple_choice_joint_adapter.py +60 -6
- helm/benchmark/adaptation/common_adapter_specs.py +376 -0
- helm/benchmark/adaptation/request_state.py +6 -1
- helm/benchmark/adaptation/scenario_state.py +6 -2
- helm/benchmark/annotation/annotator.py +43 -0
- helm/benchmark/annotation/annotator_factory.py +61 -0
- helm/benchmark/annotation/image2structure/image_compiler_annotator.py +88 -0
- helm/benchmark/annotation/image2structure/latex_compiler_annotator.py +59 -0
- helm/benchmark/annotation/image2structure/lilypond_compiler_annotator.py +84 -0
- helm/benchmark/annotation/image2structure/webpage_compiler_annotator.py +132 -0
- helm/benchmark/annotation/test_annotator_factory.py +26 -0
- helm/benchmark/annotation/test_dummy_annotator.py +44 -0
- helm/benchmark/annotation_executor.py +124 -0
- helm/benchmark/augmentations/data_augmenter.py +0 -2
- helm/benchmark/augmentations/gender_perturbation.py +1 -1
- helm/benchmark/augmentations/perturbation.py +8 -2
- helm/benchmark/augmentations/perturbation_description.py +1 -1
- helm/benchmark/augmentations/suffix_perturbation.py +29 -0
- helm/benchmark/augmentations/test_perturbation.py +11 -7
- helm/benchmark/augmentations/translate_perturbation.py +30 -0
- helm/benchmark/config_registry.py +7 -1
- helm/benchmark/executor.py +46 -16
- helm/benchmark/huggingface_registration.py +20 -7
- helm/benchmark/metrics/basic_metrics.py +169 -664
- helm/benchmark/metrics/bbq_metrics.py +3 -4
- helm/benchmark/metrics/bias_metrics.py +6 -6
- helm/benchmark/metrics/classification_metrics.py +11 -8
- helm/benchmark/metrics/cleva_accuracy_metrics.py +8 -5
- helm/benchmark/metrics/cleva_harms_metrics.py +2 -2
- helm/benchmark/metrics/code_metrics_helper.py +0 -2
- helm/benchmark/metrics/common_metric_specs.py +167 -0
- helm/benchmark/metrics/decodingtrust_fairness_metrics.py +72 -0
- helm/benchmark/metrics/decodingtrust_ood_knowledge_metrics.py +66 -0
- helm/benchmark/metrics/decodingtrust_privacy_metrics.py +101 -0
- helm/benchmark/metrics/decodingtrust_stereotype_bias_metrics.py +202 -0
- helm/benchmark/metrics/disinformation_metrics.py +4 -110
- helm/benchmark/metrics/dry_run_metrics.py +2 -2
- helm/benchmark/metrics/efficiency_metrics.py +206 -0
- helm/benchmark/metrics/evaluate_instances_metric.py +59 -0
- helm/benchmark/metrics/evaluate_reference_metrics.py +376 -0
- helm/benchmark/metrics/image_generation/aesthetics_metrics.py +54 -0
- helm/benchmark/metrics/image_generation/aesthetics_scorer.py +66 -0
- helm/benchmark/metrics/image_generation/clip_score_metrics.py +73 -0
- helm/benchmark/metrics/image_generation/denoised_runtime_metric.py +42 -0
- helm/benchmark/metrics/image_generation/detection_metrics.py +57 -0
- helm/benchmark/metrics/image_generation/detectors/base_detector.py +8 -0
- helm/benchmark/metrics/image_generation/detectors/vitdet.py +178 -0
- helm/benchmark/metrics/image_generation/efficiency_metrics.py +41 -0
- helm/benchmark/metrics/image_generation/fidelity_metrics.py +168 -0
- helm/benchmark/metrics/image_generation/fractal_dimension/__init__.py +0 -0
- helm/benchmark/metrics/image_generation/fractal_dimension/fractal_dimension_util.py +63 -0
- helm/benchmark/metrics/image_generation/fractal_dimension/test_fractal_dimension_util.py +33 -0
- helm/benchmark/metrics/image_generation/fractal_dimension_metric.py +50 -0
- helm/benchmark/metrics/image_generation/gender_metrics.py +58 -0
- helm/benchmark/metrics/image_generation/image_critique_metrics.py +284 -0
- helm/benchmark/metrics/image_generation/lpips_metrics.py +82 -0
- helm/benchmark/metrics/image_generation/multi_scale_ssim_metrics.py +82 -0
- helm/benchmark/metrics/image_generation/nsfw_detector.py +96 -0
- helm/benchmark/metrics/image_generation/nsfw_metrics.py +103 -0
- helm/benchmark/metrics/image_generation/nudity_metrics.py +38 -0
- helm/benchmark/metrics/image_generation/photorealism_critique_metrics.py +153 -0
- helm/benchmark/metrics/image_generation/psnr_metrics.py +78 -0
- helm/benchmark/metrics/image_generation/q16/__init__.py +0 -0
- helm/benchmark/metrics/image_generation/q16/q16_toxicity_detector.py +90 -0
- helm/benchmark/metrics/image_generation/q16/test_q16.py +18 -0
- helm/benchmark/metrics/image_generation/q16_toxicity_metrics.py +48 -0
- helm/benchmark/metrics/image_generation/skin_tone_metrics.py +164 -0
- helm/benchmark/metrics/image_generation/uiqi_metrics.py +92 -0
- helm/benchmark/metrics/image_generation/watermark/__init__.py +0 -0
- helm/benchmark/metrics/image_generation/watermark/test_watermark_detector.py +16 -0
- helm/benchmark/metrics/image_generation/watermark/watermark_detector.py +87 -0
- helm/benchmark/metrics/image_generation/watermark_metrics.py +48 -0
- helm/benchmark/metrics/instruction_following_critique_metrics.py +3 -1
- helm/benchmark/metrics/language_modeling_metrics.py +99 -0
- helm/benchmark/metrics/machine_translation_metrics.py +89 -0
- helm/benchmark/metrics/metric.py +93 -172
- helm/benchmark/metrics/metric_name.py +0 -1
- helm/benchmark/metrics/metric_service.py +16 -0
- helm/benchmark/metrics/paraphrase_generation_metrics.py +3 -4
- helm/benchmark/metrics/ranking_metrics.py +2 -2
- helm/benchmark/metrics/reference_metric.py +148 -0
- helm/benchmark/metrics/summac/model_summac.py +0 -2
- helm/benchmark/metrics/summarization_metrics.py +2 -2
- helm/benchmark/metrics/test_classification_metrics.py +8 -5
- helm/benchmark/metrics/test_disinformation_metrics.py +78 -0
- helm/benchmark/metrics/{test_basic_metrics.py → test_evaluate_reference_metrics.py} +5 -1
- helm/benchmark/metrics/test_metric.py +2 -2
- helm/benchmark/metrics/tokens/gooseai_token_cost_estimator.py +10 -2
- helm/benchmark/metrics/toxicity_metrics.py +1 -1
- helm/benchmark/metrics/toxicity_utils.py +23 -0
- helm/benchmark/metrics/unitxt_metrics.py +81 -0
- helm/benchmark/metrics/vision_language/__init__.py +0 -0
- helm/benchmark/metrics/vision_language/emd_utils.py +341 -0
- helm/benchmark/metrics/vision_language/image_metrics.py +450 -0
- helm/benchmark/metrics/vision_language/image_utils.py +100 -0
- helm/benchmark/model_deployment_registry.py +74 -0
- helm/benchmark/model_metadata_registry.py +36 -0
- helm/benchmark/multi_gpu_runner.py +133 -0
- helm/benchmark/presentation/create_plots.py +8 -7
- helm/benchmark/presentation/run_display.py +26 -10
- helm/benchmark/presentation/schema.py +15 -40
- helm/benchmark/presentation/summarize.py +119 -79
- helm/benchmark/presentation/table.py +8 -8
- helm/benchmark/presentation/test_contamination.py +2 -2
- helm/benchmark/presentation/test_run_entry.py +1 -2
- helm/benchmark/presentation/test_summarize.py +3 -3
- helm/benchmark/run.py +54 -26
- helm/benchmark/run_expander.py +214 -16
- helm/benchmark/run_spec.py +93 -0
- helm/benchmark/run_spec_factory.py +162 -0
- helm/benchmark/run_specs/__init__.py +0 -0
- helm/benchmark/run_specs/classic_run_specs.py +1510 -0
- helm/benchmark/run_specs/cleva_run_specs.py +277 -0
- helm/benchmark/run_specs/decodingtrust_run_specs.py +314 -0
- helm/benchmark/run_specs/heim_run_specs.py +623 -0
- helm/benchmark/run_specs/instruction_following_run_specs.py +129 -0
- helm/benchmark/run_specs/lite_run_specs.py +307 -0
- helm/benchmark/run_specs/simple_run_specs.py +104 -0
- helm/benchmark/run_specs/unitxt_run_specs.py +42 -0
- helm/benchmark/run_specs/vlm_run_specs.py +501 -0
- helm/benchmark/runner.py +51 -57
- helm/benchmark/runner_config_registry.py +21 -0
- helm/benchmark/scenarios/bbq_scenario.py +1 -1
- helm/benchmark/scenarios/bold_scenario.py +2 -2
- helm/benchmark/scenarios/code_scenario.py +1 -0
- helm/benchmark/scenarios/decodingtrust_adv_demonstration_scenario.py +169 -0
- helm/benchmark/scenarios/decodingtrust_adv_robustness_scenario.py +121 -0
- helm/benchmark/scenarios/decodingtrust_fairness_scenario.py +77 -0
- helm/benchmark/scenarios/decodingtrust_machine_ethics_scenario.py +324 -0
- helm/benchmark/scenarios/decodingtrust_ood_robustness_scenario.py +204 -0
- helm/benchmark/scenarios/decodingtrust_privacy_scenario.py +559 -0
- helm/benchmark/scenarios/decodingtrust_stereotype_bias_scenario.py +67 -0
- helm/benchmark/scenarios/decodingtrust_toxicity_prompts_scenario.py +78 -0
- helm/benchmark/scenarios/dialogue_scenarios.py +0 -1
- helm/benchmark/scenarios/image_generation/__init__.py +0 -0
- helm/benchmark/scenarios/image_generation/common_syntactic_processes_scenario.py +105 -0
- helm/benchmark/scenarios/image_generation/cub200_scenario.py +95 -0
- helm/benchmark/scenarios/image_generation/daily_dalle_scenario.py +124 -0
- helm/benchmark/scenarios/image_generation/demographic_stereotypes_scenario.py +82 -0
- helm/benchmark/scenarios/image_generation/detection_scenario.py +83 -0
- helm/benchmark/scenarios/image_generation/draw_bench_scenario.py +74 -0
- helm/benchmark/scenarios/image_generation/i2p_scenario.py +57 -0
- helm/benchmark/scenarios/image_generation/landing_page_scenario.py +46 -0
- helm/benchmark/scenarios/image_generation/logos_scenario.py +223 -0
- helm/benchmark/scenarios/image_generation/magazine_cover_scenario.py +91 -0
- helm/benchmark/scenarios/image_generation/mental_disorders_scenario.py +46 -0
- helm/benchmark/scenarios/image_generation/mscoco_scenario.py +91 -0
- helm/benchmark/scenarios/image_generation/paint_skills_scenario.py +72 -0
- helm/benchmark/scenarios/image_generation/parti_prompts_scenario.py +94 -0
- helm/benchmark/scenarios/image_generation/radiology_scenario.py +42 -0
- helm/benchmark/scenarios/image_generation/relational_understanding_scenario.py +52 -0
- helm/benchmark/scenarios/image_generation/time_most_significant_historical_figures_scenario.py +124 -0
- helm/benchmark/scenarios/image_generation/winoground_scenario.py +62 -0
- helm/benchmark/scenarios/imdb_scenario.py +0 -1
- helm/benchmark/scenarios/live_qa_scenario.py +94 -0
- helm/benchmark/scenarios/lm_entry_scenario.py +185 -0
- helm/benchmark/scenarios/math_scenario.py +19 -2
- helm/benchmark/scenarios/medication_qa_scenario.py +60 -0
- helm/benchmark/scenarios/numeracy_scenario.py +1 -1
- helm/benchmark/scenarios/opinions_qa_scenario.py +0 -4
- helm/benchmark/scenarios/scenario.py +4 -0
- helm/benchmark/scenarios/simple_scenarios.py +122 -1
- helm/benchmark/scenarios/test_math_scenario.py +6 -0
- helm/benchmark/scenarios/test_scenario.py +6 -3
- helm/benchmark/scenarios/test_simple_scenarios.py +50 -0
- helm/benchmark/scenarios/thai_exam_scenario.py +135 -0
- helm/benchmark/scenarios/unitxt_scenario.py +56 -0
- helm/benchmark/scenarios/verifiability_judgment_scenario.py +3 -1
- helm/benchmark/scenarios/vicuna_scenario.py +1 -1
- helm/benchmark/scenarios/vision_language/bingo_scenario.py +103 -0
- helm/benchmark/scenarios/vision_language/hateful_memes_scenario.py +92 -0
- helm/benchmark/scenarios/vision_language/heim_human_eval_scenario.py +113 -0
- helm/benchmark/scenarios/vision_language/image2structure/__init__.py +0 -0
- helm/benchmark/scenarios/vision_language/image2structure/chart2csv_scenario.py +55 -0
- helm/benchmark/scenarios/vision_language/image2structure/image2structure_scenario.py +214 -0
- helm/benchmark/scenarios/vision_language/image2structure/latex_scenario.py +25 -0
- helm/benchmark/scenarios/vision_language/image2structure/musicsheet_scenario.py +20 -0
- helm/benchmark/scenarios/vision_language/image2structure/utils_latex.py +347 -0
- helm/benchmark/scenarios/vision_language/image2structure/webpage/__init__.py +0 -0
- helm/benchmark/scenarios/vision_language/image2structure/webpage/driver.py +84 -0
- helm/benchmark/scenarios/vision_language/image2structure/webpage/jekyll_server.py +182 -0
- helm/benchmark/scenarios/vision_language/image2structure/webpage/utils.py +31 -0
- helm/benchmark/scenarios/vision_language/image2structure/webpage_scenario.py +225 -0
- helm/benchmark/scenarios/vision_language/mementos_scenario.py +124 -0
- helm/benchmark/scenarios/vision_language/mme_scenario.py +145 -0
- helm/benchmark/scenarios/vision_language/mmmu_scenario.py +187 -0
- helm/benchmark/scenarios/vision_language/multipanelvqa_scenario.py +169 -0
- helm/benchmark/scenarios/vision_language/pope_scenario.py +104 -0
- helm/benchmark/scenarios/vision_language/seed_bench_scenario.py +129 -0
- helm/benchmark/scenarios/vision_language/unicorn_scenario.py +108 -0
- helm/benchmark/scenarios/vision_language/viz_wiz_scenario.py +1 -2
- helm/benchmark/scenarios/vision_language/vqa_scenario.py +1 -1
- helm/benchmark/scenarios/wmt_14_scenario.py +1 -1
- helm/benchmark/server.py +24 -1
- helm/benchmark/slurm_runner.py +70 -49
- helm/benchmark/static/benchmarking.js +1 -1
- helm/benchmark/static/schema_classic.yaml +258 -1066
- helm/benchmark/static/schema_instruction_following.yaml +210 -0
- helm/benchmark/static/schema_lite.yaml +2 -227
- helm/benchmark/static/schema_mmlu.yaml +1507 -0
- helm/benchmark/static/schema_unitxt.yaml +428 -0
- helm/benchmark/static/schema_vlm.yaml +576 -0
- helm/benchmark/static_build/assets/01-694cb9b7.png +0 -0
- helm/benchmark/static_build/assets/ai21-0eb91ec3.png +0 -0
- helm/benchmark/static_build/assets/aleph-alpha-7ce10034.png +0 -0
- helm/benchmark/static_build/assets/anthropic-70d8bc39.png +0 -0
- helm/benchmark/static_build/assets/bigscience-7f0400c0.png +0 -0
- helm/benchmark/static_build/assets/cohere-3550c6cb.png +0 -0
- helm/benchmark/static_build/assets/crfm-logo-74391ab8.png +0 -0
- helm/benchmark/static_build/assets/eleutherai-b9451114.png +0 -0
- helm/benchmark/static_build/assets/google-06d997ad.png +0 -0
- helm/benchmark/static_build/assets/heim-logo-3e5e3aa4.png +0 -0
- helm/benchmark/static_build/assets/helm-logo-simple-2ed5400b.png +0 -0
- helm/benchmark/static_build/assets/helmhero-28e90f4d.png +0 -0
- helm/benchmark/static_build/assets/index-5088afcb.css +1 -0
- helm/benchmark/static_build/assets/index-d839df55.js +9 -0
- helm/benchmark/static_build/assets/meta-5580e9f1.png +0 -0
- helm/benchmark/static_build/assets/microsoft-f5ee5016.png +0 -0
- helm/benchmark/static_build/assets/mistral-18e1be23.png +0 -0
- helm/benchmark/static_build/assets/nvidia-86fa75c1.png +0 -0
- helm/benchmark/static_build/assets/openai-3f8653e4.png +0 -0
- helm/benchmark/static_build/assets/react-d4a0b69b.js +85 -0
- helm/benchmark/static_build/assets/recharts-6d337683.js +97 -0
- helm/benchmark/static_build/assets/tii-24de195c.png +0 -0
- helm/benchmark/static_build/assets/together-a665a35b.png +0 -0
- helm/benchmark/static_build/assets/tremor-54a99cc4.js +10 -0
- helm/benchmark/static_build/assets/tsinghua-keg-97d4b395.png +0 -0
- helm/benchmark/static_build/assets/vhelm-framework-cde7618a.png +0 -0
- helm/benchmark/static_build/assets/vhelm-model-6d812526.png +0 -0
- helm/benchmark/static_build/assets/yandex-38e09d70.png +0 -0
- helm/benchmark/static_build/config.js +4 -0
- helm/benchmark/static_build/index.html +20 -0
- helm/benchmark/test_data_preprocessor.py +3 -3
- helm/benchmark/test_model_deployment_definition.py +14 -16
- helm/benchmark/test_run_expander.py +1 -1
- helm/benchmark/window_services/ai21_window_service.py +22 -33
- helm/benchmark/window_services/cohere_window_service.py +1 -63
- helm/benchmark/window_services/default_window_service.py +2 -44
- helm/benchmark/window_services/encoder_decoder_window_service.py +0 -11
- helm/benchmark/window_services/ice_window_service.py +0 -34
- helm/benchmark/window_services/image_generation/__init__.py +0 -0
- helm/benchmark/window_services/image_generation/clip_window_service.py +15 -0
- helm/benchmark/window_services/image_generation/lexica_search_window_service.py +9 -0
- helm/benchmark/window_services/image_generation/openai_dalle_window_service.py +9 -0
- helm/benchmark/window_services/image_generation/test_clip_window_service.py +29 -0
- helm/benchmark/window_services/image_generation/test_openai_dalle_window_service.py +30 -0
- helm/benchmark/window_services/local_window_service.py +21 -4
- helm/benchmark/window_services/test_anthropic_window_service.py +2 -1
- helm/benchmark/window_services/test_bloom_window_service.py +2 -1
- helm/benchmark/window_services/test_cohere_window_service.py +2 -1
- helm/benchmark/window_services/test_flan_t5_window_service.py +2 -1
- helm/benchmark/window_services/test_gpt2_window_service.py +2 -2
- helm/benchmark/window_services/test_gpt4_window_service.py +2 -1
- helm/benchmark/window_services/test_gptj_window_service.py +3 -2
- helm/benchmark/window_services/test_gptneox_window_service.py +3 -2
- helm/benchmark/window_services/test_ice_window_service.py +2 -1
- helm/benchmark/window_services/test_openai_window_service.py +2 -1
- helm/benchmark/window_services/test_opt_window_service.py +3 -2
- helm/benchmark/window_services/test_palmyra_window_service.py +2 -1
- helm/benchmark/window_services/test_t0pp_window_service.py +2 -1
- helm/benchmark/window_services/test_t511b_window_service.py +2 -1
- helm/benchmark/window_services/test_ul2_window_service.py +2 -1
- helm/benchmark/window_services/test_utils.py +3 -2
- helm/benchmark/window_services/test_yalm_window_service.py +2 -1
- helm/benchmark/window_services/window_service.py +42 -0
- helm/benchmark/window_services/window_service_factory.py +4 -1
- helm/benchmark/window_services/yalm_window_service.py +0 -27
- helm/clients/__init__.py +0 -0
- helm/{proxy/clients → clients}/ai21_client.py +3 -9
- helm/clients/aleph_alpha_client.py +112 -0
- helm/{proxy/clients → clients}/anthropic_client.py +203 -18
- helm/{proxy/clients → clients}/auto_client.py +59 -31
- helm/clients/bedrock_client.py +128 -0
- helm/clients/bedrock_utils.py +72 -0
- helm/{proxy/clients → clients}/client.py +65 -7
- helm/clients/clip_score_client.py +49 -0
- helm/clients/clip_scorers/__init__.py +0 -0
- helm/clients/clip_scorers/base_clip_scorer.py +18 -0
- helm/clients/clip_scorers/clip_scorer.py +50 -0
- helm/clients/clip_scorers/multilingual_clip_scorer.py +50 -0
- helm/{proxy/clients → clients}/cohere_client.py +4 -11
- helm/clients/gcs_client.py +82 -0
- helm/{proxy/clients → clients}/google_client.py +5 -5
- helm/clients/google_translate_client.py +35 -0
- helm/{proxy/clients → clients}/http_model_client.py +5 -7
- helm/{proxy/clients → clients}/huggingface_client.py +43 -64
- helm/clients/image_generation/__init__.py +0 -0
- helm/clients/image_generation/adobe_vision_client.py +78 -0
- helm/clients/image_generation/aleph_alpha_image_generation_client.py +98 -0
- helm/clients/image_generation/cogview2/__init__.py +0 -0
- helm/clients/image_generation/cogview2/coglm_strategy.py +96 -0
- helm/clients/image_generation/cogview2/coglm_utils.py +82 -0
- helm/clients/image_generation/cogview2/sr_pipeline/__init__.py +15 -0
- helm/clients/image_generation/cogview2/sr_pipeline/direct_sr.py +96 -0
- helm/clients/image_generation/cogview2/sr_pipeline/dsr_model.py +254 -0
- helm/clients/image_generation/cogview2/sr_pipeline/dsr_sampling.py +190 -0
- helm/clients/image_generation/cogview2/sr_pipeline/iterative_sr.py +141 -0
- helm/clients/image_generation/cogview2/sr_pipeline/itersr_model.py +269 -0
- helm/clients/image_generation/cogview2/sr_pipeline/itersr_sampling.py +120 -0
- helm/clients/image_generation/cogview2/sr_pipeline/sr_group.py +42 -0
- helm/clients/image_generation/cogview2_client.py +191 -0
- helm/clients/image_generation/dalle2_client.py +192 -0
- helm/clients/image_generation/dalle3_client.py +108 -0
- helm/clients/image_generation/dalle_mini/__init__.py +3 -0
- helm/clients/image_generation/dalle_mini/data.py +442 -0
- helm/clients/image_generation/dalle_mini/model/__init__.py +5 -0
- helm/clients/image_generation/dalle_mini/model/configuration.py +175 -0
- helm/clients/image_generation/dalle_mini/model/modeling.py +1834 -0
- helm/clients/image_generation/dalle_mini/model/partitions.py +84 -0
- helm/clients/image_generation/dalle_mini/model/processor.py +63 -0
- helm/clients/image_generation/dalle_mini/model/text.py +251 -0
- helm/clients/image_generation/dalle_mini/model/tokenizer.py +9 -0
- helm/clients/image_generation/dalle_mini/model/utils.py +29 -0
- helm/clients/image_generation/dalle_mini/vqgan_jax/__init__.py +1 -0
- helm/clients/image_generation/dalle_mini/vqgan_jax/configuration_vqgan.py +40 -0
- helm/clients/image_generation/dalle_mini/vqgan_jax/convert_pt_model_to_jax.py +107 -0
- helm/clients/image_generation/dalle_mini/vqgan_jax/modeling_flax_vqgan.py +610 -0
- helm/clients/image_generation/dalle_mini_client.py +190 -0
- helm/clients/image_generation/deep_floyd_client.py +78 -0
- helm/clients/image_generation/huggingface_diffusers_client.py +249 -0
- helm/clients/image_generation/image_generation_client_utils.py +9 -0
- helm/clients/image_generation/lexica_client.py +86 -0
- helm/clients/image_generation/mindalle/__init__.py +0 -0
- helm/clients/image_generation/mindalle/models/__init__.py +216 -0
- helm/clients/image_generation/mindalle/models/stage1/__init__.py +0 -0
- helm/clients/image_generation/mindalle/models/stage1/layers.py +312 -0
- helm/clients/image_generation/mindalle/models/stage1/vqgan.py +103 -0
- helm/clients/image_generation/mindalle/models/stage2/__init__.py +0 -0
- helm/clients/image_generation/mindalle/models/stage2/layers.py +144 -0
- helm/clients/image_generation/mindalle/models/stage2/transformer.py +268 -0
- helm/clients/image_generation/mindalle/models/tokenizer.py +30 -0
- helm/clients/image_generation/mindalle/utils/__init__.py +3 -0
- helm/clients/image_generation/mindalle/utils/config.py +129 -0
- helm/clients/image_generation/mindalle/utils/sampling.py +149 -0
- helm/clients/image_generation/mindalle/utils/utils.py +89 -0
- helm/clients/image_generation/mindalle_client.py +115 -0
- helm/clients/image_generation/nudity_check_client.py +64 -0
- helm/clients/image_generation/together_image_generation_client.py +111 -0
- helm/{proxy/clients → clients}/lit_gpt_client.py +4 -4
- helm/{proxy/clients → clients}/megatron_client.py +5 -5
- helm/clients/mistral_client.py +134 -0
- helm/clients/moderation_api_client.py +109 -0
- helm/clients/open_lm_client.py +43 -0
- helm/clients/openai_client.py +302 -0
- helm/{proxy/clients → clients}/palmyra_client.py +6 -8
- helm/{proxy/clients → clients}/perspective_api_client.py +7 -8
- helm/clients/simple_client.py +64 -0
- helm/{proxy/clients → clients}/test_auto_client.py +13 -15
- helm/clients/test_client.py +100 -0
- helm/{proxy/clients → clients}/test_huggingface_client.py +15 -16
- helm/clients/test_simple_client.py +19 -0
- helm/{proxy/clients → clients}/test_together_client.py +20 -8
- helm/{proxy/clients → clients}/together_client.py +12 -72
- helm/clients/vertexai_client.py +391 -0
- helm/clients/vision_language/__init__.py +0 -0
- helm/clients/vision_language/huggingface_vlm_client.py +104 -0
- helm/{proxy/clients → clients}/vision_language/idefics_client.py +53 -48
- helm/clients/vision_language/open_flamingo/__init__.py +2 -0
- helm/clients/vision_language/open_flamingo/src/__init__.py +0 -0
- helm/clients/vision_language/open_flamingo/src/factory.py +147 -0
- helm/clients/vision_language/open_flamingo/src/flamingo.py +337 -0
- helm/clients/vision_language/open_flamingo/src/flamingo_lm.py +155 -0
- helm/clients/vision_language/open_flamingo/src/helpers.py +267 -0
- helm/clients/vision_language/open_flamingo/src/utils.py +47 -0
- helm/clients/vision_language/open_flamingo_client.py +155 -0
- helm/clients/vision_language/qwen_vlm_client.py +171 -0
- helm/clients/vllm_client.py +46 -0
- helm/common/cache.py +16 -4
- helm/common/cache_backend_config.py +47 -0
- helm/common/clip_score_request.py +41 -0
- helm/common/file_caches/__init__.py +0 -0
- helm/common/file_caches/file_cache.py +16 -0
- helm/common/file_caches/local_file_cache.py +61 -0
- helm/common/file_caches/test_local_file_cache.py +25 -0
- helm/common/file_upload_request.py +27 -0
- helm/common/general.py +1 -1
- helm/common/image_generation_parameters.py +25 -0
- helm/common/images_utils.py +24 -1
- helm/common/key_value_store.py +35 -4
- helm/common/media_object.py +13 -0
- helm/common/moderations_api_request.py +71 -0
- helm/common/mongo_key_value_store.py +3 -3
- helm/common/multimodal_request_utils.py +31 -0
- helm/common/nudity_check_request.py +29 -0
- helm/common/request.py +15 -17
- helm/common/test_general.py +6 -0
- helm/common/tokenization_request.py +1 -1
- helm/config/model_deployments.yaml +1069 -546
- helm/config/model_metadata.yaml +753 -31
- helm/config/tokenizer_configs.yaml +142 -43
- helm/proxy/accounts.py +31 -4
- helm/proxy/critique/mechanical_turk_critique_importer.py +3 -0
- helm/proxy/critique/model_critique_client.py +8 -6
- helm/proxy/example_queries.py +29 -17
- helm/proxy/server.py +70 -5
- helm/proxy/services/remote_service.py +31 -0
- helm/proxy/services/server_service.py +96 -16
- helm/proxy/services/service.py +30 -0
- helm/proxy/services/test_remote_service.py +4 -3
- helm/proxy/services/test_service.py +0 -12
- helm/proxy/test_accounts.py +32 -0
- helm/proxy/token_counters/auto_token_counter.py +37 -37
- helm/proxy/token_counters/test_auto_token_counter.py +164 -0
- helm/proxy/token_counters/token_counter.py +3 -5
- helm/tokenizers/__init__.py +0 -0
- helm/{proxy/tokenizers → tokenizers}/ai21_tokenizer.py +3 -3
- helm/{proxy/tokenizers → tokenizers}/anthropic_tokenizer.py +1 -1
- helm/{proxy/tokenizers → tokenizers}/auto_tokenizer.py +6 -9
- helm/{proxy/tokenizers → tokenizers}/cohere_tokenizer.py +1 -1
- helm/{proxy/tokenizers → tokenizers}/http_model_tokenizer.py +3 -3
- helm/{proxy/tokenizers → tokenizers}/huggingface_tokenizer.py +7 -26
- helm/tokenizers/simple_tokenizer.py +33 -0
- helm/{proxy/tokenizers → tokenizers}/test_anthropic_tokenizer.py +1 -1
- helm/{proxy/tokenizers → tokenizers}/test_huggingface_tokenizer.py +3 -0
- helm/tokenizers/test_simple_tokenizer.py +33 -0
- helm/{proxy/tokenizers → tokenizers}/vertexai_tokenizer.py +1 -1
- helm/{proxy/tokenizers → tokenizers}/yalm_tokenizer.py +5 -3
- helm/tokenizers/yalm_tokenizer_data/__init__.py +0 -0
- helm/tokenizers/yalm_tokenizer_data/voc_100b.sp +0 -0
- helm/{proxy/tokenizers → tokenizers}/yalm_tokenizer_data/yalm_tokenizer.py +1 -1
- crfm_helm-0.4.0.dist-info/RECORD +0 -397
- helm/benchmark/run_specs.py +0 -2762
- helm/benchmark/test_model_properties.py +0 -1570
- helm/benchmark/vlm_run_specs.py +0 -97
- helm/benchmark/window_services/flan_t5_window_service.py +0 -29
- helm/benchmark/window_services/gpt2_window_service.py +0 -32
- helm/benchmark/window_services/huggingface_window_service.py +0 -60
- helm/benchmark/window_services/t0pp_window_service.py +0 -35
- helm/benchmark/window_services/t511b_window_service.py +0 -30
- helm/benchmark/window_services/test_mt_nlg_window_service.py +0 -48
- helm/benchmark/window_services/ul2_window_service.py +0 -30
- helm/benchmark/window_services/wider_ai21_window_service.py +0 -24
- helm/common/cache_utils.py +0 -14
- helm/proxy/clients/aleph_alpha_client.py +0 -95
- helm/proxy/clients/goose_ai_client.py +0 -99
- helm/proxy/clients/microsoft_client.py +0 -180
- helm/proxy/clients/openai_client.py +0 -206
- helm/proxy/clients/simple_client.py +0 -60
- helm/proxy/clients/test_client.py +0 -49
- helm/proxy/clients/vertexai_client.py +0 -115
- helm/proxy/token_counters/ai21_token_counter.py +0 -20
- helm/proxy/token_counters/cohere_token_counter.py +0 -13
- helm/proxy/token_counters/free_token_counter.py +0 -12
- helm/proxy/token_counters/gooseai_token_counter.py +0 -24
- helm/proxy/token_counters/openai_token_counter.py +0 -22
- helm/proxy/token_counters/test_ai21_token_counter.py +0 -88
- helm/proxy/token_counters/test_openai_token_counter.py +0 -81
- helm/proxy/tokenizers/simple_tokenizer.py +0 -32
- {crfm_helm-0.4.0.dist-info → crfm_helm-0.5.0.dist-info}/LICENSE +0 -0
- {crfm_helm-0.4.0.dist-info → crfm_helm-0.5.0.dist-info}/entry_points.txt +0 -0
- {crfm_helm-0.4.0.dist-info → crfm_helm-0.5.0.dist-info}/top_level.txt +0 -0
- /helm/{proxy/clients → benchmark/annotation}/__init__.py +0 -0
- /helm/{proxy/clients/vision_language → benchmark/annotation/image2structure}/__init__.py +0 -0
- /helm/{proxy/tokenizers → benchmark/metrics/image_generation}/__init__.py +0 -0
- /helm/{proxy/tokenizers/yalm_tokenizer_data → benchmark/metrics/image_generation/detectors}/__init__.py +0 -0
- /helm/{proxy/clients → clients}/ai21_utils.py +0 -0
- /helm/{proxy/clients → clients}/cohere_utils.py +0 -0
- /helm/{proxy/clients → clients}/lit_gpt_generate.py +0 -0
- /helm/{proxy/clients → clients}/toxicity_classifier_client.py +0 -0
- /helm/{proxy/tokenizers → tokenizers}/aleph_alpha_tokenizer.py +0 -0
- /helm/{proxy/tokenizers → tokenizers}/caching_tokenizer.py +0 -0
- /helm/{proxy/tokenizers → tokenizers}/ice_tokenizer.py +0 -0
- /helm/{proxy/tokenizers → tokenizers}/lit_gpt_tokenizer.py +0 -0
- /helm/{proxy/tokenizers → tokenizers}/test_ice_tokenizer.py +0 -0
- /helm/{proxy/tokenizers → tokenizers}/test_yalm_tokenizer.py +0 -0
- /helm/{proxy/tokenizers → tokenizers}/tiktoken_tokenizer.py +0 -0
- /helm/{proxy/tokenizers → tokenizers}/tokenizer.py +0 -0
- /helm/{proxy/tokenizers → tokenizers}/yalm_tokenizer_data/test_yalm_tokenizer.py +0 -0
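Note that the client and tokenizer modules moved out of `helm.proxy` in this release: clients now live under `helm.clients` and tokenizers under `helm.tokenizers`, as the `helm/{proxy/clients → clients}/...` and `helm/{proxy/tokenizers → tokenizers}/...` entries above show. A minimal sketch of the corresponding import-path change, assuming downstream code imports these modules by path; only the module paths are taken from the listing above, so check any class names against the installed package:

```python
# Before (crfm-helm 0.4.0): client and tokenizer modules lived under helm.proxy.*
# import helm.proxy.clients.huggingface_client
# import helm.proxy.tokenizers.huggingface_tokenizer

# After (crfm-helm 0.5.0): the same modules are top-level helm.clients / helm.tokenizers packages.
import helm.clients.huggingface_client
import helm.tokenizers.huggingface_tokenizer
```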
helm/benchmark/metrics/basic_metrics.py
@@ -1,24 +1,19 @@
+from collections import defaultdict
 import math
-from dataclasses import dataclass
-from typing import List,
+from dataclasses import dataclass
+from typing import List, Dict, Set
 from urllib.parse import unquote
-from functools import partial
 
-import json
-import string
-import nltk
 import numpy as np
-import re
 import scipy
 import calibration as cal
-
-from
-from
-from
-from rouge_score import rouge_scorer
+from helm.benchmark.adaptation.scenario_state import ScenarioState
+from helm.benchmark.metrics.evaluate_reference_metrics import compute_reference_metrics
+from helm.benchmark.metrics.efficiency_metrics import EfficiencyMetric
+from helm.benchmark.metrics.reference_metric import ReferenceMetric
 
 from helm.common.hierarchical_logger import hlog
-from helm.common.request import Token,
+from helm.common.request import Token, GeneratedOutput
 from helm.benchmark.adaptation.adapters.adapter_factory import (
     ADAPT_MULTIPLE_CHOICE_SEPARATE_ORIGINAL,
     ADAPT_MULTIPLE_CHOICE_SEPARATE_CALIBRATED,
@@ -29,196 +24,11 @@ from helm.benchmark.adaptation.adapter_spec import AdapterSpec
 from helm.benchmark.window_services.window_service import WindowService
 from helm.benchmark.window_services.window_service_factory import WindowServiceFactory
 from helm.benchmark.window_services.tokenizer_service import TokenizerService
-from helm.benchmark.scenarios.scenario import CORRECT_TAG, Instance
-from
-from
-from helm.benchmark.metrics.cleva_metrics_helper import ChineseTokenizer
-from . import code_metrics_helper
-from .metric import Metric, get_unique_stat_by_name
-from .metric_name import MetricName
+from helm.benchmark.scenarios.scenario import CORRECT_TAG, Instance
+from .metric import Metric, MetricInterface, MetricResult, add_context, get_unique_stat_by_name
+from .metric_name import MetricContext, MetricName
 from .metric_service import MetricService
-from .statistic import Stat
-
-
-try:
-    nltk.data.find("tokenizers/punkt")
-except LookupError:
-    nltk.download("punkt") # Required for rouge
-
-
-EFFICIENCY_DATA_PACKAGE: str = "helm.benchmark.efficiency_data"
-
-INFERENCE_IDEALIZED_RUNTIMES_JSON_FILENAME: str = "inference_idealized_runtimes.json"
-INFERENCE_DENOISED_RUNTIMES_JSON_FILENAME: str = "inference_denoised_runtimes.json"
-TRAINING_EFFICIENCY_JSON_FILENAME: str = "training_efficiency.json"
-
-
-def compute_estimated_time_from_prompt_size_and_num_output_tokens(
-    request_state: RequestState,
-    inference_runtimes_dict: Dict[str, Dict],
-    num_prompt_tokens: int,
-    num_output_tokens: int,
-) -> Optional[float]:
-    estimated_runtime: Optional[float]
-    if request_state.request.model_deployment in inference_runtimes_dict:
-        inference_runtimes_dict_for_model = inference_runtimes_dict[request_state.request.model_deployment]
-        runtime_per_output_token: float = inference_runtimes_dict_for_model["runtime_per_output_token"]
-        raw_runtimes_for_prompt_tokens: Dict[str, float] = inference_runtimes_dict_for_model[
-            "runtime_for_prompt_tokens"
-        ]
-        runtimes_for_prompt_tokens: Dict[int, float] = {int(k): v for (k, v) in raw_runtimes_for_prompt_tokens.items()}
-
-        runtime_for_prompt_tokens: Optional[float] = None
-        largest_num_tokens_in_efficiency_dict: int = max(runtimes_for_prompt_tokens.keys())
-        # Find the smallest num_prompt_tokens larger than the number of tokens in the given prompt,
-        # then scale runtime in dict by (num_prompt_tokens / key) to get more accurate estimate: we
-        # assume that we can encode the prompt at the same throughput as the smallest key larger than
-        # num_prompt_tokens, and number of compute operations scales linearly with num_prompt_tokens.
-        for key in sorted(runtimes_for_prompt_tokens.keys()):
-            if num_prompt_tokens <= key:
-                runtime_for_prompt_tokens = runtimes_for_prompt_tokens[key] * (num_prompt_tokens / key)
-                break
-        # If number of tokens in the prompt exceeds the largest key in the efficiency dict, then
-        # estimate the prompt encoding time by linearly scaling up the runtime for the largest
-        # key (this is reasonably accurate under certain simplifying assumptions).
-        if runtime_for_prompt_tokens is None:
-            runtime_for_prompt_tokens = runtimes_for_prompt_tokens[largest_num_tokens_in_efficiency_dict] * (
-                num_prompt_tokens / largest_num_tokens_in_efficiency_dict
-            )
-        overhead: Optional[float] = inference_runtimes_dict_for_model.get("overhead")
-
-        # Idealized runtime is sum of the runtime of encoding the input tokens, the runtime of
-        # generating `num_output_tokens` (`runtime_per_output_token` * (`num_output_tokens` - 1))
-        # if number of output tokens is greater than 0, otherwise just `runtime_for_prompt_tokens`,
-        # and the overhead if available.
-        estimated_runtime = runtime_for_prompt_tokens
-        if num_output_tokens > 0:
-            estimated_runtime += runtime_per_output_token * (num_output_tokens - 1)
-        # Add overhead if it is available.
-        if overhead is not None:
-            estimated_runtime += overhead
-    else:
-        estimated_runtime = None
-
-    return estimated_runtime
-
-
-def pass_at_k_estimator(n: int, c: int, k: int) -> float:
-    """Calculates 1 - comb(n - c, k) / comb(n, k).
-
-    Numerically stable version defined in
-    https://arxiv.org/pdf/2107.03374.pdf
-    """
-    if n - c < k:
-        return 1.0
-    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
-
-
-def normalize_text(text: str) -> str:
-    """Lower text and remove punctuation, articles and extra whitespace.
-    Copied from the [QuAC](http://quac.ai/) evaluation script found at
-    https://s3.amazonaws.com/my89public/quac/scorer.py"""
-
-    def remove_articles(text: str) -> str:
-        return re.sub(r"\b(a|an|the)\b", " ", text)
-
-    def white_space_fix(text: str) -> str:
-        return " ".join(text.split())
-
-    def remove_punc(text: str) -> str:
-        exclude = set(string.punctuation)
-        return "".join(ch for ch in text if ch not in exclude)
-
-    def lower(text: str) -> str:
-        return text.lower()
-
-    return white_space_fix(remove_articles(remove_punc(lower(text))))
-
-
-def exact_match(gold: str, pred: str) -> float:
-    if not pred:
-        return 0
-
-    return 1 if gold.strip() == pred.strip() else 0
-
-
-def quasi_exact_match(gold: str, pred: str) -> float:
-    if not pred:
-        return 0
-
-    return 1 if normalize_text(gold) == normalize_text(pred) else 0
-
-
-def prefix_exact_match(gold: str, pred: str) -> float:
-    """
-    The `prefix_exact_match` metric is particularly useful in the zero-shot setting, where the model is
-    not given examples of the expected outputs and tends to output more tokens than it should.
-
-    For example, for this zero-shot prompt from BoolQ,
-
-    Passage: Elmendorf Air Force Base (IATA: EDF, ICAO: PAED, FAA LID: EDF) is a United States military facility
-    in Anchorage, the largest city in Alaska. Originally known as Elmendorf Field, it became Elmendorf Air Force
-    Base after World War II, and in 2010 it merged with nearby Fort Richardson to form Joint Base Elmendorf-Richardson.
-    Question: Is there an air force base in anchorage alaska?
-    Answer:
-
-    the model could output up to `max_tokens` number of tokens "Yes, Elmendorf" instead of just "Yes".
-    """
-    if not pred:
-        return 0
-
-    return 1 if pred.strip().startswith(gold.strip()) else 0
-
-
-def quasi_prefix_exact_match(gold: str, pred: str) -> float:
-    """
-    Same thing as `prefix_exact_match` but we normalize the text before checking if the prefix match.
-    """
-    if not pred:
-        return 0
-
-    return 1 if normalize_text(pred).startswith(normalize_text(gold)) else 0
-
-
-def f1_score(gold: str, pred: str) -> float:
-    ret = f_measure(set(normalize_text(gold).split()), set(normalize_text(pred).split()))
-    if ret is None: # answer is the empty string after normalizing
-        return 0.0
-
-    return ret
-
-
-def exact_match_indicator(gold: str, pred: str, indicator: str = " ") -> float:
-    """
-    Exact match, allowing for some preceding context.
-    For example, the following two answers are considered matching:
-    - Because of x and y, the answer is ## <answer>
-    - Given reasons y and z, the answer is ## <answer>
-    While the following is considered different from the earlier two
-    - Given reasons x and a, the answer is ## <other answer>
-    """
-    pred = pred.split(indicator)[-1].strip()
-    gold = gold.split(indicator)[-1].strip()
-    return exact_match(gold, pred)
-
-
-def final_number_exact_match(gold: str, pred: str) -> float:
-    """
-    Returns 1 iff the final number in gold and pred match.
-    Similar to exact_match_indicator.
-    Example:
-    - gold = "The answer is 15."
-    - pred = "The answer is 15 eggs."
-    - Returns 1
-    """
-
-    def get_final_number(x: str) -> str:
-        matches = re.findall(r"-?[\d,]+(?:.\d+)?", x)
-        if not matches:
-            return ""
-        return matches[-1].replace(",", "")
-
-    return exact_match(get_final_number(gold), get_final_number(pred))
+from .statistic import Stat, merge_stat
 
 
 def get_num_bytes(tokens: List[Token]) -> int:
@@ -270,123 +80,6 @@ def convert_tokens_to_text(tokens: List[Token]) -> List[Dict]:
     return groups
 
 
-def rouge_score(gold: str, pred: str, rouge_type: str, scorer: rouge_scorer.RougeScorer) -> float:
-    scores = scorer.score(gold, pred)
-    return scores[rouge_type].fmeasure
-
-
-def get_rouge_function(rouge_type: str) -> Callable[[str, str], float]:
-    scorer = rouge_scorer.RougeScorer([rouge_type], use_stemmer=True)
-    return partial(rouge_score, scorer=scorer, rouge_type=rouge_type)
-
-
-def bleu_1(gold: str, pred: str) -> float:
-    return sentence_bleu([word_tokenize(gold)], word_tokenize(pred), weights=(1, 0, 0, 0))
-
-
-def chinese_bleu_1(gold: str, pred: str) -> float:
-    char_tokenizer = ChineseTokenizer()
-    return sentence_bleu([char_tokenizer.tokenize(gold)], char_tokenizer.tokenize(pred), weights=(1, 0, 0, 0))
-
-
-def get_chinese_rouge_function(rouge_type: str) -> Callable[[str, str], float]:
-    char_tokenizer = ChineseTokenizer()
-    scorer = rouge_scorer.RougeScorer([rouge_type], use_stemmer=True, tokenizer=char_tokenizer)
-    return partial(rouge_score, scorer=scorer, rouge_type=rouge_type)
-
-
-def cleva_math_result_match(gold: str, pred: str) -> float:
-    """
-    Exact match that only cares the last math expression.
-    Common math expressions are numbers and fractions.
-    """
-    pattern = r"[-+*/%\.\(\)\d]+"
-    matches = re.findall(pattern, pred)
-    if matches:
-        pred = matches[-1].lstrip(")")
-    # remove space in front or at the end
-    pred = pred.strip()
-    return exact_match(gold, pred)
-
-
-def bleu_4(gold: str, pred: str) -> float:
-    return sentence_bleu([word_tokenize(gold)], word_tokenize(pred), weights=(0, 0, 0, 1))
-
-
-def extract_set_from_text(
-    set_str: str,
-    set_start_str: str = " is ",
-    set_separator: str = " and ",
-    empty_set_str: str = "Nothing.",
-) -> Set[str]:
-    """
-    Given a string, extract the set of strings implied by that string.
-    set_start_str denotes the start of the set
-    set_separator denotes the string separating set elements
-    empty_set_str is the string which denotes the empty set
-    """
-    if set_str == empty_set_str:
-        return set()
-    set_str = set_str.replace(".", "")
-    extracted_set = set(set_str.split(set_start_str)[-1].split(set_separator))
-    return extracted_set
-
-
-def extract_gold_pred_sets(gold: str, pred: str) -> Tuple[Set[str], Set[str]]:
-    """Extract the set of strings implied by the gold and pred strings"""
-    gold_set = extract_set_from_text(gold)
-    pred_set = extract_set_from_text(pred.split("\n")[0])
-    return gold_set, pred_set
-
-
-def iou_set_match(gold: str, pred: str) -> float:
-    """Compute the intersection over union of the gold and pred sets"""
-    gold_set, pred_set = extract_gold_pred_sets(gold, pred)
-    if len(gold_set) == 0: # If gold is empty, just check if the pred set is also empty
-        return float(gold_set == pred_set)
-    return len(gold_set.intersection(pred_set)) / len(gold_set.union(pred_set))
-
-
-def f1_set_match(gold: str, pred: str) -> float:
-    """Compute the F1 score of the gold and pred sets"""
-    gold_set, pred_set = extract_gold_pred_sets(gold, pred)
-    if len(gold_set) == 0: # If gold is empty, just check if the pred set is also empty
-        return float(gold_set == pred_set)
-    true_positives = gold_set.intersection(pred_set)
-    return 2 * len(true_positives) / (len(gold_set) + len(pred_set))
-
-
-def exact_set_match(gold: str, pred: str) -> float:
-    """Compute whether the sets generated exactly match"""
-    gold_set, pred_set = extract_gold_pred_sets(gold, pred)
-    return float(gold_set == pred_set)
-
-
-def absolute_value_difference(gold: str, pred: str) -> float:
-    """Compute the absolute value of the difference between two numbers (provided as strings),
-    or 0.0 if invalid input.
-    """
-
-    def maybe_int(text: str):
-        """Parse int, ignoring commas in numbers."""
-        try:
-            val = int(text.replace(",", ""))
-        except ValueError:
-            return 0.0
-        return val
-
-    gold_val = maybe_int(gold)
-    pred_val = maybe_int(pred)
-    return abs(gold_val - pred_val)
-
-
-def code_eval(gold: Tuple[str, Optional[Dict]], pred: str) -> float:
-    """Evaluate Code Correctness on test examples."""
-    assert gold[1] is not None # gold[1]["canonical_solution"]
-    # Warning: will execute machine generated code; need to sandbox before executing
-    return float(code_metrics_helper.check_correctness(gold[1], pred, 3.0)["passed"]) # type: ignore
-
-
 def compute_perplexity_metrics(stats: Dict[MetricName, Stat]) -> List[Stat]:
     # TODO: find out the root cause and undo num_X > 0 check
     # https://github.com/stanford-crfm/benchmarking/issues/350
@@ -411,7 +104,37 @@ def compute_perplexity_metrics(stats: Dict[MetricName, Stat]) -> List[Stat]:
     return derived_stats
 
 
-class BasicMetric(Metric):
+class InstancesPerSplitMetric(MetricInterface):
+    """Report the average num_instances in each MetricContext across train_trials."""
+
+    def evaluate(
+        self, scenario_state: ScenarioState, metric_service: MetricService, eval_cache_path: str, parallelism: int
+    ) -> MetricResult:
+        adapter_spec = scenario_state.adapter_spec
+        global_stats: Dict[MetricName, Stat] = {}
+
+        for train_trial_index in range(adapter_spec.num_train_trials):
+            trial_stats: Dict[MetricName, Stat] = {} # Statistics just for this trial
+            # Group instances in this train_trial by context.
+            instances_per_metric_context: Dict[MetricContext, Set[Instance]] = defaultdict(set)
+            for request_state in scenario_state.request_states:
+                if request_state.train_trial_index == train_trial_index:
+                    instances_per_metric_context[MetricContext.from_instance(request_state.instance)].add(
+                        request_state.instance
+                    )
+            for context, instance_set in instances_per_metric_context.items():
+                stat = Stat(MetricName("num_instances")).add(len(instance_set))
+                merge_stat(trial_stats, add_context(stat, context))
+
+            # We take the mean value for each trial.
+            for stat in trial_stats.values():
+                merge_stat(global_stats, stat.take_mean())
+
+        # There are no per-instance Stats.
+        return MetricResult(list(global_stats.values()), [])
+
+
+class BasicGenerationMetric(Metric):
     """
     Defines basic metrics which don't require domain knowledge. This should be
     fairly comprehensive already, and we should try to use this as much as possible.
@@ -422,338 +145,11 @@ class BasicMetric(Metric):
|
|
|
422
145
|
|
|
423
146
|
def __init__(self, names: List[str]):
|
|
424
147
|
self.names: List[str] = names
|
|
425
|
-
|
|
426
|
-
# For Efficiency metrics:
|
|
427
|
-
# The `inference_efficiency.json` file contains a `runtime_per_output_token` value
|
|
428
|
-
# (the estimated runtime of generating one output token) and a
|
|
429
|
-
# `runtime_for_prompt_tokens` dict (a mapping from various num_prompt_tokens values to
|
|
430
|
-
# the estimated runtime of encoding a prompt with that many tokens).
|
|
431
|
-
# For example:
|
|
432
|
-
# "openai/davinci": {
|
|
433
|
-
# "runtime_per_output_token": 0.080,
|
|
434
|
-
# "runtime_for_prompt_tokens": {
|
|
435
|
-
# "1": 0.016,
|
|
436
|
-
# "16": 0.018,
|
|
437
|
-
# "32": 0.020,
|
|
438
|
-
# ...
|
|
439
|
-
#
|
|
440
|
-
# These runtimes are generated by initializing Megatron with a model of the right size,
|
|
441
|
-
# obtaining end-to-end generation times for different numbers of prompt and output tokens,
|
|
442
|
-
# and then fitting a linear regression model to the runtimes: the resulting slope is the
|
|
443
|
-
# runtime_per_output_token, which is the processing time for generating each output token,
|
|
444
|
-
# and the y-intercept is the runtime_for_prompt_tokens, with different values for different
|
|
445
|
-
# num_prompt_tokens values.
|
|
446
|
-
# Profiling code and logs, and code to fit the regression model is available at
|
|
447
|
-
# https://github.com/stanford-crfm/benchmarking_efficiency.
|
|
448
|
-
data_package = resources.files(EFFICIENCY_DATA_PACKAGE)
|
|
449
|
-
with data_package.joinpath(INFERENCE_IDEALIZED_RUNTIMES_JSON_FILENAME).open("r") as f:
|
|
450
|
-
self.inference_idealized_runtimes_dict = json.load(f)
|
|
451
|
-
with data_package.joinpath(INFERENCE_DENOISED_RUNTIMES_JSON_FILENAME).open("r") as f:
|
|
452
|
-
self.inference_denoised_runtimes_dict = json.load(f)
|
|
453
|
-
|
|
454
|
-
# We use estimated emitted CO2 during training (in tons of CO2) as a proxy metric
|
|
455
|
-
# for training efficiency. We use reported metrics where applicable, otherwise
|
|
456
|
-
# we estimate them from runtime information, type and number of hardware accelerators
|
|
457
|
-
# used, region, etc.
|
|
458
|
-
with data_package.joinpath(TRAINING_EFFICIENCY_JSON_FILENAME).open("r") as f:
|
|
459
|
-
self.training_efficiency_dict = json.load(f)
|
|
148
|
+
self.efficiency_metric = EfficiencyMetric()
|
|
460
149
|
|
|
461
150
|
def __repr__(self):
|
|
462
151
|
return f"BasicMetric({','.join(self.names)})"
|
|
463
152
|
|
|
464
|
-
def compute_reference_metrics(
|
|
465
|
-
self, adapter_spec: AdapterSpec, request_state: RequestState, metric_service: MetricService
|
|
466
|
-
) -> List[Stat]:
|
|
467
|
-
"""
|
|
468
|
-
Setup:
|
|
469
|
-
|
|
470
|
-
- Gold (correct references): G1 ... Gm
|
|
471
|
-
- Predictions (completions): P1 ... Pk
|
|
472
|
-
|
|
473
|
-
For each pair (G, P), we can define a ${score} (e.g., exact match, F1, BLEU).
|
|
474
|
-
|
|
475
|
-
We define the following stats:
|
|
476
|
-
|
|
477
|
-
- ${score}: max_i score(Gi, P1)
|
|
478
|
-
- ${score}@k: max_{i,j} score(Gi, Pj)
|
|
479
|
-
"""
|
|
480
|
-
|
|
481
|
-
def compute_metrics_helper(
|
|
482
|
-
name: MetricName,
|
|
483
|
-
score_func: Callable,
|
|
484
|
-
group: Optional[str] = None,
|
|
485
|
-
) -> List[Stat]:
|
|
486
|
-
if name.name == "pass": # Calculate pass@k for HumanEval from CodeScenario.
|
|
487
|
-
score_func = cast(Callable[[Tuple[str, Optional[Dict]], str], float], score_func) # Make mypy happy.
|
|
488
|
-
code_golds = cast(List[CodeReference], golds)
|
|
489
|
-
results = [
|
|
490
|
-
score_func((gold.output.text, gold.test_cases), pred) for gold in code_golds for pred in preds
|
|
491
|
-
]
|
|
492
|
-
_len, _sum = len(results), int(sum(results)) # Cast to int to make type match.
|
|
493
|
-
score_1 = pass_at_k_estimator(_len, _sum, 1)
|
|
494
|
-
score_k = pass_at_k_estimator(_len, _sum, adapter_spec.num_outputs)
|
|
495
|
-
elif name.name == "code_eval_acc":
|
|
496
|
-
-                score_func = cast(Callable[[Tuple[str, Optional[Dict]], str], float], score_func)  # Make mypy happy.
-                code_golds = cast(List[CodeReference], golds)
-                score_1 = max(score_func((gold.output.text, gold.test_cases), preds[0]) for gold in code_golds)
-                score_k = max(
-                    score_func((gold.output.text, gold.test_cases), pred) for gold in code_golds for pred in preds
-                )
-            else:
-                score_func = cast(Callable[[str, str], float], score_func)  # Make mypy happy.
-                score_1 = max(score_func(gold.output.text, preds[0]) for gold in golds)
-                score_k = max(score_func(gold.output.text, pred) for gold in golds for pred in preds)
-
-            metrics = [Stat(name).add(score_1)]  # score_1 corresponds using one prediction
-            if adapter_spec.num_outputs != 1:
-                metrics.append(Stat(replace(name, name=f"{name.name}@{adapter_spec.num_outputs}")).add(score_k))
-            return metrics
-
-        # maps each string metric name to its associated function
-        metric_fn_mapping: Dict[str, Callable] = {
-            "exact_match": exact_match,
-            "quasi_exact_match": quasi_exact_match,
-            "prefix_exact_match": prefix_exact_match,
-            "quasi_prefix_exact_match": quasi_prefix_exact_match,
-            "exact_match_indicator": exact_match_indicator,
-            "final_number_exact_match": final_number_exact_match,
-            "exact_set_match": exact_set_match,
-            "iou_set_match": iou_set_match,
-            "f1_set_match": f1_set_match,
-            "math_equiv": is_equiv,
-            "math_equiv_chain_of_thought": is_equiv_chain_of_thought,
-            "code_eval_acc": code_eval,
-            "pass": code_eval,
-            "f1_score": f1_score,
-            "rouge_1": get_rouge_function("rouge1"),
-            "rouge_2": get_rouge_function("rouge2"),
-            "rouge_l": get_rouge_function("rougeL"),
-            "bleu_1": bleu_1,
-            "bleu_4": bleu_4,
-            "chinese_bleu_1": chinese_bleu_1,
-            "chinese_rouge_1": get_chinese_rouge_function("rouge1"),
-            "chinese_rouge_2": get_chinese_rouge_function("rouge2"),
-            "cleva_math_result_match": cleva_math_result_match,
-            "absolute_value_difference": absolute_value_difference,
-        }
-
-        stats: List[Stat] = []
-
-        # Gold outputs
-        golds: List[Reference] = [reference for reference in request_state.instance.references if reference.is_correct]
-        assert len(golds) > 0
-
-        # Predicted outputs
-        assert request_state.result is not None
-        sorted_completions: List[Sequence] = sorted(request_state.result.completions, key=lambda x: -x.logprob)
-        preds: List[str] = [completion.text.strip() for completion in sorted_completions]
-
-        # Apply mapping if exists (e.g., for multiple-choice questions A -> Boston, B -> New York)
-        # Note: If 'A' and 'B' were the only possible choices, smaller language models like GPT-2 would
-        # sometimes predict a random letter like 'M'.
-        if request_state.output_mapping is not None:
-            preds = [request_state.output_mapping.get(pred) for pred in preds]  # type: ignore
-
-        # Compute max_prob, the probability that the model assigns to its generated text.
-        # Use the log prob of sorted_completions[0], which is the completion with the highest
-        # log_prob. We use this since that's what's used for computing metrics like exact_match.
-        # One subtlety is that when computing exact_match, we strip whitespace, so the actual
-        # max_prob is the sum of all the probabilities in the set {x : strip(x) = prediction}.
-        # In practice, we think this may not make much of a difference because models may not place
-        # high probabilities on having additional spaces (should check this). Also, the sum
-        # involves computing the log_prob for many completions which could be intractable.
-        max_prob = np.exp(sorted_completions[0].logprob)
-        stats.append(Stat(MetricName("max_prob")).add(max_prob))
-
-        # Add other metrics
-        for metric_name in self.names:
-            if metric_name in metric_fn_mapping:
-                stats.extend(compute_metrics_helper(MetricName(metric_name), metric_fn_mapping[metric_name]))
-            else:
-                raise NameError(f"{metric_name} is not in the list of metric functions.")
-
-        return stats
-
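For readers skimming the diff: the block removed above computed, for each requested metric, a top-1 score from the highest-logprob completion and a best-of-k score over all sampled completions. A minimal, self-contained sketch of that aggregation (illustrative names only, not the package API):

```python
from typing import Callable, Dict, List


def exact_match(gold: str, pred: str) -> float:
    return 1.0 if gold == pred else 0.0


def score_with_at_k(score_func: Callable[[str, str], float], golds: List[str], preds: List[str]) -> Dict[str, float]:
    # preds are assumed sorted by descending log-probability, so preds[0]
    # is the completion used for the headline metric.
    score_1 = max(score_func(gold, preds[0]) for gold in golds)
    # score@k: best score achievable by any of the k sampled completions.
    score_k = max(score_func(gold, pred) for gold in golds for pred in preds)
    return {"exact_match": score_1, f"exact_match@{len(preds)}": score_k}


# Example: three sampled completions, one gold answer.
print(score_with_at_k(exact_match, ["Paris"], ["paris", "Paris", "Lyon"]))
# {'exact_match': 0.0, 'exact_match@3': 1.0}
```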
-    def compute_efficiency_metrics(
-        self, adapter_spec: AdapterSpec, request_state: RequestState, metric_service: MetricService
-    ) -> List[Stat]:
-        """Compute efficiency metrics for both inference and training.
-        For inference, we record both the actual runtime and an estimated idealized runtime
-        for the given request with an optimized software implementation run on A100 GPU(s),
-        taking into account both the number of tokens in the prompt of the request, and the
-        number of generated output tokens.
-        For training, we report the estimated total metric tons of CO2 emitted to train the
-        model. This is the same for each request."""
-        # Compute efficiency metrics for inference.
-        assert request_state.result is not None
-
-        runtime: Optional[float] = None
-        batch_size: Optional[int] = None
-        # Compute efficiency metrics for inference.
-        if request_state.result.request_time is not None:
-            runtime = request_state.result.request_time
-            batch_size = 1
-        # For models that perform offline batch inference, effective runtime is batch_request_time, but also
-        # record batch_size to provide nuance.
-        if request_state.result.batch_request_time is not None and request_state.result.batch_size is not None:
-            runtime = request_state.result.batch_request_time
-            batch_size = request_state.result.batch_size
-
-        # Compute total number of prompt and output tokens.
-        # Fetch the right `Tokenizer` depending on the model defined in `AdapterSpec`
-        # and calculate the number of tokens in the prompt.
-        tokenizer_service: TokenizerService = metric_service
-        window_service: WindowService = WindowServiceFactory.get_window_service(
-            adapter_spec.model_deployment, tokenizer_service
-        )
-        prompt: str = request_state.request.prompt
-        num_prompt_tokens: int = window_service.get_num_tokens(prompt)
-
-        # Total number of tokens in the completion.
-        num_completion_tokens: int = sum([len(completion.tokens) for completion in request_state.result.completions])
-        # Don't include prompt in number of generated tokens (e.g., for language modeling).
-        # Assume that tokens for different completions are generated sequentially (instead of batched) when
-        # computing num_output_tokens (for the purpose of runtime estimation).
-        num_output_tokens: int = num_completion_tokens
-        if request_state.request.echo_prompt:
-            # num_prompt_tokens > num_output_tokens can happen if tokenizer doesn't round trip.
-            if num_prompt_tokens <= num_output_tokens:
-                num_output_tokens -= num_prompt_tokens
-            else:
-                hlog(
-                    f"WARNING: num_prompt_tokens ({num_prompt_tokens}) > num_output_tokens ({num_output_tokens}) "
-                    f"for prompt: {prompt}"
-                )
-                num_output_tokens = 0
-
-        idealized_runtime: Optional[float] = compute_estimated_time_from_prompt_size_and_num_output_tokens(
-            request_state, self.inference_idealized_runtimes_dict, num_prompt_tokens, num_output_tokens
-        )
-
-        denoised_runtime: Optional[float] = compute_estimated_time_from_prompt_size_and_num_output_tokens(
-            request_state, self.inference_denoised_runtimes_dict, num_prompt_tokens, num_output_tokens
-        )
-        # Denoised runtime for offline models is just runtime.
-        # We divide by batch_size to get approximate per-input runtime.
-        if runtime is not None and request_state.result.batch_size is not None:
-            denoised_runtime = runtime / request_state.result.batch_size
-
-        # Compute efficiency metrics for training.
-        training_co2_cost: Optional[float]
-        if request_state.request.model_deployment in self.training_efficiency_dict["carbon"]:
-            training_co2_cost = self.training_efficiency_dict["carbon"][request_state.request.model_deployment]["value"]
-        else:
-            training_co2_cost = None
-
-        training_energy_cost: Optional[float]
-        if request_state.request.model_deployment in self.training_efficiency_dict["energy"]:
-            training_energy_cost = self.training_efficiency_dict["energy"][request_state.request.model_deployment][
-                "value"
-            ]
-        else:
-            training_energy_cost = None
-
-        stats = [
-            Stat(MetricName("num_prompt_tokens")).add(num_prompt_tokens),
-            Stat(MetricName("num_completion_tokens")).add(num_completion_tokens),
-            Stat(MetricName("num_output_tokens")).add(num_output_tokens),
-            Stat(MetricName("training_co2_cost")).add(training_co2_cost),
-            Stat(MetricName("training_energy_cost")).add(training_energy_cost),
-        ]
-        if runtime is not None:
-            stats.append(Stat(MetricName("inference_runtime")).add(runtime))
-        if batch_size is not None:
-            stats.append(Stat(MetricName("batch_size")).add(batch_size))
-        if denoised_runtime is not None:
-            stats.append(Stat(MetricName("inference_denoised_runtime")).add(denoised_runtime))
-        if idealized_runtime is not None:
-            stats.append(Stat(MetricName("inference_idealized_runtime")).add(idealized_runtime))
-        return stats
-
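The subtle part of the removed efficiency accounting is the `echo_prompt` case, where the completion repeats the prompt and the prompt tokens must not be counted as generated output. A small stand-alone sketch of that bookkeeping (the helper name is hypothetical):

```python
from typing import List


def count_output_tokens(completion_token_counts: List[int], num_prompt_tokens: int, echo_prompt: bool) -> int:
    """Number of tokens the model actually generated, for runtime estimation."""
    num_completion_tokens = sum(completion_token_counts)
    if not echo_prompt:
        return num_completion_tokens
    # With echo_prompt, the completion starts with a copy of the prompt, so the
    # prompt tokens should not count as generated output. If the tokenizer does
    # not round-trip exactly, the subtraction could go negative; clamp to zero.
    return max(num_completion_tokens - num_prompt_tokens, 0)


print(count_output_tokens([120], num_prompt_tokens=100, echo_prompt=True))   # 20
print(count_output_tokens([120], num_prompt_tokens=100, echo_prompt=False))  # 120
```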
-    def compute_finish_reason_metrics(
-        self, adapter_spec: AdapterSpec, request_state: RequestState, metric_service: MetricService
-    ) -> List[Stat]:
-        """Record how often generation finished due to reaching token limit, stop token(s), or end of text"""
-        assert request_state.result is not None
-        sequence = request_state.result.completions[0]
-        valid_reasons = [
-            "length",
-            "stop",
-            "endoftext",
-            "unknown",
-        ]
-        if sequence.finish_reason is None or sequence.finish_reason["reason"] not in valid_reasons:
-            reason = "unknown"
-        else:
-            reason = sequence.finish_reason["reason"]
-        return [
-            Stat(MetricName(f"finish_reason_{valid_reason}")).add(int(reason == valid_reason))
-            for valid_reason in valid_reasons
-        ]
-
-    def compute_truncation_metrics(
-        self, adapter_spec: AdapterSpec, request_state: RequestState, metric_service: MetricService
-    ) -> List[Stat]:
-        """
-        Record the number of training instances used in the prompt and whether
-        even the prompt needed to be truncated (once we hit zero training instances).
-        """
-        return [
-            Stat(MetricName("num_train_instances")).add(request_state.num_train_instances),
-            Stat(MetricName("prompt_truncated")).add(request_state.prompt_truncated),
-        ]
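The finish-reason stats above are a one-hot encoding over a fixed list of reasons, so averaging them across many requests gives the fraction of generations that ended for each reason. A toy illustration (plain dict instead of the package's `Stat`):

```python
from typing import Dict

VALID_REASONS = ["length", "stop", "endoftext", "unknown"]


def one_hot_finish_reason(reason: str) -> Dict[str, int]:
    # Unrecognized reasons are bucketed as "unknown".
    if reason not in VALID_REASONS:
        reason = "unknown"
    return {f"finish_reason_{r}": int(r == reason) for r in VALID_REASONS}


print(one_hot_finish_reason("stop"))
# {'finish_reason_length': 0, 'finish_reason_stop': 1, 'finish_reason_endoftext': 0, 'finish_reason_unknown': 0}
```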
-
-    def compute_all_general_metrics(
-        self, adapter_spec: AdapterSpec, request_state: RequestState, metric_service: MetricService
-    ) -> List[Stat]:
-        """
-        Compute metrics that are common to both `evaluate_generation` and `evaluate_references`.
-        """
-        stats: List[Stat] = []
-
-        stats.append(Stat(MetricName("num_references")).add(len(request_state.instance.references)))
-
-        # Copy from adapter spec
-        stats.append(Stat(MetricName("num_train_trials")).add(adapter_spec.num_train_trials))
-
-        stats.extend(self.compute_efficiency_metrics(adapter_spec, request_state, metric_service))
-        stats.extend(self.compute_finish_reason_metrics(adapter_spec, request_state, metric_service))
-        stats.extend(self.compute_truncation_metrics(adapter_spec, request_state, metric_service))
-
-        return stats
-
-    def compute_language_modeling_metrics(
-        self, adapter_spec: AdapterSpec, request_state: RequestState, metric_service: MetricService
-    ) -> List[Stat]:
-        """Compute the logprob and normalization factors for the first completion"""
-        assert request_state.result is not None
-        sequence = request_state.result.completions[0]
-
-        # Remove the empty tokens (typically generated by the AI21 tokenizer in the beginning of the text)
-        #
-        # Some more details about AI21 tokenizer: If the input prompt begins with a space, then
-        # the tokenizer inserts an empty token to the beginning.
-        # e.g. " burying him" -> ["▁"(0,0), "▁burying"(0,8), "▁him"(8,12)].
-        # TODO(#1522): Update this comment once solved.
-        # Since this empty token is introduced by our chunking approach, we need to remove it.
-        tokens: List[Token]
-        if request_state.num_conditioning_tokens > 0 and sequence.tokens[0].text == "":
-            tokens = sequence.tokens[1:]
-        else:
-            tokens = sequence.tokens
-        pred_tokens = tokens[request_state.num_conditioning_tokens :]
-        logprob, num_perplexity_tokens, num_bytes = (
-            sum(token.logprob for token in pred_tokens),
-            len(pred_tokens),
-            get_num_bytes(pred_tokens),
-        )
-
-        return [
-            Stat(MetricName("logprob")).add(logprob),
-            Stat(MetricName("num_perplexity_tokens")).add(num_perplexity_tokens),
-            Stat(MetricName("num_bytes")).add(num_bytes),
-        ]
-
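The three raw statistics recorded here (`logprob`, `num_perplexity_tokens`, `num_bytes`) are the inputs from which perplexity-style metrics are later derived. A rough sketch of the standard derivations, assuming natural-log token log-probabilities (illustrative only, not the package's own derivation code):

```python
import math
from typing import Dict


def derive_perplexity_metrics(logprob: float, num_perplexity_tokens: int, num_bytes: int) -> Dict[str, float]:
    # logprob is the total log-probability (natural log) of the predicted tokens.
    return {
        "perplexity": math.exp(-logprob / num_perplexity_tokens),
        "bits_per_byte": -logprob / num_bytes / math.log(2),
        "logprob_per_byte": logprob / num_bytes,
    }


# Example: 100 tokens spanning 400 bytes with a total logprob of -250 nats.
print(derive_perplexity_metrics(-250.0, 100, 400))
```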
     def evaluate_generation(
         self,
         adapter_spec: AdapterSpec,
@@ -763,15 +159,40 @@ class BasicMetric(Metric):
     ) -> List[Stat]:
         """Compute all metrics."""
         stats: List[Stat] = []
-        stats.extend(self.
+        stats.extend(compute_request_state_metrics(self.efficiency_metric, adapter_spec, request_state, metric_service))
 
         if len(request_state.instance.references) > 0:
-            stats.extend(self.
+            stats.extend(compute_reference_metrics(self.names, adapter_spec, request_state, metric_service))
 
-        stats.extend(
+        stats.extend(compute_language_modeling_metrics(adapter_spec, request_state, metric_service))
 
         return stats
 
+    def derive_stats(self, stats_dict: Dict[MetricName, Stat]) -> List[Stat]:
+        """Derive perplexity metrics if applicable. We don't worry about splits and perturbations here."""
+        derived_stats: List[Stat] = []
+        derived_stats.extend(compute_perplexity_metrics(stats_dict))
+        return derived_stats
+
+    def derive_per_instance_stats(self, per_instance_stats: Dict[Instance, List[Stat]]) -> List[Stat]:
+        """Derive calibration metrics if applicable. We don't worry about splits and perturbations here."""
+        derived_stats: List[Stat] = []
+        derived_stats.extend(compute_calibration_metrics(per_instance_stats))
+        return derived_stats
+
+
+class BasicReferenceMetric(ReferenceMetric):
+    """
+    Defines basic metrics for Scenarios that use one Request per Reference instead of
+    one per Instance.
+    """
+
+    def __init__(self):
+        self.efficiency_metric = EfficiencyMetric()
+
+    def __repr__(self):
+        return "BasicReferenceMetric"
+
     def evaluate_references(
         self,
         adapter_spec: AdapterSpec,
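The hunk above turns the shared per-request metrics into module-level helpers so they can be reused outside `BasicMetric`. A hedged sketch of such reuse; the import paths follow the layout suggested by this diff (e.g. `EfficiencyMetric` in `efficiency_metrics`, the new helper in `basic_metrics`) and should be treated as assumptions to verify against the installed package:

```python
from typing import List

from helm.benchmark.adaptation.adapter_spec import AdapterSpec
from helm.benchmark.adaptation.request_state import RequestState
from helm.benchmark.metrics.basic_metrics import compute_request_state_metrics  # assumed location
from helm.benchmark.metrics.efficiency_metrics import EfficiencyMetric  # assumed location
from helm.benchmark.metrics.metric_service import MetricService  # assumed location
from helm.benchmark.metrics.statistic import Stat  # assumed location

# One shared instance, mirroring how BasicMetric/BasicReferenceMetric hold an EfficiencyMetric.
_efficiency_metric = EfficiencyMetric()


def general_request_stats(
    adapter_spec: AdapterSpec, request_state: RequestState, metric_service: MetricService
) -> List[Stat]:
    """Collect the shared per-request stats (reference counts, efficiency, finish reason, truncation)."""
    return compute_request_state_metrics(_efficiency_metric, adapter_spec, request_state, metric_service)
```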
@@ -801,7 +222,7 @@ class BasicMetric(Metric):
         assert len(request_state.result.completions) == 1
 
         reference_index = request_state.reference_index
-        sequence:
+        sequence: GeneratedOutput = request_state.result.completions[0]
         reference: str = request_state.instance.references[reference_index].output.text
 
         # Find the span of the completion that matches the reference.
@@ -848,8 +269,14 @@ class BasicMetric(Metric):
             raise ValueError(f"Unknown adapter method: {adapter_spec.method}")
 
         stats: List[Stat] = []
-        stats.extend(self.compute_all_general_metrics(adapter_spec, request_state, metric_service))
 
+        general_metrics: Dict[MetricName, Stat] = {}
+        for request_state in reference_request_states:
+            for stat in compute_request_state_metrics(
+                self.efficiency_metric, adapter_spec, request_state, metric_service
+            ):
+                merge_stat(general_metrics, stat)
+        stats.extend(general_metrics.values())
         max_prob = np.max(scipy.special.softmax(reference_scores))
 
         # Multiple references may attain the same maximal score; in such cases,
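In the reference-based path, the shared per-request stats are now computed once per reference request and merged by metric name before being added to `stats`. A toy model of that merge semantics, using a stand-in for the package's `Stat`/`merge_stat`:

```python
from dataclasses import dataclass
from typing import Dict


@dataclass
class ToyStat:
    """Stand-in for a running aggregate keyed by metric name."""
    name: str
    count: int = 0
    total: float = 0.0

    def add(self, value: float) -> "ToyStat":
        self.count += 1
        self.total += value
        return self

    def merge(self, other: "ToyStat") -> None:
        self.count += other.count
        self.total += other.total


def merge_stat(stats: Dict[str, ToyStat], stat: ToyStat) -> None:
    # Merge into the existing aggregate if present, otherwise insert it.
    if stat.name in stats:
        stats[stat.name].merge(stat)
    else:
        stats[stat.name] = stat


# One "num_prompt_tokens" stat per reference request, merged into a single aggregate.
general: Dict[str, ToyStat] = {}
for tokens in [12, 15, 9]:
    merge_stat(general, ToyStat("num_prompt_tokens").add(tokens))
print(general["num_prompt_tokens"].count, general["num_prompt_tokens"].total)  # 3 36.0
```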
@@ -868,18 +295,96 @@ class BasicMetric(Metric):
         )
         return stats
 
-    def derive_stats(self, stats_dict: Dict[MetricName, Stat]) -> List[Stat]:
-        """Derive perplexity metrics if applicable. We don't worry about splits and perturbations here."""
-        derived_stats: List[Stat] = []
-        derived_stats.extend(compute_perplexity_metrics(stats_dict))
-        return derived_stats
 
-
-
-
-
-
-
+def compute_request_state_metrics(
+    efficiency_metric: EfficiencyMetric,
+    adapter_spec: AdapterSpec,
+    request_state: RequestState,
+    metric_service: MetricService,
+) -> List[Stat]:
+    """
+    Compute metrics that are common to both `evaluate_generation` and `evaluate_references`.
+    """
+    stats: List[Stat] = []
+
+    stats.append(Stat(MetricName("num_references")).add(len(request_state.instance.references)))
+
+    # Copy from adapter spec
+    stats.append(Stat(MetricName("num_train_trials")).add(adapter_spec.num_train_trials))
+
+    stats.extend(efficiency_metric.compute_efficiency_metrics(adapter_spec, request_state, metric_service))
+    stats.extend(_compute_finish_reason_metrics(adapter_spec, request_state, metric_service))
+    stats.extend(_compute_truncation_metrics(adapter_spec, request_state, metric_service))
+
+    return stats
+
+
+def _compute_finish_reason_metrics(
+    adapter_spec: AdapterSpec, request_state: RequestState, metric_service: MetricService
+) -> List[Stat]:
+    """Record how often generation finished due to reaching token limit, stop token(s), or end of text"""
+    assert request_state.result is not None
+    sequence = request_state.result.completions[0]
+    valid_reasons = [
+        "length",
+        "stop",
+        "endoftext",
+        "unknown",
+    ]
+    if sequence.finish_reason is None or sequence.finish_reason["reason"] not in valid_reasons:
+        reason = "unknown"
+    else:
+        reason = sequence.finish_reason["reason"]
+    return [
+        Stat(MetricName(f"finish_reason_{valid_reason}")).add(int(reason == valid_reason))
+        for valid_reason in valid_reasons
+    ]
+
+
+def _compute_truncation_metrics(
+    adapter_spec: AdapterSpec, request_state: RequestState, metric_service: MetricService
+) -> List[Stat]:
+    """
+    Record the number of training instances used in the prompt and whether
+    even the prompt needed to be truncated (once we hit zero training instances).
+    """
+    return [
+        Stat(MetricName("num_train_instances")).add(request_state.num_train_instances),
+        Stat(MetricName("prompt_truncated")).add(request_state.prompt_truncated),
+    ]
+
+
+def compute_language_modeling_metrics(
+    adapter_spec: AdapterSpec, request_state: RequestState, metric_service: MetricService
+) -> List[Stat]:
+    """Compute the logprob and normalization factors for the first completion"""
+    assert request_state.result is not None
+    sequence = request_state.result.completions[0]
+
+    # Remove the empty tokens (typically generated by the AI21 tokenizer in the beginning of the text)
+    #
+    # Some more details about AI21 tokenizer: If the input prompt begins with a space, then
+    # the tokenizer inserts an empty token to the beginning.
+    # e.g. " burying him" -> ["▁"(0,0), "▁burying"(0,8), "▁him"(8,12)].
+    # TODO(#1522): Update this comment once solved.
+    # Since this empty token is introduced by our chunking approach, we need to remove it.
+    tokens: List[Token]
+    if request_state.num_conditioning_tokens > 0 and sequence.tokens[0].text == "":
+        tokens = sequence.tokens[1:]
+    else:
+        tokens = sequence.tokens
+    pred_tokens = tokens[request_state.num_conditioning_tokens :]
+    logprob, num_perplexity_tokens, num_bytes = (
+        sum(token.logprob for token in pred_tokens),
+        len(pred_tokens),
+        get_num_bytes(pred_tokens),
+    )
+
+    return [
+        Stat(MetricName("logprob")).add(logprob),
+        Stat(MetricName("num_perplexity_tokens")).add(num_perplexity_tokens),
+        Stat(MetricName("num_bytes")).add(num_bytes),
+    ]
 
 
 def _has_non_zero_valued_logprobs(per_instance_stats: Dict[Instance, List[Stat]]) -> bool: