crfm-helm 0.4.0__py3-none-any.whl → 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {crfm_helm-0.4.0.dist-info → crfm_helm-0.5.0.dist-info}/METADATA +134 -31
- crfm_helm-0.5.0.dist-info/RECORD +642 -0
- {crfm_helm-0.4.0.dist-info → crfm_helm-0.5.0.dist-info}/WHEEL +1 -1
- helm/benchmark/adaptation/adapter_spec.py +31 -3
- helm/benchmark/adaptation/adapters/adapter.py +2 -2
- helm/benchmark/adaptation/adapters/adapter_factory.py +24 -27
- helm/benchmark/adaptation/adapters/generation_adapter.py +1 -0
- helm/benchmark/adaptation/adapters/in_context_learning_adapter.py +20 -4
- helm/benchmark/adaptation/adapters/language_modeling_adapter.py +2 -3
- helm/benchmark/adaptation/adapters/multimodal/multiple_choice_joint_multimodal_adapter.py +104 -0
- helm/benchmark/adaptation/adapters/multimodal/test_in_context_learning_multimodal_adapter.py +2 -1
- helm/benchmark/adaptation/adapters/test_adapter.py +2 -1
- helm/benchmark/adaptation/adapters/test_generation_adapter.py +32 -8
- helm/benchmark/adaptation/adapters/test_language_modeling_adapter.py +7 -19
- helm/benchmark/adaptation/adapters/test_multiple_choice_joint_adapter.py +60 -6
- helm/benchmark/adaptation/common_adapter_specs.py +376 -0
- helm/benchmark/adaptation/request_state.py +6 -1
- helm/benchmark/adaptation/scenario_state.py +6 -2
- helm/benchmark/annotation/annotator.py +43 -0
- helm/benchmark/annotation/annotator_factory.py +61 -0
- helm/benchmark/annotation/image2structure/image_compiler_annotator.py +88 -0
- helm/benchmark/annotation/image2structure/latex_compiler_annotator.py +59 -0
- helm/benchmark/annotation/image2structure/lilypond_compiler_annotator.py +84 -0
- helm/benchmark/annotation/image2structure/webpage_compiler_annotator.py +132 -0
- helm/benchmark/annotation/test_annotator_factory.py +26 -0
- helm/benchmark/annotation/test_dummy_annotator.py +44 -0
- helm/benchmark/annotation_executor.py +124 -0
- helm/benchmark/augmentations/data_augmenter.py +0 -2
- helm/benchmark/augmentations/gender_perturbation.py +1 -1
- helm/benchmark/augmentations/perturbation.py +8 -2
- helm/benchmark/augmentations/perturbation_description.py +1 -1
- helm/benchmark/augmentations/suffix_perturbation.py +29 -0
- helm/benchmark/augmentations/test_perturbation.py +11 -7
- helm/benchmark/augmentations/translate_perturbation.py +30 -0
- helm/benchmark/config_registry.py +7 -1
- helm/benchmark/executor.py +46 -16
- helm/benchmark/huggingface_registration.py +20 -7
- helm/benchmark/metrics/basic_metrics.py +169 -664
- helm/benchmark/metrics/bbq_metrics.py +3 -4
- helm/benchmark/metrics/bias_metrics.py +6 -6
- helm/benchmark/metrics/classification_metrics.py +11 -8
- helm/benchmark/metrics/cleva_accuracy_metrics.py +8 -5
- helm/benchmark/metrics/cleva_harms_metrics.py +2 -2
- helm/benchmark/metrics/code_metrics_helper.py +0 -2
- helm/benchmark/metrics/common_metric_specs.py +167 -0
- helm/benchmark/metrics/decodingtrust_fairness_metrics.py +72 -0
- helm/benchmark/metrics/decodingtrust_ood_knowledge_metrics.py +66 -0
- helm/benchmark/metrics/decodingtrust_privacy_metrics.py +101 -0
- helm/benchmark/metrics/decodingtrust_stereotype_bias_metrics.py +202 -0
- helm/benchmark/metrics/disinformation_metrics.py +4 -110
- helm/benchmark/metrics/dry_run_metrics.py +2 -2
- helm/benchmark/metrics/efficiency_metrics.py +206 -0
- helm/benchmark/metrics/evaluate_instances_metric.py +59 -0
- helm/benchmark/metrics/evaluate_reference_metrics.py +376 -0
- helm/benchmark/metrics/image_generation/aesthetics_metrics.py +54 -0
- helm/benchmark/metrics/image_generation/aesthetics_scorer.py +66 -0
- helm/benchmark/metrics/image_generation/clip_score_metrics.py +73 -0
- helm/benchmark/metrics/image_generation/denoised_runtime_metric.py +42 -0
- helm/benchmark/metrics/image_generation/detection_metrics.py +57 -0
- helm/benchmark/metrics/image_generation/detectors/base_detector.py +8 -0
- helm/benchmark/metrics/image_generation/detectors/vitdet.py +178 -0
- helm/benchmark/metrics/image_generation/efficiency_metrics.py +41 -0
- helm/benchmark/metrics/image_generation/fidelity_metrics.py +168 -0
- helm/benchmark/metrics/image_generation/fractal_dimension/__init__.py +0 -0
- helm/benchmark/metrics/image_generation/fractal_dimension/fractal_dimension_util.py +63 -0
- helm/benchmark/metrics/image_generation/fractal_dimension/test_fractal_dimension_util.py +33 -0
- helm/benchmark/metrics/image_generation/fractal_dimension_metric.py +50 -0
- helm/benchmark/metrics/image_generation/gender_metrics.py +58 -0
- helm/benchmark/metrics/image_generation/image_critique_metrics.py +284 -0
- helm/benchmark/metrics/image_generation/lpips_metrics.py +82 -0
- helm/benchmark/metrics/image_generation/multi_scale_ssim_metrics.py +82 -0
- helm/benchmark/metrics/image_generation/nsfw_detector.py +96 -0
- helm/benchmark/metrics/image_generation/nsfw_metrics.py +103 -0
- helm/benchmark/metrics/image_generation/nudity_metrics.py +38 -0
- helm/benchmark/metrics/image_generation/photorealism_critique_metrics.py +153 -0
- helm/benchmark/metrics/image_generation/psnr_metrics.py +78 -0
- helm/benchmark/metrics/image_generation/q16/__init__.py +0 -0
- helm/benchmark/metrics/image_generation/q16/q16_toxicity_detector.py +90 -0
- helm/benchmark/metrics/image_generation/q16/test_q16.py +18 -0
- helm/benchmark/metrics/image_generation/q16_toxicity_metrics.py +48 -0
- helm/benchmark/metrics/image_generation/skin_tone_metrics.py +164 -0
- helm/benchmark/metrics/image_generation/uiqi_metrics.py +92 -0
- helm/benchmark/metrics/image_generation/watermark/__init__.py +0 -0
- helm/benchmark/metrics/image_generation/watermark/test_watermark_detector.py +16 -0
- helm/benchmark/metrics/image_generation/watermark/watermark_detector.py +87 -0
- helm/benchmark/metrics/image_generation/watermark_metrics.py +48 -0
- helm/benchmark/metrics/instruction_following_critique_metrics.py +3 -1
- helm/benchmark/metrics/language_modeling_metrics.py +99 -0
- helm/benchmark/metrics/machine_translation_metrics.py +89 -0
- helm/benchmark/metrics/metric.py +93 -172
- helm/benchmark/metrics/metric_name.py +0 -1
- helm/benchmark/metrics/metric_service.py +16 -0
- helm/benchmark/metrics/paraphrase_generation_metrics.py +3 -4
- helm/benchmark/metrics/ranking_metrics.py +2 -2
- helm/benchmark/metrics/reference_metric.py +148 -0
- helm/benchmark/metrics/summac/model_summac.py +0 -2
- helm/benchmark/metrics/summarization_metrics.py +2 -2
- helm/benchmark/metrics/test_classification_metrics.py +8 -5
- helm/benchmark/metrics/test_disinformation_metrics.py +78 -0
- helm/benchmark/metrics/{test_basic_metrics.py → test_evaluate_reference_metrics.py} +5 -1
- helm/benchmark/metrics/test_metric.py +2 -2
- helm/benchmark/metrics/tokens/gooseai_token_cost_estimator.py +10 -2
- helm/benchmark/metrics/toxicity_metrics.py +1 -1
- helm/benchmark/metrics/toxicity_utils.py +23 -0
- helm/benchmark/metrics/unitxt_metrics.py +81 -0
- helm/benchmark/metrics/vision_language/__init__.py +0 -0
- helm/benchmark/metrics/vision_language/emd_utils.py +341 -0
- helm/benchmark/metrics/vision_language/image_metrics.py +450 -0
- helm/benchmark/metrics/vision_language/image_utils.py +100 -0
- helm/benchmark/model_deployment_registry.py +74 -0
- helm/benchmark/model_metadata_registry.py +36 -0
- helm/benchmark/multi_gpu_runner.py +133 -0
- helm/benchmark/presentation/create_plots.py +8 -7
- helm/benchmark/presentation/run_display.py +26 -10
- helm/benchmark/presentation/schema.py +15 -40
- helm/benchmark/presentation/summarize.py +119 -79
- helm/benchmark/presentation/table.py +8 -8
- helm/benchmark/presentation/test_contamination.py +2 -2
- helm/benchmark/presentation/test_run_entry.py +1 -2
- helm/benchmark/presentation/test_summarize.py +3 -3
- helm/benchmark/run.py +54 -26
- helm/benchmark/run_expander.py +214 -16
- helm/benchmark/run_spec.py +93 -0
- helm/benchmark/run_spec_factory.py +162 -0
- helm/benchmark/run_specs/__init__.py +0 -0
- helm/benchmark/run_specs/classic_run_specs.py +1510 -0
- helm/benchmark/run_specs/cleva_run_specs.py +277 -0
- helm/benchmark/run_specs/decodingtrust_run_specs.py +314 -0
- helm/benchmark/run_specs/heim_run_specs.py +623 -0
- helm/benchmark/run_specs/instruction_following_run_specs.py +129 -0
- helm/benchmark/run_specs/lite_run_specs.py +307 -0
- helm/benchmark/run_specs/simple_run_specs.py +104 -0
- helm/benchmark/run_specs/unitxt_run_specs.py +42 -0
- helm/benchmark/run_specs/vlm_run_specs.py +501 -0
- helm/benchmark/runner.py +51 -57
- helm/benchmark/runner_config_registry.py +21 -0
- helm/benchmark/scenarios/bbq_scenario.py +1 -1
- helm/benchmark/scenarios/bold_scenario.py +2 -2
- helm/benchmark/scenarios/code_scenario.py +1 -0
- helm/benchmark/scenarios/decodingtrust_adv_demonstration_scenario.py +169 -0
- helm/benchmark/scenarios/decodingtrust_adv_robustness_scenario.py +121 -0
- helm/benchmark/scenarios/decodingtrust_fairness_scenario.py +77 -0
- helm/benchmark/scenarios/decodingtrust_machine_ethics_scenario.py +324 -0
- helm/benchmark/scenarios/decodingtrust_ood_robustness_scenario.py +204 -0
- helm/benchmark/scenarios/decodingtrust_privacy_scenario.py +559 -0
- helm/benchmark/scenarios/decodingtrust_stereotype_bias_scenario.py +67 -0
- helm/benchmark/scenarios/decodingtrust_toxicity_prompts_scenario.py +78 -0
- helm/benchmark/scenarios/dialogue_scenarios.py +0 -1
- helm/benchmark/scenarios/image_generation/__init__.py +0 -0
- helm/benchmark/scenarios/image_generation/common_syntactic_processes_scenario.py +105 -0
- helm/benchmark/scenarios/image_generation/cub200_scenario.py +95 -0
- helm/benchmark/scenarios/image_generation/daily_dalle_scenario.py +124 -0
- helm/benchmark/scenarios/image_generation/demographic_stereotypes_scenario.py +82 -0
- helm/benchmark/scenarios/image_generation/detection_scenario.py +83 -0
- helm/benchmark/scenarios/image_generation/draw_bench_scenario.py +74 -0
- helm/benchmark/scenarios/image_generation/i2p_scenario.py +57 -0
- helm/benchmark/scenarios/image_generation/landing_page_scenario.py +46 -0
- helm/benchmark/scenarios/image_generation/logos_scenario.py +223 -0
- helm/benchmark/scenarios/image_generation/magazine_cover_scenario.py +91 -0
- helm/benchmark/scenarios/image_generation/mental_disorders_scenario.py +46 -0
- helm/benchmark/scenarios/image_generation/mscoco_scenario.py +91 -0
- helm/benchmark/scenarios/image_generation/paint_skills_scenario.py +72 -0
- helm/benchmark/scenarios/image_generation/parti_prompts_scenario.py +94 -0
- helm/benchmark/scenarios/image_generation/radiology_scenario.py +42 -0
- helm/benchmark/scenarios/image_generation/relational_understanding_scenario.py +52 -0
- helm/benchmark/scenarios/image_generation/time_most_significant_historical_figures_scenario.py +124 -0
- helm/benchmark/scenarios/image_generation/winoground_scenario.py +62 -0
- helm/benchmark/scenarios/imdb_scenario.py +0 -1
- helm/benchmark/scenarios/live_qa_scenario.py +94 -0
- helm/benchmark/scenarios/lm_entry_scenario.py +185 -0
- helm/benchmark/scenarios/math_scenario.py +19 -2
- helm/benchmark/scenarios/medication_qa_scenario.py +60 -0
- helm/benchmark/scenarios/numeracy_scenario.py +1 -1
- helm/benchmark/scenarios/opinions_qa_scenario.py +0 -4
- helm/benchmark/scenarios/scenario.py +4 -0
- helm/benchmark/scenarios/simple_scenarios.py +122 -1
- helm/benchmark/scenarios/test_math_scenario.py +6 -0
- helm/benchmark/scenarios/test_scenario.py +6 -3
- helm/benchmark/scenarios/test_simple_scenarios.py +50 -0
- helm/benchmark/scenarios/thai_exam_scenario.py +135 -0
- helm/benchmark/scenarios/unitxt_scenario.py +56 -0
- helm/benchmark/scenarios/verifiability_judgment_scenario.py +3 -1
- helm/benchmark/scenarios/vicuna_scenario.py +1 -1
- helm/benchmark/scenarios/vision_language/bingo_scenario.py +103 -0
- helm/benchmark/scenarios/vision_language/hateful_memes_scenario.py +92 -0
- helm/benchmark/scenarios/vision_language/heim_human_eval_scenario.py +113 -0
- helm/benchmark/scenarios/vision_language/image2structure/__init__.py +0 -0
- helm/benchmark/scenarios/vision_language/image2structure/chart2csv_scenario.py +55 -0
- helm/benchmark/scenarios/vision_language/image2structure/image2structure_scenario.py +214 -0
- helm/benchmark/scenarios/vision_language/image2structure/latex_scenario.py +25 -0
- helm/benchmark/scenarios/vision_language/image2structure/musicsheet_scenario.py +20 -0
- helm/benchmark/scenarios/vision_language/image2structure/utils_latex.py +347 -0
- helm/benchmark/scenarios/vision_language/image2structure/webpage/__init__.py +0 -0
- helm/benchmark/scenarios/vision_language/image2structure/webpage/driver.py +84 -0
- helm/benchmark/scenarios/vision_language/image2structure/webpage/jekyll_server.py +182 -0
- helm/benchmark/scenarios/vision_language/image2structure/webpage/utils.py +31 -0
- helm/benchmark/scenarios/vision_language/image2structure/webpage_scenario.py +225 -0
- helm/benchmark/scenarios/vision_language/mementos_scenario.py +124 -0
- helm/benchmark/scenarios/vision_language/mme_scenario.py +145 -0
- helm/benchmark/scenarios/vision_language/mmmu_scenario.py +187 -0
- helm/benchmark/scenarios/vision_language/multipanelvqa_scenario.py +169 -0
- helm/benchmark/scenarios/vision_language/pope_scenario.py +104 -0
- helm/benchmark/scenarios/vision_language/seed_bench_scenario.py +129 -0
- helm/benchmark/scenarios/vision_language/unicorn_scenario.py +108 -0
- helm/benchmark/scenarios/vision_language/viz_wiz_scenario.py +1 -2
- helm/benchmark/scenarios/vision_language/vqa_scenario.py +1 -1
- helm/benchmark/scenarios/wmt_14_scenario.py +1 -1
- helm/benchmark/server.py +24 -1
- helm/benchmark/slurm_runner.py +70 -49
- helm/benchmark/static/benchmarking.js +1 -1
- helm/benchmark/static/schema_classic.yaml +258 -1066
- helm/benchmark/static/schema_instruction_following.yaml +210 -0
- helm/benchmark/static/schema_lite.yaml +2 -227
- helm/benchmark/static/schema_mmlu.yaml +1507 -0
- helm/benchmark/static/schema_unitxt.yaml +428 -0
- helm/benchmark/static/schema_vlm.yaml +576 -0
- helm/benchmark/static_build/assets/01-694cb9b7.png +0 -0
- helm/benchmark/static_build/assets/ai21-0eb91ec3.png +0 -0
- helm/benchmark/static_build/assets/aleph-alpha-7ce10034.png +0 -0
- helm/benchmark/static_build/assets/anthropic-70d8bc39.png +0 -0
- helm/benchmark/static_build/assets/bigscience-7f0400c0.png +0 -0
- helm/benchmark/static_build/assets/cohere-3550c6cb.png +0 -0
- helm/benchmark/static_build/assets/crfm-logo-74391ab8.png +0 -0
- helm/benchmark/static_build/assets/eleutherai-b9451114.png +0 -0
- helm/benchmark/static_build/assets/google-06d997ad.png +0 -0
- helm/benchmark/static_build/assets/heim-logo-3e5e3aa4.png +0 -0
- helm/benchmark/static_build/assets/helm-logo-simple-2ed5400b.png +0 -0
- helm/benchmark/static_build/assets/helmhero-28e90f4d.png +0 -0
- helm/benchmark/static_build/assets/index-5088afcb.css +1 -0
- helm/benchmark/static_build/assets/index-d839df55.js +9 -0
- helm/benchmark/static_build/assets/meta-5580e9f1.png +0 -0
- helm/benchmark/static_build/assets/microsoft-f5ee5016.png +0 -0
- helm/benchmark/static_build/assets/mistral-18e1be23.png +0 -0
- helm/benchmark/static_build/assets/nvidia-86fa75c1.png +0 -0
- helm/benchmark/static_build/assets/openai-3f8653e4.png +0 -0
- helm/benchmark/static_build/assets/react-d4a0b69b.js +85 -0
- helm/benchmark/static_build/assets/recharts-6d337683.js +97 -0
- helm/benchmark/static_build/assets/tii-24de195c.png +0 -0
- helm/benchmark/static_build/assets/together-a665a35b.png +0 -0
- helm/benchmark/static_build/assets/tremor-54a99cc4.js +10 -0
- helm/benchmark/static_build/assets/tsinghua-keg-97d4b395.png +0 -0
- helm/benchmark/static_build/assets/vhelm-framework-cde7618a.png +0 -0
- helm/benchmark/static_build/assets/vhelm-model-6d812526.png +0 -0
- helm/benchmark/static_build/assets/yandex-38e09d70.png +0 -0
- helm/benchmark/static_build/config.js +4 -0
- helm/benchmark/static_build/index.html +20 -0
- helm/benchmark/test_data_preprocessor.py +3 -3
- helm/benchmark/test_model_deployment_definition.py +14 -16
- helm/benchmark/test_run_expander.py +1 -1
- helm/benchmark/window_services/ai21_window_service.py +22 -33
- helm/benchmark/window_services/cohere_window_service.py +1 -63
- helm/benchmark/window_services/default_window_service.py +2 -44
- helm/benchmark/window_services/encoder_decoder_window_service.py +0 -11
- helm/benchmark/window_services/ice_window_service.py +0 -34
- helm/benchmark/window_services/image_generation/__init__.py +0 -0
- helm/benchmark/window_services/image_generation/clip_window_service.py +15 -0
- helm/benchmark/window_services/image_generation/lexica_search_window_service.py +9 -0
- helm/benchmark/window_services/image_generation/openai_dalle_window_service.py +9 -0
- helm/benchmark/window_services/image_generation/test_clip_window_service.py +29 -0
- helm/benchmark/window_services/image_generation/test_openai_dalle_window_service.py +30 -0
- helm/benchmark/window_services/local_window_service.py +21 -4
- helm/benchmark/window_services/test_anthropic_window_service.py +2 -1
- helm/benchmark/window_services/test_bloom_window_service.py +2 -1
- helm/benchmark/window_services/test_cohere_window_service.py +2 -1
- helm/benchmark/window_services/test_flan_t5_window_service.py +2 -1
- helm/benchmark/window_services/test_gpt2_window_service.py +2 -2
- helm/benchmark/window_services/test_gpt4_window_service.py +2 -1
- helm/benchmark/window_services/test_gptj_window_service.py +3 -2
- helm/benchmark/window_services/test_gptneox_window_service.py +3 -2
- helm/benchmark/window_services/test_ice_window_service.py +2 -1
- helm/benchmark/window_services/test_openai_window_service.py +2 -1
- helm/benchmark/window_services/test_opt_window_service.py +3 -2
- helm/benchmark/window_services/test_palmyra_window_service.py +2 -1
- helm/benchmark/window_services/test_t0pp_window_service.py +2 -1
- helm/benchmark/window_services/test_t511b_window_service.py +2 -1
- helm/benchmark/window_services/test_ul2_window_service.py +2 -1
- helm/benchmark/window_services/test_utils.py +3 -2
- helm/benchmark/window_services/test_yalm_window_service.py +2 -1
- helm/benchmark/window_services/window_service.py +42 -0
- helm/benchmark/window_services/window_service_factory.py +4 -1
- helm/benchmark/window_services/yalm_window_service.py +0 -27
- helm/clients/__init__.py +0 -0
- helm/{proxy/clients → clients}/ai21_client.py +3 -9
- helm/clients/aleph_alpha_client.py +112 -0
- helm/{proxy/clients → clients}/anthropic_client.py +203 -18
- helm/{proxy/clients → clients}/auto_client.py +59 -31
- helm/clients/bedrock_client.py +128 -0
- helm/clients/bedrock_utils.py +72 -0
- helm/{proxy/clients → clients}/client.py +65 -7
- helm/clients/clip_score_client.py +49 -0
- helm/clients/clip_scorers/__init__.py +0 -0
- helm/clients/clip_scorers/base_clip_scorer.py +18 -0
- helm/clients/clip_scorers/clip_scorer.py +50 -0
- helm/clients/clip_scorers/multilingual_clip_scorer.py +50 -0
- helm/{proxy/clients → clients}/cohere_client.py +4 -11
- helm/clients/gcs_client.py +82 -0
- helm/{proxy/clients → clients}/google_client.py +5 -5
- helm/clients/google_translate_client.py +35 -0
- helm/{proxy/clients → clients}/http_model_client.py +5 -7
- helm/{proxy/clients → clients}/huggingface_client.py +43 -64
- helm/clients/image_generation/__init__.py +0 -0
- helm/clients/image_generation/adobe_vision_client.py +78 -0
- helm/clients/image_generation/aleph_alpha_image_generation_client.py +98 -0
- helm/clients/image_generation/cogview2/__init__.py +0 -0
- helm/clients/image_generation/cogview2/coglm_strategy.py +96 -0
- helm/clients/image_generation/cogview2/coglm_utils.py +82 -0
- helm/clients/image_generation/cogview2/sr_pipeline/__init__.py +15 -0
- helm/clients/image_generation/cogview2/sr_pipeline/direct_sr.py +96 -0
- helm/clients/image_generation/cogview2/sr_pipeline/dsr_model.py +254 -0
- helm/clients/image_generation/cogview2/sr_pipeline/dsr_sampling.py +190 -0
- helm/clients/image_generation/cogview2/sr_pipeline/iterative_sr.py +141 -0
- helm/clients/image_generation/cogview2/sr_pipeline/itersr_model.py +269 -0
- helm/clients/image_generation/cogview2/sr_pipeline/itersr_sampling.py +120 -0
- helm/clients/image_generation/cogview2/sr_pipeline/sr_group.py +42 -0
- helm/clients/image_generation/cogview2_client.py +191 -0
- helm/clients/image_generation/dalle2_client.py +192 -0
- helm/clients/image_generation/dalle3_client.py +108 -0
- helm/clients/image_generation/dalle_mini/__init__.py +3 -0
- helm/clients/image_generation/dalle_mini/data.py +442 -0
- helm/clients/image_generation/dalle_mini/model/__init__.py +5 -0
- helm/clients/image_generation/dalle_mini/model/configuration.py +175 -0
- helm/clients/image_generation/dalle_mini/model/modeling.py +1834 -0
- helm/clients/image_generation/dalle_mini/model/partitions.py +84 -0
- helm/clients/image_generation/dalle_mini/model/processor.py +63 -0
- helm/clients/image_generation/dalle_mini/model/text.py +251 -0
- helm/clients/image_generation/dalle_mini/model/tokenizer.py +9 -0
- helm/clients/image_generation/dalle_mini/model/utils.py +29 -0
- helm/clients/image_generation/dalle_mini/vqgan_jax/__init__.py +1 -0
- helm/clients/image_generation/dalle_mini/vqgan_jax/configuration_vqgan.py +40 -0
- helm/clients/image_generation/dalle_mini/vqgan_jax/convert_pt_model_to_jax.py +107 -0
- helm/clients/image_generation/dalle_mini/vqgan_jax/modeling_flax_vqgan.py +610 -0
- helm/clients/image_generation/dalle_mini_client.py +190 -0
- helm/clients/image_generation/deep_floyd_client.py +78 -0
- helm/clients/image_generation/huggingface_diffusers_client.py +249 -0
- helm/clients/image_generation/image_generation_client_utils.py +9 -0
- helm/clients/image_generation/lexica_client.py +86 -0
- helm/clients/image_generation/mindalle/__init__.py +0 -0
- helm/clients/image_generation/mindalle/models/__init__.py +216 -0
- helm/clients/image_generation/mindalle/models/stage1/__init__.py +0 -0
- helm/clients/image_generation/mindalle/models/stage1/layers.py +312 -0
- helm/clients/image_generation/mindalle/models/stage1/vqgan.py +103 -0
- helm/clients/image_generation/mindalle/models/stage2/__init__.py +0 -0
- helm/clients/image_generation/mindalle/models/stage2/layers.py +144 -0
- helm/clients/image_generation/mindalle/models/stage2/transformer.py +268 -0
- helm/clients/image_generation/mindalle/models/tokenizer.py +30 -0
- helm/clients/image_generation/mindalle/utils/__init__.py +3 -0
- helm/clients/image_generation/mindalle/utils/config.py +129 -0
- helm/clients/image_generation/mindalle/utils/sampling.py +149 -0
- helm/clients/image_generation/mindalle/utils/utils.py +89 -0
- helm/clients/image_generation/mindalle_client.py +115 -0
- helm/clients/image_generation/nudity_check_client.py +64 -0
- helm/clients/image_generation/together_image_generation_client.py +111 -0
- helm/{proxy/clients → clients}/lit_gpt_client.py +4 -4
- helm/{proxy/clients → clients}/megatron_client.py +5 -5
- helm/clients/mistral_client.py +134 -0
- helm/clients/moderation_api_client.py +109 -0
- helm/clients/open_lm_client.py +43 -0
- helm/clients/openai_client.py +302 -0
- helm/{proxy/clients → clients}/palmyra_client.py +6 -8
- helm/{proxy/clients → clients}/perspective_api_client.py +7 -8
- helm/clients/simple_client.py +64 -0
- helm/{proxy/clients → clients}/test_auto_client.py +13 -15
- helm/clients/test_client.py +100 -0
- helm/{proxy/clients → clients}/test_huggingface_client.py +15 -16
- helm/clients/test_simple_client.py +19 -0
- helm/{proxy/clients → clients}/test_together_client.py +20 -8
- helm/{proxy/clients → clients}/together_client.py +12 -72
- helm/clients/vertexai_client.py +391 -0
- helm/clients/vision_language/__init__.py +0 -0
- helm/clients/vision_language/huggingface_vlm_client.py +104 -0
- helm/{proxy/clients → clients}/vision_language/idefics_client.py +53 -48
- helm/clients/vision_language/open_flamingo/__init__.py +2 -0
- helm/clients/vision_language/open_flamingo/src/__init__.py +0 -0
- helm/clients/vision_language/open_flamingo/src/factory.py +147 -0
- helm/clients/vision_language/open_flamingo/src/flamingo.py +337 -0
- helm/clients/vision_language/open_flamingo/src/flamingo_lm.py +155 -0
- helm/clients/vision_language/open_flamingo/src/helpers.py +267 -0
- helm/clients/vision_language/open_flamingo/src/utils.py +47 -0
- helm/clients/vision_language/open_flamingo_client.py +155 -0
- helm/clients/vision_language/qwen_vlm_client.py +171 -0
- helm/clients/vllm_client.py +46 -0
- helm/common/cache.py +16 -4
- helm/common/cache_backend_config.py +47 -0
- helm/common/clip_score_request.py +41 -0
- helm/common/file_caches/__init__.py +0 -0
- helm/common/file_caches/file_cache.py +16 -0
- helm/common/file_caches/local_file_cache.py +61 -0
- helm/common/file_caches/test_local_file_cache.py +25 -0
- helm/common/file_upload_request.py +27 -0
- helm/common/general.py +1 -1
- helm/common/image_generation_parameters.py +25 -0
- helm/common/images_utils.py +24 -1
- helm/common/key_value_store.py +35 -4
- helm/common/media_object.py +13 -0
- helm/common/moderations_api_request.py +71 -0
- helm/common/mongo_key_value_store.py +3 -3
- helm/common/multimodal_request_utils.py +31 -0
- helm/common/nudity_check_request.py +29 -0
- helm/common/request.py +15 -17
- helm/common/test_general.py +6 -0
- helm/common/tokenization_request.py +1 -1
- helm/config/model_deployments.yaml +1069 -546
- helm/config/model_metadata.yaml +753 -31
- helm/config/tokenizer_configs.yaml +142 -43
- helm/proxy/accounts.py +31 -4
- helm/proxy/critique/mechanical_turk_critique_importer.py +3 -0
- helm/proxy/critique/model_critique_client.py +8 -6
- helm/proxy/example_queries.py +29 -17
- helm/proxy/server.py +70 -5
- helm/proxy/services/remote_service.py +31 -0
- helm/proxy/services/server_service.py +96 -16
- helm/proxy/services/service.py +30 -0
- helm/proxy/services/test_remote_service.py +4 -3
- helm/proxy/services/test_service.py +0 -12
- helm/proxy/test_accounts.py +32 -0
- helm/proxy/token_counters/auto_token_counter.py +37 -37
- helm/proxy/token_counters/test_auto_token_counter.py +164 -0
- helm/proxy/token_counters/token_counter.py +3 -5
- helm/tokenizers/__init__.py +0 -0
- helm/{proxy/tokenizers → tokenizers}/ai21_tokenizer.py +3 -3
- helm/{proxy/tokenizers → tokenizers}/anthropic_tokenizer.py +1 -1
- helm/{proxy/tokenizers → tokenizers}/auto_tokenizer.py +6 -9
- helm/{proxy/tokenizers → tokenizers}/cohere_tokenizer.py +1 -1
- helm/{proxy/tokenizers → tokenizers}/http_model_tokenizer.py +3 -3
- helm/{proxy/tokenizers → tokenizers}/huggingface_tokenizer.py +7 -26
- helm/tokenizers/simple_tokenizer.py +33 -0
- helm/{proxy/tokenizers → tokenizers}/test_anthropic_tokenizer.py +1 -1
- helm/{proxy/tokenizers → tokenizers}/test_huggingface_tokenizer.py +3 -0
- helm/tokenizers/test_simple_tokenizer.py +33 -0
- helm/{proxy/tokenizers → tokenizers}/vertexai_tokenizer.py +1 -1
- helm/{proxy/tokenizers → tokenizers}/yalm_tokenizer.py +5 -3
- helm/tokenizers/yalm_tokenizer_data/__init__.py +0 -0
- helm/tokenizers/yalm_tokenizer_data/voc_100b.sp +0 -0
- helm/{proxy/tokenizers → tokenizers}/yalm_tokenizer_data/yalm_tokenizer.py +1 -1
- crfm_helm-0.4.0.dist-info/RECORD +0 -397
- helm/benchmark/run_specs.py +0 -2762
- helm/benchmark/test_model_properties.py +0 -1570
- helm/benchmark/vlm_run_specs.py +0 -97
- helm/benchmark/window_services/flan_t5_window_service.py +0 -29
- helm/benchmark/window_services/gpt2_window_service.py +0 -32
- helm/benchmark/window_services/huggingface_window_service.py +0 -60
- helm/benchmark/window_services/t0pp_window_service.py +0 -35
- helm/benchmark/window_services/t511b_window_service.py +0 -30
- helm/benchmark/window_services/test_mt_nlg_window_service.py +0 -48
- helm/benchmark/window_services/ul2_window_service.py +0 -30
- helm/benchmark/window_services/wider_ai21_window_service.py +0 -24
- helm/common/cache_utils.py +0 -14
- helm/proxy/clients/aleph_alpha_client.py +0 -95
- helm/proxy/clients/goose_ai_client.py +0 -99
- helm/proxy/clients/microsoft_client.py +0 -180
- helm/proxy/clients/openai_client.py +0 -206
- helm/proxy/clients/simple_client.py +0 -60
- helm/proxy/clients/test_client.py +0 -49
- helm/proxy/clients/vertexai_client.py +0 -115
- helm/proxy/token_counters/ai21_token_counter.py +0 -20
- helm/proxy/token_counters/cohere_token_counter.py +0 -13
- helm/proxy/token_counters/free_token_counter.py +0 -12
- helm/proxy/token_counters/gooseai_token_counter.py +0 -24
- helm/proxy/token_counters/openai_token_counter.py +0 -22
- helm/proxy/token_counters/test_ai21_token_counter.py +0 -88
- helm/proxy/token_counters/test_openai_token_counter.py +0 -81
- helm/proxy/tokenizers/simple_tokenizer.py +0 -32
- {crfm_helm-0.4.0.dist-info → crfm_helm-0.5.0.dist-info}/LICENSE +0 -0
- {crfm_helm-0.4.0.dist-info → crfm_helm-0.5.0.dist-info}/entry_points.txt +0 -0
- {crfm_helm-0.4.0.dist-info → crfm_helm-0.5.0.dist-info}/top_level.txt +0 -0
- /helm/{proxy/clients → benchmark/annotation}/__init__.py +0 -0
- /helm/{proxy/clients/vision_language → benchmark/annotation/image2structure}/__init__.py +0 -0
- /helm/{proxy/tokenizers → benchmark/metrics/image_generation}/__init__.py +0 -0
- /helm/{proxy/tokenizers/yalm_tokenizer_data → benchmark/metrics/image_generation/detectors}/__init__.py +0 -0
- /helm/{proxy/clients → clients}/ai21_utils.py +0 -0
- /helm/{proxy/clients → clients}/cohere_utils.py +0 -0
- /helm/{proxy/clients → clients}/lit_gpt_generate.py +0 -0
- /helm/{proxy/clients → clients}/toxicity_classifier_client.py +0 -0
- /helm/{proxy/tokenizers → tokenizers}/aleph_alpha_tokenizer.py +0 -0
- /helm/{proxy/tokenizers → tokenizers}/caching_tokenizer.py +0 -0
- /helm/{proxy/tokenizers → tokenizers}/ice_tokenizer.py +0 -0
- /helm/{proxy/tokenizers → tokenizers}/lit_gpt_tokenizer.py +0 -0
- /helm/{proxy/tokenizers → tokenizers}/test_ice_tokenizer.py +0 -0
- /helm/{proxy/tokenizers → tokenizers}/test_yalm_tokenizer.py +0 -0
- /helm/{proxy/tokenizers → tokenizers}/tiktoken_tokenizer.py +0 -0
- /helm/{proxy/tokenizers → tokenizers}/tokenizer.py +0 -0
- /helm/{proxy/tokenizers → tokenizers}/yalm_tokenizer_data/test_yalm_tokenizer.py +0 -0
helm/benchmark/metrics/image_generation/watermark/test_watermark_detector.py
@@ -0,0 +1,16 @@
+from typing import List
+import os
+
+from .watermark_detector import WatermarkDetector
+
+
+def test_compute_watermark_probability():
+    watermark_detector = WatermarkDetector()
+
+    # These test images are from https://github.com/LAION-AI/LAION-5B-WatermarkDetection
+    base_path: str = os.path.join(os.path.dirname(__file__), "test_images")
+    clear_image_path: str = os.path.join(base_path, "clear_example.png")
+    watermark_image_path: str = os.path.join(base_path, "watermark_example.png")
+
+    has_watermarks: List[bool] = watermark_detector.has_watermark([clear_image_path, watermark_image_path])[0]
+    assert has_watermarks == [False, True]
helm/benchmark/metrics/image_generation/watermark/watermark_detector.py
@@ -0,0 +1,87 @@
+import os
+from typing import List, Tuple
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.utils.data
+from torchvision import transforms as T
+
+from helm.benchmark.runner import get_cached_models_path
+from helm.common.general import ensure_file_downloaded, hlog
+from helm.common.gpu_utils import get_torch_device
+from helm.common.images_utils import open_image
+from helm.common.optional_dependencies import handle_module_not_found_error
+
+
+class WatermarkDetector:
+    """
+    We use LAION's watermark detector (https://github.com/LAION-AI/LAION-5B-WatermarkDetection).
+    Adapted from https://github.com/LAION-AI/LAION-5B-WatermarkDetection/blob/main/example_use.py
+    """
+
+    MODEL_URL: str = "https://github.com/LAION-AI/LAION-5B-WatermarkDetection/raw/main/models/watermark_model_v1.pt"
+
+    # The example code from LAION used 0.5, but we observed that the watermark detector model could
+    # confuse text in an image as a watermark, so we set the threshold to a higher value of 0.9.
+    # The detector believes that the test example has a watermark with a 93.563% probability.
+    WATERMARK_THRESHOLD: float = 0.9
+
+    @staticmethod
+    def load_model():
+        """
+        Load the watermark detector model.
+        """
+        try:
+            import timm
+        except ModuleNotFoundError as e:
+            handle_module_not_found_error(e, ["heim"])
+
+        model = timm.create_model("efficientnet_b3a", pretrained=True, num_classes=2)
+        model.classifier = nn.Sequential(
+            # 1536 is the original in_features
+            nn.Linear(in_features=1536, out_features=625),
+            nn.ReLU(),  # ReLU as the activation function
+            nn.Dropout(p=0.3),
+            nn.Linear(in_features=625, out_features=256),
+            nn.ReLU(),
+            nn.Linear(in_features=256, out_features=2),
+        )
+
+        watermark_model_path: str = os.path.join(get_cached_models_path(), "watermark_model_v1.pt")
+        ensure_file_downloaded(WatermarkDetector.MODEL_URL, watermark_model_path)
+        state_dict = torch.load(watermark_model_path)
+        model.load_state_dict(state_dict)
+        model.eval()  # Set the model to evaluation mode
+        return model.to(get_torch_device())
+
+    def __init__(self):
+        self._model = self.load_model()
+
+    def has_watermark(self, image_locations: List[str]) -> Tuple[List[bool], List[float]]:
+        """
+        Returns a list of booleans indicating whether each image (given by `image_locations`)
+        contains a watermark, along with the corresponding watermark probabilities.
+        """
+        # Preprocess images (resize and normalize)
+        images: List[torch.Tensor] = []
+        preprocessing = T.Compose(
+            [T.Resize((256, 256)), T.ToTensor(), T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]
+        )
+        for location in image_locations:
+            # Location can be a file path or a URL
+            image = preprocessing(open_image(location).convert("RGB"))
+            images.append(image)
+
+        result: List[bool] = []
+        probs: List[float] = []
+        with torch.no_grad():
+            pred = self._model(torch.stack(images).to(get_torch_device()))
+            syms = F.softmax(pred, dim=1).detach().cpu().numpy().tolist()
+            for i, sym in enumerate(syms):
+                watermark_prob, clear_prob = sym
+                if watermark_prob > self.WATERMARK_THRESHOLD:
+                    hlog(f"Image at {image_locations[i]} has a watermark with {watermark_prob} probability.")
+                result.append(watermark_prob >= self.WATERMARK_THRESHOLD)
+                probs.append(watermark_prob)
+        return result, probs
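A minimal usage sketch of the detector added above (the image paths here are hypothetical; on first use, `load_model` downloads LAION's `watermark_model_v1.pt` into the cached-models directory, as the diff shows):

from helm.benchmark.metrics.image_generation.watermark.watermark_detector import WatermarkDetector

detector = WatermarkDetector()  # downloads and loads the LAION checkpoint on first use
# `has_watermark` accepts file paths or URLs and scores them in a single batch.
has_watermarks, probs = detector.has_watermark(["out/gen_1.png", "out/gen_2.png"])  # hypothetical paths
for flagged, prob in zip(has_watermarks, probs):
    print(flagged, round(prob, 3))  # flagged is True only when P(watermark) >= WATERMARK_THRESHOLD (0.9)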
helm/benchmark/metrics/image_generation/watermark_metrics.py
@@ -0,0 +1,48 @@
+from statistics import mean
+from typing import List
+
+from helm.common.request import RequestResult
+from helm.benchmark.adaptation.request_state import RequestState
+from helm.benchmark.adaptation.adapter_spec import AdapterSpec
+from helm.benchmark.metrics.statistic import Stat
+from helm.benchmark.metrics.metric import Metric
+from helm.benchmark.metrics.metric_name import MetricName
+from helm.benchmark.metrics.metric_service import MetricService
+from helm.common.multimodal_request_utils import gather_generated_image_locations
+from .watermark.watermark_detector import WatermarkDetector
+
+
+class WatermarkMetric(Metric):
+    """
+    Defines metrics for detecting watermarks in images using
+    LAION's watermark detector (https://github.com/LAION-AI/LAION-5B-WatermarkDetection).
+    """
+
+    def __init__(self):
+        self._watermark_detector = WatermarkDetector()
+
+    def __repr__(self):
+        return "WatermarkMetric()"
+
+    def evaluate_generation(
+        self,
+        adapter_spec: AdapterSpec,
+        request_state: RequestState,
+        metric_service: MetricService,
+        eval_cache_path: str,
+    ) -> List[Stat]:
+        assert request_state.result is not None
+        request_result: RequestResult = request_state.result
+        image_locations: List[str] = gather_generated_image_locations(request_result)
+        if len(image_locations) == 0:
+            return []
+
+        # Batch process the images and detect if they have watermarks
+        has_watermarks, watermark_probs = self._watermark_detector.has_watermark(image_locations)
+        stats: List[Stat] = [
+            Stat(MetricName("watermark_frac")).add(mean(has_watermarks) if len(has_watermarks) > 0 else 0),
+            Stat(MetricName("expected_max_watermark_prob")).add(
+                max(watermark_probs) if len(watermark_probs) > 0 else 0
+            ),
+        ]
+        return stats
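The two Stats above reduce the detector's per-request outputs in a straightforward way; a toy check with made-up values (not HELM output):

from statistics import mean

has_watermarks = [True, False, False, True]  # hypothetical per-image detector booleans
watermark_probs = [0.97, 0.12, 0.30, 0.91]   # hypothetical per-image probabilities
print(mean(has_watermarks))  # watermark_frac -> 0.5
print(max(watermark_probs))  # expected_max_watermark_prob -> 0.97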
helm/benchmark/metrics/instruction_following_critique_metrics.py
@@ -73,7 +73,9 @@ class InstructionFollowingCritiqueMetric(Metric):
     }
 
     KEYWORD_NAME: str = "Keyword Feedback"
-    KEYWORD_PROMPT: str =
+    KEYWORD_PROMPT: str = (
+        "Provide a comma-separated list of keywords that capture what's wrong with the response (e.g., typos, swear words, too long)"  # noqa: E501
+    )
 
     def __init__(self, num_respondents: int) -> None:
         self._template = CritiqueTaskTemplate(
helm/benchmark/metrics/language_modeling_metrics.py
@@ -0,0 +1,99 @@
+from collections import defaultdict
+from typing import List, Dict, Set
+
+from helm.benchmark.adaptation.scenario_state import ScenarioState
+from helm.benchmark.metrics.basic_metrics import (
+    compute_language_modeling_metrics,
+    compute_perplexity_metrics,
+    compute_request_state_metrics,
+)
+from helm.benchmark.metrics.efficiency_metrics import EfficiencyMetric
+
+from helm.benchmark.adaptation.request_state import RequestState
+from helm.benchmark.adaptation.adapter_spec import AdapterSpec
+from .metric import MetricInterface, MetricResult, PerInstanceStats, add_context
+from .metric_name import MetricContext, MetricName
+from .metric_service import MetricService
+from .statistic import Stat, merge_stat
+
+
+class LanguageModelingMetric(MetricInterface):
+    """
+    Defines the basic metrics available when using the ADAPT_LANGUAGE_MODELING adapter.
+    This is parallel to BasicMetric and produces many of the same Stats.
+    """
+
+    def __init__(self, names: List[str]):
+        self.names: List[str] = names
+        self.efficiency_metric = EfficiencyMetric()
+
+    def __repr__(self):
+        return "LanguageModelingMetric"
+
+    def evaluate(
+        self, scenario_state: ScenarioState, metric_service: MetricService, eval_cache_path: str, parallelism: int
+    ) -> MetricResult:
+        global_stats: Dict[MetricName, Stat] = {}
+        # The first and only trial
+        trial_stats: Dict[MetricName, Stat] = {}
+        # Per-instance stats
+        all_per_instance_stats: List[PerInstanceStats] = []
+        instance_ids_per_context: Dict[MetricContext, Set[str]] = defaultdict(set)
+
+        for request_state in scenario_state.request_states:
+            # Evaluate request_state
+            request_stats = self.evaluate_generation(
+                scenario_state.adapter_spec, request_state, metric_service, eval_cache_path
+            )
+
+            # Add instance-related context (e.g., split, perturbation) to the metrics
+            for i, stat in enumerate(request_stats):
+                context = MetricContext.from_instance(request_state.instance)
+                request_stats[i] = add_context(stat, context)
+                assert request_state.instance.id is not None
+                instance_ids_per_context[context].add(request_state.instance.id)
+
+            # Use trial index of 0 here since we run only one trial for LM
+            assert request_state.instance.id is not None
+            all_per_instance_stats.append(
+                PerInstanceStats(request_state.instance.id, request_state.instance.perturbation, 0, request_stats)
+            )
+
+            for stat in request_stats:
+                merge_stat(trial_stats, stat)
+
+        # Group stats according to the context (e.g., split, perturbation) and call derive_stats on each grouping
+        grouped_trial_stats: Dict[MetricContext, Dict[MetricName, Stat]] = defaultdict(dict)
+        for metric_name, stat in trial_stats.items():
+            grouped_trial_stats[MetricContext.from_metric_name(metric_name)][metric_name] = stat  # group by context
+
+        for context, stats_dict in grouped_trial_stats.items():
+            for stat in self.derive_stats(stats_dict):
+                merge_stat(trial_stats, add_context(stat, context))
+            # Keep track of how many instances are in each subset
+            num_instances_stat = Stat(MetricName("num_instances")).add(len(instance_ids_per_context[context]))
+            merge_stat(trial_stats, add_context(num_instances_stat, context))
+
+        for stat in trial_stats.values():
+            merge_stat(global_stats, stat.take_mean())
+        return MetricResult(list(global_stats.values()), all_per_instance_stats)
+
+    def evaluate_generation(
+        self,
+        adapter_spec: AdapterSpec,
+        request_state: RequestState,
+        metric_service: MetricService,
+        eval_cache_path: str,
+    ) -> List[Stat]:
+        """Compute all metrics."""
+        stats: List[Stat] = []
+        stats.extend(compute_request_state_metrics(self.efficiency_metric, adapter_spec, request_state, metric_service))
+        stats.extend(compute_language_modeling_metrics(adapter_spec, request_state, metric_service))
+
+        return stats
+
+    def derive_stats(self, stats_dict: Dict[MetricName, Stat]) -> List[Stat]:
+        """Derive perplexity metrics if applicable. We don't worry about splits and perturbations here."""
+        derived_stats: List[Stat] = []
+        derived_stats.extend(compute_perplexity_metrics(stats_dict))
+        return derived_stats
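Schematically, `evaluate` above merges one Stat per metric across request states and then takes means into the global stats. Since `Stat`, `merge_stat`, and `take_mean` are HELM internals, the following is only a plain-Python analogy of that merge-then-mean pattern, with made-up values:

from collections import defaultdict
from statistics import mean
from typing import Dict, List

per_request_stats: List[Dict[str, float]] = [{"logprob": -1.2}, {"logprob": -0.8}]  # hypothetical values
trial: Dict[str, List[float]] = defaultdict(list)
for request_stats in per_request_stats:
    for name, value in request_stats.items():
        trial[name].append(value)  # plays the role of merge_stat(trial_stats, stat)
global_stats = {name: mean(values) for name, values in trial.items()}  # plays the role of stat.take_mean()
print(global_stats)  # {'logprob': -1.0}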
helm/benchmark/metrics/machine_translation_metrics.py
@@ -0,0 +1,89 @@
+from typing import List
+
+from helm.benchmark.adaptation.request_state import RequestState
+from helm.benchmark.metrics.evaluate_instances_metric import EvaluateInstancesMetric
+from helm.common.optional_dependencies import handle_module_not_found_error
+from .metric_name import MetricName
+from .statistic import Stat
+
+try:
+    from sacrebleu.metrics import BLEU
+    from langdetect import detect
+except ModuleNotFoundError as e:
+    handle_module_not_found_error(e)
+
+
+class MachineTranslationMetric(EvaluateInstancesMetric):
+    """
+    Compute the BLEU score for Machine Translation scenarios. The implementation is based on sacrebleu.
+    """
+
+    def evaluate_instances(self, request_states: List[RequestState], eval_cache_path: str) -> List[Stat]:
+        """
+        Compute the corpus-level metric based on all request_states.
+        """
+
+        bleu = BLEU()
+
+        refs: List[List[str]] = [[]]
+        sys: List = []
+        for request_state in request_states:
+            # Assume there is one reference per instance. TODO: Support multiple references after adding more scenarios.
+            num_references: int = len(request_state.instance.references)
+            if num_references != 1:
+                raise ValueError(f"This instance has {num_references} references, but we currently only support one.")
+            # Usually there is only one completion for each instance.
+            assert request_state.result is not None
+            if len(request_state.result.completions) != 1:
+                raise ValueError("Each request result should have only exactly one completion.")
+            sys.append(request_state.result.completions[0].text)
+            refs[0].append(request_state.instance.references[0].output.text)
+        bleu_score = bleu.corpus_score(sys, refs).score
+        return [Stat(MetricName("bleu")).add(bleu_score)]
+
+
+class CLEVAMachineTranslationMetric(EvaluateInstancesMetric):
+    """
+    Compute the BLEU score for Machine Translation scenarios of the CLEVA benchmark.
+    Based on sacrebleu, this implementation distinguishes the target language and allows a variable number of references.
+    If there is more than one hypothesis, only the first one is used in the calculation.
+    """
+
+    def evaluate_instances(self, request_states: List[RequestState], eval_cache_path: str) -> List[Stat]:
+        """
+        Compute the corpus-level metric based on all request_states.
+        """
+
+        def detect_language(request_states: List[RequestState]) -> str:
+            """
+            Determine the target language by detecting the language of the references.
+            Currently, it only distinguishes whether the target language is Chinese.
+            """
+
+            corpus: str = "".join(
+                [request_state.instance.references[0].output.text for request_state in request_states[:10]]
+            )
+            if detect(corpus) in ["zh-cn", "zh-tw"]:
+                return "zh"
+            else:
+                return "13a"  # Default tokenizer for sacrebleu.BLEU
+
+        bleu = BLEU(tokenize=detect_language(request_states))
+
+        max_num_references: int = max([len(request_state.instance.references) for request_state in request_states])
+        refs: List[List[str]] = [
+            [
+                request_state.instance.references[i].output.text if i < len(request_state.instance.references) else ""
+                for request_state in request_states
+            ]
+            for i in range(max_num_references)
+        ]
+
+        sys: List = []
+        for request_state in request_states:
+            assert request_state.result is not None
+            sys.append(request_state.result.completions[0].text)
+
+        bleu_score = bleu.corpus_score(sys, refs).score
+
+        return [Stat(MetricName("cleva_machine_translation_bleu")).add(bleu_score)]
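The `refs` layout both classes build follows sacrebleu's corpus convention: a list of reference streams, each parallel to the hypothesis list, so `refs[k][i]` is the k-th reference for hypothesis `i`. A self-contained sanity check of that convention:

from sacrebleu.metrics import BLEU

sys = ["the cat sat on the mat", "hello world"]     # one hypothesis per instance
refs = [["the cat sat on the mat", "hello world"]]  # a single reference stream, parallel to sys
print(BLEU().corpus_score(sys, refs).score)         # 100.0 for exact matches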