crfm-helm 0.5.6__py3-none-any.whl → 0.5.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of crfm-helm has been flagged as potentially problematic; consult the package registry page for details.
- {crfm_helm-0.5.6.dist-info → crfm_helm-0.5.10.dist-info}/METADATA +72 -130
- {crfm_helm-0.5.6.dist-info → crfm_helm-0.5.10.dist-info}/RECORD +372 -305
- helm/benchmark/adaptation/adapter_spec.py +10 -0
- helm/benchmark/adaptation/adapters/multimodal/multiple_choice_joint_multimodal_adapter.py +11 -3
- helm/benchmark/adaptation/adapters/multiple_choice_joint_adapter.py +11 -8
- helm/benchmark/annotation/aci_bench_annotator.py +11 -22
- helm/benchmark/annotation/air_bench_annotator.py +1 -1
- helm/benchmark/annotation/alrage_annotator.py +90 -0
- helm/benchmark/annotation/chw_care_plan_annotator.py +10 -21
- helm/benchmark/annotation/dischargeme_annotator.py +11 -22
- helm/benchmark/annotation/live_qa_annotator.py +1 -1
- helm/benchmark/annotation/med_dialog_annotator.py +11 -22
- helm/benchmark/annotation/medalign_annotator.py +11 -22
- helm/benchmark/annotation/medi_qa_annotator.py +11 -22
- helm/benchmark/annotation/medication_qa_annotator.py +11 -22
- helm/benchmark/annotation/mental_health_annotator.py +11 -22
- helm/benchmark/annotation/mimic_bhc_annotator.py +11 -22
- helm/benchmark/annotation/mimic_rrs_annotator.py +11 -22
- helm/benchmark/annotation/model_as_judge.py +23 -18
- helm/benchmark/annotation/mtsamples_procedures_annotator.py +11 -22
- helm/benchmark/annotation/mtsamples_replicate_annotator.py +11 -22
- helm/benchmark/annotation/starr_patient_instructions_annotator.py +11 -22
- helm/benchmark/metrics/air_bench_metrics.py +3157 -1
- helm/benchmark/metrics/alrage_metric.py +35 -0
- helm/benchmark/metrics/basic_metrics.py +267 -2
- helm/benchmark/metrics/bbq_metrics.py +12 -0
- helm/benchmark/metrics/classification_metrics.py +19 -1
- helm/benchmark/metrics/codeinsights_code_efficiency_metrics.py +186 -0
- helm/benchmark/metrics/codeinsights_code_evaluation_metrics.py +477 -0
- helm/benchmark/metrics/codeinsights_correct_code_metrics.py +366 -0
- helm/benchmark/metrics/codeinsights_edge_case_metrics.py +92 -0
- helm/benchmark/metrics/codeinsights_metric_specs.py +51 -0
- helm/benchmark/metrics/comet_metric.py +1 -1
- helm/benchmark/metrics/conv_fin_qa_calc_metrics.py +12 -1
- helm/benchmark/metrics/copyright_metrics.py +1 -1
- helm/benchmark/metrics/decodingtrust_stereotype_bias_metrics.py +1 -1
- helm/benchmark/metrics/dry_run_metrics.py +30 -1
- helm/benchmark/metrics/efficiency_metrics.py +74 -0
- helm/benchmark/metrics/ehr_sql_metrics.py +57 -1
- helm/benchmark/metrics/evaluate_reference_metrics.py +312 -1
- helm/benchmark/metrics/gpqa_chain_of_thought_metric.py +13 -1
- helm/benchmark/metrics/helpdesk_call_summarization_metrics.py +13 -1
- helm/benchmark/metrics/ifeval_metrics.py +13 -1
- helm/benchmark/metrics/image_generation/clip_score_metrics.py +13 -2
- helm/benchmark/metrics/image_generation/fractal_dimension/fractal_dimension_util.py +1 -1
- helm/benchmark/metrics/instruction_following_critique_metrics.py +41 -1
- helm/benchmark/metrics/kpi_edgar_metrics.py +21 -0
- helm/benchmark/metrics/language_modeling_metrics.py +13 -1
- helm/benchmark/metrics/live_qa_metrics.py +13 -1
- helm/benchmark/metrics/llm_jury_metrics.py +13 -1
- helm/benchmark/metrics/lmkt_metric_specs.py +12 -0
- helm/benchmark/metrics/lmkt_metrics.py +47 -0
- helm/benchmark/metrics/medcalc_bench_metrics.py +14 -1
- helm/benchmark/metrics/medec_metrics.py +25 -2
- helm/benchmark/metrics/melt_toxicity_metric.py +1 -1
- helm/benchmark/metrics/metric.py +25 -0
- helm/benchmark/metrics/mimiciv_billing_code_metrics.py +32 -1
- helm/benchmark/metrics/omni_math_metrics.py +13 -1
- helm/benchmark/metrics/safety_metrics.py +13 -1
- helm/benchmark/metrics/seahelm_metrics.py +14 -1
- helm/benchmark/metrics/summac/model_summac.py +3 -3
- helm/benchmark/metrics/summarization_metrics.py +129 -1
- helm/benchmark/metrics/toxicity_metrics.py +31 -1
- helm/benchmark/metrics/ultra_suite_asr_classification_metrics.py +52 -0
- helm/benchmark/metrics/wildbench_metrics.py +21 -1
- helm/benchmark/model_deployment_registry.py +11 -19
- helm/benchmark/presentation/create_plots.py +11 -2
- helm/benchmark/presentation/run_display.py +13 -3
- helm/benchmark/presentation/run_entry.py +2 -2
- helm/benchmark/presentation/schema.py +10 -22
- helm/benchmark/presentation/summarize.py +189 -14
- helm/benchmark/presentation/taxonomy_info.py +20 -0
- helm/benchmark/presentation/test_create_plots.py +4 -1
- helm/benchmark/run.py +15 -4
- helm/benchmark/run_expander.py +4 -0
- helm/benchmark/run_specs/arabic_run_specs.py +197 -0
- helm/benchmark/run_specs/bluex_run_specs.py +40 -0
- helm/benchmark/run_specs/classic_run_specs.py +2 -55
- helm/benchmark/run_specs/codeinsights_run_specs.py +192 -0
- helm/benchmark/run_specs/healthqa_br_run_specs.py +40 -0
- helm/benchmark/run_specs/heim_run_specs.py +3 -1
- helm/benchmark/run_specs/lmkt_run_specs.py +144 -0
- helm/benchmark/run_specs/long_context_run_specs.py +48 -1
- helm/benchmark/run_specs/medhelm/__init__.py +0 -0
- helm/benchmark/run_specs/medhelm/benchmark_config.py +219 -0
- helm/benchmark/run_specs/medhelm_run_specs.py +363 -53
- helm/benchmark/run_specs/multilingual_run_specs.py +50 -0
- helm/benchmark/run_specs/speech_disorder_audio_run_specs.py +11 -13
- helm/benchmark/runner.py +7 -0
- helm/benchmark/scenarios/aci_bench_scenario.py +23 -0
- helm/benchmark/scenarios/air_bench_scenario.py +21 -0
- helm/benchmark/scenarios/alghafa_scenario.py +126 -0
- helm/benchmark/scenarios/alrage_scenario.py +54 -0
- helm/benchmark/scenarios/anthropic_hh_rlhf_scenario.py +23 -1
- helm/benchmark/scenarios/anthropic_red_team_scenario.py +12 -1
- helm/benchmark/scenarios/arabic_exams_scenario.py +114 -0
- helm/benchmark/scenarios/arabic_mmlu_scenario.py +82 -0
- helm/benchmark/scenarios/aratrust_scenario.py +95 -0
- helm/benchmark/scenarios/audio_language/casual_conversations2_scenario.py +1 -1
- helm/benchmark/scenarios/audio_language/mustard_scenario.py +1 -1
- helm/benchmark/scenarios/audio_language/ultra_suite_asr_classification_scenario.py +74 -0
- helm/benchmark/scenarios/audio_language/ultra_suite_asr_transcription_scenario.py +70 -0
- helm/benchmark/scenarios/audio_language/ultra_suite_classification_scenario.py +22 -53
- helm/benchmark/scenarios/audio_language/ultra_suite_disorder_breakdown_scenario.py +21 -21
- helm/benchmark/scenarios/audio_language/ultra_suite_disorder_symptoms_scenario.py +21 -52
- helm/benchmark/scenarios/babi_qa_scenario.py +15 -0
- helm/benchmark/scenarios/banking77_scenario.py +21 -0
- helm/benchmark/scenarios/bbq_scenario.py +15 -0
- helm/benchmark/scenarios/best_chatgpt_prompts.yaml +473 -0
- helm/benchmark/scenarios/bird_sql_scenario.py +18 -0
- helm/benchmark/scenarios/bluex_scenario.py +70 -0
- helm/benchmark/scenarios/bold_scenario.py +15 -0
- helm/benchmark/scenarios/boolq_scenario.py +20 -0
- helm/benchmark/scenarios/chw_care_plan_scenario.py +23 -0
- helm/benchmark/scenarios/civil_comments_scenario.py +13 -0
- helm/benchmark/scenarios/clear_scenario.py +23 -0
- helm/benchmark/scenarios/cleva_scenario.py +480 -1
- helm/benchmark/scenarios/code_scenario.py +28 -0
- helm/benchmark/scenarios/codeinsights_code_efficiency_scenario.py +197 -0
- helm/benchmark/scenarios/codeinsights_correct_code_scenario.py +78 -0
- helm/benchmark/scenarios/codeinsights_edge_case_scenario.py +192 -0
- helm/benchmark/scenarios/codeinsights_student_coding_scenario.py +162 -0
- helm/benchmark/scenarios/codeinsights_student_mistake_scenario.py +188 -0
- helm/benchmark/scenarios/commonsense_scenario.py +32 -0
- helm/benchmark/scenarios/compositional_instructions.yaml +70 -0
- helm/benchmark/scenarios/conv_fin_qa_calc_scenario.py +21 -0
- helm/benchmark/scenarios/copyright_scenario.py +35 -1
- helm/benchmark/scenarios/cti_to_mitre_scenario.py +21 -0
- helm/benchmark/scenarios/czech_bank_qa_scenario.py +18 -0
- helm/benchmark/scenarios/decodingtrust_adv_demonstration_scenario.py +22 -1
- helm/benchmark/scenarios/decodingtrust_adv_robustness_scenario.py +23 -1
- helm/benchmark/scenarios/decodingtrust_fairness_scenario.py +22 -1
- helm/benchmark/scenarios/decodingtrust_machine_ethics_scenario.py +21 -1
- helm/benchmark/scenarios/decodingtrust_ood_robustness_scenario.py +13 -0
- helm/benchmark/scenarios/decodingtrust_privacy_scenario.py +13 -1
- helm/benchmark/scenarios/decodingtrust_stereotype_bias_scenario.py +13 -1
- helm/benchmark/scenarios/decodingtrust_toxicity_prompts_scenario.py +13 -1
- helm/benchmark/scenarios/dischargeme_scenario.py +24 -0
- helm/benchmark/scenarios/disinformation_scenario.py +22 -0
- helm/benchmark/scenarios/dyck_language_scenario.py +15 -0
- helm/benchmark/scenarios/ehrshot_scenario.py +22 -0
- helm/benchmark/scenarios/enem_challenge_scenario.py +19 -0
- helm/benchmark/scenarios/entity_data_imputation_scenario.py +14 -0
- helm/benchmark/scenarios/entity_matching_scenario.py +14 -0
- helm/benchmark/scenarios/exams_multilingual_scenario.py +115 -0
- helm/benchmark/scenarios/fin_qa_scenario.py +20 -0
- helm/benchmark/scenarios/financebench_scenario.py +21 -0
- helm/benchmark/scenarios/financial_phrasebank_scenario.py +21 -0
- helm/benchmark/scenarios/gold_commodity_news_scenario.py +21 -0
- helm/benchmark/scenarios/gpqa_scenario.py +18 -0
- helm/benchmark/scenarios/grammar_scenario.py +20 -1
- helm/benchmark/scenarios/gsm_scenario.py +21 -0
- helm/benchmark/scenarios/harm_bench_gcg_transfer_scenario.py +12 -1
- helm/benchmark/scenarios/harm_bench_scenario.py +12 -1
- helm/benchmark/scenarios/headqa_scenario.py +22 -0
- helm/benchmark/scenarios/healthqa_br_scenario.py +80 -0
- helm/benchmark/scenarios/helpdesk_call_summarization_scenario.py +13 -0
- helm/benchmark/scenarios/ice_scenario.py +21 -1
- helm/benchmark/scenarios/ifeval_scenario.py +18 -0
- helm/benchmark/scenarios/imdb_scenario.py +15 -0
- helm/benchmark/scenarios/infinite_bench_en_mc_scenario.py +111 -0
- helm/benchmark/scenarios/infinite_bench_en_qa_scenario.py +1 -1
- helm/benchmark/scenarios/infinite_bench_en_sum_scenario.py +19 -0
- helm/benchmark/scenarios/koala_scenario.py +21 -1
- helm/benchmark/scenarios/kpi_edgar_scenario.py +21 -0
- helm/benchmark/scenarios/legal_contract_summarization_scenario.py +20 -0
- helm/benchmark/scenarios/legal_summarization_scenario.py +50 -0
- helm/benchmark/scenarios/legal_support_scenario.py +13 -0
- helm/benchmark/scenarios/legalbench_scenario.py +19 -0
- helm/benchmark/scenarios/lex_glue_scenario.py +11 -0
- helm/benchmark/scenarios/lextreme_scenario.py +11 -0
- helm/benchmark/scenarios/lmkt_scenarios.py +288 -0
- helm/benchmark/scenarios/lsat_qa_scenario.py +14 -0
- helm/benchmark/scenarios/madinah_qa_scenario.py +73 -0
- helm/benchmark/scenarios/math_scenario.py +54 -20
- helm/benchmark/scenarios/mbzuai_human_translated_arabic_mmlu.py +68 -0
- helm/benchmark/scenarios/med_dialog_scenario.py +32 -1
- helm/benchmark/scenarios/med_mcqa_scenario.py +14 -0
- helm/benchmark/scenarios/med_qa_scenario.py +20 -0
- helm/benchmark/scenarios/medalign_scenario.py +23 -0
- helm/benchmark/scenarios/medalign_scenario_helper.py +19 -125
- helm/benchmark/scenarios/medbullets_scenario.py +22 -0
- helm/benchmark/scenarios/medcalc_bench_scenario.py +22 -0
- helm/benchmark/scenarios/medec_scenario.py +23 -0
- helm/benchmark/scenarios/medhallu_scenario.py +23 -0
- helm/benchmark/scenarios/medhelm/__init__.py +0 -0
- helm/benchmark/scenarios/medhelm/judges.yaml +14 -0
- helm/benchmark/scenarios/medhelm_configurable_scenario.py +101 -0
- helm/benchmark/scenarios/medi_qa_scenario.py +24 -1
- helm/benchmark/scenarios/medication_qa_scenario.py +31 -1
- helm/benchmark/scenarios/melt_scenarios.py +2 -2
- helm/benchmark/scenarios/mental_health_scenario.py +23 -0
- helm/benchmark/scenarios/mimic_bhc_scenario.py +25 -1
- helm/benchmark/scenarios/mimic_rrs_scenario.py +23 -0
- helm/benchmark/scenarios/mimiciv_billing_code_scenario.py +22 -0
- helm/benchmark/scenarios/mmlu_pro_scenario.py +18 -0
- helm/benchmark/scenarios/mmlu_scenario.py +21 -0
- helm/benchmark/scenarios/mmmlu_scenario.py +85 -0
- helm/benchmark/scenarios/msmarco_scenario.py +30 -0
- helm/benchmark/scenarios/mtsamples_procedures_scenario.py +22 -0
- helm/benchmark/scenarios/mtsamples_replicate_scenario.py +22 -0
- helm/benchmark/scenarios/n2c2_ct_matching_scenario.py +20 -0
- helm/benchmark/scenarios/narrativeqa_scenario.py +19 -0
- helm/benchmark/scenarios/natural_qa_scenario.py +32 -0
- helm/benchmark/scenarios/omni_math_scenario.py +18 -0
- helm/benchmark/scenarios/open_assistant_scenario.py +22 -0
- helm/benchmark/scenarios/openai_mrcr_scenario.py +15 -0
- helm/benchmark/scenarios/pubmed_qa_scenario.py +22 -0
- helm/benchmark/scenarios/quac_scenario.py +14 -0
- helm/benchmark/scenarios/race_based_med_scenario.py +23 -0
- helm/benchmark/scenarios/raft_scenario.py +15 -0
- helm/benchmark/scenarios/real_toxicity_prompts_scenario.py +14 -1
- helm/benchmark/scenarios/ruler_qa_scenarios.py +40 -0
- helm/benchmark/scenarios/scenario.py +31 -0
- helm/benchmark/scenarios/seahelm_scenario.py +350 -2
- helm/benchmark/scenarios/self_instruct_scenario.py +29 -1
- helm/benchmark/scenarios/shc_bmt_scenario.py +22 -0
- helm/benchmark/scenarios/shc_cdi_scenario.py +20 -0
- helm/benchmark/scenarios/shc_conf_scenario.py +23 -0
- helm/benchmark/scenarios/shc_ent_scenario.py +21 -0
- helm/benchmark/scenarios/shc_gip_scenario.py +20 -0
- helm/benchmark/scenarios/shc_privacy_scenario.py +22 -0
- helm/benchmark/scenarios/shc_proxy_scenario.py +23 -1
- helm/benchmark/scenarios/shc_ptbm_scenario.py +23 -0
- helm/benchmark/scenarios/shc_sequoia_scenario.py +21 -0
- helm/benchmark/scenarios/simple_safety_tests_scenario.py +12 -1
- helm/benchmark/scenarios/situation_prompts.yaml +49 -0
- helm/benchmark/scenarios/spider_scenario.py +18 -0
- helm/benchmark/scenarios/starr_patient_instructions_scenario.py +22 -0
- helm/benchmark/scenarios/summarization_scenario.py +37 -0
- helm/benchmark/scenarios/synthetic_efficiency_scenario.py +22 -1
- helm/benchmark/scenarios/synthetic_reasoning_natural_scenario.py +13 -0
- helm/benchmark/scenarios/test_alghafa_scenario.py +29 -0
- helm/benchmark/scenarios/test_alrage_scenario.py +23 -0
- helm/benchmark/scenarios/test_arabic_exams_scenario.py +21 -0
- helm/benchmark/scenarios/test_aratrust_scenario.py +21 -0
- helm/benchmark/scenarios/test_bluex_scenario.py +59 -0
- helm/benchmark/scenarios/test_exams_multilingual_scenario.py +29 -0
- helm/benchmark/scenarios/test_healtha_br_scenario.py +57 -0
- helm/benchmark/scenarios/thai_exam_scenario.py +95 -0
- helm/benchmark/scenarios/the_pile_scenario.py +13 -1
- helm/benchmark/scenarios/truthful_qa_scenario.py +14 -0
- helm/benchmark/scenarios/twitter_aae_scenario.py +20 -1
- helm/benchmark/scenarios/vicuna_scenario.py +21 -1
- helm/benchmark/scenarios/wikifact_scenario.py +20 -0
- helm/benchmark/scenarios/wildbench_scenario.py +18 -0
- helm/benchmark/scenarios/wmt_14_scenario.py +19 -0
- helm/benchmark/slurm_jobs.py +1 -2
- helm/benchmark/slurm_runner.py +8 -1
- helm/benchmark/static/schema_arabic.yaml +271 -0
- helm/benchmark/static/schema_classic.yaml +0 -17
- helm/benchmark/static/schema_long_context.yaml +17 -18
- helm/benchmark/static/schema_medhelm.yaml +36 -0
- helm/benchmark/static/schema_slp.yaml +219 -0
- helm/benchmark/static_build/assets/audio-table-Dn5NMMeJ.png +0 -0
- helm/benchmark/static_build/assets/index-oIeiQW2g.css +1 -0
- helm/benchmark/static_build/assets/index-qOFpOyHb.js +10 -0
- helm/benchmark/static_build/assets/react-BteFIppM.js +85 -0
- helm/benchmark/static_build/assets/recharts-DxuQtTOs.js +97 -0
- helm/benchmark/static_build/assets/tremor-DR4fE7ko.js +10 -0
- helm/benchmark/static_build/index.html +5 -6
- helm/benchmark/window_services/image_generation/clip_window_service.py +1 -3
- helm/clients/ai21_client.py +2 -0
- helm/clients/aleph_alpha_client.py +2 -0
- helm/clients/anthropic_client.py +7 -1
- helm/clients/audio_language/diva_llama_client.py +2 -0
- helm/clients/audio_language/llama_omni/arguments.py +61 -0
- helm/clients/audio_language/llama_omni/constants.py +9 -0
- helm/clients/audio_language/llama_omni/conversation.py +213 -0
- helm/clients/audio_language/llama_omni/model/__init__.py +0 -0
- helm/clients/audio_language/llama_omni/model/builder.py +88 -0
- helm/clients/audio_language/llama_omni/model/language_model/omni_speech2s_llama.py +190 -0
- helm/clients/audio_language/llama_omni/model/language_model/omni_speech_llama.py +118 -0
- helm/clients/audio_language/llama_omni/model/omni_speech_arch.py +249 -0
- helm/clients/audio_language/llama_omni/model/speech_encoder/builder.py +9 -0
- helm/clients/audio_language/llama_omni/model/speech_encoder/speech_encoder.py +27 -0
- helm/clients/audio_language/llama_omni/model/speech_generator/builder.py +9 -0
- helm/clients/audio_language/llama_omni/model/speech_generator/generation.py +622 -0
- helm/clients/audio_language/llama_omni/model/speech_generator/speech_generator.py +104 -0
- helm/clients/audio_language/llama_omni/model/speech_projector/builder.py +9 -0
- helm/clients/audio_language/llama_omni/model/speech_projector/speech_projector.py +27 -0
- helm/clients/audio_language/llama_omni/preprocess.py +295 -0
- helm/clients/audio_language/llama_omni/utils.py +202 -0
- helm/clients/audio_language/llama_omni_client.py +2 -1
- helm/clients/audio_language/qwen2_5_omni_client.py +21 -8
- helm/clients/audio_language/qwen2_audiolm_client.py +2 -1
- helm/clients/audio_language/qwen_audiolm_client.py +2 -1
- helm/clients/audio_language/qwen_omni/configuration_qwen2_5_omni.py +519 -0
- helm/clients/audio_language/qwen_omni/modeling_qwen2_5_omni.py +4308 -0
- helm/clients/audio_language/qwen_omni/processing_qwen2_5_omni.py +270 -0
- helm/clients/audio_language/qwen_omni/qwen2_5_omni_utils/__init__.py +0 -0
- helm/clients/audio_language/qwen_omni/qwen2_5_omni_utils/v2_5/__init__.py +8 -0
- helm/clients/audio_language/qwen_omni/qwen2_5_omni_utils/v2_5/audio_process.py +56 -0
- helm/clients/audio_language/qwen_omni/qwen2_5_omni_utils/v2_5/vision_process.py +380 -0
- helm/clients/bedrock_client.py +63 -6
- helm/clients/cohere_client.py +3 -0
- helm/clients/dspy_client.py +135 -0
- helm/clients/google_client.py +2 -0
- helm/clients/http_model_client.py +2 -0
- helm/clients/huggingface_client.py +4 -3
- helm/clients/ibm_client.py +3 -1
- helm/clients/image_generation/adobe_vision_client.py +2 -0
- helm/clients/image_generation/aleph_alpha_image_generation_client.py +2 -0
- helm/clients/image_generation/cogview2/sr_pipeline/dsr_model.py +1 -1
- helm/clients/image_generation/cogview2_client.py +2 -1
- helm/clients/image_generation/dalle2_client.py +2 -0
- helm/clients/image_generation/dalle_mini_client.py +2 -1
- helm/clients/image_generation/deep_floyd_client.py +2 -0
- helm/clients/image_generation/huggingface_diffusers_client.py +2 -1
- helm/clients/image_generation/lexica_client.py +2 -0
- helm/clients/image_generation/mindalle/models/stage1/layers.py +2 -2
- helm/clients/image_generation/mindalle_client.py +2 -1
- helm/clients/image_generation/together_image_generation_client.py +2 -0
- helm/clients/megatron_client.py +2 -0
- helm/clients/mistral_client.py +2 -0
- helm/clients/moderation_api_client.py +2 -0
- helm/clients/openai_client.py +38 -21
- helm/clients/openai_responses_client.py +34 -8
- helm/clients/openrouter_client.py +31 -0
- helm/clients/palmyra_client.py +2 -1
- helm/clients/reka_client.py +2 -1
- helm/clients/stanfordhealthcare_azure_openai_client.py +2 -2
- helm/clients/stanfordhealthcare_http_model_client.py +2 -0
- helm/clients/test_huggingface_client.py +3 -3
- helm/clients/test_openrouter_client.py +69 -0
- helm/clients/together_client.py +52 -13
- helm/clients/vertexai_client.py +23 -11
- helm/clients/vision_language/huggingface_vision2seq_client.py +2 -1
- helm/clients/vision_language/huggingface_vlm_client.py +2 -0
- helm/clients/vision_language/idefics_client.py +2 -1
- helm/clients/vision_language/open_flamingo_client.py +2 -1
- helm/clients/vision_language/paligemma_client.py +2 -1
- helm/clients/vision_language/palmyra_vision_client.py +2 -0
- helm/clients/vision_language/qwen2_vlm_client.py +2 -1
- helm/clients/vision_language/qwen_vlm_client.py +2 -1
- helm/clients/vllm_client.py +43 -7
- helm/clients/vllm_granite_thinking_client.py +56 -0
- helm/clients/writer_client.py +5 -2
- helm/common/critique_request.py +0 -1
- helm/common/hierarchical_logger.py +103 -34
- helm/common/object_spec.py +23 -8
- helm/common/optional_dependencies.py +1 -1
- helm/common/test_general.py +4 -0
- helm/common/test_logging.py +94 -0
- helm/config/model_deployments.yaml +1001 -187
- helm/config/model_metadata.yaml +602 -18
- helm/config/tokenizer_configs.yaml +202 -5
- helm/proxy/cli.py +1 -1
- helm/proxy/example_queries.py +8 -8
- helm/proxy/retry.py +5 -0
- helm/proxy/server.py +2 -1
- helm/proxy/static/index.css +4 -0
- helm/proxy/static/index.js +7 -1
- helm/tokenizers/auto_tokenizer.py +2 -2
- helm/tokenizers/grok_tokenizer.py +2 -0
- helm/benchmark/metrics/aci_bench_metrics.py +0 -14
- helm/benchmark/metrics/chw_care_plan_metrics.py +0 -14
- helm/benchmark/metrics/dischargeme_metrics.py +0 -14
- helm/benchmark/metrics/med_dialog_metrics.py +0 -14
- helm/benchmark/metrics/medalign_metrics.py +0 -14
- helm/benchmark/metrics/medi_qa_metrics.py +0 -14
- helm/benchmark/metrics/medication_qa_metrics.py +0 -14
- helm/benchmark/metrics/mental_health_metrics.py +0 -14
- helm/benchmark/metrics/mimic_bhc_metrics.py +0 -14
- helm/benchmark/metrics/mimic_rrs_metrics.py +0 -14
- helm/benchmark/metrics/mtsamples_procedures_metrics.py +0 -14
- helm/benchmark/metrics/mtsamples_replicate_metrics.py +0 -14
- helm/benchmark/metrics/numeracy_metrics.py +0 -72
- helm/benchmark/metrics/starr_patient_instructions_metrics.py +0 -14
- helm/benchmark/metrics/test_numeracy_metrics.py +0 -95
- helm/benchmark/scenarios/audio_language/ultra_suite_asr_classification.py +0 -103
- helm/benchmark/scenarios/numeracy_scenario.py +0 -794
- helm/benchmark/static_build/assets/index-94295e78.js +0 -10
- helm/benchmark/static_build/assets/index-b9779128.css +0 -1
- helm/benchmark/static_build/assets/react-f82877fd.js +0 -85
- helm/benchmark/static_build/assets/recharts-4037aff0.js +0 -97
- helm/benchmark/static_build/assets/tremor-38a10867.js +0 -10
- {crfm_helm-0.5.6.dist-info → crfm_helm-0.5.10.dist-info}/WHEEL +0 -0
- {crfm_helm-0.5.6.dist-info → crfm_helm-0.5.10.dist-info}/entry_points.txt +0 -0
- {crfm_helm-0.5.6.dist-info → crfm_helm-0.5.10.dist-info}/licenses/LICENSE +0 -0
- {crfm_helm-0.5.6.dist-info → crfm_helm-0.5.10.dist-info}/top_level.txt +0 -0
- /helm/benchmark/static_build/assets/{air-overview-d2e6c49f.png → air-overview-DpBbyagA.png} +0 -0
- /helm/benchmark/static_build/assets/{crfm-logo-74391ab8.png → crfm-logo-Du4T1uWZ.png} +0 -0
- /helm/benchmark/static_build/assets/{heim-logo-3e5e3aa4.png → heim-logo-BJtQlEbV.png} +0 -0
- /helm/benchmark/static_build/assets/{helm-logo-simple-2ed5400b.png → helm-logo-simple-DzOhNN41.png} +0 -0
- /helm/benchmark/static_build/assets/{helm-safety-2907a7b6.png → helm-safety-COfndXuS.png} +0 -0
- /helm/benchmark/static_build/assets/{helmhero-28e90f4d.png → helmhero-D9TvmJsp.png} +0 -0
- /helm/benchmark/static_build/assets/{medhelm-overview-eac29843.png → medhelm-overview-CND0EIsy.png} +0 -0
- /helm/benchmark/static_build/assets/{medhelm-v1-overview-3ddfcd65.png → medhelm-v1-overview-Cu2tphBB.png} +0 -0
- /helm/benchmark/static_build/assets/{overview-74aea3d8.png → overview-BwypNWnk.png} +0 -0
- /helm/benchmark/static_build/assets/{process-flow-bd2eba96.png → process-flow-DWDJC733.png} +0 -0
- /helm/benchmark/static_build/assets/{vhelm-aspects-1437d673.png → vhelm-aspects-NiDQofvP.png} +0 -0
- /helm/benchmark/static_build/assets/{vhelm-framework-a1ca3f3f.png → vhelm-framework-NxJE4fdA.png} +0 -0
- /helm/benchmark/static_build/assets/{vhelm-model-8afb7616.png → vhelm-model-ypCL5Yvq.png} +0 -0
|
@@ -0,0 +1,380 @@
|
|
|
1
|
+
from __future__ import annotations

import base64
import logging
import math
import os
import sys
import time
import warnings
from functools import lru_cache
from io import BytesIO

import requests
import torch
import torchvision
from packaging import version
from PIL import Image
from torchvision import io, transforms
from torchvision.transforms import InterpolationMode
from typing import List, Optional, Union


logger = logging.getLogger(__name__)

# Spatial snapping unit: image height/width are rounded to multiples of 28.
# Presumably the vision encoder's patch size — TODO confirm against the model
# configuration.
IMAGE_FACTOR = 28
# Bounds on the total pixel count after resizing, expressed in 28x28 units.
MIN_PIXELS = 4 * 28 * 28
MAX_PIXELS = 16384 * 28 * 28
# Maximum allowed aspect ratio (long side / short side); enforced by smart_resize.
MAX_RATIO = 200

# Per-frame pixel bounds for video inputs.
VIDEO_MIN_PIXELS = 128 * 28 * 28
VIDEO_MAX_PIXELS = 768 * 28 * 28
# Sampled frame counts are rounded to multiples of this factor.
FRAME_FACTOR = 2
# Default frame-sampling rate (frames per second) when the caller gives none.
FPS = 2.0
# Default lower/upper bounds on the sampled frame count in fps mode.
FPS_MIN_FRAMES = 4
FPS_MAX_FRAMES = 768

# Set the maximum number of video token inputs.
# Here, 128K represents the maximum number of input tokens for the VLLM model.
# Remember to adjust it according to your own configuration.
VIDEO_TOTAL_PIXELS = int(float(os.environ.get("VIDEO_MAX_PIXELS", 128000 * 28 * 28 * 0.9)))
logger.info(f"set VIDEO_TOTAL_PIXELS: {VIDEO_TOTAL_PIXELS}")
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def round_by_factor(number: int, factor: int) -> int:
    """Round *number* to the nearest multiple of *factor*.

    Ties follow Python's built-in round() (banker's rounding).
    """
    multiples = round(number / factor)
    return multiples * factor
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def ceil_by_factor(number: int, factor: int) -> int:
    """Return the smallest multiple of *factor* that is >= *number*."""
    return factor * math.ceil(number / factor)
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def floor_by_factor(number: int, factor: int) -> int:
    """Return the largest multiple of *factor* that is <= *number*."""
    return factor * math.floor(number / factor)
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def smart_resize(
    height: int, width: int, factor: int = IMAGE_FACTOR, min_pixels: int = MIN_PIXELS, max_pixels: int = MAX_PIXELS
) -> tuple[int, int]:
    """Rescale (height, width) so that:

    1. both dimensions are divisible by ``factor``;
    2. the total pixel count lies within [``min_pixels``, ``max_pixels``];
    3. the aspect ratio is preserved as closely as possible.

    Raises:
        ValueError: if the aspect ratio exceeds MAX_RATIO.
    """
    if max(height, width) / min(height, width) > MAX_RATIO:
        raise ValueError(
            f"absolute aspect ratio must be smaller than {MAX_RATIO}, got {max(height, width) / min(height, width)}"
        )
    # Start from the nearest patch-aligned size, never below one patch.
    h_bar = max(factor, round_by_factor(height, factor))
    w_bar = max(factor, round_by_factor(width, factor))
    area = h_bar * w_bar
    if area > max_pixels:
        # Too large: shrink both sides by the same scale, flooring to keep
        # the result under the budget.
        shrink = math.sqrt((height * width) / max_pixels)
        h_bar = floor_by_factor(int(height / shrink), factor)
        w_bar = floor_by_factor(int(width / shrink), factor)
    elif area < min_pixels:
        # Too small: grow both sides by the same scale, ceiling to reach
        # the minimum budget.
        grow = math.sqrt(min_pixels / (height * width))
        h_bar = ceil_by_factor(int(height * grow), factor)
        w_bar = ceil_by_factor(int(width * grow), factor)
    return h_bar, w_bar
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
def to_rgb(pil_image: Image.Image) -> Image.Image:
    """Return an RGB version of *pil_image*; RGBA is composited onto white."""
    if pil_image.mode != "RGBA":
        return pil_image.convert("RGB")
    # Use the alpha channel as a paste mask so transparent regions
    # become white instead of black.
    canvas = Image.new("RGB", pil_image.size, (255, 255, 255))
    alpha_channel = pil_image.split()[3]
    canvas.paste(pil_image, mask=alpha_channel)
    return canvas
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
def fetch_image(ele, size_factor: int = IMAGE_FACTOR) -> Image.Image:
    """Load the image described by *ele* and resize it to a patch-aligned size.

    Args:
        ele: dict with either an "image" or "image_url" key. The value may be
            a PIL.Image, an http(s) URL, a "file://" path, a "data:image"
            base64 URI, or a plain local path. Optional keys:
            "resized_height"/"resized_width" (explicit target size) or
            "min_pixels"/"max_pixels" (pixel budget for smart_resize).
        size_factor: snapping factor passed to smart_resize.

    Returns:
        The loaded image converted to RGB and resized.

    Raises:
        ValueError: if the input cannot be recognized as an image source.
    """
    if "image" in ele:
        image = ele["image"]
    else:
        image = ele["image_url"]
    image_obj = None
    if isinstance(image, Image.Image):
        image_obj = image
    elif image.startswith("http://") or image.startswith("https://"):
        # Fix: bound the request with a timeout and fail loudly on HTTP
        # errors; previously a 404/500 body was handed to Image.open,
        # which raised a cryptic decode error (or hung forever on a
        # stalled connection).
        response = requests.get(image, stream=True, timeout=30)
        response.raise_for_status()
        image_obj = Image.open(BytesIO(response.content))
    elif image.startswith("file://"):
        image_obj = Image.open(image[7:])  # strip the "file://" scheme prefix
    elif image.startswith("data:image"):
        if "base64," in image:
            _, base64_data = image.split("base64,", 1)
            data = base64.b64decode(base64_data)
            image_obj = Image.open(BytesIO(data))
    else:
        # Fallback: treat the string as a local filesystem path.
        image_obj = Image.open(image)
    if image_obj is None:
        raise ValueError(f"Unrecognized image input, support local path, http url, base64 and PIL.Image, got {image}")
    image = to_rgb(image_obj)
    # resize
    if "resized_height" in ele and "resized_width" in ele:
        # Caller requested an explicit size; still snap it to the factor grid.
        resized_height, resized_width = smart_resize(
            int(ele["resized_height"]),
            int(ele["resized_width"]),
            factor=size_factor,
        )
    else:
        width, height = image.size
        min_pixels = int(ele.get("min_pixels", MIN_PIXELS))
        max_pixels = int(ele.get("max_pixels", MAX_PIXELS))
        resized_height, resized_width = smart_resize(
            height,
            width,
            factor=size_factor,
            min_pixels=min_pixels,
            max_pixels=max_pixels,
        )
    image = image.resize((resized_width, resized_height))

    return image
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
def smart_nframes(
    ele: dict,
    total_frames: int,
    video_fps: Union[int, float],
) -> int:
    """Calculate how many frames of a video to feed to the model.

    Args:
        ele (dict): video configuration. Accepts either `fps` or `nframes`:
            - nframes: exact number of frames to extract.
            - fps: sampling rate used to derive the frame count.
            - min_frames / max_frames: bounds, only used with fps.
        total_frames (int): original number of frames in the video.
        video_fps (int | float): original fps of the video.

    Raises:
        ValueError: nframes should in interval [FRAME_FACTOR, total_frames].

    Returns:
        int: the number of frames for video used for model inputs.
    """
    assert not ("fps" in ele and "nframes" in ele), "Only accept either `fps` or `nframes`"
    if "nframes" in ele:
        # Explicit count: just snap it to the frame factor.
        nframes = round_by_factor(ele["nframes"], FRAME_FACTOR)
    else:
        # Derive the count from the requested sampling rate, then clamp.
        fps = ele.get("fps", FPS)
        min_frames = ceil_by_factor(ele.get("min_frames", FPS_MIN_FRAMES), FRAME_FACTOR)
        max_frames = floor_by_factor(ele.get("max_frames", min(FPS_MAX_FRAMES, total_frames)), FRAME_FACTOR)
        nframes = total_frames / video_fps * fps
        if nframes > total_frames:
            logger.warning(f"smart_nframes: nframes[{nframes}] > total_frames[{total_frames}]")
        clamped = min(max(nframes, min_frames), max_frames, total_frames)
        nframes = floor_by_factor(clamped, FRAME_FACTOR)
    if not FRAME_FACTOR <= nframes <= total_frames:
        raise ValueError(f"nframes should in interval [{FRAME_FACTOR}, {total_frames}], but got {nframes}.")
    return nframes
|
|
181
|
+
|
|
182
|
+
|
|
183
|
+
def _read_video_torchvision(
    ele: dict,
):
    """read video using torchvision.io.read_video

    Args:
        ele (dict): a dict contains the configuration of video.
        support keys:
            - video: the path of video. support "file://", "http://", "https://" and local path.
            - video_start: the start time of video.
            - video_end: the end time of video.
    Returns:
        torch.Tensor: the video tensor with shape (T, C, H, W).
    """
    video_path = ele["video"]
    # Older torchvision releases cannot stream http(s) URLs and do not
    # understand "file://" URIs, so warn / strip the scheme accordingly.
    if version.parse(torchvision.__version__) < version.parse("0.19.0"):
        if "http://" in video_path or "https://" in video_path:
            warnings.warn("torchvision < 0.19.0 does not support http/https video path, please upgrade to 0.19.0.")
        if "file://" in video_path:
            video_path = video_path[7:]  # drop the "file://" prefix
    start_time = time.time()
    frames, _audio, metadata = io.read_video(
        video_path,
        start_pts=ele.get("video_start", 0.0),
        end_pts=ele.get("video_end", None),
        pts_unit="sec",
        output_format="TCHW",
    )
    total_frames = frames.size(0)
    video_fps = metadata["video_fps"]
    logger.info(f"torchvision: {video_path=}, {total_frames=}, {video_fps=}, time={time.time() - start_time:.3f}s")
    # Decide how many frames the model should see, then sample them evenly.
    nframes = smart_nframes(ele, total_frames=total_frames, video_fps=video_fps)
    sample_indices = torch.linspace(0, total_frames - 1, nframes).round().long()
    sample_fps = nframes / max(total_frames, 1e-6) * video_fps
    return frames[sample_indices], sample_fps
|
|
218
|
+
|
|
219
|
+
|
|
220
|
+
def is_decord_available() -> bool:
    """Check whether the optional `decord` video-decoding package is installed."""
    from importlib.util import find_spec

    return find_spec("decord") is not None
|
|
224
|
+
|
|
225
|
+
|
|
226
|
+
def _read_video_decord(
    ele: dict,
):
    """read video using decord.VideoReader

    Args:
        ele (dict): a dict contains the configuration of video.
        support keys:
            - video: the path of video. support "file://", "http://", "https://" and local path.
            - video_start: the start time of video.
            - video_end: the end time of video.
    Returns:
        torch.Tensor: the video tensor with shape (T, C, H, W).
    """
    import decord

    video_path = ele["video"]
    start_time = time.time()
    reader = decord.VideoReader(video_path)
    # TODO: support start_pts and end_pts
    if "video_start" in ele or "video_end" in ele:
        raise NotImplementedError("not support start_pts and end_pts in decord for now.")
    total_frames = len(reader)
    video_fps = reader.get_avg_fps()
    logger.info(f"decord: {video_path=}, {total_frames=}, {video_fps=}, time={time.time() - start_time:.3f}s")
    # Decide how many frames to keep, sample them evenly across the clip,
    # and convert decord's NHWC ndarray batch into a TCHW tensor.
    nframes = smart_nframes(ele, total_frames=total_frames, video_fps=video_fps)
    sample_indices = torch.linspace(0, total_frames - 1, nframes).round().long().tolist()
    frames = torch.tensor(reader.get_batch(sample_indices).asnumpy()).permute(0, 3, 1, 2)  # Convert to TCHW format
    sample_fps = nframes / max(total_frames, 1e-6) * video_fps
    return frames, sample_fps
|
|
256
|
+
|
|
257
|
+
|
|
258
|
+
# Dispatch table: backend name -> reader function. Both readers take the video
# config dict and return (frames tensor in TCHW layout, effective sample fps).
VIDEO_READER_BACKENDS = {
    "decord": _read_video_decord,
    "torchvision": _read_video_torchvision,
}

# Optional environment override for the backend choice made in
# get_video_reader_backend(); None means "auto-select".
FORCE_QWENVL_VIDEO_READER = os.getenv("FORCE_QWENVL_VIDEO_READER", None)
|
|
265
|
+
|
|
266
|
+
@lru_cache(maxsize=1)
def get_video_reader_backend() -> str:
    """Select the video reader backend once and cache the decision.

    The FORCE_QWENVL_VIDEO_READER environment override takes priority;
    otherwise decord is preferred when installed, with torchvision as the
    fallback.
    """
    if FORCE_QWENVL_VIDEO_READER is not None:
        backend = FORCE_QWENVL_VIDEO_READER
    else:
        backend = "decord" if is_decord_available() else "torchvision"
    print(f"qwen-vl-utils using {backend} to read video.", file=sys.stderr)
    return backend
|
|
276
|
+
|
|
277
|
+
|
|
278
|
+
def fetch_video(ele: dict, image_factor: int = IMAGE_FACTOR, return_video_sample_fps: bool = False):
    """Load a video (file path or pre-extracted frame list) and resize it for model input.

    Args:
        ele: video configuration dict. "video" is either a path/URL string or a
            list/tuple of per-frame image references. May also carry
            "min_pixels", "max_pixels", "total_pixels", "resized_height",
            "resized_width", and "fps" overrides.
        image_factor: rounding factor passed to smart_resize / fetch_image.
        return_video_sample_fps: when True, also return the effective sampling
            fps (for frame lists, the "fps" value from the config, default 2.0).

    Returns:
        torch.Tensor (T, C, H, W) for a path input, or a list of images for a
        frame-list input; optionally paired with the sample fps.
    """
    if isinstance(ele["video"], str):
        video_reader_backend = get_video_reader_backend()
        try:
            video, sample_fps = VIDEO_READER_BACKENDS[video_reader_backend](ele)
        except Exception as e:
            # Best-effort fallback: if the preferred backend fails for any
            # reason, retry once with torchvision before giving up.
            logger.warning(f"video_reader_backend {video_reader_backend} error, use torchvision as default, msg: {e}")
            video, sample_fps = VIDEO_READER_BACKENDS["torchvision"](ele)

        nframes, _, height, width = video.shape
        min_pixels = ele.get("min_pixels", VIDEO_MIN_PIXELS)
        total_pixels = ele.get("total_pixels", VIDEO_TOTAL_PIXELS)
        # Per-frame pixel budget: share total_pixels across frames (scaled by
        # FRAME_FACTOR), capped by VIDEO_MAX_PIXELS, but never below ~105% of
        # min_pixels so min/max cannot invert.
        max_pixels = max(min(VIDEO_MAX_PIXELS, total_pixels / nframes * FRAME_FACTOR), int(min_pixels * 1.05))
        max_pixels_supposed = ele.get("max_pixels", max_pixels)
        if max_pixels_supposed > max_pixels:
            logger.warning(f"The given max_pixels[{max_pixels_supposed}] exceeds limit[{max_pixels}].")
        max_pixels = min(max_pixels_supposed, max_pixels)
        if "resized_height" in ele and "resized_width" in ele:
            # Explicit target size: only snap it to the factor grid.
            resized_height, resized_width = smart_resize(
                ele["resized_height"],
                ele["resized_width"],
                factor=image_factor,
            )
        else:
            # Derive a size from the source dimensions within the pixel budget.
            resized_height, resized_width = smart_resize(
                height,
                width,
                factor=image_factor,
                min_pixels=min_pixels,
                max_pixels=max_pixels,
            )
        video = transforms.functional.resize(
            video,
            [resized_height, resized_width],
            interpolation=InterpolationMode.BICUBIC,
            antialias=True,
        ).float()
        if return_video_sample_fps:
            return video, sample_fps
        return video
    else:
        # Frame-list input: fetch each frame as an image, forwarding the
        # remaining config (minus "type"/"video") to fetch_image.
        assert isinstance(ele["video"], (list, tuple))
        process_info = ele.copy()
        process_info.pop("type", None)
        process_info.pop("video", None)
        images = [
            fetch_image({"image": video_element, **process_info}, size_factor=image_factor)
            for video_element in ele["video"]
        ]
        # Pad by repeating the last frame so the count is a FRAME_FACTOR multiple.
        nframes = ceil_by_factor(len(images), FRAME_FACTOR)
        if len(images) < nframes:
            images.extend([images[-1]] * (nframes - len(images)))
        if return_video_sample_fps:
            return images, process_info.pop("fps", 2.0)
        return images
|
|
333
|
+
|
|
334
|
+
|
|
335
|
+
def extract_vision_info(conversations) -> list[dict]:
    """Collect every image/video content element from one or many conversations.

    Args:
        conversations: either a single conversation (a list of message dicts)
            or a list of such conversations. Each message's "content" may be a
            list of content-element dicts.

    Returns:
        list[dict]: the content elements carrying visual inputs, in order of
        appearance.
    """
    vision_infos = []
    # Normalize to a list of conversations. Previously the normalized variable
    # was only assigned in the single-conversation branch, so passing a list
    # of conversations (the list[list[dict]] case) raised NameError.
    if isinstance(conversations[0], dict):
        conversations_p = [conversations]
    else:
        conversations_p = conversations
    for conversation in conversations_p:
        for message in conversation:
            # String content carries no visual elements; only list content does.
            if isinstance(message["content"], list):
                for ele in message["content"]:
                    # Use .get so elements without a "type" key (but with an
                    # explicit image/image_url/video key) don't raise KeyError.
                    if (
                        "image" in ele
                        or "image_url" in ele
                        or "video" in ele
                        or ele.get("type") in ("image", "image_url", "video")
                    ):
                        vision_infos.append(ele)
    return vision_infos
|
|
351
|
+
|
|
352
|
+
|
|
353
|
+
def process_vision_info(
    conversations: list[dict] | list[list[dict]],
    return_video_kwargs: bool = False,
):
    """Fetch all visual inputs referenced by the given conversation(s).

    Returns a pair (image_inputs, video_inputs), each replaced by None when
    empty; when return_video_kwargs is True a third element {"fps": [...]}
    lists the sampling fps of each fetched video.
    """
    # Read images or videos
    image_inputs: Optional[List] = []
    video_inputs: Optional[List] = []
    video_sample_fps_list = []
    for info in extract_vision_info(conversations):
        if "image" in info or "image_url" in info:
            assert image_inputs is not None
            image_inputs.append(fetch_image(info))
        elif "video" in info:
            assert video_inputs is not None
            clip, clip_fps = fetch_video(info, return_video_sample_fps=True)
            video_sample_fps_list.append(clip_fps)
            video_inputs.append(clip)
        else:
            raise ValueError("image, image_url or video should in content.")
    # Callers expect None (not an empty list) when a modality is absent.
    if image_inputs is not None and not image_inputs:
        image_inputs = None
    if video_inputs is not None and not video_inputs:
        video_inputs = None
    if return_video_kwargs:
        return image_inputs, video_inputs, {"fps": video_sample_fps_list}
    return image_inputs, video_inputs
|
helm/clients/bedrock_client.py
CHANGED
|
@@ -7,8 +7,10 @@ from datetime import datetime
|
|
|
7
7
|
|
|
8
8
|
from helm.common.cache import CacheConfig
|
|
9
9
|
from helm.clients.client import CachingClient, truncate_and_tokenize_response_text
|
|
10
|
+
from helm.common.hierarchical_logger import hexception
|
|
10
11
|
from helm.common.request import Request, RequestResult, GeneratedOutput, wrap_request_time
|
|
11
12
|
from helm.clients.bedrock_utils import get_bedrock_client, get_bedrock_client_v1
|
|
13
|
+
from helm.proxy.retry import NonRetriableException
|
|
12
14
|
from helm.tokenizers.tokenizer import Tokenizer
|
|
13
15
|
|
|
14
16
|
|
|
@@ -41,6 +43,7 @@ class BedrockClient(CachingClient):
|
|
|
41
43
|
tokenizer_name: str,
|
|
42
44
|
assumed_role: Optional[str] = None,
|
|
43
45
|
region: Optional[str] = None,
|
|
46
|
+
bedrock_model_id: Optional[str] = None,
|
|
44
47
|
):
|
|
45
48
|
super().__init__(cache_config=cache_config)
|
|
46
49
|
self.tokenizer = tokenizer
|
|
@@ -49,15 +52,19 @@ class BedrockClient(CachingClient):
|
|
|
49
52
|
assumed_role=assumed_role or os.environ.get("BEDROCK_ASSUME_ROLE", None),
|
|
50
53
|
region=region,
|
|
51
54
|
)
|
|
55
|
+
self.bedrock_model_id = bedrock_model_id
|
|
52
56
|
|
|
53
57
|
def make_request(self, request: Request) -> RequestResult:
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
# check if model_name starts with "amazon-"
|
|
57
|
-
if self.model_provider == "amazon":
|
|
58
|
-
model_id = f"{self.model_provider}.{model_name}"
|
|
58
|
+
if self.bedrock_model_id:
|
|
59
|
+
model_id = self.bedrock_model_id
|
|
59
60
|
else:
|
|
60
|
-
model_id
|
|
61
|
+
# model_id should be something like "amazon.titan-tg1-large", replace amazon- prefix with model creator name
|
|
62
|
+
model_name = request.model.split("/")[-1]
|
|
63
|
+
# check if model_name starts with "amazon-"
|
|
64
|
+
if self.model_provider == "amazon":
|
|
65
|
+
model_id = f"{self.model_provider}.{model_name}"
|
|
66
|
+
else:
|
|
67
|
+
model_id = model_name.replace("amazon-", f"{self.model_provider}.")
|
|
61
68
|
|
|
62
69
|
raw_request = self.convert_request_to_raw_request(request)
|
|
63
70
|
|
|
@@ -75,6 +82,7 @@ class BedrockClient(CachingClient):
|
|
|
75
82
|
response, cached = self.cache.get(cache_key, wrap_request_time(do_it))
|
|
76
83
|
|
|
77
84
|
except Exception as error:
|
|
85
|
+
hexception(error)
|
|
78
86
|
return RequestResult(
|
|
79
87
|
success=False,
|
|
80
88
|
cached=False,
|
|
@@ -322,3 +330,52 @@ class BedrockLlamaClient(BedrockClient):
|
|
|
322
330
|
completions.append(completion)
|
|
323
331
|
|
|
324
332
|
return completions
|
|
333
|
+
|
|
334
|
+
|
|
335
|
+
class BedrockPalmyraClient(BedrockClient):
    """Amazon Bedrock model for Palmyra models"""

    # Map Bedrock finish reasons onto HELM's finish-reason vocabulary.
    _COMPLETION_REASON_TO_FINISH_REASON = {
        "length": "length",
        "stop": "endoftext",
    }

    model_provider = "writer"

    def _get_messages_from_request(self, request: Request) -> List[Dict]:
        """Build the chat `messages` payload from either `messages` or `prompt`."""
        if request.prompt and request.messages:
            raise NonRetriableException(f"Only one of `prompt` and `messages` may be set in request: {request}")
        if request.multimodal_prompt:
            raise NonRetriableException("`multimodal_prompt` is not supported by WriterClient")
        if not request.messages:
            # Plain-prompt request: wrap it as a single user turn.
            return [{"role": "user", "content": request.prompt}]
        return [{"role": msg["role"], "content": msg["content"]} for msg in request.messages]

    def convert_request_to_raw_request(self, request: Request) -> Dict:
        """Translate a HELM Request into the Palmyra-on-Bedrock JSON body."""
        raw_request: Dict = {}
        raw_request["messages"] = self._get_messages_from_request(request)
        raw_request["temperature"] = request.temperature
        raw_request["top_p"] = request.top_p
        raw_request["max_tokens"] = request.max_tokens
        raw_request["stop"] = request.stop_sequences
        raw_request["seed"] = request.random
        raw_request["presence_penalty"] = request.presence_penalty
        raw_request["frequency_penalty"] = request.frequency_penalty
        return raw_request

    def convert_raw_response_to_completions(self, response: Dict, request: Request) -> List[GeneratedOutput]:
        """Turn the raw Bedrock response into HELM GeneratedOutput completions."""
        completions: List[GeneratedOutput] = []
        if response.get("object") == "error":
            raise Exception(f"BedrockPalmyraClient request failed: {response['message']}")
        for choice in response["choices"]:
            message = choice["message"]
            assert message["role"] == "assistant"
            raw_reason = choice["finish_reason"]
            # Unknown reasons fall back to their lowercased raw value.
            finish_reason = self._COMPLETION_REASON_TO_FINISH_REASON.get(raw_reason, raw_reason.lower())
            completions.append(
                truncate_and_tokenize_response_text(
                    message["content"], request, self.tokenizer, self.tokenizer_name, finish_reason
                )
            )
        return completions
|
helm/clients/cohere_client.py
CHANGED
|
@@ -3,6 +3,7 @@ import requests
|
|
|
3
3
|
from typing import List, Optional, Sequence, TypedDict
|
|
4
4
|
|
|
5
5
|
from helm.common.cache import CacheConfig
|
|
6
|
+
from helm.common.hierarchical_logger import hexception
|
|
6
7
|
from helm.common.optional_dependencies import handle_module_not_found_error
|
|
7
8
|
from helm.common.request import (
|
|
8
9
|
wrap_request_time,
|
|
@@ -123,6 +124,7 @@ class CohereClient(CachingClient):
|
|
|
123
124
|
|
|
124
125
|
response, cached = self.cache.get(raw_request, wrap_request_time(do_it))
|
|
125
126
|
except (requests.exceptions.RequestException, AssertionError) as e:
|
|
127
|
+
hexception(e)
|
|
126
128
|
error: str = f"CohereClient error: {e}"
|
|
127
129
|
return RequestResult(success=False, cached=False, error=error, completions=[], embedding=[])
|
|
128
130
|
|
|
@@ -232,6 +234,7 @@ class CohereChatClient(CachingClient):
|
|
|
232
234
|
|
|
233
235
|
response, cached = self.cache.get(raw_request, wrap_request_time(do_it))
|
|
234
236
|
except (requests.exceptions.RequestException, AssertionError) as e:
|
|
237
|
+
hexception(e)
|
|
235
238
|
error: str = f"CohereClient error: {e}"
|
|
236
239
|
return RequestResult(success=False, cached=False, error=error, completions=[], embedding=[])
|
|
237
240
|
|
|
@@ -0,0 +1,135 @@
|
|
|
1
|
+
import hashlib
|
|
2
|
+
import tempfile
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
from typing import Optional
|
|
5
|
+
|
|
6
|
+
from helm.common.optional_dependencies import handle_module_not_found_error
|
|
7
|
+
|
|
8
|
+
try:
|
|
9
|
+
import dspy
|
|
10
|
+
except ModuleNotFoundError as e:
|
|
11
|
+
handle_module_not_found_error(e, ["dspy"])
|
|
12
|
+
|
|
13
|
+
from helm.benchmark.runner import _get_current_run_spec_name
|
|
14
|
+
from helm.clients.client import Client
|
|
15
|
+
from helm.common.cache import Cache, CacheConfig
|
|
16
|
+
from helm.common.general import ensure_file_downloaded
|
|
17
|
+
from helm.common.hierarchical_logger import hlog
|
|
18
|
+
from helm.common.request import Request, RequestResult, GeneratedOutput
|
|
19
|
+
from helm.proxy.retry import NonRetriableException
|
|
20
|
+
from helm.tokenizers.tokenizer import Tokenizer
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class DSPyClient(Client):
    """
    A HELM client that uses DSPy for inference instead of directly calling the model.
    """

    def __init__(
        self,
        cache_config: CacheConfig,
        tokenizer: Tokenizer,
        tokenizer_name: str,
        model_name: Optional[str] = None,
        dspy_agent_url: Optional[str] = None,
        dspy_module: Optional[str] = None,
        dspy_api_model: Optional[str] = None,
        dspy_api_base: Optional[str] = None,
        api_key: Optional[str] = None,
    ):
        """
        Initializes the DSPyClient.

        Args:
            cache_config (CacheConfig): Configuration for caching.
            tokenizer (Tokenizer): Tokenizer instance (unused but required by HELM interface).
            tokenizer_name (str): Name of the tokenizer (unused but required by HELM interface).
            model_name (str): The official model name used within HELM.
            dspy_agent_url (str): URL for the DSPy agent JSON configuration.
            dspy_module (str): The module to use with DSPy (Predict, ChainOfThought).
            dspy_api_model (str): The actual model name (API) to use with DSPy.
            dspy_api_base (str): Base URL for the model API.
            api_key (str): API key for the DSPy model provider.

        Raises:
            NonRetriableException: if model_name or dspy_api_model is missing.
        """

        if not model_name:
            raise NonRetriableException("Please specify the model name in model_deployments.yaml")
        if not dspy_api_model:
            raise NonRetriableException("Please specify dspy_api_model in model_deployments.yaml")

        # Reasoning-style models get temperature 1.0 and a large token budget;
        # everything else runs deterministically at temperature 0.0.
        if ("o3-mini" in model_name) or ("deepseek-r1" in model_name):
            self.lm = dspy.LM(
                model=dspy_api_model, api_base=dspy_api_base, api_key=api_key, temperature=1.0, max_tokens=100000
            )
        else:
            self.lm = dspy.LM(model=dspy_api_model, api_base=dspy_api_base, api_key=api_key, temperature=0.0)

        # The scenario name is the run-spec prefix before the first ":"; it is
        # used to pick the scenario-specific DSPy agent config.
        run_spec_name = _get_current_run_spec_name()
        self.scenario_name = run_spec_name.split(":")[0] if run_spec_name else "unknown"
        self.model_name = model_name
        # May contain a "{scenario}" placeholder filled in per scenario.
        self.dspy_agent_url_template = dspy_agent_url
        self.dspy_module = dspy_module
        self.dspy_api_model = dspy_api_model
        self.dspy_api_base = dspy_api_base
        self.api_key = api_key
        # Downloaded agent JSON files are cached under the system temp dir.
        self.cache_dir = Path(tempfile.gettempdir()) / "dspy_agent_cache"
        self.cache_dir.mkdir(exist_ok=True)
        # Tracks which agent file is currently loaded to avoid re-downloading.
        self._current_cache_file = None
        self._load_agent_for_scenario()
        self.cache = Cache(cache_config) if cache_config else None

    def _get_cache_file(self, url: str) -> Path:
        """Return a deterministic local cache path derived from the agent URL."""
        # md5 is used only as a filename hash here, not for security.
        url_hash = hashlib.md5(url.encode()).hexdigest()
        return self.cache_dir / f"agent_{url_hash}.json"

    def _load_agent_for_scenario(self) -> None:
        """(Re)build the DSPy agent and load its scenario-specific weights, if any.

        Raises:
            ValueError: if dspy_module is not "ChainOfThought" or "Predict".
            NonRetriableException: if the agent config cannot be downloaded/loaded.
        """
        if self.dspy_module == "ChainOfThought":
            self.agent = dspy.ChainOfThought("inputs -> output")
        elif self.dspy_module == "Predict":
            self.agent = dspy.Predict("inputs -> output")
        else:
            raise ValueError(f"Unknown dspy_module: {self.dspy_module}")

        dspy_agent_url = (
            self.dspy_agent_url_template.format(scenario=self.scenario_name) if self.dspy_agent_url_template else None
        )
        if dspy_agent_url:
            cache_file = self._get_cache_file(dspy_agent_url)
            # Already loaded this exact agent file; nothing to do.
            if self._current_cache_file == cache_file:
                return
            try:
                ensure_file_downloaded(source_url=dspy_agent_url, target_path=str(cache_file))
                self.agent.load(str(cache_file))
                self._current_cache_file = cache_file

            except Exception as e:
                raise NonRetriableException(f"Failed to load DSPy agent from URL {dspy_agent_url}: {str(e)}")
        hlog(
            f"DSPy client initialized - HELM Model: {self.model_name}, DSPy Model: {self.dspy_api_model}, API Base: {self.dspy_api_base}, API Key: {'***' if self.api_key else None}, DSPy Agent: {dspy_agent_url}, DSPy Module: {self.dspy_module}"
        )

    def make_request(self, request: Request) -> RequestResult:
        """Run one inference through the DSPy agent.

        Returns a successful RequestResult with a single completion, or a
        failed RequestResult carrying the exception message.
        """
        # The active scenario can change between requests within a run; reload
        # the matching agent when it does.
        run_spec_name = _get_current_run_spec_name()
        current_scenario = run_spec_name.split(":")[0] if run_spec_name else "unknown"
        if current_scenario != self.scenario_name:
            self.scenario_name = current_scenario
            if self.dspy_agent_url_template:
                self._load_agent_for_scenario()
            else:
                hlog(
                    f"DSPy client initialized - HELM Model: {self.model_name}, DSPy Model: {self.dspy_api_model}, API Base: {self.dspy_api_base}, API Key: {'***' if self.api_key else None}, DSPy Agent: None, DSPy Module: {self.dspy_module}"
                )

        # Flatten chat messages into one prompt string, dropping system turns.
        prompt_text = request.prompt
        if request.messages:
            prompt_text = " ".join(msg["content"] for msg in request.messages if msg.get("role") != "system")

        try:
            with dspy.context(lm=self.lm):
                prediction = self.agent(inputs=prompt_text)
                # ChainOfThought/Predict expose .output; fall back to str() otherwise.
                output_text = prediction.output if hasattr(prediction, "output") else str(prediction)
        except Exception as e:
            return RequestResult(success=False, cached=False, completions=[], embedding=[], error=str(e))

        # NOTE(review): token-level logprobs are not available through DSPy,
        # so logprob is reported as 0.0 with no per-token breakdown.
        output = GeneratedOutput(text=output_text, logprob=0.0, tokens=[])
        return RequestResult(success=True, cached=False, completions=[output], embedding=[])
|
helm/clients/google_client.py
CHANGED
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
from typing import List, Dict
|
|
2
2
|
|
|
3
3
|
from helm.common.cache import CacheConfig
|
|
4
|
+
from helm.common.hierarchical_logger import hexception
|
|
4
5
|
from helm.common.request import Request, RequestResult, GeneratedOutput, Token
|
|
5
6
|
from helm.clients.client import CachingClient, truncate_sequence
|
|
6
7
|
|
|
@@ -44,6 +45,7 @@ class GoogleClient(CachingClient):
|
|
|
44
45
|
# If results are not cached for a given query, fail fast
|
|
45
46
|
response, cached = self.cache.get(cache_key, fail)
|
|
46
47
|
except RuntimeError as e:
|
|
48
|
+
hexception(e)
|
|
47
49
|
error: str = f"GoogleClient error: {e}"
|
|
48
50
|
return RequestResult(success=False, cached=False, error=error, completions=[], embedding=[])
|
|
49
51
|
|
|
@@ -3,6 +3,7 @@ from dataclasses import asdict
|
|
|
3
3
|
from typing import Any, Dict
|
|
4
4
|
|
|
5
5
|
from helm.common.cache import CacheConfig
|
|
6
|
+
from helm.common.hierarchical_logger import hexception
|
|
6
7
|
from helm.common.request import (
|
|
7
8
|
wrap_request_time,
|
|
8
9
|
Request,
|
|
@@ -76,5 +77,6 @@ class HTTPModelClient(CachingClient):
|
|
|
76
77
|
request_time=response["request_time"],
|
|
77
78
|
)
|
|
78
79
|
except requests.exceptions.RequestException as e:
|
|
80
|
+
hexception(e)
|
|
79
81
|
error: str = f"Request error: {e}"
|
|
80
82
|
return RequestResult(success=False, cached=False, error=error, completions=[], embedding=[])
|