crfm-helm 0.5.7__py3-none-any.whl → 0.5.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of crfm-helm has been flagged as potentially problematic. See the registry's advisory page for this release for more details.
- {crfm_helm-0.5.7.dist-info → crfm_helm-0.5.9.dist-info}/METADATA +7 -77
- {crfm_helm-0.5.7.dist-info → crfm_helm-0.5.9.dist-info}/RECORD +315 -282
- helm/benchmark/adaptation/adapter_spec.py +10 -0
- helm/benchmark/adaptation/adapters/multimodal/multiple_choice_joint_multimodal_adapter.py +11 -3
- helm/benchmark/adaptation/adapters/multiple_choice_joint_adapter.py +11 -8
- helm/benchmark/annotation/aci_bench_annotator.py +11 -22
- helm/benchmark/annotation/alrage_annotator.py +90 -0
- helm/benchmark/annotation/chw_care_plan_annotator.py +10 -21
- helm/benchmark/annotation/dischargeme_annotator.py +11 -22
- helm/benchmark/annotation/med_dialog_annotator.py +11 -22
- helm/benchmark/annotation/medalign_annotator.py +11 -22
- helm/benchmark/annotation/medi_qa_annotator.py +11 -22
- helm/benchmark/annotation/medication_qa_annotator.py +11 -22
- helm/benchmark/annotation/mental_health_annotator.py +11 -22
- helm/benchmark/annotation/mimic_bhc_annotator.py +11 -22
- helm/benchmark/annotation/mimic_rrs_annotator.py +11 -22
- helm/benchmark/annotation/model_as_judge.py +23 -18
- helm/benchmark/annotation/mtsamples_procedures_annotator.py +11 -22
- helm/benchmark/annotation/mtsamples_replicate_annotator.py +11 -22
- helm/benchmark/annotation/starr_patient_instructions_annotator.py +11 -22
- helm/benchmark/metrics/air_bench_metrics.py +3157 -1
- helm/benchmark/metrics/alrage_metric.py +35 -0
- helm/benchmark/metrics/basic_metrics.py +267 -2
- helm/benchmark/metrics/bbq_metrics.py +12 -0
- helm/benchmark/metrics/classification_metrics.py +19 -1
- helm/benchmark/metrics/conv_fin_qa_calc_metrics.py +12 -1
- helm/benchmark/metrics/dry_run_metrics.py +30 -1
- helm/benchmark/metrics/efficiency_metrics.py +74 -0
- helm/benchmark/metrics/ehr_sql_metrics.py +57 -1
- helm/benchmark/metrics/evaluate_reference_metrics.py +311 -0
- helm/benchmark/metrics/gpqa_chain_of_thought_metric.py +13 -1
- helm/benchmark/metrics/helpdesk_call_summarization_metrics.py +13 -1
- helm/benchmark/metrics/ifeval_metrics.py +13 -1
- helm/benchmark/metrics/instruction_following_critique_metrics.py +41 -1
- helm/benchmark/metrics/kpi_edgar_metrics.py +21 -0
- helm/benchmark/metrics/language_modeling_metrics.py +13 -1
- helm/benchmark/metrics/live_qa_metrics.py +13 -1
- helm/benchmark/metrics/llm_jury_metrics.py +13 -1
- helm/benchmark/metrics/medcalc_bench_metrics.py +14 -1
- helm/benchmark/metrics/medec_metrics.py +25 -2
- helm/benchmark/metrics/metric.py +25 -0
- helm/benchmark/metrics/mimiciv_billing_code_metrics.py +32 -1
- helm/benchmark/metrics/omni_math_metrics.py +13 -1
- helm/benchmark/metrics/safety_metrics.py +13 -1
- helm/benchmark/metrics/seahelm_metrics.py +14 -1
- helm/benchmark/metrics/summac/model_summac.py +2 -2
- helm/benchmark/metrics/summarization_metrics.py +129 -1
- helm/benchmark/metrics/toxicity_metrics.py +31 -1
- helm/benchmark/metrics/ultra_suite_asr_classification_metrics.py +52 -0
- helm/benchmark/metrics/wildbench_metrics.py +21 -1
- helm/benchmark/presentation/run_display.py +13 -3
- helm/benchmark/presentation/run_entry.py +2 -2
- helm/benchmark/presentation/schema.py +5 -22
- helm/benchmark/presentation/summarize.py +180 -11
- helm/benchmark/presentation/taxonomy_info.py +20 -0
- helm/benchmark/run.py +1 -1
- helm/benchmark/run_expander.py +4 -0
- helm/benchmark/run_specs/arabic_run_specs.py +140 -16
- helm/benchmark/run_specs/bluex_run_specs.py +1 -1
- helm/benchmark/run_specs/classic_run_specs.py +2 -2
- helm/benchmark/run_specs/long_context_run_specs.py +2 -2
- helm/benchmark/run_specs/medhelm/__init__.py +0 -0
- helm/benchmark/run_specs/medhelm/benchmark_config.py +219 -0
- helm/benchmark/run_specs/medhelm_run_specs.py +362 -52
- helm/benchmark/run_specs/speech_disorder_audio_run_specs.py +6 -2
- helm/benchmark/scenarios/aci_bench_scenario.py +23 -0
- helm/benchmark/scenarios/air_bench_scenario.py +21 -0
- helm/benchmark/scenarios/alrage_scenario.py +54 -0
- helm/benchmark/scenarios/anthropic_hh_rlhf_scenario.py +23 -1
- helm/benchmark/scenarios/anthropic_red_team_scenario.py +12 -1
- helm/benchmark/scenarios/arabic_exams_scenario.py +114 -0
- helm/benchmark/scenarios/arabic_mmlu_scenario.py +8 -4
- helm/benchmark/scenarios/aratrust_scenario.py +19 -0
- helm/benchmark/scenarios/audio_language/ultra_suite_asr_classification_scenario.py +24 -54
- helm/benchmark/scenarios/audio_language/ultra_suite_asr_transcription_scenario.py +19 -48
- helm/benchmark/scenarios/audio_language/ultra_suite_classification_scenario.py +22 -61
- helm/benchmark/scenarios/audio_language/ultra_suite_disorder_breakdown_scenario.py +21 -29
- helm/benchmark/scenarios/audio_language/ultra_suite_disorder_symptoms_scenario.py +21 -60
- helm/benchmark/scenarios/babi_qa_scenario.py +15 -0
- helm/benchmark/scenarios/banking77_scenario.py +21 -0
- helm/benchmark/scenarios/bbq_scenario.py +15 -0
- helm/benchmark/scenarios/best_chatgpt_prompts.yaml +473 -0
- helm/benchmark/scenarios/bird_sql_scenario.py +18 -0
- helm/benchmark/scenarios/bluex_scenario.py +6 -2
- helm/benchmark/scenarios/bold_scenario.py +15 -0
- helm/benchmark/scenarios/boolq_scenario.py +20 -0
- helm/benchmark/scenarios/chw_care_plan_scenario.py +23 -0
- helm/benchmark/scenarios/civil_comments_scenario.py +13 -0
- helm/benchmark/scenarios/clear_scenario.py +23 -0
- helm/benchmark/scenarios/cleva_scenario.py +479 -0
- helm/benchmark/scenarios/code_scenario.py +28 -0
- helm/benchmark/scenarios/commonsense_scenario.py +32 -0
- helm/benchmark/scenarios/compositional_instructions.yaml +70 -0
- helm/benchmark/scenarios/conv_fin_qa_calc_scenario.py +21 -0
- helm/benchmark/scenarios/copyright_scenario.py +35 -1
- helm/benchmark/scenarios/cti_to_mitre_scenario.py +21 -0
- helm/benchmark/scenarios/czech_bank_qa_scenario.py +18 -0
- helm/benchmark/scenarios/decodingtrust_adv_demonstration_scenario.py +22 -1
- helm/benchmark/scenarios/decodingtrust_adv_robustness_scenario.py +23 -1
- helm/benchmark/scenarios/decodingtrust_fairness_scenario.py +22 -1
- helm/benchmark/scenarios/decodingtrust_machine_ethics_scenario.py +21 -1
- helm/benchmark/scenarios/decodingtrust_ood_robustness_scenario.py +13 -0
- helm/benchmark/scenarios/decodingtrust_privacy_scenario.py +13 -1
- helm/benchmark/scenarios/decodingtrust_stereotype_bias_scenario.py +13 -1
- helm/benchmark/scenarios/decodingtrust_toxicity_prompts_scenario.py +13 -1
- helm/benchmark/scenarios/dischargeme_scenario.py +24 -0
- helm/benchmark/scenarios/disinformation_scenario.py +22 -0
- helm/benchmark/scenarios/dyck_language_scenario.py +15 -0
- helm/benchmark/scenarios/ehrshot_scenario.py +22 -0
- helm/benchmark/scenarios/enem_challenge_scenario.py +19 -0
- helm/benchmark/scenarios/entity_data_imputation_scenario.py +14 -0
- helm/benchmark/scenarios/entity_matching_scenario.py +14 -0
- helm/benchmark/scenarios/fin_qa_scenario.py +20 -0
- helm/benchmark/scenarios/financebench_scenario.py +21 -0
- helm/benchmark/scenarios/financial_phrasebank_scenario.py +21 -0
- helm/benchmark/scenarios/gold_commodity_news_scenario.py +21 -0
- helm/benchmark/scenarios/gpqa_scenario.py +18 -0
- helm/benchmark/scenarios/grammar_scenario.py +20 -1
- helm/benchmark/scenarios/gsm_scenario.py +21 -0
- helm/benchmark/scenarios/harm_bench_gcg_transfer_scenario.py +12 -1
- helm/benchmark/scenarios/harm_bench_scenario.py +12 -1
- helm/benchmark/scenarios/headqa_scenario.py +22 -0
- helm/benchmark/scenarios/helpdesk_call_summarization_scenario.py +13 -0
- helm/benchmark/scenarios/ice_scenario.py +21 -1
- helm/benchmark/scenarios/ifeval_scenario.py +18 -0
- helm/benchmark/scenarios/imdb_scenario.py +15 -0
- helm/benchmark/scenarios/infinite_bench_en_mc_scenario.py +21 -0
- helm/benchmark/scenarios/infinite_bench_en_sum_scenario.py +19 -0
- helm/benchmark/scenarios/koala_scenario.py +21 -1
- helm/benchmark/scenarios/kpi_edgar_scenario.py +21 -0
- helm/benchmark/scenarios/legal_contract_summarization_scenario.py +20 -0
- helm/benchmark/scenarios/legal_summarization_scenario.py +50 -0
- helm/benchmark/scenarios/legal_support_scenario.py +13 -0
- helm/benchmark/scenarios/legalbench_scenario.py +19 -0
- helm/benchmark/scenarios/lex_glue_scenario.py +11 -0
- helm/benchmark/scenarios/lextreme_scenario.py +11 -0
- helm/benchmark/scenarios/lsat_qa_scenario.py +14 -0
- helm/benchmark/scenarios/madinah_qa_scenario.py +73 -0
- helm/benchmark/scenarios/math_scenario.py +33 -0
- helm/benchmark/scenarios/mbzuai_human_translated_arabic_mmlu.py +68 -0
- helm/benchmark/scenarios/med_dialog_scenario.py +32 -1
- helm/benchmark/scenarios/med_mcqa_scenario.py +14 -0
- helm/benchmark/scenarios/med_qa_scenario.py +20 -0
- helm/benchmark/scenarios/medalign_scenario.py +23 -0
- helm/benchmark/scenarios/medbullets_scenario.py +22 -0
- helm/benchmark/scenarios/medcalc_bench_scenario.py +22 -0
- helm/benchmark/scenarios/medec_scenario.py +23 -0
- helm/benchmark/scenarios/medhallu_scenario.py +23 -0
- helm/benchmark/scenarios/medhelm/__init__.py +0 -0
- helm/benchmark/scenarios/medhelm/judges.yaml +14 -0
- helm/benchmark/scenarios/medhelm_configurable_scenario.py +101 -0
- helm/benchmark/scenarios/medi_qa_scenario.py +24 -1
- helm/benchmark/scenarios/medication_qa_scenario.py +31 -1
- helm/benchmark/scenarios/mental_health_scenario.py +23 -0
- helm/benchmark/scenarios/mimic_bhc_scenario.py +24 -0
- helm/benchmark/scenarios/mimic_rrs_scenario.py +23 -0
- helm/benchmark/scenarios/mimiciv_billing_code_scenario.py +22 -0
- helm/benchmark/scenarios/mmlu_pro_scenario.py +18 -0
- helm/benchmark/scenarios/mmlu_scenario.py +21 -0
- helm/benchmark/scenarios/msmarco_scenario.py +30 -0
- helm/benchmark/scenarios/mtsamples_procedures_scenario.py +22 -0
- helm/benchmark/scenarios/mtsamples_replicate_scenario.py +22 -0
- helm/benchmark/scenarios/n2c2_ct_matching_scenario.py +20 -0
- helm/benchmark/scenarios/narrativeqa_scenario.py +19 -0
- helm/benchmark/scenarios/natural_qa_scenario.py +32 -0
- helm/benchmark/scenarios/omni_math_scenario.py +18 -0
- helm/benchmark/scenarios/open_assistant_scenario.py +22 -0
- helm/benchmark/scenarios/openai_mrcr_scenario.py +15 -0
- helm/benchmark/scenarios/pubmed_qa_scenario.py +22 -0
- helm/benchmark/scenarios/quac_scenario.py +14 -0
- helm/benchmark/scenarios/race_based_med_scenario.py +23 -0
- helm/benchmark/scenarios/raft_scenario.py +15 -0
- helm/benchmark/scenarios/real_toxicity_prompts_scenario.py +14 -1
- helm/benchmark/scenarios/ruler_qa_scenarios.py +40 -0
- helm/benchmark/scenarios/scenario.py +31 -0
- helm/benchmark/scenarios/seahelm_scenario.py +348 -0
- helm/benchmark/scenarios/self_instruct_scenario.py +29 -1
- helm/benchmark/scenarios/shc_bmt_scenario.py +22 -0
- helm/benchmark/scenarios/shc_cdi_scenario.py +20 -0
- helm/benchmark/scenarios/shc_conf_scenario.py +23 -0
- helm/benchmark/scenarios/shc_ent_scenario.py +21 -0
- helm/benchmark/scenarios/shc_gip_scenario.py +20 -0
- helm/benchmark/scenarios/shc_privacy_scenario.py +22 -0
- helm/benchmark/scenarios/shc_proxy_scenario.py +22 -0
- helm/benchmark/scenarios/shc_ptbm_scenario.py +23 -0
- helm/benchmark/scenarios/shc_sequoia_scenario.py +21 -0
- helm/benchmark/scenarios/simple_safety_tests_scenario.py +12 -1
- helm/benchmark/scenarios/situation_prompts.yaml +49 -0
- helm/benchmark/scenarios/spider_scenario.py +18 -0
- helm/benchmark/scenarios/starr_patient_instructions_scenario.py +22 -0
- helm/benchmark/scenarios/summarization_scenario.py +37 -0
- helm/benchmark/scenarios/synthetic_efficiency_scenario.py +22 -1
- helm/benchmark/scenarios/synthetic_reasoning_natural_scenario.py +13 -0
- helm/benchmark/scenarios/test_alrage_scenario.py +23 -0
- helm/benchmark/scenarios/test_arabic_exams_scenario.py +21 -0
- helm/benchmark/scenarios/test_aratrust_scenario.py +1 -1
- helm/benchmark/scenarios/test_bluex_scenario.py +2 -2
- helm/benchmark/scenarios/thai_exam_scenario.py +95 -0
- helm/benchmark/scenarios/the_pile_scenario.py +13 -1
- helm/benchmark/scenarios/truthful_qa_scenario.py +14 -0
- helm/benchmark/scenarios/twitter_aae_scenario.py +20 -1
- helm/benchmark/scenarios/vicuna_scenario.py +21 -1
- helm/benchmark/scenarios/wikifact_scenario.py +20 -0
- helm/benchmark/scenarios/wildbench_scenario.py +18 -0
- helm/benchmark/scenarios/wmt_14_scenario.py +19 -0
- helm/benchmark/static/schema_arabic.yaml +55 -12
- helm/benchmark/static/schema_long_context.yaml +11 -30
- helm/benchmark/static/schema_medhelm.yaml +36 -0
- helm/benchmark/static/schema_slp.yaml +219 -0
- helm/benchmark/static_build/assets/audio-table-Dn5NMMeJ.png +0 -0
- helm/benchmark/static_build/assets/index-oIeiQW2g.css +1 -0
- helm/benchmark/static_build/assets/index-qOFpOyHb.js +10 -0
- helm/benchmark/static_build/assets/react-BteFIppM.js +85 -0
- helm/benchmark/static_build/assets/recharts-DxuQtTOs.js +97 -0
- helm/benchmark/static_build/assets/tremor-DR4fE7ko.js +10 -0
- helm/benchmark/static_build/index.html +5 -6
- helm/clients/ai21_client.py +2 -0
- helm/clients/aleph_alpha_client.py +2 -0
- helm/clients/anthropic_client.py +7 -1
- helm/clients/audio_language/diva_llama_client.py +2 -0
- helm/clients/audio_language/llama_omni/arguments.py +61 -0
- helm/clients/audio_language/llama_omni/constants.py +9 -0
- helm/clients/audio_language/llama_omni/conversation.py +213 -0
- helm/clients/audio_language/llama_omni/model/__init__.py +0 -0
- helm/clients/audio_language/llama_omni/model/builder.py +88 -0
- helm/clients/audio_language/llama_omni/model/language_model/omni_speech2s_llama.py +190 -0
- helm/clients/audio_language/llama_omni/model/language_model/omni_speech_llama.py +118 -0
- helm/clients/audio_language/llama_omni/model/omni_speech_arch.py +249 -0
- helm/clients/audio_language/llama_omni/model/speech_encoder/builder.py +9 -0
- helm/clients/audio_language/llama_omni/model/speech_encoder/speech_encoder.py +27 -0
- helm/clients/audio_language/llama_omni/model/speech_generator/builder.py +9 -0
- helm/clients/audio_language/llama_omni/model/speech_generator/generation.py +622 -0
- helm/clients/audio_language/llama_omni/model/speech_generator/speech_generator.py +104 -0
- helm/clients/audio_language/llama_omni/model/speech_projector/builder.py +9 -0
- helm/clients/audio_language/llama_omni/model/speech_projector/speech_projector.py +27 -0
- helm/clients/audio_language/llama_omni/preprocess.py +295 -0
- helm/clients/audio_language/llama_omni/utils.py +202 -0
- helm/clients/audio_language/llama_omni_client.py +2 -1
- helm/clients/audio_language/qwen2_5_omni_client.py +2 -1
- helm/clients/audio_language/qwen2_audiolm_client.py +2 -1
- helm/clients/audio_language/qwen_audiolm_client.py +2 -1
- helm/clients/audio_language/qwen_omni/configuration_qwen2_5_omni.py +519 -0
- helm/clients/audio_language/qwen_omni/modeling_qwen2_5_omni.py +4308 -0
- helm/clients/audio_language/qwen_omni/processing_qwen2_5_omni.py +270 -0
- helm/clients/audio_language/qwen_omni/qwen2_5_omni_utils/__init__.py +0 -0
- helm/clients/audio_language/qwen_omni/qwen2_5_omni_utils/v2_5/__init__.py +8 -0
- helm/clients/audio_language/qwen_omni/qwen2_5_omni_utils/v2_5/audio_process.py +56 -0
- helm/clients/audio_language/qwen_omni/qwen2_5_omni_utils/v2_5/vision_process.py +380 -0
- helm/clients/bedrock_client.py +2 -0
- helm/clients/cohere_client.py +3 -0
- helm/clients/google_client.py +2 -0
- helm/clients/http_model_client.py +2 -0
- helm/clients/huggingface_client.py +2 -1
- helm/clients/ibm_client.py +3 -1
- helm/clients/image_generation/adobe_vision_client.py +2 -0
- helm/clients/image_generation/aleph_alpha_image_generation_client.py +2 -0
- helm/clients/image_generation/cogview2/sr_pipeline/dsr_model.py +1 -1
- helm/clients/image_generation/cogview2_client.py +2 -1
- helm/clients/image_generation/dalle2_client.py +2 -0
- helm/clients/image_generation/dalle_mini_client.py +2 -1
- helm/clients/image_generation/deep_floyd_client.py +2 -0
- helm/clients/image_generation/huggingface_diffusers_client.py +2 -1
- helm/clients/image_generation/lexica_client.py +2 -0
- helm/clients/image_generation/mindalle/models/stage1/layers.py +2 -2
- helm/clients/image_generation/mindalle_client.py +2 -1
- helm/clients/image_generation/together_image_generation_client.py +2 -0
- helm/clients/megatron_client.py +2 -0
- helm/clients/mistral_client.py +2 -0
- helm/clients/moderation_api_client.py +2 -0
- helm/clients/openai_client.py +36 -20
- helm/clients/openai_responses_client.py +27 -3
- helm/clients/openrouter_client.py +31 -0
- helm/clients/palmyra_client.py +2 -1
- helm/clients/reka_client.py +2 -1
- helm/clients/stanfordhealthcare_azure_openai_client.py +2 -2
- helm/clients/stanfordhealthcare_http_model_client.py +2 -0
- helm/clients/test_openrouter_client.py +69 -0
- helm/clients/together_client.py +52 -11
- helm/clients/vertexai_client.py +12 -2
- helm/clients/vision_language/huggingface_vision2seq_client.py +2 -1
- helm/clients/vision_language/huggingface_vlm_client.py +2 -0
- helm/clients/vision_language/idefics_client.py +2 -1
- helm/clients/vision_language/open_flamingo_client.py +2 -1
- helm/clients/vision_language/paligemma_client.py +2 -1
- helm/clients/vision_language/palmyra_vision_client.py +2 -0
- helm/clients/vision_language/qwen2_vlm_client.py +2 -1
- helm/clients/vision_language/qwen_vlm_client.py +2 -1
- helm/clients/writer_client.py +2 -0
- helm/common/hierarchical_logger.py +20 -0
- helm/common/optional_dependencies.py +1 -1
- helm/common/test_general.py +4 -0
- helm/config/model_deployments.yaml +300 -1
- helm/config/model_metadata.yaml +302 -9
- helm/config/tokenizer_configs.yaml +92 -4
- helm/proxy/example_queries.py +8 -8
- helm/proxy/server.py +2 -1
- helm/proxy/static/index.css +4 -0
- helm/proxy/static/index.js +7 -1
- helm/benchmark/metrics/aci_bench_metrics.py +0 -14
- helm/benchmark/metrics/chw_care_plan_metrics.py +0 -14
- helm/benchmark/metrics/dischargeme_metrics.py +0 -14
- helm/benchmark/metrics/med_dialog_metrics.py +0 -14
- helm/benchmark/metrics/medalign_metrics.py +0 -14
- helm/benchmark/metrics/medi_qa_metrics.py +0 -14
- helm/benchmark/metrics/medication_qa_metrics.py +0 -14
- helm/benchmark/metrics/mental_health_metrics.py +0 -14
- helm/benchmark/metrics/mimic_bhc_metrics.py +0 -14
- helm/benchmark/metrics/mimic_rrs_metrics.py +0 -14
- helm/benchmark/metrics/mtsamples_procedures_metrics.py +0 -14
- helm/benchmark/metrics/mtsamples_replicate_metrics.py +0 -14
- helm/benchmark/metrics/starr_patient_instructions_metrics.py +0 -14
- helm/benchmark/static_build/assets/index-b9779128.css +0 -1
- helm/benchmark/static_build/assets/index-e439d5e1.js +0 -10
- helm/benchmark/static_build/assets/react-f82877fd.js +0 -85
- helm/benchmark/static_build/assets/recharts-4037aff0.js +0 -97
- helm/benchmark/static_build/assets/tremor-38a10867.js +0 -10
- {crfm_helm-0.5.7.dist-info → crfm_helm-0.5.9.dist-info}/WHEEL +0 -0
- {crfm_helm-0.5.7.dist-info → crfm_helm-0.5.9.dist-info}/entry_points.txt +0 -0
- {crfm_helm-0.5.7.dist-info → crfm_helm-0.5.9.dist-info}/licenses/LICENSE +0 -0
- {crfm_helm-0.5.7.dist-info → crfm_helm-0.5.9.dist-info}/top_level.txt +0 -0
- /helm/benchmark/static_build/assets/{air-overview-d2e6c49f.png → air-overview-DpBbyagA.png} +0 -0
- /helm/benchmark/static_build/assets/{crfm-logo-74391ab8.png → crfm-logo-Du4T1uWZ.png} +0 -0
- /helm/benchmark/static_build/assets/{heim-logo-3e5e3aa4.png → heim-logo-BJtQlEbV.png} +0 -0
- /helm/benchmark/static_build/assets/{helm-logo-simple-2ed5400b.png → helm-logo-simple-DzOhNN41.png} +0 -0
- /helm/benchmark/static_build/assets/{helm-safety-2907a7b6.png → helm-safety-COfndXuS.png} +0 -0
- /helm/benchmark/static_build/assets/{helmhero-28e90f4d.png → helmhero-D9TvmJsp.png} +0 -0
- /helm/benchmark/static_build/assets/{medhelm-overview-eac29843.png → medhelm-overview-CND0EIsy.png} +0 -0
- /helm/benchmark/static_build/assets/{medhelm-v1-overview-3ddfcd65.png → medhelm-v1-overview-Cu2tphBB.png} +0 -0
- /helm/benchmark/static_build/assets/{overview-74aea3d8.png → overview-BwypNWnk.png} +0 -0
- /helm/benchmark/static_build/assets/{process-flow-bd2eba96.png → process-flow-DWDJC733.png} +0 -0
- /helm/benchmark/static_build/assets/{vhelm-aspects-1437d673.png → vhelm-aspects-NiDQofvP.png} +0 -0
- /helm/benchmark/static_build/assets/{vhelm-framework-a1ca3f3f.png → vhelm-framework-NxJE4fdA.png} +0 -0
- /helm/benchmark/static_build/assets/{vhelm-model-8afb7616.png → vhelm-model-ypCL5Yvq.png} +0 -0
|
@@ -0,0 +1,380 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import base64
|
|
4
|
+
import logging
|
|
5
|
+
import math
|
|
6
|
+
import os
|
|
7
|
+
import sys
|
|
8
|
+
import time
|
|
9
|
+
import warnings
|
|
10
|
+
from functools import lru_cache
|
|
11
|
+
from io import BytesIO
|
|
12
|
+
|
|
13
|
+
import requests
|
|
14
|
+
import torch
|
|
15
|
+
import torchvision
|
|
16
|
+
from packaging import version
|
|
17
|
+
from PIL import Image
|
|
18
|
+
from torchvision import io, transforms
|
|
19
|
+
from torchvision.transforms import InterpolationMode
|
|
20
|
+
from typing import List, Optional, Union
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
logger = logging.getLogger(__name__)

# Image sizes are constrained to multiples of this factor (the vision patch
# grid used by smart_resize below).
IMAGE_FACTOR = 28
MIN_PIXELS = 4 * 28 * 28
MAX_PIXELS = 16384 * 28 * 28
# Maximum allowed aspect ratio (long side / short side) for an input image.
MAX_RATIO = 200

# Per-frame pixel budgets for video inputs.
VIDEO_MIN_PIXELS = 128 * 28 * 28
VIDEO_MAX_PIXELS = 768 * 28 * 28
# Sampled frame counts are rounded to multiples of this factor.
FRAME_FACTOR = 2
FPS = 2.0  # default sampling rate used when the caller does not supply "fps"
FPS_MIN_FRAMES = 4
FPS_MAX_FRAMES = 768

# Set the maximum number of video token inputs.
# Here, 128K represents the maximum number of input tokens for the VLLM model.
# Remember to adjust it according to your own configuration.
VIDEO_TOTAL_PIXELS = int(float(os.environ.get("VIDEO_MAX_PIXELS", 128000 * 28 * 28 * 0.9)))
logger.info(f"set VIDEO_TOTAL_PIXELS: {VIDEO_TOTAL_PIXELS}")
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def round_by_factor(number: int, factor: int) -> int:
    """Return the multiple of ``factor`` nearest to ``number``."""
    quotient = number / factor
    return round(quotient) * factor
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def ceil_by_factor(number: int, factor: int) -> int:
    """Return the smallest multiple of ``factor`` that is >= ``number``."""
    return factor * math.ceil(number / factor)
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def floor_by_factor(number: int, factor: int) -> int:
    """Return the largest multiple of ``factor`` that is <= ``number``."""
    return factor * math.floor(number / factor)
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def smart_resize(
    height: int, width: int, factor: int = IMAGE_FACTOR, min_pixels: int = MIN_PIXELS, max_pixels: int = MAX_PIXELS
) -> tuple[int, int]:
    """Rescale ``(height, width)`` to model-friendly dimensions.

    The returned pair satisfies three constraints: both sides are divisible
    by ``factor``, the total pixel count lies in ``[min_pixels, max_pixels]``,
    and the original aspect ratio is preserved as closely as possible.

    Raises:
        ValueError: if the aspect ratio exceeds ``MAX_RATIO``.
    """
    if max(height, width) / min(height, width) > MAX_RATIO:
        raise ValueError(
            f"absolute aspect ratio must be smaller than {MAX_RATIO}, got {max(height, width) / min(height, width)}"
        )
    # Start from the nearest factor-aligned dimensions, never below one cell.
    h_bar = max(factor, round_by_factor(height, factor))
    w_bar = max(factor, round_by_factor(width, factor))
    area = h_bar * w_bar
    if area > max_pixels:
        # Too large: shrink both sides by a common scale, rounding down so the
        # result stays within the pixel budget.
        scale = math.sqrt((height * width) / max_pixels)
        h_bar = floor_by_factor(int(height / scale), factor)
        w_bar = floor_by_factor(int(width / scale), factor)
    elif area < min_pixels:
        # Too small: grow both sides by a common scale, rounding up so the
        # result reaches the minimum pixel count.
        scale = math.sqrt(min_pixels / (height * width))
        h_bar = ceil_by_factor(int(height * scale), factor)
        w_bar = ceil_by_factor(int(width * scale), factor)
    return h_bar, w_bar
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
def to_rgb(pil_image: Image.Image) -> Image.Image:
    """Convert ``pil_image`` to RGB, compositing RGBA images onto white."""
    if pil_image.mode != "RGBA":
        return pil_image.convert("RGB")
    canvas = Image.new("RGB", pil_image.size, (255, 255, 255))
    canvas.paste(pil_image, mask=pil_image.split()[3])  # alpha channel as mask
    return canvas
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
def fetch_image(ele, size_factor: int = IMAGE_FACTOR) -> Image.Image:
    """Load the image described by ``ele`` and resize it for model input.

    ``ele`` holds the source under "image" (preferred) or "image_url"; the
    value may be a PIL image, an http(s) URL, a ``file://`` path, a base64
    data URI, or a plain local path.  Optional keys "resized_height" /
    "resized_width" (or "min_pixels" / "max_pixels") control the target size,
    which is always snapped to multiples of ``size_factor``.

    Raises:
        ValueError: if the source is not one of the supported forms.
    """
    source = ele["image"] if "image" in ele else ele["image_url"]
    image_obj = None
    if isinstance(source, Image.Image):
        image_obj = source
    elif source.startswith(("http://", "https://")):
        response = requests.get(source, stream=True)
        image_obj = Image.open(BytesIO(response.content))
    elif source.startswith("file://"):
        image_obj = Image.open(source[7:])  # strip the "file://" prefix
    elif source.startswith("data:image"):
        if "base64," in source:
            _, base64_data = source.split("base64,", 1)
            image_obj = Image.open(BytesIO(base64.b64decode(base64_data)))
    else:
        image_obj = Image.open(source)
    if image_obj is None:
        raise ValueError(f"Unrecognized image input, support local path, http url, base64 and PIL.Image, got {source}")
    image = to_rgb(image_obj)
    # resize
    if "resized_height" in ele and "resized_width" in ele:
        # Caller pinned the target size; only snap it to the factor grid.
        resized_height, resized_width = smart_resize(
            int(ele["resized_height"]),
            int(ele["resized_width"]),
            factor=size_factor,
        )
    else:
        width, height = image.size
        resized_height, resized_width = smart_resize(
            height,
            width,
            factor=size_factor,
            min_pixels=int(ele.get("min_pixels", MIN_PIXELS)),
            max_pixels=int(ele.get("max_pixels", MAX_PIXELS)),
        )
    return image.resize((resized_width, resized_height))
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
def smart_nframes(
    ele: dict,
    total_frames: int,
    video_fps: Union[int, float],
) -> int:
    """Decide how many frames to sample from a video for model input.

    Args:
        ele (dict): video configuration.  May carry either ``nframes`` (an
            explicit frame count) or ``fps`` (a sampling rate, optionally
            bounded by ``min_frames``/``max_frames``), but never both.
        total_frames (int): the original total number of frames of the video.
        video_fps (int | float): the original fps of the video.

    Raises:
        ValueError: if the computed count falls outside
            ``[FRAME_FACTOR, total_frames]``.

    Returns:
        int: the number of frames to use, a multiple of ``FRAME_FACTOR``.
    """
    assert not ("fps" in ele and "nframes" in ele), "Only accept either `fps` or `nframes`"
    if "nframes" in ele:
        nframes = round_by_factor(ele["nframes"], FRAME_FACTOR)
    else:
        fps = ele.get("fps", FPS)
        min_frames = ceil_by_factor(ele.get("min_frames", FPS_MIN_FRAMES), FRAME_FACTOR)
        max_frames = floor_by_factor(ele.get("max_frames", min(FPS_MAX_FRAMES, total_frames)), FRAME_FACTOR)
        nframes = total_frames / video_fps * fps
        if nframes > total_frames:
            logger.warning(f"smart_nframes: nframes[{nframes}] > total_frames[{total_frames}]")
        # Clamp into [min_frames, max_frames] without exceeding the source
        # length, then align down to the frame factor.
        nframes = min(max(nframes, min_frames), max_frames, total_frames)
        nframes = floor_by_factor(nframes, FRAME_FACTOR)
    if not (FRAME_FACTOR <= nframes <= total_frames):
        raise ValueError(f"nframes should in interval [{FRAME_FACTOR}, {total_frames}], but got {nframes}.")
    return nframes
|
|
181
|
+
|
|
182
|
+
|
|
183
|
+
def _read_video_torchvision(
    ele: dict,
):
    """Decode a video with ``torchvision.io.read_video`` and sample frames.

    Args:
        ele (dict): video configuration.
            support keys:
            - video: the path of video. support "file://", "http://", "https://" and local path.
            - video_start: the start time of video.
            - video_end: the end time of video.

    Returns:
        tuple: (frames tensor in TCHW order, effective sampling fps).
    """
    video_path = ele["video"]
    if version.parse(torchvision.__version__) < version.parse("0.19.0"):
        # Older torchvision cannot stream remote videos and needs the
        # "file://" scheme stripped from local paths.
        if "http://" in video_path or "https://" in video_path:
            warnings.warn("torchvision < 0.19.0 does not support http/https video path, please upgrade to 0.19.0.")
        if "file://" in video_path:
            video_path = video_path[7:]
    started = time.time()
    video, audio, info = io.read_video(
        video_path,
        start_pts=ele.get("video_start", 0.0),
        end_pts=ele.get("video_end", None),
        pts_unit="sec",
        output_format="TCHW",
    )
    total_frames = video.size(0)
    video_fps = info["video_fps"]
    logger.info(f"torchvision: {video_path=}, {total_frames=}, {video_fps=}, time={time.time() - started:.3f}s")
    nframes = smart_nframes(ele, total_frames=total_frames, video_fps=video_fps)
    # Evenly spaced frame indices across the whole clip.
    sample_idx = torch.linspace(0, total_frames - 1, nframes).round().long()
    sample_fps = nframes / max(total_frames, 1e-6) * video_fps
    return video[sample_idx], sample_fps
|
|
218
|
+
|
|
219
|
+
|
|
220
|
+
def is_decord_available() -> bool:
    """Report whether the optional ``decord`` video decoder is installed."""
    from importlib.util import find_spec

    return find_spec("decord") is not None
|
|
224
|
+
|
|
225
|
+
|
|
226
|
+
def _read_video_decord(
    ele: dict,
):
    """Decode a video with ``decord.VideoReader`` and sample frames.

    Args:
        ele (dict): video configuration.
            support keys:
            - video: the path of video. support "file://", "http://", "https://" and local path.
            - video_start: the start time of video.
            - video_end: the end time of video.

    Returns:
        tuple: (frames tensor in TCHW order, effective sampling fps).
    """
    import decord

    video_path = ele["video"]
    started = time.time()
    reader = decord.VideoReader(video_path)
    # TODO: support start_pts and end_pts
    if "video_start" in ele or "video_end" in ele:
        raise NotImplementedError("not support start_pts and end_pts in decord for now.")
    total_frames = len(reader)
    video_fps = reader.get_avg_fps()
    logger.info(f"decord: {video_path=}, {total_frames=}, {video_fps=}, time={time.time() - started:.3f}s")
    nframes = smart_nframes(ele, total_frames=total_frames, video_fps=video_fps)
    # Evenly spaced frame indices across the whole clip.
    sample_idx = torch.linspace(0, total_frames - 1, nframes).round().long().tolist()
    frames = torch.tensor(reader.get_batch(sample_idx).asnumpy())
    frames = frames.permute(0, 3, 1, 2)  # THWC -> TCHW
    sample_fps = nframes / max(total_frames, 1e-6) * video_fps
    return frames, sample_fps
|
|
256
|
+
|
|
257
|
+
|
|
258
|
+
VIDEO_READER_BACKENDS = {
|
|
259
|
+
"decord": _read_video_decord,
|
|
260
|
+
"torchvision": _read_video_torchvision,
|
|
261
|
+
}
|
|
262
|
+
|
|
263
|
+
FORCE_QWENVL_VIDEO_READER = os.getenv("FORCE_QWENVL_VIDEO_READER", None)
|
|
264
|
+
|
|
265
|
+
|
|
266
|
+
@lru_cache(maxsize=1)
|
|
267
|
+
def get_video_reader_backend() -> str:
|
|
268
|
+
if FORCE_QWENVL_VIDEO_READER is not None:
|
|
269
|
+
video_reader_backend = FORCE_QWENVL_VIDEO_READER
|
|
270
|
+
elif is_decord_available():
|
|
271
|
+
video_reader_backend = "decord"
|
|
272
|
+
else:
|
|
273
|
+
video_reader_backend = "torchvision"
|
|
274
|
+
print(f"qwen-vl-utils using {video_reader_backend} to read video.", file=sys.stderr)
|
|
275
|
+
return video_reader_backend
|
|
276
|
+
|
|
277
|
+
|
|
278
|
+
def fetch_video(ele: dict, image_factor: int = IMAGE_FACTOR, return_video_sample_fps: bool = False):
|
|
279
|
+
if isinstance(ele["video"], str):
|
|
280
|
+
video_reader_backend = get_video_reader_backend()
|
|
281
|
+
try:
|
|
282
|
+
video, sample_fps = VIDEO_READER_BACKENDS[video_reader_backend](ele)
|
|
283
|
+
except Exception as e:
|
|
284
|
+
logger.warning(f"video_reader_backend {video_reader_backend} error, use torchvision as default, msg: {e}")
|
|
285
|
+
video, sample_fps = VIDEO_READER_BACKENDS["torchvision"](ele)
|
|
286
|
+
|
|
287
|
+
nframes, _, height, width = video.shape
|
|
288
|
+
min_pixels = ele.get("min_pixels", VIDEO_MIN_PIXELS)
|
|
289
|
+
total_pixels = ele.get("total_pixels", VIDEO_TOTAL_PIXELS)
|
|
290
|
+
max_pixels = max(min(VIDEO_MAX_PIXELS, total_pixels / nframes * FRAME_FACTOR), int(min_pixels * 1.05))
|
|
291
|
+
max_pixels_supposed = ele.get("max_pixels", max_pixels)
|
|
292
|
+
if max_pixels_supposed > max_pixels:
|
|
293
|
+
logger.warning(f"The given max_pixels[{max_pixels_supposed}] exceeds limit[{max_pixels}].")
|
|
294
|
+
max_pixels = min(max_pixels_supposed, max_pixels)
|
|
295
|
+
if "resized_height" in ele and "resized_width" in ele:
|
|
296
|
+
resized_height, resized_width = smart_resize(
|
|
297
|
+
ele["resized_height"],
|
|
298
|
+
ele["resized_width"],
|
|
299
|
+
factor=image_factor,
|
|
300
|
+
)
|
|
301
|
+
else:
|
|
302
|
+
resized_height, resized_width = smart_resize(
|
|
303
|
+
height,
|
|
304
|
+
width,
|
|
305
|
+
factor=image_factor,
|
|
306
|
+
min_pixels=min_pixels,
|
|
307
|
+
max_pixels=max_pixels,
|
|
308
|
+
)
|
|
309
|
+
video = transforms.functional.resize(
|
|
310
|
+
video,
|
|
311
|
+
[resized_height, resized_width],
|
|
312
|
+
interpolation=InterpolationMode.BICUBIC,
|
|
313
|
+
antialias=True,
|
|
314
|
+
).float()
|
|
315
|
+
if return_video_sample_fps:
|
|
316
|
+
return video, sample_fps
|
|
317
|
+
return video
|
|
318
|
+
else:
|
|
319
|
+
assert isinstance(ele["video"], (list, tuple))
|
|
320
|
+
process_info = ele.copy()
|
|
321
|
+
process_info.pop("type", None)
|
|
322
|
+
process_info.pop("video", None)
|
|
323
|
+
images = [
|
|
324
|
+
fetch_image({"image": video_element, **process_info}, size_factor=image_factor)
|
|
325
|
+
for video_element in ele["video"]
|
|
326
|
+
]
|
|
327
|
+
nframes = ceil_by_factor(len(images), FRAME_FACTOR)
|
|
328
|
+
if len(images) < nframes:
|
|
329
|
+
images.extend([images[-1]] * (nframes - len(images)))
|
|
330
|
+
if return_video_sample_fps:
|
|
331
|
+
return images, process_info.pop("fps", 2.0)
|
|
332
|
+
return images
|
|
333
|
+
|
|
334
|
+
|
|
335
|
+
def extract_vision_info(conversations) -> list[dict]:
|
|
336
|
+
vision_infos = []
|
|
337
|
+
if isinstance(conversations[0], dict):
|
|
338
|
+
conversations_p = [conversations]
|
|
339
|
+
for conversation in conversations_p:
|
|
340
|
+
for message in conversation:
|
|
341
|
+
if isinstance(message["content"], list):
|
|
342
|
+
for ele in message["content"]:
|
|
343
|
+
if (
|
|
344
|
+
"image" in ele
|
|
345
|
+
or "image_url" in ele
|
|
346
|
+
or "video" in ele
|
|
347
|
+
or ele["type"] in ("image", "image_url", "video")
|
|
348
|
+
):
|
|
349
|
+
vision_infos.append(ele)
|
|
350
|
+
return vision_infos
|
|
351
|
+
|
|
352
|
+
|
|
353
|
+
def process_vision_info(
|
|
354
|
+
conversations: list[dict] | list[list[dict]],
|
|
355
|
+
return_video_kwargs: bool = False,
|
|
356
|
+
):
|
|
357
|
+
|
|
358
|
+
vision_infos = extract_vision_info(conversations)
|
|
359
|
+
# Read images or videos
|
|
360
|
+
image_inputs: Optional[List] = []
|
|
361
|
+
video_inputs: Optional[List] = []
|
|
362
|
+
video_sample_fps_list = []
|
|
363
|
+
for vision_info in vision_infos:
|
|
364
|
+
if "image" in vision_info or "image_url" in vision_info:
|
|
365
|
+
assert image_inputs is not None
|
|
366
|
+
image_inputs.append(fetch_image(vision_info))
|
|
367
|
+
elif "video" in vision_info:
|
|
368
|
+
assert video_inputs is not None
|
|
369
|
+
video_input, video_sample_fps = fetch_video(vision_info, return_video_sample_fps=True)
|
|
370
|
+
video_sample_fps_list.append(video_sample_fps)
|
|
371
|
+
video_inputs.append(video_input)
|
|
372
|
+
else:
|
|
373
|
+
raise ValueError("image, image_url or video should in content.")
|
|
374
|
+
if image_inputs is not None and len(image_inputs) == 0:
|
|
375
|
+
image_inputs = None
|
|
376
|
+
if video_inputs is not None and len(video_inputs) == 0:
|
|
377
|
+
video_inputs = None
|
|
378
|
+
if return_video_kwargs:
|
|
379
|
+
return image_inputs, video_inputs, {"fps": video_sample_fps_list}
|
|
380
|
+
return image_inputs, video_inputs
|
helm/clients/bedrock_client.py
CHANGED
|
@@ -7,6 +7,7 @@ from datetime import datetime
|
|
|
7
7
|
|
|
8
8
|
from helm.common.cache import CacheConfig
|
|
9
9
|
from helm.clients.client import CachingClient, truncate_and_tokenize_response_text
|
|
10
|
+
from helm.common.hierarchical_logger import hexception
|
|
10
11
|
from helm.common.request import Request, RequestResult, GeneratedOutput, wrap_request_time
|
|
11
12
|
from helm.clients.bedrock_utils import get_bedrock_client, get_bedrock_client_v1
|
|
12
13
|
from helm.tokenizers.tokenizer import Tokenizer
|
|
@@ -75,6 +76,7 @@ class BedrockClient(CachingClient):
|
|
|
75
76
|
response, cached = self.cache.get(cache_key, wrap_request_time(do_it))
|
|
76
77
|
|
|
77
78
|
except Exception as error:
|
|
79
|
+
hexception(error)
|
|
78
80
|
return RequestResult(
|
|
79
81
|
success=False,
|
|
80
82
|
cached=False,
|
helm/clients/cohere_client.py
CHANGED
|
@@ -3,6 +3,7 @@ import requests
|
|
|
3
3
|
from typing import List, Optional, Sequence, TypedDict
|
|
4
4
|
|
|
5
5
|
from helm.common.cache import CacheConfig
|
|
6
|
+
from helm.common.hierarchical_logger import hexception
|
|
6
7
|
from helm.common.optional_dependencies import handle_module_not_found_error
|
|
7
8
|
from helm.common.request import (
|
|
8
9
|
wrap_request_time,
|
|
@@ -123,6 +124,7 @@ class CohereClient(CachingClient):
|
|
|
123
124
|
|
|
124
125
|
response, cached = self.cache.get(raw_request, wrap_request_time(do_it))
|
|
125
126
|
except (requests.exceptions.RequestException, AssertionError) as e:
|
|
127
|
+
hexception(e)
|
|
126
128
|
error: str = f"CohereClient error: {e}"
|
|
127
129
|
return RequestResult(success=False, cached=False, error=error, completions=[], embedding=[])
|
|
128
130
|
|
|
@@ -232,6 +234,7 @@ class CohereChatClient(CachingClient):
|
|
|
232
234
|
|
|
233
235
|
response, cached = self.cache.get(raw_request, wrap_request_time(do_it))
|
|
234
236
|
except (requests.exceptions.RequestException, AssertionError) as e:
|
|
237
|
+
hexception(e)
|
|
235
238
|
error: str = f"CohereClient error: {e}"
|
|
236
239
|
return RequestResult(success=False, cached=False, error=error, completions=[], embedding=[])
|
|
237
240
|
|
helm/clients/google_client.py
CHANGED
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
from typing import List, Dict
|
|
2
2
|
|
|
3
3
|
from helm.common.cache import CacheConfig
|
|
4
|
+
from helm.common.hierarchical_logger import hexception
|
|
4
5
|
from helm.common.request import Request, RequestResult, GeneratedOutput, Token
|
|
5
6
|
from helm.clients.client import CachingClient, truncate_sequence
|
|
6
7
|
|
|
@@ -44,6 +45,7 @@ class GoogleClient(CachingClient):
|
|
|
44
45
|
# If results are not cached for a given query, fail fast
|
|
45
46
|
response, cached = self.cache.get(cache_key, fail)
|
|
46
47
|
except RuntimeError as e:
|
|
48
|
+
hexception(e)
|
|
47
49
|
error: str = f"GoogleClient error: {e}"
|
|
48
50
|
return RequestResult(success=False, cached=False, error=error, completions=[], embedding=[])
|
|
49
51
|
|
|
@@ -3,6 +3,7 @@ from dataclasses import asdict
|
|
|
3
3
|
from typing import Any, Dict
|
|
4
4
|
|
|
5
5
|
from helm.common.cache import CacheConfig
|
|
6
|
+
from helm.common.hierarchical_logger import hexception
|
|
6
7
|
from helm.common.request import (
|
|
7
8
|
wrap_request_time,
|
|
8
9
|
Request,
|
|
@@ -76,5 +77,6 @@ class HTTPModelClient(CachingClient):
|
|
|
76
77
|
request_time=response["request_time"],
|
|
77
78
|
)
|
|
78
79
|
except requests.exceptions.RequestException as e:
|
|
80
|
+
hexception(e)
|
|
79
81
|
error: str = f"Request error: {e}"
|
|
80
82
|
return RequestResult(success=False, cached=False, error=error, completions=[], embedding=[])
|
|
@@ -8,7 +8,7 @@ from transformers.generation.stopping_criteria import (
|
|
|
8
8
|
from typing import Any, Dict, List, Optional, TypedDict
|
|
9
9
|
|
|
10
10
|
from helm.common.cache import CacheConfig
|
|
11
|
-
from helm.common.hierarchical_logger import htrack_block, hlog, hwarn
|
|
11
|
+
from helm.common.hierarchical_logger import hexception, htrack_block, hlog, hwarn
|
|
12
12
|
from helm.common.optional_dependencies import handle_module_not_found_error
|
|
13
13
|
from helm.common.request import (
|
|
14
14
|
wrap_request_time,
|
|
@@ -345,6 +345,7 @@ class HuggingFaceClient(CachingClient):
|
|
|
345
345
|
response, cached = self.cache.get(cache_key, wrap_request_time(do_it))
|
|
346
346
|
except Exception as e: # Do something if error is encountered.
|
|
347
347
|
error: str = f"HuggingFace error: {e}"
|
|
348
|
+
hexception(e)
|
|
348
349
|
return RequestResult(success=False, cached=False, error=error, completions=[], embedding=[])
|
|
349
350
|
|
|
350
351
|
completions = []
|
helm/clients/ibm_client.py
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
from abc import ABC
|
|
2
2
|
from abc import abstractmethod
|
|
3
3
|
|
|
4
|
-
from helm.common.hierarchical_logger import hlog
|
|
4
|
+
from helm.common.hierarchical_logger import hexception, hlog
|
|
5
5
|
from helm.common.cache import CacheConfig
|
|
6
6
|
from helm.common.request import (
|
|
7
7
|
Request,
|
|
@@ -249,6 +249,7 @@ class IbmChatClient(IbmClient):
|
|
|
249
249
|
)
|
|
250
250
|
|
|
251
251
|
except Exception as e:
|
|
252
|
+
hexception(e)
|
|
252
253
|
error: str = f"IBM Chat client Model error: {e}"
|
|
253
254
|
return RequestResult(success=False, cached=False, error=error, completions=[], embedding=[])
|
|
254
255
|
|
|
@@ -263,5 +264,6 @@ class IbmTextClient(IbmClient):
|
|
|
263
264
|
inference_handler=GenerateInferenceHandler(inference_engine=self.inference_engine), request=request
|
|
264
265
|
)
|
|
265
266
|
except Exception as e:
|
|
267
|
+
hexception(e)
|
|
266
268
|
error: str = f"IBM Text client Model error: {e}"
|
|
267
269
|
return RequestResult(success=False, cached=False, error=error, completions=[], embedding=[])
|
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
from typing import List, Dict
|
|
2
2
|
|
|
3
3
|
from helm.common.cache import Cache, CacheConfig
|
|
4
|
+
from helm.common.hierarchical_logger import hexception
|
|
4
5
|
from helm.common.request import Request, RequestResult, GeneratedOutput
|
|
5
6
|
from helm.common.tokenization_request import (
|
|
6
7
|
TokenizationRequest,
|
|
@@ -54,6 +55,7 @@ class AdobeVisionClient(Client):
|
|
|
54
55
|
|
|
55
56
|
response, cached = self._cache.get(cache_key, fail)
|
|
56
57
|
except RuntimeError as e:
|
|
58
|
+
hexception(e)
|
|
57
59
|
error: str = f"Adobe Vision Client error: {e}"
|
|
58
60
|
return RequestResult(success=False, cached=False, error=error, completions=[], embedding=[])
|
|
59
61
|
|
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
from typing import List, Dict
|
|
2
2
|
|
|
3
3
|
from helm.common.cache import Cache, CacheConfig
|
|
4
|
+
from helm.common.hierarchical_logger import hexception
|
|
4
5
|
from helm.common.request import Request, RequestResult, GeneratedOutput
|
|
5
6
|
from helm.common.tokenization_request import (
|
|
6
7
|
TokenizationRequest,
|
|
@@ -74,6 +75,7 @@ class AlephAlphaImageGenerationClient(Client):
|
|
|
74
75
|
|
|
75
76
|
response, cached = self._cache.get(cache_key, fail)
|
|
76
77
|
except RuntimeError as e:
|
|
78
|
+
hexception(e)
|
|
77
79
|
error: str = f"AlephAlphaVisionClient error: {e}"
|
|
78
80
|
return RequestResult(success=False, cached=False, error=error, completions=[], embedding=[])
|
|
79
81
|
|
|
@@ -9,7 +9,7 @@ from torchvision.utils import save_image
|
|
|
9
9
|
|
|
10
10
|
from helm.common.cache import CacheConfig, Cache
|
|
11
11
|
from helm.common.file_caches.file_cache import FileCache
|
|
12
|
-
from helm.common.hierarchical_logger import hlog, htrack_block
|
|
12
|
+
from helm.common.hierarchical_logger import hexception, hlog, htrack_block
|
|
13
13
|
from helm.common.optional_dependencies import handle_module_not_found_error
|
|
14
14
|
from helm.common.request import Request, RequestResult, GeneratedOutput, wrap_request_time
|
|
15
15
|
from helm.common.tokenization_request import (
|
|
@@ -167,6 +167,7 @@ class CogView2Client(Client):
|
|
|
167
167
|
)
|
|
168
168
|
results, cached = self._cache.get(cache_key, wrap_request_time(do_it))
|
|
169
169
|
except RuntimeError as e:
|
|
170
|
+
hexception(e)
|
|
170
171
|
error: str = f"CogView2Client error: {e}"
|
|
171
172
|
return RequestResult(success=False, cached=False, error=error, completions=[], embedding=[])
|
|
172
173
|
|
|
@@ -4,6 +4,7 @@ import base64
|
|
|
4
4
|
from helm.common.cache import CacheConfig, Cache
|
|
5
5
|
from helm.common.general import hlog
|
|
6
6
|
from helm.common.file_caches.file_cache import FileCache
|
|
7
|
+
from helm.common.hierarchical_logger import hexception
|
|
7
8
|
from helm.common.media_object import MultimediaObject
|
|
8
9
|
from helm.common.optional_dependencies import handle_module_not_found_error
|
|
9
10
|
from helm.common.request import Request, RequestResult, GeneratedOutput, wrap_request_time
|
|
@@ -124,6 +125,7 @@ class DALLE2Client(Client):
|
|
|
124
125
|
hlog(f"Failed safety check: {request.prompt}")
|
|
125
126
|
return self.get_content_policy_violated_result(request)
|
|
126
127
|
else:
|
|
128
|
+
hexception(error)
|
|
127
129
|
return RequestResult(
|
|
128
130
|
success=False, cached=False, error=f"DALL-E error: {error}", completions=[], embedding=[]
|
|
129
131
|
)
|
|
@@ -5,7 +5,7 @@ from functools import partial
|
|
|
5
5
|
|
|
6
6
|
from helm.common.cache import CacheConfig, Cache
|
|
7
7
|
from helm.common.file_caches.file_cache import FileCache
|
|
8
|
-
from helm.common.hierarchical_logger import hlog, htrack_block
|
|
8
|
+
from helm.common.hierarchical_logger import hexception, hlog, htrack_block
|
|
9
9
|
from helm.common.optional_dependencies import handle_module_not_found_error
|
|
10
10
|
from helm.common.request import Request, RequestResult, GeneratedOutput, wrap_request_time
|
|
11
11
|
from helm.common.tokenization_request import (
|
|
@@ -166,6 +166,7 @@ class DALLEMiniClient(Client):
|
|
|
166
166
|
)
|
|
167
167
|
results, cached = self._cache.get(cache_key, wrap_request_time(do_it))
|
|
168
168
|
except RuntimeError as e:
|
|
169
|
+
hexception(e)
|
|
169
170
|
error: str = f"DALLEMiniClient error: {e}"
|
|
170
171
|
return RequestResult(success=False, cached=False, error=error, completions=[], embedding=[])
|
|
171
172
|
|
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
from typing import List, Dict
|
|
2
2
|
|
|
3
3
|
from helm.common.cache import Cache, CacheConfig
|
|
4
|
+
from helm.common.hierarchical_logger import hexception
|
|
4
5
|
from helm.common.request import Request, RequestResult, GeneratedOutput
|
|
5
6
|
from helm.common.tokenization_request import (
|
|
6
7
|
TokenizationRequest,
|
|
@@ -54,6 +55,7 @@ class DeepFloydClient(Client):
|
|
|
54
55
|
|
|
55
56
|
response, cached = self._cache.get(cache_key, fail)
|
|
56
57
|
except RuntimeError as e:
|
|
58
|
+
hexception(e)
|
|
57
59
|
error: str = f"DeepFloyd Client error: {e}"
|
|
58
60
|
return RequestResult(success=False, cached=False, error=error, completions=[], embedding=[])
|
|
59
61
|
|
|
@@ -7,7 +7,7 @@ import torch
|
|
|
7
7
|
from helm.common.cache import CacheConfig, Cache
|
|
8
8
|
from helm.common.file_caches.file_cache import FileCache
|
|
9
9
|
from helm.common.gpu_utils import get_torch_device_name, is_cuda_available
|
|
10
|
-
from helm.common.hierarchical_logger import hlog, htrack_block
|
|
10
|
+
from helm.common.hierarchical_logger import hexception, hlog, htrack_block
|
|
11
11
|
from helm.common.optional_dependencies import handle_module_not_found_error
|
|
12
12
|
from helm.common.request import Request, RequestResult, GeneratedOutput, wrap_request_time
|
|
13
13
|
from helm.common.tokenization_request import (
|
|
@@ -178,6 +178,7 @@ class HuggingFaceDiffusersClient(Client):
|
|
|
178
178
|
)
|
|
179
179
|
results, cached = self._cache.get(cache_key, wrap_request_time(do_it))
|
|
180
180
|
except RuntimeError as ex:
|
|
181
|
+
hexception(ex)
|
|
181
182
|
error: str = f"HuggingFaceDiffusersClient error: {ex}"
|
|
182
183
|
return RequestResult(success=False, cached=False, error=error, completions=[], embedding=[])
|
|
183
184
|
|
|
@@ -5,6 +5,7 @@ import urllib.parse
|
|
|
5
5
|
|
|
6
6
|
from helm.common.cache import CacheConfig, Cache
|
|
7
7
|
from helm.common.file_caches.file_cache import FileCache
|
|
8
|
+
from helm.common.hierarchical_logger import hexception
|
|
8
9
|
from helm.common.images_utils import encode_base64
|
|
9
10
|
from helm.common.request import Request, RequestResult, GeneratedOutput, wrap_request_time
|
|
10
11
|
from helm.common.tokenization_request import (
|
|
@@ -62,6 +63,7 @@ class LexicaClient(Client):
|
|
|
62
63
|
|
|
63
64
|
response, cached = self.cache.get(cache_key, wrap_request_time(do_it))
|
|
64
65
|
except RuntimeError as e:
|
|
66
|
+
hexception(e)
|
|
65
67
|
error: str = f"LexicaClient error: {e}"
|
|
66
68
|
return RequestResult(success=False, cached=False, error=error, completions=[], embedding=[])
|
|
67
69
|
|
|
@@ -141,7 +141,7 @@ class Encoder(nn.Module):
|
|
|
141
141
|
in_channels: int,
|
|
142
142
|
resolution: int,
|
|
143
143
|
z_channels: int,
|
|
144
|
-
double_z: Optional[bool] = None
|
|
144
|
+
double_z: Optional[bool] = None,
|
|
145
145
|
) -> None:
|
|
146
146
|
super().__init__()
|
|
147
147
|
self.ch = ch
|
|
@@ -232,7 +232,7 @@ class Decoder(nn.Module):
|
|
|
232
232
|
in_channels: int,
|
|
233
233
|
resolution: int,
|
|
234
234
|
z_channels: int,
|
|
235
|
-
double_z: bool
|
|
235
|
+
double_z: bool,
|
|
236
236
|
) -> None:
|
|
237
237
|
super().__init__()
|
|
238
238
|
self.ch = ch
|
|
@@ -5,7 +5,7 @@ import numpy as np
|
|
|
5
5
|
from helm.common.cache import CacheConfig, Cache
|
|
6
6
|
from helm.common.file_caches.file_cache import FileCache
|
|
7
7
|
from helm.common.gpu_utils import get_torch_device_name
|
|
8
|
-
from helm.common.hierarchical_logger import hlog, htrack_block
|
|
8
|
+
from helm.common.hierarchical_logger import hexception, hlog, htrack_block
|
|
9
9
|
from helm.common.optional_dependencies import handle_module_not_found_error
|
|
10
10
|
from helm.common.request import Request, RequestResult, GeneratedOutput, wrap_request_time
|
|
11
11
|
from helm.common.tokenization_request import (
|
|
@@ -91,6 +91,7 @@ class MinDALLEClient(Client):
|
|
|
91
91
|
)
|
|
92
92
|
results, cached = self._cache.get(cache_key, wrap_request_time(do_it))
|
|
93
93
|
except RuntimeError as ex:
|
|
94
|
+
hexception(ex)
|
|
94
95
|
error: str = f"MinDALLEClient error: {ex}"
|
|
95
96
|
return RequestResult(success=False, cached=False, error=error, completions=[], embedding=[])
|
|
96
97
|
|