evalscope 0.10.0__py3-none-any.whl → 1.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- evalscope/__init__.py +4 -1
- evalscope/api/benchmark/__init__.py +11 -0
- evalscope/api/benchmark/adapters/__init__.py +7 -0
- evalscope/api/benchmark/adapters/agent_adapter.py +8 -0
- evalscope/api/benchmark/adapters/default_data_adapter.py +754 -0
- evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
- evalscope/api/benchmark/adapters/multi_choice_adapter.py +86 -0
- evalscope/api/benchmark/adapters/ner_adapter.py +212 -0
- evalscope/api/benchmark/adapters/text2image_adapter.py +157 -0
- evalscope/api/benchmark/adapters/vision_language_adapter.py +8 -0
- evalscope/api/benchmark/benchmark.py +404 -0
- evalscope/api/benchmark/meta.py +124 -0
- evalscope/api/dataset/__init__.py +2 -0
- evalscope/api/dataset/dataset.py +370 -0
- evalscope/api/dataset/loader.py +266 -0
- evalscope/api/dataset/utils.py +143 -0
- evalscope/api/evaluator/__init__.py +3 -0
- evalscope/api/evaluator/cache.py +382 -0
- evalscope/api/evaluator/evaluator.py +61 -0
- evalscope/api/evaluator/state.py +280 -0
- evalscope/api/filter/__init__.py +1 -0
- evalscope/api/filter/filter.py +72 -0
- evalscope/api/messages/__init__.py +12 -0
- evalscope/api/messages/chat_message.py +248 -0
- evalscope/api/messages/content.py +102 -0
- evalscope/api/messages/utils.py +35 -0
- evalscope/api/metric/__init__.py +2 -0
- evalscope/api/metric/metric.py +60 -0
- evalscope/api/metric/scorer.py +113 -0
- evalscope/api/mixin/__init__.py +2 -0
- evalscope/api/mixin/llm_judge_mixin.py +170 -0
- evalscope/api/mixin/sandbox_mixin.py +182 -0
- evalscope/api/model/__init__.py +12 -0
- evalscope/api/model/generate_config.py +161 -0
- evalscope/api/model/model.py +386 -0
- evalscope/api/model/model_output.py +285 -0
- evalscope/api/registry.py +182 -0
- evalscope/api/tool/__init__.py +3 -0
- evalscope/api/tool/tool_call.py +101 -0
- evalscope/api/tool/tool_info.py +173 -0
- evalscope/api/tool/utils.py +64 -0
- evalscope/app/__init__.py +28 -0
- evalscope/app/app.py +38 -0
- evalscope/app/arguments.py +11 -0
- evalscope/app/constants.py +22 -0
- evalscope/app/ui/__init__.py +20 -0
- evalscope/app/ui/app_ui.py +53 -0
- evalscope/app/ui/multi_model.py +353 -0
- evalscope/app/ui/sidebar.py +42 -0
- evalscope/app/ui/single_model.py +220 -0
- evalscope/app/ui/visualization.py +36 -0
- evalscope/app/utils/data_utils.py +195 -0
- evalscope/app/utils/env_utils.py +12 -0
- evalscope/app/utils/localization.py +221 -0
- evalscope/app/utils/text_utils.py +119 -0
- evalscope/app/utils/visualization.py +96 -0
- evalscope/arguments.py +32 -9
- evalscope/backend/opencompass/api_meta_template.py +2 -1
- evalscope/backend/opencompass/backend_manager.py +10 -7
- evalscope/backend/rag_eval/__init__.py +1 -1
- evalscope/backend/rag_eval/backend_manager.py +23 -6
- evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +33 -21
- evalscope/backend/rag_eval/clip_benchmark/task_template.py +8 -4
- evalscope/backend/rag_eval/cmteb/arguments.py +14 -1
- evalscope/backend/rag_eval/cmteb/task_template.py +19 -3
- evalscope/backend/rag_eval/cmteb/tasks/CustomTask.py +1 -1
- evalscope/backend/rag_eval/ragas/arguments.py +0 -1
- evalscope/backend/rag_eval/ragas/task_template.py +2 -1
- evalscope/backend/rag_eval/ragas/tasks/build_distribution.py +2 -1
- evalscope/backend/rag_eval/ragas/tasks/build_transform.py +7 -4
- evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +9 -3
- evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +2 -6
- evalscope/backend/rag_eval/utils/embedding.py +125 -32
- evalscope/backend/rag_eval/utils/llm.py +16 -16
- evalscope/backend/vlm_eval_kit/backend_manager.py +8 -3
- evalscope/benchmarks/__init__.py +17 -5
- evalscope/benchmarks/aa_lcr/__init__.py +0 -0
- evalscope/benchmarks/aa_lcr/aa_lcr_adapter.py +205 -0
- evalscope/benchmarks/ai2d/__init__.py +0 -0
- evalscope/benchmarks/ai2d/ai2d_adapter.py +54 -0
- evalscope/benchmarks/aime/__init__.py +0 -0
- evalscope/benchmarks/aime/aime24_adapter.py +55 -0
- evalscope/benchmarks/aime/aime25_adapter.py +181 -0
- evalscope/benchmarks/aime/grader.py +307 -0
- evalscope/{metrics/math_accuracy.py → benchmarks/aime/math_normalize.py} +61 -72
- evalscope/benchmarks/alpaca_eval/__init__.py +0 -0
- evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +133 -0
- evalscope/benchmarks/amc/__init__.py +0 -0
- evalscope/benchmarks/amc/amc_adapter.py +51 -0
- evalscope/benchmarks/arc/arc_adapter.py +34 -149
- evalscope/benchmarks/arena_hard/__init__.py +0 -0
- evalscope/benchmarks/arena_hard/arena_hard_adapter.py +149 -0
- evalscope/benchmarks/arena_hard/utils.py +186 -0
- evalscope/benchmarks/bbh/bbh_adapter.py +117 -157
- evalscope/benchmarks/bfcl/__init__.py +0 -0
- evalscope/benchmarks/bfcl/v3/__init__.py +0 -0
- evalscope/benchmarks/bfcl/v3/bfcl_v3_adapter.py +370 -0
- evalscope/benchmarks/bfcl/v3/generation.py +222 -0
- evalscope/benchmarks/bfcl/v3/utils.py +23 -0
- evalscope/benchmarks/bfcl/v4/__init__.py +0 -0
- evalscope/benchmarks/bfcl/v4/bfcl_v4_adapter.py +229 -0
- evalscope/benchmarks/bfcl/v4/utils.py +410 -0
- evalscope/benchmarks/biomix_qa/__init__.py +0 -0
- evalscope/benchmarks/biomix_qa/biomix_qa_adapter.py +36 -0
- evalscope/benchmarks/blink/__init__.py +0 -0
- evalscope/benchmarks/blink/blink_adapter.py +61 -0
- evalscope/benchmarks/ceval/ceval_adapter.py +93 -174
- evalscope/benchmarks/chartqa/__init__.py +0 -0
- evalscope/benchmarks/chartqa/chartqa_adapter.py +80 -0
- evalscope/benchmarks/chartqa/utils.py +38 -0
- evalscope/benchmarks/chinese_simple_qa/__init__.py +0 -0
- evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +170 -0
- evalscope/benchmarks/cmmlu/cmmlu_adapter.py +34 -140
- evalscope/benchmarks/coin_flip/__init__.py +0 -0
- evalscope/benchmarks/coin_flip/coin_flip_adapter.py +128 -0
- evalscope/benchmarks/commonsense_qa/__init__.py +0 -0
- evalscope/benchmarks/commonsense_qa/commonsense_qa_adapter.py +32 -0
- evalscope/benchmarks/competition_math/competition_math_adapter.py +64 -112
- evalscope/benchmarks/data_collection/__init__.py +0 -0
- evalscope/benchmarks/data_collection/data_collection_adapter.py +215 -0
- evalscope/benchmarks/docmath/__init__.py +0 -0
- evalscope/benchmarks/docmath/docmath_adapter.py +143 -0
- evalscope/benchmarks/docmath/utils.py +219 -0
- evalscope/benchmarks/docvqa/__init__.py +0 -0
- evalscope/benchmarks/docvqa/docvqa_adapter.py +67 -0
- evalscope/benchmarks/drivelology/__init__.py +0 -0
- evalscope/benchmarks/drivelology/drivelology_binary_adapter.py +170 -0
- evalscope/benchmarks/drivelology/drivelology_multilabel_adapter.py +254 -0
- evalscope/benchmarks/drivelology/drivelology_selection_adapter.py +49 -0
- evalscope/benchmarks/drivelology/drivelology_writing_adapter.py +218 -0
- evalscope/benchmarks/drop/__init__.py +0 -0
- evalscope/benchmarks/drop/drop_adapter.py +155 -0
- evalscope/benchmarks/drop/utils.py +156 -0
- evalscope/benchmarks/frames/__init__.py +0 -0
- evalscope/benchmarks/frames/frames_adapter.py +175 -0
- evalscope/benchmarks/frames/utils.py +37 -0
- evalscope/benchmarks/general_arena/__init__.py +0 -0
- evalscope/benchmarks/general_arena/general_arena_adapter.py +454 -0
- evalscope/benchmarks/general_arena/utils.py +223 -0
- evalscope/benchmarks/general_mcq/__init__.py +0 -0
- evalscope/benchmarks/general_mcq/general_mcq_adapter.py +58 -0
- evalscope/benchmarks/general_qa/general_qa_adapter.py +75 -107
- evalscope/benchmarks/gpqa/__init__.py +0 -0
- evalscope/benchmarks/gpqa/gpqa_adapter.py +90 -0
- evalscope/benchmarks/gpqa/prompt.py +88 -0
- evalscope/benchmarks/gsm8k/gsm8k_adapter.py +77 -144
- evalscope/benchmarks/hallusion_bench/__init__.py +0 -0
- evalscope/benchmarks/hallusion_bench/hallusion_bench_adapter.py +159 -0
- evalscope/benchmarks/halu_eval/__init__.py +0 -0
- evalscope/benchmarks/halu_eval/halu_eval_adapter.py +128 -0
- evalscope/benchmarks/halu_eval/halu_eval_instructions.py +84 -0
- evalscope/benchmarks/healthbench/__init__.py +0 -0
- evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
- evalscope/benchmarks/healthbench/utils.py +102 -0
- evalscope/benchmarks/hellaswag/hellaswag_adapter.py +36 -134
- evalscope/benchmarks/hle/__init__.py +0 -0
- evalscope/benchmarks/hle/hle_adapter.py +153 -0
- evalscope/benchmarks/humaneval/humaneval_adapter.py +80 -88
- evalscope/benchmarks/humaneval/utils.py +235 -0
- evalscope/benchmarks/ifeval/ifeval_adapter.py +71 -45
- evalscope/benchmarks/ifeval/instructions.py +112 -68
- evalscope/benchmarks/ifeval/instructions_registry.py +1 -1
- evalscope/benchmarks/ifeval/instructions_util.py +2 -3
- evalscope/benchmarks/ifeval/utils.py +6 -7
- evalscope/benchmarks/image_edit/__init__.py +0 -0
- evalscope/benchmarks/image_edit/gedit/__init__.py +0 -0
- evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
- evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
- evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
- evalscope/benchmarks/infovqa/__init__.py +0 -0
- evalscope/benchmarks/infovqa/infovqa_adapter.py +66 -0
- evalscope/benchmarks/iquiz/iquiz_adapter.py +30 -58
- evalscope/benchmarks/live_code_bench/__init__.py +0 -0
- evalscope/benchmarks/live_code_bench/evaluate_utils.py +195 -0
- evalscope/benchmarks/live_code_bench/extract_utils.py +70 -0
- evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +150 -0
- evalscope/benchmarks/live_code_bench/load_utils.py +63 -0
- evalscope/benchmarks/live_code_bench/pass_k_utils.py +56 -0
- evalscope/benchmarks/live_code_bench/prompts.py +207 -0
- evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
- evalscope/benchmarks/live_code_bench/testing_util.py +544 -0
- evalscope/benchmarks/logi_qa/__int__.py +0 -0
- evalscope/benchmarks/logi_qa/logi_qa_adapter.py +41 -0
- evalscope/benchmarks/maritime_bench/__init__.py +0 -0
- evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py +56 -0
- evalscope/benchmarks/math_500/__init__.py +0 -0
- evalscope/benchmarks/math_500/math_500_adapter.py +55 -0
- evalscope/benchmarks/math_qa/__init__.py +0 -0
- evalscope/benchmarks/math_qa/math_qa_adapter.py +35 -0
- evalscope/benchmarks/math_verse/__init__.py +0 -0
- evalscope/benchmarks/math_verse/math_verse_adapter.py +105 -0
- evalscope/benchmarks/math_vision/__init__.py +0 -0
- evalscope/benchmarks/math_vision/math_vision_adapter.py +116 -0
- evalscope/benchmarks/math_vista/__init__.py +0 -0
- evalscope/benchmarks/math_vista/math_vista_adapter.py +114 -0
- evalscope/benchmarks/med_mcqa/__init__.py +0 -0
- evalscope/benchmarks/med_mcqa/med_mcqa_adapter.py +32 -0
- evalscope/benchmarks/minerva_math/__init__.py +0 -0
- evalscope/benchmarks/minerva_math/minerva_math_adapter.py +53 -0
- evalscope/benchmarks/mm_bench/__init__.py +0 -0
- evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
- evalscope/benchmarks/mm_star/__init__.py +0 -0
- evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
- evalscope/benchmarks/mmlu/mmlu_adapter.py +32 -210
- evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +87 -103
- evalscope/benchmarks/mmlu_redux/__init__.py +0 -0
- evalscope/benchmarks/mmlu_redux/mmlu_redux_adapter.py +139 -0
- evalscope/benchmarks/mmmu/__init__.py +0 -0
- evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
- evalscope/benchmarks/mmmu_pro/__init__.py +0 -0
- evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +124 -0
- evalscope/benchmarks/mri_mcqa/__init__.py +0 -0
- evalscope/benchmarks/mri_mcqa/mri_mcqa_adapter.py +34 -0
- evalscope/benchmarks/multi_if/__init__.py +0 -0
- evalscope/benchmarks/multi_if/ifeval.py +3354 -0
- evalscope/benchmarks/multi_if/metrics.py +120 -0
- evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
- evalscope/benchmarks/music_trivia/__init__.py +0 -0
- evalscope/benchmarks/music_trivia/music_trivia_adapter.py +36 -0
- evalscope/benchmarks/musr/__init__.py +0 -0
- evalscope/benchmarks/musr/musr_adapter.py +43 -0
- evalscope/benchmarks/needle_haystack/__init__.py +0 -0
- evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +389 -0
- evalscope/benchmarks/needle_haystack/utils.py +79 -0
- evalscope/benchmarks/ner/__init__.py +0 -0
- evalscope/benchmarks/ner/broad_twitter_corpus_adapter.py +52 -0
- evalscope/benchmarks/ner/conll2003_adapter.py +48 -0
- evalscope/benchmarks/ner/copious_adapter.py +85 -0
- evalscope/benchmarks/ner/cross_ner_adapter.py +120 -0
- evalscope/benchmarks/ner/cross_ner_entities/__init__.py +0 -0
- evalscope/benchmarks/ner/cross_ner_entities/ai.py +54 -0
- evalscope/benchmarks/ner/cross_ner_entities/literature.py +36 -0
- evalscope/benchmarks/ner/cross_ner_entities/music.py +39 -0
- evalscope/benchmarks/ner/cross_ner_entities/politics.py +37 -0
- evalscope/benchmarks/ner/cross_ner_entities/science.py +58 -0
- evalscope/benchmarks/ner/genia_ner_adapter.py +66 -0
- evalscope/benchmarks/ner/harvey_ner_adapter.py +58 -0
- evalscope/benchmarks/ner/mit_movie_trivia_adapter.py +74 -0
- evalscope/benchmarks/ner/mit_restaurant_adapter.py +66 -0
- evalscope/benchmarks/ner/ontonotes5_adapter.py +87 -0
- evalscope/benchmarks/ner/wnut2017_adapter.py +61 -0
- evalscope/benchmarks/ocr_bench/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench/ocr_bench/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench/ocr_bench/ocr_bench_adapter.py +101 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/IoUscore_metric.py +87 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/TEDS_metric.py +963 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/ocr_bench_v2_adapter.py +161 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/page_ocr_metric.py +50 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/parallel.py +46 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/readme.txt +26 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/rrc_evaluation_funcs_1_1.py +537 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/script.py +481 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_metric.py +179 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/utils.py +433 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/vqa_metric.py +254 -0
- evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
- evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
- evalscope/benchmarks/olympiad_bench/utils.py +565 -0
- evalscope/benchmarks/omni_bench/__init__.py +0 -0
- evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
- evalscope/benchmarks/omnidoc_bench/__init__.py +0 -0
- evalscope/benchmarks/omnidoc_bench/end2end_eval.py +349 -0
- evalscope/benchmarks/omnidoc_bench/metrics.py +547 -0
- evalscope/benchmarks/omnidoc_bench/omnidoc_bench_adapter.py +135 -0
- evalscope/benchmarks/omnidoc_bench/utils.py +1937 -0
- evalscope/benchmarks/piqa/__init__.py +0 -0
- evalscope/benchmarks/piqa/piqa_adapter.py +32 -0
- evalscope/benchmarks/poly_math/__init__.py +0 -0
- evalscope/benchmarks/poly_math/poly_math_adapter.py +132 -0
- evalscope/benchmarks/poly_math/utils/instruction.py +105 -0
- evalscope/benchmarks/pope/__init__.py +0 -0
- evalscope/benchmarks/pope/pope_adapter.py +112 -0
- evalscope/benchmarks/process_bench/__init__.py +0 -0
- evalscope/benchmarks/process_bench/process_bench_adapter.py +171 -0
- evalscope/benchmarks/pumed_qa/__init__.py +0 -0
- evalscope/benchmarks/pumed_qa/pubmed_qa_adapter.py +175 -0
- evalscope/benchmarks/qasc/__init__.py +0 -0
- evalscope/benchmarks/qasc/qasc_adapter.py +35 -0
- evalscope/benchmarks/race/race_adapter.py +33 -120
- evalscope/benchmarks/real_world_qa/__init__.py +0 -0
- evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
- evalscope/benchmarks/sciq/__init__.py +0 -0
- evalscope/benchmarks/sciq/sciq_adapter.py +36 -0
- evalscope/benchmarks/seed_bench_2_plus/__init__.py +0 -0
- evalscope/benchmarks/seed_bench_2_plus/seed_bench_2_plus_adapter.py +72 -0
- evalscope/benchmarks/simple_qa/__init__.py +0 -0
- evalscope/benchmarks/simple_qa/simple_qa_adapter.py +169 -0
- evalscope/benchmarks/simple_vqa/__init__.py +0 -0
- evalscope/benchmarks/simple_vqa/simple_vqa_adapter.py +169 -0
- evalscope/benchmarks/siqa/__init__.py +0 -0
- evalscope/benchmarks/siqa/siqa_adapter.py +39 -0
- evalscope/benchmarks/super_gpqa/__init__.py +0 -0
- evalscope/benchmarks/super_gpqa/prompt.py +88 -0
- evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +165 -0
- evalscope/benchmarks/super_gpqa/utils.py +86 -0
- evalscope/benchmarks/tau_bench/__init__.py +0 -0
- evalscope/benchmarks/tau_bench/tau2_bench/__init__.py +0 -0
- evalscope/benchmarks/tau_bench/tau2_bench/generation.py +158 -0
- evalscope/benchmarks/tau_bench/tau2_bench/tau2_bench_adapter.py +146 -0
- evalscope/benchmarks/tau_bench/tau_bench/__init__.py +0 -0
- evalscope/benchmarks/tau_bench/tau_bench/generation.py +147 -0
- evalscope/benchmarks/tau_bench/tau_bench/tau_bench_adapter.py +168 -0
- evalscope/benchmarks/text2image/__init__.py +0 -0
- evalscope/benchmarks/text2image/evalmuse_adapter.py +78 -0
- evalscope/benchmarks/text2image/genai_bench_adapter.py +53 -0
- evalscope/benchmarks/text2image/general_t2i_adapter.py +42 -0
- evalscope/benchmarks/text2image/hpdv2_adapter.py +52 -0
- evalscope/benchmarks/text2image/tifa_adapter.py +27 -0
- evalscope/benchmarks/tool_bench/__init__.py +0 -0
- evalscope/benchmarks/tool_bench/tool_bench_adapter.py +102 -0
- evalscope/benchmarks/tool_bench/utils.py +203 -0
- evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +56 -118
- evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +70 -270
- evalscope/benchmarks/visu_logic/__init__.py +0 -0
- evalscope/benchmarks/visu_logic/visu_logic_adapter.py +75 -0
- evalscope/benchmarks/winogrande/__init__.py +0 -0
- evalscope/benchmarks/winogrande/winogrande_adapter.py +34 -0
- evalscope/benchmarks/wmt/__init__.py +0 -0
- evalscope/benchmarks/wmt/wmt24_adapter.py +294 -0
- evalscope/benchmarks/zerobench/__init__.py +0 -0
- evalscope/benchmarks/zerobench/zerobench_adapter.py +64 -0
- evalscope/cli/cli.py +2 -0
- evalscope/cli/start_app.py +12 -2
- evalscope/cli/start_eval.py +4 -3
- evalscope/cli/start_perf.py +10 -2
- evalscope/cli/start_server.py +6 -3
- evalscope/collections/__init__.py +27 -3
- evalscope/collections/sampler.py +12 -11
- evalscope/collections/schema.py +13 -12
- evalscope/config.py +218 -147
- evalscope/constants.py +78 -82
- evalscope/evaluator/__init__.py +1 -1
- evalscope/evaluator/evaluator.py +334 -318
- evalscope/filters/__init__.py +2 -0
- evalscope/filters/extraction.py +126 -0
- evalscope/filters/selection.py +57 -0
- evalscope/metrics/__init__.py +59 -3
- evalscope/metrics/bert_score/__init__.py +0 -0
- evalscope/metrics/bert_score/scorer.py +338 -0
- evalscope/metrics/bert_score/utils.py +697 -0
- evalscope/metrics/bundled_rouge_score/rouge_scorer.py +20 -15
- evalscope/metrics/llm_judge.py +211 -0
- evalscope/metrics/math_parser.py +545 -0
- evalscope/metrics/metric.py +611 -0
- evalscope/metrics/metrics.py +112 -23
- evalscope/metrics/rouge_metric.py +11 -13
- evalscope/metrics/t2v_metrics/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/clipscore.py +14 -0
- evalscope/metrics/t2v_metrics/constants.py +12 -0
- evalscope/metrics/t2v_metrics/itmscore.py +14 -0
- evalscope/metrics/t2v_metrics/models/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/__init__.py +30 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/base_model.py +6 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/clip_model.py +134 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/cross_modeling.py +282 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/clip_model.py +115 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/hpsv2_model.py +87 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/mps_model.py +86 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py +62 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/__init__.py +26 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/blip2_itm_model.py +85 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py +99 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/ImageReward.py +176 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py +82 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward_model.py +74 -0
- evalscope/metrics/t2v_metrics/models/model.py +45 -0
- evalscope/metrics/t2v_metrics/models/utils.py +25 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/__init__.py +22 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/__init__.py +1 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/language_model/clip_t5.py +306 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/builder.py +12 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/clip_encoder.py +84 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_projector/builder.py +50 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5_model.py +223 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/gpt4v_model.py +153 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/__init__.py +26 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +465 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +141 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +24 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +190 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +100 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +313 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +416 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +8 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +192 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +320 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/default.yaml +10 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_flant5xl.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt2.7b.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt6.7b.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_coco.yaml +36 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xl.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xxl.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna13b.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain.yaml +36 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_no_prefix.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_prefix.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_vitL.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xxl.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt2.7b.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt6.7b.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_vitL.yaml +37 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna13b.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna7b.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config.json +21 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config_albef.json +22 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_large_config.json +21 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/__init__.py +212 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/base_model.py +231 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +1111 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2.py +211 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_image_text_matching.py +109 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_qformer.py +457 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5.py +370 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5_instruct.py +765 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/fga_blip2.py +274 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_llama.py +896 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_t5.py +1876 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/__init__.py +83 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py +58 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_caption.py +212 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_classification.py +164 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_feature_extractor.py +202 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_image_text_matching.py +187 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_nlvr.py +179 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_outputs.py +115 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_pretrain.py +371 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_vqa.py +348 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +870 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/clip_vit.py +273 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/eva_vit.py +514 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +1291 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/vit.py +476 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/__init__.py +35 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/base_processor.py +27 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/blip_processors.py +233 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/randaugment.py +393 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py +129 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/vqa_model.py +18 -0
- evalscope/metrics/t2v_metrics/score.py +78 -0
- evalscope/metrics/t2v_metrics/vqascore.py +14 -0
- evalscope/models/__init__.py +23 -13
- evalscope/models/image_edit_model.py +125 -0
- evalscope/models/mockllm.py +65 -0
- evalscope/models/model_apis.py +69 -0
- evalscope/models/modelscope.py +455 -0
- evalscope/models/openai_compatible.py +144 -0
- evalscope/models/text2image_model.py +124 -0
- evalscope/models/utils/openai.py +708 -0
- evalscope/perf/__init__.py +0 -1
- evalscope/perf/arguments.py +103 -69
- evalscope/perf/benchmark.py +114 -163
- evalscope/perf/http_client.py +59 -89
- evalscope/perf/main.py +91 -18
- evalscope/perf/plugin/__init__.py +3 -2
- evalscope/perf/plugin/api/__init__.py +4 -3
- evalscope/perf/plugin/api/base.py +27 -7
- evalscope/perf/plugin/api/custom_api.py +170 -57
- evalscope/perf/plugin/api/dashscope_api.py +4 -10
- evalscope/perf/plugin/api/default_api.py +214 -0
- evalscope/perf/plugin/api/openai_api.py +120 -41
- evalscope/perf/plugin/datasets/__init__.py +10 -6
- evalscope/perf/plugin/datasets/base.py +43 -1
- evalscope/perf/plugin/datasets/custom.py +22 -3
- evalscope/perf/plugin/datasets/flickr8k.py +5 -27
- evalscope/perf/plugin/datasets/kontext_bench.py +28 -0
- evalscope/perf/plugin/datasets/line_by_line.py +7 -3
- evalscope/perf/plugin/datasets/longalpaca.py +7 -3
- evalscope/perf/plugin/datasets/openqa.py +13 -14
- evalscope/perf/plugin/datasets/random_dataset.py +67 -0
- evalscope/perf/plugin/datasets/random_vl_dataset.py +80 -0
- evalscope/perf/plugin/datasets/speed_benchmark.py +11 -0
- evalscope/perf/plugin/registry.py +36 -16
- evalscope/perf/utils/analysis_result.py +24 -23
- evalscope/perf/utils/benchmark_util.py +95 -55
- evalscope/perf/utils/db_util.py +115 -78
- evalscope/perf/utils/local_server.py +12 -47
- evalscope/perf/utils/log_utils.py +63 -0
- evalscope/perf/utils/rich_display.py +192 -0
- evalscope/report/__init__.py +46 -3
- evalscope/report/combinator.py +143 -32
- evalscope/report/generator.py +74 -34
- evalscope/report/report.py +238 -0
- evalscope/run.py +71 -46
- evalscope/summarizer.py +5 -5
- evalscope/third_party/longbench_write/infer.py +1 -1
- evalscope/third_party/thinkbench/__init__.py +3 -0
- evalscope/third_party/thinkbench/eval.py +441 -0
- evalscope/third_party/thinkbench/infer.py +130 -0
- evalscope/third_party/thinkbench/resources/critique_template.txt +17 -0
- evalscope/third_party/thinkbench/resources/reformat_template.txt +31 -0
- evalscope/third_party/thinkbench/tools/__init__.py +0 -0
- evalscope/third_party/thinkbench/tools/llm.py +48 -0
- evalscope/third_party/thinkbench/tools/utils.py +13 -0
- evalscope/third_party/toolbench_static/llm/swift_infer.py +46 -20
- evalscope/third_party/toolbench_static/toolbench_static.py +2 -1
- evalscope/utils/__init__.py +82 -2
- evalscope/utils/argument_utils.py +64 -0
- evalscope/utils/chat_service.py +8 -6
- evalscope/utils/deprecation_utils.py +53 -0
- evalscope/utils/function_utils.py +266 -0
- evalscope/utils/import_utils.py +154 -0
- evalscope/utils/io_utils.py +336 -8
- evalscope/utils/json_schema.py +231 -0
- evalscope/utils/logger.py +121 -31
- evalscope/utils/model_utils.py +57 -1
- evalscope/utils/multi_choices.py +303 -0
- evalscope/utils/ner.py +377 -0
- evalscope/utils/url_utils.py +65 -0
- evalscope/version.py +2 -2
- evalscope-1.2.0.dist-info/METADATA +553 -0
- evalscope-1.2.0.dist-info/RECORD +628 -0
- {evalscope-0.10.0.dist-info → evalscope-1.2.0.dist-info}/WHEEL +1 -1
- {evalscope-0.10.0.dist-info → evalscope-1.2.0.dist-info}/top_level.txt +0 -1
- evalscope/backend/vlm_eval_kit/custom_dataset.py +0 -46
- evalscope/benchmarks/arc/ai2_arc.py +0 -151
- evalscope/benchmarks/benchmark.py +0 -76
- evalscope/benchmarks/ceval/ceval_exam.py +0 -146
- evalscope/benchmarks/ceval/samples.jsonl +0 -1
- evalscope/benchmarks/cmmlu/cmmlu.py +0 -161
- evalscope/benchmarks/cmmlu/samples.jsonl +0 -5
- evalscope/benchmarks/competition_math/competition_math.py +0 -79
- evalscope/benchmarks/data_adapter.py +0 -291
- evalscope/benchmarks/gsm8k/gsm8k.py +0 -121
- evalscope/benchmarks/hellaswag/hellaswag.py +0 -112
- evalscope/benchmarks/humaneval/humaneval.py +0 -79
- evalscope/benchmarks/mmlu/mmlu.py +0 -160
- evalscope/benchmarks/mmlu/samples.jsonl +0 -5
- evalscope/benchmarks/race/race.py +0 -104
- evalscope/benchmarks/race/samples.jsonl +0 -5
- evalscope/benchmarks/trivia_qa/trivia_qa.py +0 -89
- evalscope/benchmarks/truthful_qa/truthful_qa.py +0 -163
- evalscope/collections/evaluator.py +0 -198
- evalscope/evaluator/rating_eval.py +0 -157
- evalscope/evaluator/reviewer/__init__.py +0 -1
- evalscope/evaluator/reviewer/auto_reviewer.py +0 -391
- evalscope/metrics/code_metric.py +0 -98
- evalscope/metrics/named_metrics.py +0 -17
- evalscope/metrics/resources/gpt2-zhcn3-v4.bpe +0 -58485
- evalscope/metrics/resources/gpt2-zhcn3-v4.json +0 -1
- evalscope/models/base_adapter.py +0 -52
- evalscope/models/chat_adapter.py +0 -138
- evalscope/models/choice_adapter.py +0 -211
- evalscope/models/custom/__init__.py +0 -3
- evalscope/models/custom/custom_model.py +0 -53
- evalscope/models/custom/dummy_model.py +0 -63
- evalscope/models/custom_adapter.py +0 -67
- evalscope/models/local_model.py +0 -74
- evalscope/models/model.py +0 -229
- evalscope/models/server_adapter.py +0 -111
- evalscope/registry/__init__.py +0 -1
- evalscope/registry/config/cfg_arena.yaml +0 -77
- evalscope/registry/config/cfg_arena_zhihu.yaml +0 -63
- evalscope/registry/config/cfg_pairwise_baseline.yaml +0 -83
- evalscope/registry/config/cfg_single.yaml +0 -78
- evalscope/registry/data/prompt_template/lmsys_v2.jsonl +0 -8
- evalscope/registry/data/prompt_template/prompt_templates.jsonl +0 -8
- evalscope/registry/data/qa_browser/battle.jsonl +0 -634
- evalscope/registry/data/qa_browser/category_mapping.yaml +0 -10
- evalscope/registry/data/question.jsonl +0 -80
- evalscope/registry/tasks/arc.yaml +0 -28
- evalscope/registry/tasks/bbh.yaml +0 -26
- evalscope/registry/tasks/bbh_mini.yaml +0 -26
- evalscope/registry/tasks/ceval.yaml +0 -27
- evalscope/registry/tasks/ceval_mini.yaml +0 -26
- evalscope/registry/tasks/cmmlu.yaml +0 -27
- evalscope/registry/tasks/eval_qwen-7b-chat_v100.yaml +0 -28
- evalscope/registry/tasks/general_qa.yaml +0 -27
- evalscope/registry/tasks/gsm8k.yaml +0 -29
- evalscope/registry/tasks/mmlu.yaml +0 -29
- evalscope/registry/tasks/mmlu_mini.yaml +0 -27
- evalscope/report/app.py +0 -506
- evalscope/report/utils.py +0 -133
- evalscope/run_arena.py +0 -202
- evalscope/utils/arena_utils.py +0 -217
- evalscope/utils/completion_parsers.py +0 -82
- evalscope/utils/utils.py +0 -301
- evalscope-0.10.0.dist-info/METADATA +0 -565
- evalscope-0.10.0.dist-info/RECORD +0 -286
- tests/__init__.py +0 -1
- tests/cli/__init__.py +0 -1
- tests/cli/test_collection.py +0 -57
- tests/cli/test_run.py +0 -165
- tests/perf/__init__.py +0 -1
- tests/perf/test_perf.py +0 -101
- tests/rag/test_clip_benchmark.py +0 -85
- tests/rag/test_mteb.py +0 -138
- tests/rag/test_ragas.py +0 -120
- tests/swift/__init__.py +0 -1
- tests/swift/test_run_swift_eval.py +0 -145
- tests/swift/test_run_swift_vlm_eval.py +0 -127
- tests/swift/test_run_swift_vlm_jugde_eval.py +0 -156
- tests/test_run_all.py +0 -12
- tests/vlm/__init__.py +0 -1
- tests/vlm/test_vlmeval.py +0 -60
- {tests/rag → evalscope/api}/__init__.py +0 -0
- {evalscope-0.10.0.dist-info → evalscope-1.2.0.dist-info}/entry_points.txt +0 -0
- {evalscope-0.10.0.dist-info → evalscope-1.2.0.dist-info/licenses}/LICENSE +0 -0

evalscope/api/evaluator/state.py
@@ -0,0 +1,280 @@
+from dataclasses import dataclass
+from random import Random
+from typing import Any, Dict, List, Optional, Sequence, Union, overload
+
+from evalscope.api.dataset import Sample
+from evalscope.api.messages import ChatMessage, ChatMessageUser, messages_pretty_str, messages_to_markdown
+from evalscope.api.model import ModelOutput
+
+
+class Target(Sequence[str]):
+    """Target for scoring against the current TaskState.
+
+    Target is a sequence of one or more strings. Use the
+    `text` property to access the value as a single string.
+    """
+
+    def __init__(self, target: Union[str, List[str]]) -> None:
+        self.target = target if isinstance(target, list) else [target]
+
+    @overload
+    def __getitem__(self, index: int) -> str:
+        ...
+
+    @overload
+    def __getitem__(self, index: slice) -> Sequence[str]:
+        ...
+
+    def __getitem__(self, index: Union[int, slice]) -> Union[str, Sequence[str]]:
+        return self.target[index]
+
+    def __len__(self) -> int:
+        return len(self.target)
+
+    @property
+    def text(self) -> str:
+        return ''.join(self.target)
+
+
+@dataclass
+class Choice:
+    """
+    A `Choice` represents a single choice in a multiple choice question.
+
+    It is only relevant for the `multiple_choice` solver and corresponding
+    `choice` scorer.
+    """
+
+    value: str
+    """The original value of the choice from the `Sample`."""
+
+    correct: Optional[bool]
+    """Did the model think this choice satisfies the question? `None`
+    indicates this has not been set yet"""
+
+    original_position: int
+    """Choices may be re-ordered during processing, this represents the
+    original position in the sample's list of choices"""
+
+
+class Choices(Sequence[Choice]):
+    """
+    Wrapper class for a list of `Choice` objects.
+
+    Primarily simply to abstract away implementations of choice-specific
+    functionality from the already-big `TaskState` class.
+    """
+
+    def __init__(self, choices: Union[List[str], List[Choice]]) -> None:
+        """
+        Setter for choices, intended to only be used with the `multiple_choice` scorer.
+
+        Choices come from a list of choices for the sample, specifically used by
+        the `multiple_choice` scorer.
+
+        For example, if the sample was a multiple choice question like "What is
+        the capital of France? A) Paris B) London C) Berlin", we would store the
+        possible answers here.
+        """
+        self._choices: List[Choice] = []
+
+        for i, choice in enumerate(choices):
+            if isinstance(choice, str):
+                self._choices.append(Choice(value=choice, correct=None, original_position=i))
+            elif isinstance(choice, Choice):
+                self._choices.append(choice)
+
+    @overload
+    def __getitem__(self, index: int) -> Choice:
+        ...
+
+    @overload
+    def __getitem__(self, index: slice) -> Sequence[Choice]:
+        ...
+
+    def __getitem__(self, index: Union[int, slice]) -> Union[Choice, Sequence[Choice]]:
+        return self._choices[index]
+
+    def __len__(self) -> int:
+        return len(self._choices)
+
+    def mark_choice(self, index: int, correct: bool) -> None:
+        """Set the value of a specific choice"""
+        self._choices[index].correct = correct
+
+    def shuffle(self, rand: Random = Random()) -> None:
+        """
+        Shuffle the choice order, setting the `original_position` so they can be mapped back to their original order.
+
+        Some evals will shuffle the choices from the original sample to try to
+        avoid the model answering correctly due to fine-tuning (or similar) on
+        specific datasets.
+        """
+        shuffled_positions = list(range(len(self._choices)))
+        rand.shuffle(shuffled_positions)
+
+        shuffled_choices = [Choice('notachoice', None, -1)] * len(self._choices)
+
+        for i, shuffled_position in enumerate(shuffled_positions):
+            shuffled_choices[i] = self._choices[shuffled_position]
+            shuffled_choices[i].original_position = shuffled_position
+
+        self._choices = shuffled_choices
+
+
+class TaskState:
+    """
+    The `TaskState` represents the internal state of the `Task` being run for a single `Sample`.
+
+    The `TaskState` is passed to and returned from each solver during a sample's
+    evaluation. It allows us to maintain the manipulated message history, the tools
+    available to the model, the final output of the model, and whether the task
+    is completed or has hit a limit.
+    """
+
+    def __init__(
+        self,
+        model: str,
+        sample: Sample,
+        messages: List[ChatMessage] = [],
+        output: Optional[ModelOutput] = None,
+        completed: bool = False,
+    ) -> None:
+        self._model = model
+        self._sample = sample
+        self._sample_id = sample.id
+        self._group_id = sample.group_id
+        self._input = sample.input
+        self._target = Target(sample.target)
+        self._metadata = sample.metadata
+        self._messages: List[ChatMessage] = messages
+        self._output = output if output else ModelOutput(model=str(model))
+        self._completed = completed
+        if sample.choices:
+            self._choices = Choices(sample.choices)
+        else:
+            self._choices = Choices([])
+
+    @property
+    def model(self) -> str:
+        """Name of model being evaluated."""
+        return self._model
+
+    @property
+    def sample_id(self) -> int:
+        """Unique id for sample."""
+        return self._sample_id
+
+    @property
+    def group_id(self) -> int:
+        """Group id for sample."""
+        return self._group_id
+
+    @property
+    def input(self) -> Union[str, List[ChatMessage]]:
+        """Input from the `Sample`, should be considered immutable."""
+        return self._input
+
+    @property
+    def input_text(self) -> str:
+        """
+        Convenience function for accessing the initial input from the `Sample` as a string.
+
+        If the `input` is a `List[ChatMessage]`, this will return the text from
+        the last chat message
+        """
+        if isinstance(self._input, str):
+            return self._input
+        else:
+            return messages_pretty_str(self._input)
+
+    @property
+    def input_markdown(self) -> str:
+        """Get the input text as markdown.
+
+        For multi-modal content, images will be represented in markdown format.
+        """
+        if isinstance(self._input, str):
+            return self._input
+        else:
+            return messages_to_markdown(self._input)
+
+    @property
+    def choices(self) -> Choices:
+        """Choices for the sample, if applicable."""
+        return self._choices
+
+    @property
+    def user_prompt(self) -> ChatMessageUser:
+        """User prompt for this state.
+
+        Tasks are very general and can have may types of inputs.
+        However, in many cases solvers assume they can interact with
+        the state as a "chat" in a predictable fashion (e.g. prompt
+        engineering solvers). This property enables easy read and
+        write access to the user chat prompt. Raises an
+        exception if there is no user prompt
+        """
+        prompt = next((m for m in reversed(self.messages) if m.role == 'user'), None)
+        if prompt:
+            return prompt
+        else:
+            raise ValueError('user_prompt requested from TaskState but none available')
+
+    @property
+    def metadata(self) -> Dict[str, Any]:
+        """Metadata from the `Sample` for this `TaskState`"""
+        return self._metadata
+
+    @metadata.setter
+    def metadata(self, metadata: Dict[str, Any]) -> None:
+        self._metadata = metadata
+
+    @property
+    def messages(self) -> List[ChatMessage]:
+        """
+        Chat conversation history for sample.
+
+        This will generally get appended to every time a `generate` call is made
+        to the model. Useful for both debug and for solvers/scorers to assess
+        model performance or choose the next step.
+        """
+        return self._messages
+
+    @messages.setter
+    def messages(self, messages: List[ChatMessage]) -> None:
+        self._messages = messages
+
+    @property
+    def output(self) -> ModelOutput:
+        """
+        The 'final' model output once we've completed all solving.
+
+        For simple evals this may just be the last `message` from the
+        conversation history, but more complex solvers may set this directly.
+        """
+        return self._output
+
+    @output.setter
+    def output(self, output: ModelOutput) -> None:
+        self._output = output
+
+    @property
+    def completed(self) -> bool:
+        """Is the task completed."""
+        return self._completed
+
+    @completed.setter
+    def completed(self, completed: bool) -> None:
+        """Set the completed status."""
+        self._completed = completed
+
+    @property
+    def target(self) -> str:
+        """The scoring target for this `Sample`."""
+        return self._target.text
+
+    @target.setter
+    def target(self, text: str) -> None:
+        """Set the target for review purposes."""
+        self._target = Target(text)
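
For orientation: `Choices.shuffle` in the hunk above stamps each choice with its `original_position`, which is what lets multiple-choice scorers map shuffled answers back to the sample's original order. A minimal sketch of that behavior (illustrative, not part of the diff; the import path follows the `evalscope/api/evaluator/state.py` entry in the file list above):

```python
from random import Random

from evalscope.api.evaluator.state import Choices

# Build choices from plain strings; original_position starts as the index
# each choice had in the sample (0, 1, 2).
choices = Choices(['Paris', 'London', 'Berlin'])

# Shuffle with a seeded Random so the order is reproducible across runs.
choices.shuffle(rand=Random(42))

# Mark whichever choice the model picked.
choices.mark_choice(0, correct=True)

for choice in choices:
    # original_position maps each shuffled choice back to the sample's
    # original ordering, so answer letters can be recovered after scoring.
    print(choice.value, choice.correct, choice.original_position)
```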

evalscope/api/filter/__init__.py
@@ -0,0 +1 @@
+from .filter import Filter, FilterEnsemble, build_filter_ensemble

evalscope/api/filter/filter.py
@@ -0,0 +1,72 @@
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+from typing import Any, Callable, Dict, Iterable, List, Union
+
+from evalscope.api.registry import get_filter
+
+
+class Filter(ABC):
+    """
+    Filter classes operate on a sample level.
+    """
+
+    def __init__(self, *args, **kwargs) -> None:
+        """
+        Can define custom behavior here, if an individual instantiation of a Filter class should have state.
+        """
+
+    @abstractmethod
+    def apply(self, instance: List[str]) -> List[str]:
+
+        return instance
+
+    def __call__(self, instance: str) -> str:
+        """
+        Allows the filter to be called like a function.
+        """
+        return self.apply([instance])[0]
+
+
+@dataclass
+class FilterEnsemble:
+    """
+    FilterEnsemble creates a pipeline applying multiple filters.
+    Its intended usage is to stack multiple post-processing steps in order.
+    """
+
+    name: str
+    filters: List[Callable[[], Filter]]
+
+    def apply(self, instance: List[str]) -> List[str]:
+
+        for f in self.filters:
+            # apply filters in sequence
+            instance = f.apply(instance)
+
+        return instance
+
+    def __call__(self, instance: str) -> str:
+        """
+        Allows the filter ensemble to be called like a function.
+        """
+        return self.apply([instance])[0]
+
+
+def build_filter_ensemble(name: str = 'default', filters: Dict[str, Any] = {}) -> FilterEnsemble:
+    """
+    Create a filtering pipeline.
+    """
+    filter_funcs = []
+    for filter_name, filter_args in filters.items():
+        filter_cls = get_filter(filter_name)
+        if isinstance(filter_args, list):
+            filter_function = filter_cls(*filter_args)
+        elif isinstance(filter_args, dict):
+            filter_function = filter_cls(**filter_args)
+        else:
+            # Assume single value for simple filters
+            filter_function = filter_cls(filter_args)
+        # add the filter as a pipeline step
+        filter_funcs.append(filter_function)
+
+    return FilterEnsemble(name=name, filters=filter_funcs)
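
One detail worth noting in the hunk above: `FilterEnsemble.apply` calls `f.apply(...)` on each entry, so at runtime the ensemble expects `Filter` instances even though the `filters` field is annotated as `List[Callable[[], Filter]]`. A minimal sketch (illustrative, not part of the diff) that builds an ensemble from two hypothetical `Filter` subclasses directly, sidestepping the registry lookup that `build_filter_ensemble` performs via `get_filter`:

```python
from typing import List

from evalscope.api.filter.filter import Filter, FilterEnsemble


class StripFilter(Filter):
    """Hypothetical filter: trims surrounding whitespace from each candidate."""

    def apply(self, instance: List[str]) -> List[str]:
        return [s.strip() for s in instance]


class LowercaseFilter(Filter):
    """Hypothetical filter: lowercases each candidate."""

    def apply(self, instance: List[str]) -> List[str]:
        return [s.lower() for s in instance]


# Filters run in order: strip first, then lowercase.
ensemble = FilterEnsemble(name='demo', filters=[StripFilter(), LowercaseFilter()])

print(ensemble('  The ANSWER is B  '))  # -> 'the answer is b'
```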

evalscope/api/messages/__init__.py
@@ -0,0 +1,12 @@
+from .chat_message import (
+    ChatMessage,
+    ChatMessageAssistant,
+    ChatMessageSystem,
+    ChatMessageTool,
+    ChatMessageUser,
+    dict_to_chat_message,
+    messages_pretty_str,
+    messages_to_markdown,
+)
+from .content import Content, ContentAudio, ContentData, ContentImage, ContentReasoning, ContentText, ContentVideo
+from .utils import parse_content_with_reasoning

evalscope/api/messages/chat_message.py
@@ -0,0 +1,248 @@
+import uuid
+from pydantic import BaseModel, Field, JsonValue, model_validator
+from typing import Any, Dict, List, Literal, Optional, Type, Union
+
+from evalscope.api.tool import ToolCall, ToolCallError
+from .content import Content, ContentAudio, ContentImage, ContentReasoning, ContentText
+from .utils import parse_content_with_reasoning
+
+
+class ChatMessageBase(BaseModel):
+    """Base class for chat messages."""
+
+    id: Optional[str] = Field(default=None)
+    """Unique identifer for message."""
+
+    content: Union[str, List[Content]]
+    """Content (simple string or list of content objects)"""
+
+    source: Optional[Literal['input', 'generate']] = Field(default=None)
+    """Source of message."""
+
+    metadata: Optional[Dict[str, Any]] = Field(default=None)
+    """Additional message metadata."""
+
+    internal: Optional[JsonValue] = Field(default=None)
+    """Model provider specific payload - typically used to aid transformation back to model types."""
+
+    def model_post_init(self, __context: Any) -> None:
+        # Generate ID
+        if self.id is None:
+            self.id = uuid.uuid4().hex[:8]  # Shorten to 8 characters for simplicity
+
+    @property
+    def text(self) -> str:
+        """Get the text content of this message.
+
+        ChatMessage content is very general and can contain either
+        a simple text value or a list of content parts (each of which
+        can either be text or an image). Solvers (e.g. for prompt
+        engineering) often need to interact with chat messages with
+        the assumption that they are a simple string. The text
+        property returns either the plain str content, or if the
+        content is a list of text and images, the text items
+        concatenated together (separated by newline)
+        """
+        if isinstance(self.content, str):
+            return self.content
+        else:
+            all_text = [content.text for content in self.content if content.type == 'text']
+            return '\n'.join(all_text)
+
+    @text.setter
+    def text(self, text: str) -> None:
+        """Set the primary text content for this message.
+
+        ChatMessage content is very general and can contain either
+        a simple text value or a list of content parts (each of which
+        can either be text or an image). Solvers (e.g. for prompt
+        engineering) often need to interact with chat messages with
+        the assumption that they are a simple string. The text property
+        sets text either to content directly (if it is a `str`) or to
+        the first text content item in the message (inserting one at
+        the beginning if necessary). If there are multiple text content
+        items in the message then after the set there will be only
+        one remaining (image content will remain).
+        """
+        if isinstance(self.content, str):
+            self.content = text
+        else:
+            all_other = [content for content in self.content if content.type != 'text']
+            self.content = all_other + [ContentText(text=text)]
+
+
+class ChatMessageSystem(ChatMessageBase):
+    """System chat message."""
+
+    role: Literal['system'] = Field(default='system')
+    """Conversation role."""
+
+
+class ChatMessageUser(ChatMessageBase):
+    """User chat message."""
+
+    role: Literal['user'] = Field(default='user')
+    """Conversation role."""
+
+    tool_call_id: Optional[List[str]] = Field(default=None)
+    """ID(s) of tool call(s) this message has the content payload for."""
+
+
+class ChatMessageAssistant(ChatMessageBase):
+    """Assistant chat message."""
+
+    role: Literal['assistant'] = Field(default='assistant')
+    """Conversation role."""
+
+    tool_calls: Optional[List[ToolCall]] = Field(default=None)
+    """Tool calls made by the model."""
+
+    model: Optional[str] = Field(default=None)
+    """Model used to generate assistant message."""
+
+    # Some OpenAI compatible REST endpoints include reasoning as a field alongside
+    # content, however since this field doesn't exist in the OpenAI interface,
+    # hosting providers (so far we've seen this with Together and Groq) may
+    # include the reasoning in a <think></think> tag before the main response.
+    # We expect this pattern to be repeated elsewhere, so include this hook to
+    # automatically extract the reasoning content when the response is prefaced
+    # with a <think> block. If this ends up being an overeach we can fall back
+    # to each provider manually parsing out <think> using a helper function.
+    # The implementation isn't important here, the critical thing to establish
+    # is that EvalScope makes reasoning content available separately.
+    @model_validator(mode='before')
+    @classmethod
+    def extract_reasoning(cls, data: Any) -> Any:
+        if isinstance(data, dict):
+            # cleave apart <think> blocks
+            content = data.get('content', None)
+            if isinstance(content, str):
+                content_text, content_reasoning = parse_content_with_reasoning(content)
+                if content_reasoning:
+                    data['content'] = [
+                        content_reasoning,
+                        ContentText(text=content_text),
+                    ]
+            # migrate messages that has explicit 'reasoning' field
+            # (which was our original representation of reasoning)
+            reasoning = data.get('reasoning', None)
+            if isinstance(reasoning, str):
+                # ensure that content is a list
+                content = data.get('content', None)
+                if content is None:
+                    data['content'] = []
+                elif isinstance(content, str):
+                    data['content'] = [ContentText(text=content)]
+                elif not isinstance(content, list):
+                    data['content'] = []
+                data['content'].insert(0, ContentReasoning(reasoning=reasoning))
+
+                del data['reasoning']
+        return data
+
+
+class ChatMessageTool(ChatMessageBase):
+    """Tool chat message."""
+
+    role: Literal['tool'] = Field(default='tool')
+    """Conversation role."""
+
+    tool_call_id: Optional[str] = Field(default=None)
+    """ID of tool call."""
+
+    function: Optional[str] = Field(default=None)
+    """Name of function called."""
+
+    error: Optional[ToolCallError] = Field(default=None)
+    """Error which occurred during tool call."""
+
+
+ChatMessage = Union[ChatMessageSystem, ChatMessageUser, ChatMessageAssistant, ChatMessageTool]
+"""Message in a chat conversation"""
+
+
+def dict_to_chat_message(data: Dict[str, Any]) -> ChatMessage:
+    """Convert a dictionary to a ChatMessage."""
+
+    if isinstance(data, ChatMessage):
+        return data
+
+    if 'role' not in data:
+        raise ValueError('ChatMessage must have a "role" field')
+
+    role = data['role']
+    if role == 'system':
+        return ChatMessageSystem.model_validate(data)
+    elif role == 'user':
+        return ChatMessageUser.model_validate(data)
+    elif role == 'assistant':
+        return ChatMessageAssistant.model_validate(data)
+    elif role == 'tool':
+        return ChatMessageTool.model_validate(data)
+    else:
+        raise ValueError(f'Unknown chat message role: {role}')
+
+
+def messages_pretty_str(messages: List[ChatMessage]) -> str:
+    """Pretty print a list of chat messages. Without images or other multi-modal contents."""
+    output = []
+    for message in messages:
+        role = message.role.capitalize()
+        content = message.text
+        if isinstance(message, ChatMessageTool):
+            if message.error:
+                content += f'\nError: {message.error.message}'
+            if message.function:
+                content += f'\nFunction: {message.function}'
+        output.append(f'**{role}**: {content}')
+    return '\n\n'.join(output)
+
+
+def messages_to_markdown(messages: List[ChatMessage], max_length: Optional[int] = None) -> str:
+    """Convert a list of chat messages to markdown format.
+
+    Args:
+        messages (List[ChatMessage]): The list of chat messages to convert.
+        max_length (Optional[int]): If provided, truncates the base64 string of images to this length.
+    """
+    output = []
+    for message in messages:
+        role = message.role.capitalize()
+
+        # Start with role header
+        content_parts = [f'**{role}**: ']
+
+        # Handle content based on type
+        if isinstance(message.content, str):
+            content_parts.append(message.content)
+        else:
+            for content_item in message.content:
+                if isinstance(content_item, ContentText):
+                    content_parts.append(content_item.text)
+                elif isinstance(content_item, ContentImage):
+                    # Use markdown image syntax
+                    image_base64 = content_item.image
+                    if max_length and len(image_base64) > max_length:
+                        image_base64 = image_base64[:max_length]
+                    content_parts.append(f'![image]({image_base64})')
+                elif isinstance(content_item, ContentAudio):
+                    audio_base64 = content_item.audio
+                    if max_length and len(audio_base64) > max_length:
+                        audio_base64 = audio_base64[:max_length]
+                    content_parts.append(f"<audio controls src='{audio_base64}'></audio>")
+                elif isinstance(content_item, ContentReasoning):
+                    content_parts.append(f'**Reasoning:** {content_item.reasoning}')
+
+        # Add tool-specific information
+        if isinstance(message, ChatMessageTool):
+            if message.error:
+                content_parts.append(f'**Error:** {message.error.message}')
+            if message.function:
+                content_parts.append(f'**Function:** {message.function}')
+        elif isinstance(message, ChatMessageAssistant) and message.tool_calls:
+            for tool_call in message.tool_calls:
+                content_parts.append(f'**Tool Call:** {tool_call.function}')
+
+        output.append('\n'.join(content_parts))
+
+    return '\n\n'.join(output)
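
To see the `extract_reasoning` validator above in action: a top-level `reasoning` key on an assistant message is migrated into a `ContentReasoning` part prepended to the content list, while `.text` still returns only the plain text parts. A minimal sketch (illustrative, not part of the diff; the `type == 'reasoning'` tag is an assumption, since `content.py` is not shown in this excerpt, and the imports follow the `__init__.py` hunk above):

```python
from evalscope.api.messages import dict_to_chat_message, messages_pretty_str

user = dict_to_chat_message({'role': 'user', 'content': 'What is 2 + 2?'})

# The 'reasoning' key is migrated by ChatMessageAssistant.extract_reasoning
# into a ContentReasoning part inserted at the head of the content list.
assistant = dict_to_chat_message({
    'role': 'assistant',
    'content': '4',
    'reasoning': 'Two plus two equals four.',
})

assert assistant.content[0].type == 'reasoning'  # assumed type tag, see lead-in
assert assistant.text == '4'  # .text joins only the 'text' content parts

print(messages_pretty_str([user, assistant]))
```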