evalscope 0.17.1.tar.gz → 1.0.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of evalscope has been flagged by the registry as possibly problematic.

Files changed (611)
  1. {evalscope-0.17.1/evalscope.egg-info → evalscope-1.0.1}/PKG-INFO +87 -49
  2. {evalscope-0.17.1 → evalscope-1.0.1}/README.md +86 -48
  3. evalscope-1.0.1/evalscope/__init__.py +8 -0
  4. evalscope-1.0.1/evalscope/api/benchmark/__init__.py +3 -0
  5. evalscope-1.0.1/evalscope/api/benchmark/adapters/__init__.py +5 -0
  6. evalscope-1.0.1/evalscope/api/benchmark/adapters/default_data_adapter.py +684 -0
  7. evalscope-1.0.1/evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
  8. evalscope-1.0.1/evalscope/api/benchmark/adapters/multi_choice_adapter.py +83 -0
  9. evalscope-1.0.1/evalscope/api/benchmark/adapters/text2image_adapter.py +156 -0
  10. evalscope-1.0.1/evalscope/api/benchmark/adapters/vision_language_adapter.py +6 -0
  11. evalscope-1.0.1/evalscope/api/benchmark/benchmark.py +356 -0
  12. evalscope-1.0.1/evalscope/api/benchmark/meta.py +121 -0
  13. evalscope-1.0.1/evalscope/api/dataset/__init__.py +2 -0
  14. evalscope-1.0.1/evalscope/api/dataset/dataset.py +349 -0
  15. evalscope-1.0.1/evalscope/api/dataset/loader.py +262 -0
  16. evalscope-1.0.1/evalscope/api/dataset/utils.py +143 -0
  17. evalscope-1.0.1/evalscope/api/evaluator/__init__.py +3 -0
  18. evalscope-1.0.1/evalscope/api/evaluator/cache.py +378 -0
  19. evalscope-1.0.1/evalscope/api/evaluator/evaluator.py +56 -0
  20. evalscope-1.0.1/evalscope/api/evaluator/state.py +275 -0
  21. evalscope-1.0.1/evalscope/api/filter/__init__.py +1 -0
  22. evalscope-1.0.1/evalscope/api/filter/filter.py +72 -0
  23. evalscope-1.0.1/evalscope/api/messages/__init__.py +12 -0
  24. evalscope-1.0.1/evalscope/api/messages/chat_message.py +243 -0
  25. evalscope-1.0.1/evalscope/api/messages/content.py +102 -0
  26. evalscope-1.0.1/evalscope/api/messages/utils.py +35 -0
  27. evalscope-1.0.1/evalscope/api/metric/__init__.py +2 -0
  28. evalscope-1.0.1/evalscope/api/metric/metric.py +55 -0
  29. evalscope-1.0.1/evalscope/api/metric/scorer.py +113 -0
  30. evalscope-1.0.1/evalscope/api/mixin/__init__.py +1 -0
  31. evalscope-1.0.1/evalscope/api/mixin/llm_judge_mixin.py +168 -0
  32. evalscope-1.0.1/evalscope/api/model/__init__.py +12 -0
  33. evalscope-1.0.1/evalscope/api/model/generate_config.py +155 -0
  34. evalscope-1.0.1/evalscope/api/model/model.py +386 -0
  35. evalscope-1.0.1/evalscope/api/model/model_output.py +285 -0
  36. evalscope-1.0.1/evalscope/api/registry.py +182 -0
  37. evalscope-1.0.1/evalscope/api/tool/__init__.py +3 -0
  38. evalscope-1.0.1/evalscope/api/tool/tool_call.py +101 -0
  39. evalscope-1.0.1/evalscope/api/tool/tool_info.py +173 -0
  40. evalscope-1.0.1/evalscope/api/tool/utils.py +64 -0
  41. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/app.py +3 -0
  42. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/ui/app_ui.py +2 -1
  43. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/ui/multi_model.py +50 -25
  44. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/ui/single_model.py +26 -14
  45. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/utils/data_utils.py +43 -27
  46. evalscope-1.0.1/evalscope/app/utils/env_utils.py +12 -0
  47. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/utils/text_utils.py +14 -14
  48. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/utils/visualization.py +9 -4
  49. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/arguments.py +7 -10
  50. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/opencompass/api_meta_template.py +2 -1
  51. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/opencompass/backend_manager.py +6 -5
  52. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +10 -10
  53. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/task_template.py +8 -4
  54. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/task_template.py +2 -1
  55. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/tasks/build_distribution.py +2 -1
  56. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/tasks/build_transform.py +7 -4
  57. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +2 -1
  58. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +2 -1
  59. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/utils/embedding.py +10 -1
  60. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/utils/llm.py +13 -12
  61. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/__init__.py +0 -2
  62. evalscope-1.0.1/evalscope/benchmarks/aime/aime24_adapter.py +50 -0
  63. evalscope-1.0.1/evalscope/benchmarks/aime/aime25_adapter.py +46 -0
  64. evalscope-1.0.1/evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +133 -0
  65. evalscope-1.0.1/evalscope/benchmarks/arc/arc_adapter.py +46 -0
  66. evalscope-1.0.1/evalscope/benchmarks/arena_hard/arena_hard_adapter.py +148 -0
  67. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/arena_hard/utils.py +37 -1
  68. evalscope-1.0.1/evalscope/benchmarks/bbh/bbh_adapter.py +175 -0
  69. evalscope-1.0.1/evalscope/benchmarks/bfcl/bfcl_adapter.py +254 -0
  70. evalscope-1.0.1/evalscope/benchmarks/bfcl/generation.py +222 -0
  71. evalscope-1.0.1/evalscope/benchmarks/ceval/ceval_adapter.py +169 -0
  72. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +85 -82
  73. evalscope-1.0.1/evalscope/benchmarks/cmmlu/cmmlu_adapter.py +122 -0
  74. evalscope-1.0.1/evalscope/benchmarks/competition_math/competition_math_adapter.py +73 -0
  75. evalscope-1.0.1/evalscope/benchmarks/data_collection/data_collection_adapter.py +214 -0
  76. evalscope-1.0.1/evalscope/benchmarks/docmath/docmath_adapter.py +143 -0
  77. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/docmath/utils.py +4 -5
  78. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/drop/drop_adapter.py +88 -40
  79. evalscope-1.0.1/evalscope/benchmarks/frames/frames_adapter.py +175 -0
  80. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/general_arena/general_arena_adapter.py +140 -98
  81. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/general_arena/utils.py +23 -27
  82. evalscope-1.0.1/evalscope/benchmarks/general_mcq/general_mcq_adapter.py +58 -0
  83. evalscope-1.0.1/evalscope/benchmarks/general_qa/general_qa_adapter.py +94 -0
  84. evalscope-1.0.1/evalscope/benchmarks/gpqa/gpqa_adapter.py +90 -0
  85. evalscope-0.17.1/evalscope/benchmarks/gpqa/chain_of_thought.txt → evalscope-1.0.1/evalscope/benchmarks/gpqa/prompt.py +12 -5
  86. evalscope-1.0.1/evalscope/benchmarks/gsm8k/gsm8k_adapter.py +76 -0
  87. evalscope-1.0.1/evalscope/benchmarks/hellaswag/hellaswag_adapter.py +62 -0
  88. evalscope-1.0.1/evalscope/benchmarks/hle/hle_adapter.py +152 -0
  89. evalscope-1.0.1/evalscope/benchmarks/humaneval/humaneval_adapter.py +124 -0
  90. evalscope-1.0.1/evalscope/benchmarks/ifeval/ifeval_adapter.py +83 -0
  91. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/ifeval/instructions.py +109 -64
  92. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/ifeval/instructions_registry.py +1 -1
  93. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/ifeval/instructions_util.py +2 -3
  94. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/ifeval/utils.py +6 -7
  95. evalscope-1.0.1/evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
  96. evalscope-1.0.1/evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
  97. evalscope-1.0.1/evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
  98. evalscope-1.0.1/evalscope/benchmarks/iquiz/iquiz_adapter.py +35 -0
  99. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/live_code_bench/evaluate_utils.py +2 -2
  100. evalscope-1.0.1/evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +138 -0
  101. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/live_code_bench/load_utils.py +13 -21
  102. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/live_code_bench/testing_util.py +6 -2
  103. evalscope-1.0.1/evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py +56 -0
  104. evalscope-1.0.1/evalscope/benchmarks/math_500/math_500_adapter.py +51 -0
  105. evalscope-1.0.1/evalscope/benchmarks/math_vista/math_vista_adapter.py +129 -0
  106. evalscope-1.0.1/evalscope/benchmarks/mmlu/mmlu_adapter.py +107 -0
  107. evalscope-1.0.1/evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +94 -0
  108. evalscope-1.0.1/evalscope/benchmarks/mmlu_redux/mmlu_redux_adapter.py +139 -0
  109. evalscope-1.0.1/evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
  110. evalscope-1.0.1/evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +129 -0
  111. evalscope-1.0.1/evalscope/benchmarks/musr/musr_adapter.py +43 -0
  112. evalscope-1.0.1/evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +392 -0
  113. evalscope-1.0.1/evalscope/benchmarks/process_bench/process_bench_adapter.py +170 -0
  114. evalscope-1.0.1/evalscope/benchmarks/race/race_adapter.py +49 -0
  115. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/simple_qa/simple_qa_adapter.py +72 -70
  116. evalscope-0.17.1/evalscope/benchmarks/super_gpqa/five_shot_prompt.txt → evalscope-1.0.1/evalscope/benchmarks/super_gpqa/prompt.py +14 -16
  117. evalscope-1.0.1/evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +165 -0
  118. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/super_gpqa/utils.py +2 -1
  119. evalscope-1.0.1/evalscope/benchmarks/tau_bench/generation.py +147 -0
  120. evalscope-1.0.1/evalscope/benchmarks/tau_bench/tau_bench_adapter.py +164 -0
  121. evalscope-1.0.1/evalscope/benchmarks/text2image/evalmuse_adapter.py +78 -0
  122. evalscope-1.0.1/evalscope/benchmarks/text2image/genai_bench_adapter.py +53 -0
  123. evalscope-1.0.1/evalscope/benchmarks/text2image/general_t2i_adapter.py +42 -0
  124. evalscope-1.0.1/evalscope/benchmarks/text2image/hpdv2_adapter.py +52 -0
  125. evalscope-1.0.1/evalscope/benchmarks/text2image/tifa_adapter.py +27 -0
  126. evalscope-1.0.1/evalscope/benchmarks/tool_bench/tool_bench_adapter.py +102 -0
  127. evalscope-1.0.1/evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +74 -0
  128. evalscope-1.0.1/evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +91 -0
  129. {evalscope-0.17.1/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models → evalscope-1.0.1/evalscope/benchmarks/winogrande}/__init__.py +0 -0
  130. evalscope-1.0.1/evalscope/benchmarks/winogrande/winogrande_adapter.py +34 -0
  131. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/cli/cli.py +2 -0
  132. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/cli/start_app.py +7 -1
  133. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/cli/start_perf.py +7 -1
  134. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/cli/start_server.py +6 -3
  135. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/collections/__init__.py +2 -10
  136. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/collections/sampler.py +10 -10
  137. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/collections/schema.py +13 -11
  138. evalscope-1.0.1/evalscope/config.py +273 -0
  139. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/constants.py +37 -61
  140. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/evaluator/__init__.py +1 -1
  141. evalscope-1.0.1/evalscope/evaluator/evaluator.py +339 -0
  142. evalscope-1.0.1/evalscope/filters/__init__.py +2 -0
  143. evalscope-1.0.1/evalscope/filters/extraction.py +126 -0
  144. evalscope-1.0.1/evalscope/filters/selection.py +57 -0
  145. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/__init__.py +13 -13
  146. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/llm_judge.py +47 -33
  147. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/math_parser.py +27 -22
  148. evalscope-1.0.1/evalscope/metrics/metric.py +307 -0
  149. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/metrics.py +22 -18
  150. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/clip_model.py +4 -2
  151. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/cross_modeling.py +9 -13
  152. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/clip_model.py +2 -1
  153. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/hpsv2_model.py +3 -2
  154. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/mps_model.py +2 -1
  155. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py +2 -2
  156. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/itmscore_models/blip2_itm_model.py +2 -1
  157. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py +4 -2
  158. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/ImageReward.py +10 -5
  159. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py +4 -2
  160. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward_model.py +2 -1
  161. evalscope-1.0.1/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/__init__.py +0 -0
  162. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/language_model/clip_t5.py +15 -9
  163. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/clip_encoder.py +4 -2
  164. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5_model.py +15 -10
  165. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/gpt4v_model.py +9 -6
  166. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +2 -2
  167. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +4 -2
  168. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +4 -2
  169. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +3 -9
  170. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +16 -10
  171. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +3 -2
  172. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +4 -2
  173. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/__init__.py +8 -4
  174. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +47 -25
  175. evalscope-1.0.1/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/__init__.py +0 -0
  176. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_qformer.py +12 -7
  177. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5.py +23 -17
  178. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5_instruct.py +33 -23
  179. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/fga_blip2.py +2 -1
  180. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_llama.py +46 -30
  181. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_t5.py +69 -37
  182. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/__init__.py +7 -5
  183. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py +6 -4
  184. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_image_text_matching.py +7 -5
  185. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_nlvr.py +3 -2
  186. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_outputs.py +5 -2
  187. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_vqa.py +17 -13
  188. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +35 -19
  189. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/clip_vit.py +14 -12
  190. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/eva_vit.py +63 -52
  191. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +63 -38
  192. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/vit.py +6 -3
  193. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/__init__.py +6 -2
  194. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/randaugment.py +3 -2
  195. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py +15 -13
  196. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/vqa_model.py +3 -2
  197. evalscope-1.0.1/evalscope/models/__init__.py +26 -0
  198. evalscope-1.0.1/evalscope/models/image_edit_model.py +125 -0
  199. evalscope-1.0.1/evalscope/models/mockllm.py +65 -0
  200. evalscope-1.0.1/evalscope/models/model_apis.py +67 -0
  201. evalscope-1.0.1/evalscope/models/modelscope.py +455 -0
  202. evalscope-1.0.1/evalscope/models/openai_compatible.py +126 -0
  203. evalscope-1.0.1/evalscope/models/text2image_model.py +124 -0
  204. evalscope-1.0.1/evalscope/models/utils/openai.py +701 -0
  205. evalscope-1.0.1/evalscope/perf/__init__.py +0 -0
  206. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/benchmark.py +4 -1
  207. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/http_client.py +4 -2
  208. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/api/custom_api.py +5 -4
  209. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/api/openai_api.py +11 -9
  210. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/custom.py +2 -1
  211. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/flickr8k.py +1 -1
  212. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/kontext_bench.py +1 -1
  213. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/line_by_line.py +2 -1
  214. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/longalpaca.py +2 -1
  215. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/openqa.py +4 -2
  216. evalscope-1.0.1/evalscope/perf/utils/__init__.py +0 -0
  217. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/utils/benchmark_util.py +15 -10
  218. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/utils/db_util.py +9 -6
  219. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/utils/local_server.py +11 -3
  220. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/utils/rich_display.py +16 -10
  221. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/report/__init__.py +2 -3
  222. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/report/combinator.py +18 -12
  223. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/report/generator.py +51 -35
  224. evalscope-0.17.1/evalscope/report/utils.py → evalscope-1.0.1/evalscope/report/report.py +8 -6
  225. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/run.py +33 -47
  226. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/summarizer.py +1 -1
  227. evalscope-1.0.1/evalscope/third_party/thinkbench/tools/__init__.py +0 -0
  228. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -4
  229. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/utils/__init__.py +21 -2
  230. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/utils/chat_service.py +3 -2
  231. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/utils/deprecation_utils.py +12 -1
  232. evalscope-1.0.1/evalscope/utils/function_utils.py +29 -0
  233. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/utils/import_utils.py +23 -1
  234. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/utils/io_utils.py +142 -6
  235. evalscope-1.0.1/evalscope/utils/json_schema.py +208 -0
  236. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/utils/logger.py +51 -12
  237. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/utils/model_utils.py +11 -7
  238. evalscope-1.0.1/evalscope/utils/multi_choices.py +288 -0
  239. evalscope-1.0.1/evalscope/utils/url_utils.py +65 -0
  240. evalscope-1.0.1/evalscope/version.py +4 -0
  241. {evalscope-0.17.1 → evalscope-1.0.1/evalscope.egg-info}/PKG-INFO +87 -49
  242. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope.egg-info/SOURCES.txt +83 -51
  243. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope.egg-info/requires.txt +21 -13
  244. {evalscope-0.17.1 → evalscope-1.0.1}/requirements/aigc.txt +2 -0
  245. {evalscope-0.17.1 → evalscope-1.0.1}/requirements/dev.txt +1 -1
  246. {evalscope-0.17.1 → evalscope-1.0.1}/requirements/framework.txt +7 -8
  247. {evalscope-0.17.1 → evalscope-1.0.1}/requirements/rag.txt +1 -0
  248. {evalscope-0.17.1 → evalscope-1.0.1}/setup.cfg +14 -5
  249. evalscope-1.0.1/tests/benchmark/test_eval.py +385 -0
  250. evalscope-1.0.1/tests/benchmark/test_image_edit.py +65 -0
  251. {evalscope-0.17.1/tests/aigc → evalscope-1.0.1/tests/benchmark}/test_t2i.py +22 -4
  252. evalscope-1.0.1/tests/benchmark/test_vlm.py +80 -0
  253. {evalscope-0.17.1 → evalscope-1.0.1}/tests/cli/test_all.py +85 -47
  254. {evalscope-0.17.1 → evalscope-1.0.1}/tests/cli/test_collection.py +20 -8
  255. {evalscope-0.17.1 → evalscope-1.0.1}/tests/cli/test_custom.py +22 -15
  256. evalscope-1.0.1/tests/cli/test_reasoning.py +81 -0
  257. evalscope-1.0.1/tests/common.py +73 -0
  258. {evalscope-0.17.1 → evalscope-1.0.1}/tests/perf/test_perf.py +4 -2
  259. evalscope-1.0.1/tests/rag/__init__.py +0 -0
  260. {evalscope-0.17.1 → evalscope-1.0.1}/tests/rag/test_clip_benchmark.py +0 -2
  261. evalscope-0.17.1/evalscope/__init__.py +0 -5
  262. evalscope-0.17.1/evalscope/benchmarks/aigc/t2i/base.py +0 -56
  263. evalscope-0.17.1/evalscope/benchmarks/aigc/t2i/evalmuse_adapter.py +0 -78
  264. evalscope-0.17.1/evalscope/benchmarks/aigc/t2i/genai_bench_adapter.py +0 -58
  265. evalscope-0.17.1/evalscope/benchmarks/aigc/t2i/general_t2i_adapter.py +0 -58
  266. evalscope-0.17.1/evalscope/benchmarks/aigc/t2i/hpdv2_adapter.py +0 -57
  267. evalscope-0.17.1/evalscope/benchmarks/aigc/t2i/tifa_adapter.py +0 -37
  268. evalscope-0.17.1/evalscope/benchmarks/aime/aime24_adapter.py +0 -52
  269. evalscope-0.17.1/evalscope/benchmarks/aime/aime25_adapter.py +0 -52
  270. evalscope-0.17.1/evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +0 -107
  271. evalscope-0.17.1/evalscope/benchmarks/arc/ai2_arc.py +0 -151
  272. evalscope-0.17.1/evalscope/benchmarks/arc/arc_adapter.py +0 -159
  273. evalscope-0.17.1/evalscope/benchmarks/arena_hard/arena_hard_adapter.py +0 -122
  274. evalscope-0.17.1/evalscope/benchmarks/bbh/bbh_adapter.py +0 -247
  275. evalscope-0.17.1/evalscope/benchmarks/benchmark.py +0 -81
  276. evalscope-0.17.1/evalscope/benchmarks/bfcl/bfcl_adapter.py +0 -237
  277. evalscope-0.17.1/evalscope/benchmarks/ceval/ceval_adapter.py +0 -238
  278. evalscope-0.17.1/evalscope/benchmarks/ceval/ceval_exam.py +0 -146
  279. evalscope-0.17.1/evalscope/benchmarks/cmmlu/cmmlu.py +0 -161
  280. evalscope-0.17.1/evalscope/benchmarks/cmmlu/cmmlu_adapter.py +0 -213
  281. evalscope-0.17.1/evalscope/benchmarks/cmmlu/samples.jsonl +0 -5
  282. evalscope-0.17.1/evalscope/benchmarks/competition_math/competition_math.py +0 -79
  283. evalscope-0.17.1/evalscope/benchmarks/competition_math/competition_math_adapter.py +0 -125
  284. evalscope-0.17.1/evalscope/benchmarks/data_adapter.py +0 -528
  285. evalscope-0.17.1/evalscope/benchmarks/data_collection/data_collection_adapter.py +0 -72
  286. evalscope-0.17.1/evalscope/benchmarks/docmath/docmath_adapter.py +0 -85
  287. evalscope-0.17.1/evalscope/benchmarks/filters.py +0 -59
  288. evalscope-0.17.1/evalscope/benchmarks/frames/frames_adapter.py +0 -91
  289. evalscope-0.17.1/evalscope/benchmarks/general_mcq/general_mcq_adapter.py +0 -119
  290. evalscope-0.17.1/evalscope/benchmarks/general_qa/general_qa_adapter.py +0 -155
  291. evalscope-0.17.1/evalscope/benchmarks/gpqa/gpqa_adapter.py +0 -129
  292. evalscope-0.17.1/evalscope/benchmarks/gsm8k/gsm8k.py +0 -121
  293. evalscope-0.17.1/evalscope/benchmarks/gsm8k/gsm8k_adapter.py +0 -156
  294. evalscope-0.17.1/evalscope/benchmarks/hellaswag/hellaswag.py +0 -112
  295. evalscope-0.17.1/evalscope/benchmarks/hellaswag/hellaswag_adapter.py +0 -151
  296. evalscope-0.17.1/evalscope/benchmarks/hle/hle_adapter.py +0 -118
  297. evalscope-0.17.1/evalscope/benchmarks/humaneval/humaneval.py +0 -79
  298. evalscope-0.17.1/evalscope/benchmarks/humaneval/humaneval_adapter.py +0 -93
  299. evalscope-0.17.1/evalscope/benchmarks/ifeval/ifeval_adapter.py +0 -54
  300. evalscope-0.17.1/evalscope/benchmarks/iquiz/iquiz_adapter.py +0 -70
  301. evalscope-0.17.1/evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +0 -88
  302. evalscope-0.17.1/evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py +0 -82
  303. evalscope-0.17.1/evalscope/benchmarks/math_500/math_500_adapter.py +0 -58
  304. evalscope-0.17.1/evalscope/benchmarks/mmlu/mmlu.py +0 -160
  305. evalscope-0.17.1/evalscope/benchmarks/mmlu/mmlu_adapter.py +0 -280
  306. evalscope-0.17.1/evalscope/benchmarks/mmlu/samples.jsonl +0 -5
  307. evalscope-0.17.1/evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +0 -113
  308. evalscope-0.17.1/evalscope/benchmarks/mmlu_redux/mmlu_redux_adapter.py +0 -185
  309. evalscope-0.17.1/evalscope/benchmarks/musr/musr_adapter.py +0 -74
  310. evalscope-0.17.1/evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +0 -348
  311. evalscope-0.17.1/evalscope/benchmarks/process_bench/critique_template.txt +0 -13
  312. evalscope-0.17.1/evalscope/benchmarks/process_bench/process_bench_adapter.py +0 -102
  313. evalscope-0.17.1/evalscope/benchmarks/race/race.py +0 -104
  314. evalscope-0.17.1/evalscope/benchmarks/race/race_adapter.py +0 -135
  315. evalscope-0.17.1/evalscope/benchmarks/race/samples.jsonl +0 -5
  316. evalscope-0.17.1/evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +0 -209
  317. evalscope-0.17.1/evalscope/benchmarks/super_gpqa/zero_shot_prompt.txt +0 -4
  318. evalscope-0.17.1/evalscope/benchmarks/tau_bench/tau_bench_adapter.py +0 -110
  319. evalscope-0.17.1/evalscope/benchmarks/tool_bench/tool_bench_adapter.py +0 -81
  320. evalscope-0.17.1/evalscope/benchmarks/trivia_qa/trivia_qa.py +0 -89
  321. evalscope-0.17.1/evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +0 -142
  322. evalscope-0.17.1/evalscope/benchmarks/truthful_qa/truthful_qa.py +0 -163
  323. evalscope-0.17.1/evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +0 -287
  324. evalscope-0.17.1/evalscope/benchmarks/utils.py +0 -60
  325. evalscope-0.17.1/evalscope/benchmarks/winogrande/winogrande_adapter.py +0 -60
  326. evalscope-0.17.1/evalscope/collections/evaluator.py +0 -375
  327. evalscope-0.17.1/evalscope/config.py +0 -173
  328. evalscope-0.17.1/evalscope/evaluator/evaluator.py +0 -483
  329. evalscope-0.17.1/evalscope/metrics/completion_parsers.py +0 -227
  330. evalscope-0.17.1/evalscope/metrics/named_metrics.py +0 -55
  331. evalscope-0.17.1/evalscope/metrics/t2v_metrics/__init__.py +0 -52
  332. evalscope-0.17.1/evalscope/models/__init__.py +0 -49
  333. evalscope-0.17.1/evalscope/models/adapters/__init__.py +0 -14
  334. evalscope-0.17.1/evalscope/models/adapters/base_adapter.py +0 -84
  335. evalscope-0.17.1/evalscope/models/adapters/bfcl_adapter.py +0 -246
  336. evalscope-0.17.1/evalscope/models/adapters/chat_adapter.py +0 -207
  337. evalscope-0.17.1/evalscope/models/adapters/choice_adapter.py +0 -222
  338. evalscope-0.17.1/evalscope/models/adapters/custom_adapter.py +0 -71
  339. evalscope-0.17.1/evalscope/models/adapters/server_adapter.py +0 -236
  340. evalscope-0.17.1/evalscope/models/adapters/t2i_adapter.py +0 -79
  341. evalscope-0.17.1/evalscope/models/adapters/tau_bench_adapter.py +0 -189
  342. evalscope-0.17.1/evalscope/models/custom/__init__.py +0 -4
  343. evalscope-0.17.1/evalscope/models/custom/custom_model.py +0 -50
  344. evalscope-0.17.1/evalscope/models/custom/dummy_model.py +0 -99
  345. evalscope-0.17.1/evalscope/models/local_model.py +0 -128
  346. evalscope-0.17.1/evalscope/models/register.py +0 -41
  347. evalscope-0.17.1/evalscope/version.py +0 -4
  348. evalscope-0.17.1/tests/cli/test_run.py +0 -489
  349. {evalscope-0.17.1 → evalscope-1.0.1}/LICENSE +0 -0
  350. {evalscope-0.17.1 → evalscope-1.0.1}/MANIFEST.in +0 -0
  351. {evalscope-0.17.1/evalscope/backend → evalscope-1.0.1/evalscope/api}/__init__.py +0 -0
  352. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/__init__.py +0 -0
  353. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/arguments.py +0 -0
  354. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/constants.py +0 -0
  355. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/ui/__init__.py +0 -0
  356. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/ui/sidebar.py +0 -0
  357. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/ui/visualization.py +0 -0
  358. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/app/utils/localization.py +0 -0
  359. {evalscope-0.17.1/evalscope/backend/rag_eval/clip_benchmark/tasks → evalscope-1.0.1/evalscope/backend}/__init__.py +0 -0
  360. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/base.py +0 -0
  361. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/opencompass/__init__.py +0 -0
  362. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/opencompass/tasks/__init__.py +0 -0
  363. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/opencompass/tasks/eval_api.py +0 -0
  364. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/opencompass/tasks/eval_datasets.py +0 -0
  365. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/__init__.py +0 -0
  366. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/backend_manager.py +0 -0
  367. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/__init__.py +0 -0
  368. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/arguments.py +0 -0
  369. {evalscope-0.17.1/evalscope/backend/rag_eval/utils → evalscope-1.0.1/evalscope/backend/rag_eval/clip_benchmark/tasks}/__init__.py +0 -0
  370. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/tasks/image_caption.py +0 -0
  371. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/tasks/zeroshot_classification.py +0 -0
  372. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/tasks/zeroshot_retrieval.py +0 -0
  373. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/utils/webdataset_convert.py +0 -0
  374. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/utils/webdatasets.txt +0 -0
  375. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/__init__.py +0 -0
  376. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/arguments.py +0 -0
  377. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/base.py +0 -0
  378. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/task_template.py +0 -0
  379. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/tasks/Classification.py +0 -0
  380. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/tasks/Clustering.py +0 -0
  381. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/tasks/CustomTask.py +0 -0
  382. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/tasks/PairClassification.py +0 -0
  383. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/tasks/Reranking.py +0 -0
  384. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/tasks/Retrieval.py +0 -0
  385. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/tasks/STS.py +0 -0
  386. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/tasks/__init__.py +0 -0
  387. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/__init__.py +0 -0
  388. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/arguments.py +0 -0
  389. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/prompts/persona_prompt.py +0 -0
  390. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/tasks/__init__.py +0 -0
  391. {evalscope-0.17.1/evalscope/benchmarks/aigc → evalscope-1.0.1/evalscope/backend/rag_eval/utils}/__init__.py +0 -0
  392. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/utils/clip.py +0 -0
  393. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/rag_eval/utils/tools.py +0 -0
  394. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/vlm_eval_kit/__init__.py +0 -0
  395. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/backend/vlm_eval_kit/backend_manager.py +0 -0
  396. {evalscope-0.17.1/evalscope/benchmarks/aigc/t2i → evalscope-1.0.1/evalscope/benchmarks/aime}/__init__.py +0 -0
  397. {evalscope-0.17.1/evalscope/benchmarks/aime → evalscope-1.0.1/evalscope/benchmarks/alpaca_eval}/__init__.py +0 -0
  398. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/arc/__init__.py +0 -0
  399. {evalscope-0.17.1/evalscope/benchmarks/alpaca_eval → evalscope-1.0.1/evalscope/benchmarks/arena_hard}/__init__.py +0 -0
  400. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/__init__.py +0 -0
  401. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/boolean_expressions.txt +0 -0
  402. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/causal_judgement.txt +0 -0
  403. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/date_understanding.txt +0 -0
  404. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/disambiguation_qa.txt +0 -0
  405. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/dyck_languages.txt +0 -0
  406. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/formal_fallacies.txt +0 -0
  407. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/geometric_shapes.txt +0 -0
  408. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/hyperbaton.txt +0 -0
  409. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_five_objects.txt +0 -0
  410. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_seven_objects.txt +0 -0
  411. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_three_objects.txt +0 -0
  412. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/movie_recommendation.txt +0 -0
  413. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/multistep_arithmetic_two.txt +0 -0
  414. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/navigate.txt +0 -0
  415. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/object_counting.txt +0 -0
  416. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/penguins_in_a_table.txt +0 -0
  417. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/reasoning_about_colored_objects.txt +0 -0
  418. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/ruin_names.txt +0 -0
  419. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/salient_translation_error_detection.txt +0 -0
  420. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/snarks.txt +0 -0
  421. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/sports_understanding.txt +0 -0
  422. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/temporal_sequences.txt +0 -0
  423. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_five_objects.txt +0 -0
  424. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_seven_objects.txt +0 -0
  425. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_three_objects.txt +0 -0
  426. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/web_of_lies.txt +0 -0
  427. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/word_sorting.txt +0 -0
  428. {evalscope-0.17.1/evalscope/benchmarks/arena_hard → evalscope-1.0.1/evalscope/benchmarks/bfcl}/__init__.py +0 -0
  429. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/ceval/__init__.py +0 -0
  430. {evalscope-0.17.1/evalscope/benchmarks/bfcl → evalscope-1.0.1/evalscope/benchmarks/chinese_simple_qa}/__init__.py +0 -0
  431. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/cmmlu/__init__.py +0 -0
  432. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/competition_math/__init__.py +0 -0
  433. {evalscope-0.17.1/evalscope/benchmarks/chinese_simple_qa → evalscope-1.0.1/evalscope/benchmarks/data_collection}/__init__.py +0 -0
  434. {evalscope-0.17.1/evalscope/benchmarks/data_collection → evalscope-1.0.1/evalscope/benchmarks/docmath}/__init__.py +0 -0
  435. {evalscope-0.17.1/evalscope/benchmarks/docmath → evalscope-1.0.1/evalscope/benchmarks/drop}/__init__.py +0 -0
  436. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/drop/utils.py +0 -0
  437. {evalscope-0.17.1/evalscope/benchmarks/drop → evalscope-1.0.1/evalscope/benchmarks/frames}/__init__.py +0 -0
  438. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/frames/utils.py +0 -0
  439. {evalscope-0.17.1/evalscope/benchmarks/frames → evalscope-1.0.1/evalscope/benchmarks/general_arena}/__init__.py +0 -0
  440. {evalscope-0.17.1/evalscope/benchmarks/general_arena → evalscope-1.0.1/evalscope/benchmarks/general_mcq}/__init__.py +0 -0
  441. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/general_qa/__init__.py +0 -0
  442. {evalscope-0.17.1/evalscope/benchmarks/general_mcq → evalscope-1.0.1/evalscope/benchmarks/gpqa}/__init__.py +0 -0
  443. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/gsm8k/__init__.py +0 -0
  444. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/hellaswag/__init__.py +0 -0
  445. {evalscope-0.17.1/evalscope/benchmarks/gpqa → evalscope-1.0.1/evalscope/benchmarks/hle}/__init__.py +0 -0
  446. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/humaneval/__init__.py +0 -0
  447. {evalscope-0.17.1/evalscope/benchmarks/hle → evalscope-1.0.1/evalscope/benchmarks/ifeval}/__init__.py +0 -0
  448. {evalscope-0.17.1/evalscope/benchmarks/ifeval → evalscope-1.0.1/evalscope/benchmarks/image_edit}/__init__.py +0 -0
  449. {evalscope-0.17.1/evalscope/benchmarks/iquiz → evalscope-1.0.1/evalscope/benchmarks/image_edit/gedit}/__init__.py +0 -0
  450. {evalscope-0.17.1/evalscope/benchmarks/live_code_bench → evalscope-1.0.1/evalscope/benchmarks/iquiz}/__init__.py +0 -0
  451. {evalscope-0.17.1/evalscope/benchmarks/maritime_bench → evalscope-1.0.1/evalscope/benchmarks/live_code_bench}/__init__.py +0 -0
  452. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/live_code_bench/extract_utils.py +0 -0
  453. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/live_code_bench/pass_k_utils.py +0 -0
  454. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/live_code_bench/prompts.py +0 -0
  455. {evalscope-0.17.1/evalscope/benchmarks/math_500 → evalscope-1.0.1/evalscope/benchmarks/maritime_bench}/__init__.py +0 -0
  456. {evalscope-0.17.1/evalscope/benchmarks/mmlu_pro → evalscope-1.0.1/evalscope/benchmarks/math_500}/__init__.py +0 -0
  457. {evalscope-0.17.1/evalscope/benchmarks/mmlu_redux → evalscope-1.0.1/evalscope/benchmarks/math_vista}/__init__.py +0 -0
  458. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/mmlu/__init__.py +0 -0
  459. {evalscope-0.17.1/evalscope/benchmarks/musr → evalscope-1.0.1/evalscope/benchmarks/mmlu_pro}/__init__.py +0 -0
  460. {evalscope-0.17.1/evalscope/benchmarks/needle_haystack → evalscope-1.0.1/evalscope/benchmarks/mmlu_redux}/__init__.py +0 -0
  461. {evalscope-0.17.1/evalscope/benchmarks/process_bench → evalscope-1.0.1/evalscope/benchmarks/mmmu}/__init__.py +0 -0
  462. {evalscope-0.17.1/evalscope/benchmarks/simple_qa → evalscope-1.0.1/evalscope/benchmarks/mmmu_pro}/__init__.py +0 -0
  463. {evalscope-0.17.1/evalscope/benchmarks/super_gpqa → evalscope-1.0.1/evalscope/benchmarks/musr}/__init__.py +0 -0
  464. {evalscope-0.17.1/evalscope/benchmarks/tau_bench → evalscope-1.0.1/evalscope/benchmarks/needle_haystack}/__init__.py +0 -0
  465. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/needle_haystack/utils.py +0 -0
  466. {evalscope-0.17.1/evalscope/benchmarks/tool_bench → evalscope-1.0.1/evalscope/benchmarks/process_bench}/__init__.py +0 -0
  467. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/race/__init__.py +0 -0
  468. {evalscope-0.17.1/evalscope/benchmarks/winogrande → evalscope-1.0.1/evalscope/benchmarks/simple_qa}/__init__.py +0 -0
  469. {evalscope-0.17.1/evalscope/metrics/t2v_metrics/models → evalscope-1.0.1/evalscope/benchmarks/super_gpqa}/__init__.py +0 -0
  470. {evalscope-0.17.1/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model → evalscope-1.0.1/evalscope/benchmarks/tau_bench}/__init__.py +0 -0
  471. {evalscope-0.17.1/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward → evalscope-1.0.1/evalscope/benchmarks/text2image}/__init__.py +0 -0
  472. {evalscope-0.17.1/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5 → evalscope-1.0.1/evalscope/benchmarks/tool_bench}/__init__.py +0 -0
  473. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/tool_bench/utils.py +0 -0
  474. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/trivia_qa/__init__.py +0 -0
  475. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/trivia_qa/samples.jsonl +0 -0
  476. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/benchmarks/truthful_qa/__init__.py +0 -0
  477. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/cli/__init__.py +0 -0
  478. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/cli/base.py +0 -0
  479. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/cli/start_eval.py +0 -0
  480. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/bundled_rouge_score/__init__.py +0 -0
  481. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/bundled_rouge_score/rouge_scorer.py +0 -0
  482. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/rouge_metric.py +0 -0
  483. {evalscope-0.17.1/evalscope/perf → evalscope-1.0.1/evalscope/metrics/t2v_metrics}/__init__.py +0 -0
  484. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/clipscore.py +0 -0
  485. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/constants.py +0 -0
  486. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/itmscore.py +0 -0
  487. {evalscope-0.17.1/evalscope/perf/utils → evalscope-1.0.1/evalscope/metrics/t2v_metrics/models}/__init__.py +0 -0
  488. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/__init__.py +0 -0
  489. {evalscope-0.17.1/evalscope/third_party/thinkbench/tools → evalscope-1.0.1/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model}/__init__.py +0 -0
  490. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/base_model.py +0 -0
  491. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/itmscore_models/__init__.py +0 -0
  492. {evalscope-0.17.1/tests/rag → evalscope-1.0.1/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward}/__init__.py +0 -0
  493. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/model.py +0 -0
  494. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/utils.py +0 -0
  495. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/__init__.py +0 -0
  496. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/__init__.py +0 -0
  497. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/builder.py +0 -0
  498. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_projector/builder.py +0 -0
  499. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/__init__.py +0 -0
  500. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +0 -0
  501. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +0 -0
  502. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +0 -0
  503. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/default.yaml +0 -0
  504. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_flant5xl.yaml +0 -0
  505. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt2.7b.yaml +0 -0
  506. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt6.7b.yaml +0 -0
  507. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_coco.yaml +0 -0
  508. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xl.yaml +0 -0
  509. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xxl.yaml +0 -0
  510. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna13b.yaml +0 -0
  511. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml +0 -0
  512. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain.yaml +0 -0
  513. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl.yaml +0 -0
  514. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_no_prefix.yaml +0 -0
  515. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_prefix.yaml +0 -0
  516. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_vitL.yaml +0 -0
  517. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xxl.yaml +0 -0
  518. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt2.7b.yaml +0 -0
  519. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt6.7b.yaml +0 -0
  520. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_vitL.yaml +0 -0
  521. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna13b.yaml +0 -0
  522. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna7b.yaml +0 -0
  523. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config.json +0 -0
  524. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config_albef.json +0 -0
  525. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_large_config.json +0 -0
  526. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/base_model.py +0 -0
  527. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2.py +0 -0
  528. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_image_text_matching.py +0 -0
  529. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_caption.py +0 -0
  530. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_classification.py +0 -0
  531. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_feature_extractor.py +0 -0
  532. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_pretrain.py +0 -0
  533. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/base_processor.py +0 -0
  534. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/blip_processors.py +0 -0
  535. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/score.py +0 -0
  536. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/vqascore.py +0 -0
  537. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/arguments.py +0 -0
  538. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/main.py +0 -0
  539. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/__init__.py +0 -0
  540. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/api/__init__.py +0 -0
  541. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/api/base.py +0 -0
  542. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/api/dashscope_api.py +0 -0
  543. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/api/default_api.py +0 -0
  544. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/__init__.py +0 -0
  545. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/base.py +0 -0
  546. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/random_dataset.py +0 -0
  547. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/random_vl_dataset.py +0 -0
  548. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/speed_benchmark.py +0 -0
  549. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/plugin/registry.py +0 -0
  550. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/utils/analysis_result.py +0 -0
  551. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/utils/handler.py +0 -0
  552. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/perf/utils/log_utils.py +0 -0
  553. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/__init__.py +0 -0
  554. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/README.md +0 -0
  555. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/__init__.py +0 -0
  556. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/default_task.json +0 -0
  557. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/default_task.yaml +0 -0
  558. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/eval.py +0 -0
  559. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/infer.py +0 -0
  560. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/longbench_write.py +0 -0
  561. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/resources/__init__.py +0 -0
  562. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/resources/judge.txt +0 -0
  563. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/resources/longbench_write.jsonl +0 -0
  564. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/resources/longbench_write_en.jsonl +0 -0
  565. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/resources/longwrite_ruler.jsonl +0 -0
  566. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/tools/__init__.py +0 -0
  567. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/tools/data_etl.py +0 -0
  568. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/tools/openai_api.py +0 -0
  569. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/longbench_write/utils.py +0 -0
  570. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/thinkbench/__init__.py +0 -0
  571. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/thinkbench/eval.py +0 -0
  572. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/thinkbench/infer.py +0 -0
  573. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/thinkbench/resources/critique_template.txt +0 -0
  574. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/thinkbench/resources/reformat_template.txt +0 -0
  575. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/thinkbench/tools/llm.py +0 -0
  576. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/thinkbench/tools/utils.py +0 -0
  577. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/README.md +0 -0
  578. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/__init__.py +0 -0
  579. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/config_default.json +0 -0
  580. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/config_default.yaml +0 -0
  581. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/eval.py +0 -0
  582. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/infer.py +0 -0
  583. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/llm/__init__.py +0 -0
  584. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/requirements.txt +0 -0
  585. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/toolbench_static.py +0 -0
  586. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope/utils/argument_utils.py +0 -0
  587. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope.egg-info/dependency_links.txt +0 -0
  588. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope.egg-info/entry_points.txt +0 -0
  589. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope.egg-info/not-zip-safe +0 -0
  590. {evalscope-0.17.1 → evalscope-1.0.1}/evalscope.egg-info/top_level.txt +0 -0
  591. {evalscope-0.17.1 → evalscope-1.0.1}/requirements/app.txt +0 -0
  592. {evalscope-0.17.1 → evalscope-1.0.1}/requirements/docs.txt +0 -0
  593. {evalscope-0.17.1 → evalscope-1.0.1}/requirements/opencompass.txt +0 -0
  594. {evalscope-0.17.1 → evalscope-1.0.1}/requirements/perf.txt +0 -0
  595. {evalscope-0.17.1 → evalscope-1.0.1}/requirements/vlmeval.txt +0 -0
  596. {evalscope-0.17.1 → evalscope-1.0.1}/requirements.txt +0 -0
  597. {evalscope-0.17.1 → evalscope-1.0.1}/setup.py +0 -0
  598. {evalscope-0.17.1 → evalscope-1.0.1}/tests/__init__.py +0 -0
  599. {evalscope-0.17.1/tests/aigc → evalscope-1.0.1/tests/benchmark}/__init__.py +0 -0
  600. {evalscope-0.17.1 → evalscope-1.0.1}/tests/cli/__init__.py +0 -0
  601. {evalscope-0.17.1 → evalscope-1.0.1}/tests/perf/__init__.py +0 -0
  602. {evalscope-0.17.1 → evalscope-1.0.1}/tests/rag/test_mteb.py +0 -0
  603. {evalscope-0.17.1 → evalscope-1.0.1}/tests/rag/test_ragas.py +0 -0
  604. {evalscope-0.17.1 → evalscope-1.0.1}/tests/swift/__init__.py +0 -0
  605. {evalscope-0.17.1 → evalscope-1.0.1}/tests/swift/test_run_swift_eval.py +0 -0
  606. {evalscope-0.17.1 → evalscope-1.0.1}/tests/swift/test_run_swift_vlm_eval.py +0 -0
  607. {evalscope-0.17.1 → evalscope-1.0.1}/tests/swift/test_run_swift_vlm_jugde_eval.py +0 -0
  608. {evalscope-0.17.1 → evalscope-1.0.1}/tests/test_run_all.py +0 -0
  609. {evalscope-0.17.1 → evalscope-1.0.1}/tests/utils.py +0 -0
  610. {evalscope-0.17.1 → evalscope-1.0.1}/tests/vlm/__init__.py +0 -0
  611. {evalscope-0.17.1 → evalscope-1.0.1}/tests/vlm/test_vlmeval.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: evalscope
- Version: 0.17.1
+ Version: 1.0.1
  Summary: EvalScope: Lightweight LLMs Evaluation Framework
  Home-page: https://github.com/modelscope/evalscope
  Author: ModelScope team
@@ -57,9 +57,9 @@ License-File: LICENSE
  - [📝 Introduction](#-introduction)
  - [☎ User Groups](#-user-groups)
  - [🎉 News](#-news)
- - [🛠️ Installation](#️-installation)
- - [Method 1: Install Using pip](#method-1-install-using-pip)
- - [Method 2: Install from Source](#method-2-install-from-source)
+ - [🛠️ Environment Setup](#️-environment-setup)
+ - [Method 1. Install via pip](#method-1-install-via-pip)
+ - [Method 2. Install from source](#method-2-install-from-source)
  - [🚀 Quick Start](#-quick-start)
  - [Method 1. Using Command Line](#method-1-using-command-line)
  - [Method 2. Using Python Code](#method-2-using-python-code)
@@ -140,6 +140,15 @@ Please scan the QR code below to join our community groups:
 
 
  ## 🎉 News
+
+ > [!IMPORTANT]
+ > **Version 1.0 Refactoring**
+ >
+ > Version 1.0 introduces a major overhaul of the evaluation framework, establishing a new, more modular and extensible API layer under `evalscope/api`. Key improvements include standardized data models for benchmarks, samples, and results; a registry-based design for components such as benchmarks and metrics; and a rewritten core evaluator that orchestrates the new architecture. Existing benchmark adapters have been migrated to this API, resulting in cleaner, more consistent, and easier-to-maintain implementations.
+
+ - 🔥 **[2025.09.05]** Added support for vision-language multimodal model evaluation tasks, such as MathVista and MMMU. For more supported datasets, please [refer to the documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/vlm.html).
+ - 🔥 **[2025.09.04]** Added support for image editing task evaluation, including the [GEdit-Bench](https://modelscope.cn/datasets/stepfun-ai/GEdit-Bench) benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/image_edit.html).
+ - 🔥 **[2025.08.22]** Version 1.0 Refactoring. This release includes breaking changes; please [refer to the documentation](https://evalscope.readthedocs.io/en/latest/get_started/basic_usage.html#switching-to-version-v1-0).
  - 🔥 **[2025.07.18]** The model stress testing now supports randomly generating image-text data for multimodal model evaluation. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#id4).
  - 🔥 **[2025.07.16]** Support for [τ-bench](https://github.com/sierra-research/tau-bench) has been added, enabling the evaluation of AI Agent performance and reliability in real-world scenarios involving dynamic user and tool interactions. For usage instructions, please refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/llm.html#bench).
  - 🔥 **[2025.07.14]** Support for "Humanity's Last Exam" ([Humanity's-Last-Exam](https://modelscope.cn/datasets/cais/hle)), a highly challenging evaluation benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/llm.html#humanity-s-last-exam).
@@ -147,16 +156,16 @@ Please scan the QR code below to join our community groups:
  - 🔥 **[2025.06.28]** Optimized custom dataset evaluation: now supports evaluation without reference answers. Enhanced LLM judge usage, with built-in modes for "scoring directly without reference answers" and "checking answer consistency with reference answers". See [reference](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset/llm.html#qa) for details.
  - 🔥 **[2025.06.19]** Added support for the [BFCL-v3](https://modelscope.cn/datasets/AI-ModelScope/bfcl_v3) benchmark, designed to evaluate model function-calling capabilities across various scenarios. For more information, refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/third_party/bfcl_v3.html).
  - 🔥 **[2025.06.02]** Added support for the Needle-in-a-Haystack test. Simply specify `needle_haystack` to conduct the test, and a corresponding heatmap will be generated in the `outputs/reports` folder, providing a visual representation of the model's performance. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/needle_haystack.html) for more details.
- - 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html).
+ - 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html).
  - 🔥 **[2025.05.16]** Model service performance stress testing now supports setting various levels of concurrency and outputs a performance test report. [Reference example](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/quick_start.html#id3).
  - 🔥 **[2025.05.13]** Added support for the [ToolBench-Static](https://modelscope.cn/datasets/AI-ModelScope/ToolBench-Static) dataset to evaluate model's tool-calling capabilities. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) for usage instructions. Also added support for the [DROP](https://modelscope.cn/datasets/AI-ModelScope/DROP/dataPeview) and [Winogrande](https://modelscope.cn/datasets/AI-ModelScope/winogrande_val) benchmarks to assess the reasoning capabilities of models.
+ <details><summary>More</summary>
+
  - 🔥 **[2025.04.29]** Added Qwen3 Evaluation Best Practices, [welcome to read 📖](https://evalscope.readthedocs.io/en/latest/best_practice/qwen3.html)
  - 🔥 **[2025.04.27]** Support for text-to-image evaluation: Supports 8 metrics including MPS, HPSv2.1Score, etc., and evaluation benchmarks such as EvalMuse, GenAI-Bench. Refer to the [user documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/t2i.html) for more details.
  - 🔥 **[2025.04.10]** Model service stress testing tool now supports the `/v1/completions` endpoint (the default endpoint for vLLM benchmarking)
  - 🔥 **[2025.04.08]** Support for evaluating embedding model services compatible with the OpenAI API has been added. For more details, check the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html#configure-evaluation-parameters).
- <details><summary>More</summary>
-
- - 🔥 **[2025.03.27]** Added support for [AlpacaEval](https://www.modelscope.cn/datasets/AI-ModelScope/alpaca_eval/dataPeview) and [ArenaHard](https://modelscope.cn/datasets/AI-ModelScope/arena-hard-auto-v0.1/summary) evaluation benchmarks. For usage notes, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html)
+ - 🔥 **[2025.03.27]** Added support for [AlpacaEval](https://www.modelscope.cn/datasets/AI-ModelScope/alpaca_eval/dataPeview) and [ArenaHard](https://modelscope.cn/datasets/AI-ModelScope/arena-hard-auto-v0.1/summary) evaluation benchmarks. For usage notes, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html)
  - 🔥 **[2025.03.20]** The model inference service stress testing now supports generating prompts of specified length using random values. Refer to the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#using-the-random-dataset) for more details.
  - 🔥 **[2025.03.13]** Added support for the [LiveCodeBench](https://www.modelscope.cn/datasets/AI-ModelScope/code_generation_lite/summary) code evaluation benchmark, which can be used by specifying `live_code_bench`. Supports evaluating QwQ-32B on LiveCodeBench, refer to the [best practices](https://evalscope.readthedocs.io/en/latest/best_practice/eval_qwq.html).
  - 🔥 **[2025.03.11]** Added support for the [SimpleQA](https://modelscope.cn/datasets/AI-ModelScope/SimpleQA/summary) and [Chinese SimpleQA](https://modelscope.cn/datasets/AI-ModelScope/Chinese-SimpleQA/summary) evaluation benchmarks. These are used to assess the factual accuracy of models, and you can specify `simple_qa` and `chinese_simpleqa` for use. Support for specifying a judge model is also available. For more details, refer to the [relevant parameter documentation](https://evalscope.readthedocs.io/en/latest/get_started/parameters.html).
@@ -188,58 +197,87 @@ Please scan the QR code below to join our community groups:
 
  </details>
 
- ## 🛠️ Installation
- ### Method 1: Install Using pip
- We recommend using conda to manage your environment and installing dependencies with pip:
+ ## 🛠️ Environment Setup
+
+ ### Method 1. Install via pip
+
+ We recommend using conda to manage your environment and pip to install dependencies. This allows you to use the latest evalscope PyPI package.
 
  1. Create a conda environment (optional)
+ ```shell
+ # Python 3.10 is recommended
+ conda create -n evalscope python=3.10
+
+ # Activate the conda environment
+ conda activate evalscope
+ ```
+ 2. Install dependencies via pip
+ ```shell
+ pip install evalscope
+ ```
+ 3. Install additional dependencies (optional)
+ - To use model service inference benchmarking features, install the perf dependency:
  ```shell
- # It is recommended to use Python 3.10
- conda create -n evalscope python=3.10
- # Activate the conda environment
- conda activate evalscope
+ pip install 'evalscope[perf]'
  ```
-
- 2. Install dependencies using pip
+ - To use visualization features, install the app dependency:
  ```shell
- pip install evalscope # Install Native backend (default)
- # Additional options
- pip install 'evalscope[opencompass]' # Install OpenCompass backend
- pip install 'evalscope[vlmeval]' # Install VLMEvalKit backend
- pip install 'evalscope[rag]' # Install RAGEval backend
- pip install 'evalscope[perf]' # Install dependencies for the model performance testing module
- pip install 'evalscope[app]' # Install dependencies for visualization
- pip install 'evalscope[all]' # Install all backends (Native, OpenCompass, VLMEvalKit, RAGEval)
+ pip install 'evalscope[app]'
+ ```
+ - If you need to use other evaluation backends, you can install OpenCompass, VLMEvalKit, or RAGEval as needed:
+ ```shell
+ pip install 'evalscope[opencompass]'
+ pip install 'evalscope[vlmeval]'
+ pip install 'evalscope[rag]'
+ ```
+ - To install all dependencies:
+ ```shell
+ pip install 'evalscope[all]'
  ```
 
- > [!WARNING]
- > As the project has been renamed to `evalscope`, for versions `v0.4.3` or earlier, you can install using the following command:
+ > [!NOTE]
+ > The project has been renamed to `evalscope`. For version `v0.4.3` or earlier, you can install it with:
  > ```shell
- > pip install llmuses<=0.4.3
+ > pip install llmuses<=0.4.3
  > ```
- > To import relevant dependencies using `llmuses`:
- > ``` python
+ > Then, import related dependencies using `llmuses`:
+ > ```python
  > from llmuses import ...
  > ```
 
- ### Method 2: Install from Source
- 1. Download the source code
- ```shell
- git clone https://github.com/modelscope/evalscope.git
- ```
+ ### Method 2. Install from source
+
+ Installing from source allows you to use the latest code and makes it easier for further development and debugging.
 
+ 1. Clone the source code
+ ```shell
+ git clone https://github.com/modelscope/evalscope.git
+ ```
  2. Install dependencies
- ```shell
- cd evalscope/
- pip install -e . # Install Native backend
- # Additional options
- pip install -e '.[opencompass]' # Install OpenCompass backend
- pip install -e '.[vlmeval]' # Install VLMEvalKit backend
- pip install -e '.[rag]' # Install RAGEval backend
- pip install -e '.[perf]' # Install Perf dependencies
- pip install -e '.[app]' # Install visualization dependencies
- pip install -e '.[all]' # Install all backends (Native, OpenCompass, VLMEvalKit, RAGEval)
- ```
+ ```shell
+ cd evalscope/
+
+ pip install -e .
+ ```
+ 3. Install additional dependencies
+ - To use model service inference benchmarking features, install the perf dependency:
+ ```shell
+ pip install '.[perf]'
+ ```
+ - To use visualization features, install the app dependency:
+ ```shell
+ pip install '.[app]'
+ ```
+ - If you need to use other evaluation backends, you can install OpenCompass, VLMEvalKit, or RAGEval as needed:
+ ```shell
+ pip install '.[opencompass]'
+ pip install '.[vlmeval]'
+ pip install '.[rag]'
+ ```
+ - To install all dependencies:
+ ```shell
+ pip install '.[all]'
+ ```
 
 
  ## 🚀 Quick Start
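After either installation method above, a quick import check confirms the package is available. This is a minimal sketch; it assumes only that the top-level package exposes `__version__`, as the new `evalscope/__init__.py` shown later in this diff does.

```python
# Minimal post-install sanity check (sketch): import the package and print
# the version string re-exported by evalscope/__init__.py.
import evalscope

print(evalscope.__version__)
```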
@@ -326,7 +364,7 @@ run_task(task_cfg="config.json")
 
  ### Basic Parameter
  - `--model`: Specifies the `model_id` of the model in [ModelScope](https://modelscope.cn/), which can be automatically downloaded, e.g., [Qwen/Qwen2.5-0.5B-Instruct](https://modelscope.cn/models/Qwen/Qwen2.5-0.5B-Instruct/summary); or use the local path of the model, e.g., `/path/to/model`
- - `--datasets`: Dataset names, supports inputting multiple datasets separated by spaces. Datasets will be automatically downloaded from modelscope. For supported datasets, refer to the [Dataset List](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html)
+ - `--datasets`: Dataset names, supports inputting multiple datasets separated by spaces. Datasets will be automatically downloaded from modelscope. For supported datasets, refer to the [Dataset List](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html)
  - `--limit`: Maximum amount of evaluation data for each dataset. If not specified, it defaults to evaluating all data. Can be used for quick validation
 
  ### Output Results
@@ -415,7 +453,7 @@ For more customized evaluations, such as customizing model parameters or dataset
  evalscope eval \
  --model Qwen/Qwen3-0.6B \
  --model-args '{"revision": "master", "precision": "torch.float16", "device_map": "auto"}' \
- --generation-config '{"do_sample":true,"temperature":0.6,"max_new_tokens":512,"chat_template_kwargs":{"enable_thinking": false}}' \
+ --generation-config '{"do_sample":true,"temperature":0.6,"max_tokens":512,"chat_template_kwargs":{"enable_thinking": false}}' \
  --dataset-args '{"gsm8k": {"few_shot_num": 0, "few_shot_random": false}}' \
  --datasets gsm8k \
  --limit 10
@@ -429,7 +467,7 @@ evalscope eval \
  - `--generation-config`: Generation parameters, passed as a JSON string and parsed as a dictionary:
  - `do_sample`: Whether to use sampling
  - `temperature`: Generation temperature
- - `max_new_tokens`: Maximum length of generated tokens
+ - `max_tokens`: Maximum length of generated tokens
  - `chat_template_kwargs`: Model inference template parameters
  - `--dataset-args`: Settings for the evaluation dataset, passed as a JSON string where the key is the dataset name and the value is the parameters. Note that these need to correspond one-to-one with the values in the `--datasets` parameter:
  - `few_shot_num`: Number of few-shot examples
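The same run can be expressed in Python through `TaskConfig` and `run_task`, both re-exported from the top-level `evalscope/__init__.py` added in this release. The sketch below simply mirrors the CLI flags above; the exact `TaskConfig` field names are assumptions and may differ from the real API.

```python
# Hedged sketch: a Python equivalent of the `evalscope eval` example above.
# TaskConfig and run_task are exported by evalscope/__init__.py; the keyword
# arguments below are assumed to mirror the CLI flags (--model, --datasets,
# --dataset-args, --generation-config, --limit).
from evalscope import TaskConfig, run_task

task_cfg = TaskConfig(
    model='Qwen/Qwen3-0.6B',
    datasets=['gsm8k'],
    dataset_args={'gsm8k': {'few_shot_num': 0, 'few_shot_random': False}},
    generation_config={
        'do_sample': True,
        'temperature': 0.6,
        'max_tokens': 512,
        'chat_template_kwargs': {'enable_thinking': False},
    },
    limit=10,
)

run_task(task_cfg=task_cfg)
```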
@@ -28,9 +28,9 @@
  - [📝 Introduction](#-introduction)
  - [☎ User Groups](#-user-groups)
  - [🎉 News](#-news)
- - [🛠️ Installation](#️-installation)
- - [Method 1: Install Using pip](#method-1-install-using-pip)
- - [Method 2: Install from Source](#method-2-install-from-source)
+ - [🛠️ Environment Setup](#️-environment-setup)
+ - [Method 1. Install via pip](#method-1-install-via-pip)
+ - [Method 2. Install from source](#method-2-install-from-source)
  - [🚀 Quick Start](#-quick-start)
  - [Method 1. Using Command Line](#method-1-using-command-line)
  - [Method 2. Using Python Code](#method-2-using-python-code)
@@ -111,6 +111,15 @@ Please scan the QR code below to join our community groups:
 
 
  ## 🎉 News
+
+ > [!IMPORTANT]
+ > **Version 1.0 Refactoring**
+ >
+ > Version 1.0 introduces a major overhaul of the evaluation framework, establishing a new, more modular and extensible API layer under `evalscope/api`. Key improvements include standardized data models for benchmarks, samples, and results; a registry-based design for components such as benchmarks and metrics; and a rewritten core evaluator that orchestrates the new architecture. Existing benchmark adapters have been migrated to this API, resulting in cleaner, more consistent, and easier-to-maintain implementations.
+
+ - 🔥 **[2025.09.05]** Added support for vision-language multimodal model evaluation tasks, such as MathVista and MMMU. For more supported datasets, please [refer to the documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/vlm.html).
+ - 🔥 **[2025.09.04]** Added support for image editing task evaluation, including the [GEdit-Bench](https://modelscope.cn/datasets/stepfun-ai/GEdit-Bench) benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/image_edit.html).
+ - 🔥 **[2025.08.22]** Version 1.0 Refactoring. This release includes breaking changes; please [refer to the documentation](https://evalscope.readthedocs.io/en/latest/get_started/basic_usage.html#switching-to-version-v1-0).
  - 🔥 **[2025.07.18]** The model stress testing now supports randomly generating image-text data for multimodal model evaluation. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#id4).
  - 🔥 **[2025.07.16]** Support for [τ-bench](https://github.com/sierra-research/tau-bench) has been added, enabling the evaluation of AI Agent performance and reliability in real-world scenarios involving dynamic user and tool interactions. For usage instructions, please refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/llm.html#bench).
  - 🔥 **[2025.07.14]** Support for "Humanity's Last Exam" ([Humanity's-Last-Exam](https://modelscope.cn/datasets/cais/hle)), a highly challenging evaluation benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/llm.html#humanity-s-last-exam).
@@ -118,16 +127,16 @@ Please scan the QR code below to join our community groups:
  - 🔥 **[2025.06.28]** Optimized custom dataset evaluation: now supports evaluation without reference answers. Enhanced LLM judge usage, with built-in modes for "scoring directly without reference answers" and "checking answer consistency with reference answers". See [reference](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset/llm.html#qa) for details.
  - 🔥 **[2025.06.19]** Added support for the [BFCL-v3](https://modelscope.cn/datasets/AI-ModelScope/bfcl_v3) benchmark, designed to evaluate model function-calling capabilities across various scenarios. For more information, refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/third_party/bfcl_v3.html).
  - 🔥 **[2025.06.02]** Added support for the Needle-in-a-Haystack test. Simply specify `needle_haystack` to conduct the test, and a corresponding heatmap will be generated in the `outputs/reports` folder, providing a visual representation of the model's performance. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/needle_haystack.html) for more details.
- - 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html).
+ - 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html).
  - 🔥 **[2025.05.16]** Model service performance stress testing now supports setting various levels of concurrency and outputs a performance test report. [Reference example](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/quick_start.html#id3).
  - 🔥 **[2025.05.13]** Added support for the [ToolBench-Static](https://modelscope.cn/datasets/AI-ModelScope/ToolBench-Static) dataset to evaluate model's tool-calling capabilities. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) for usage instructions. Also added support for the [DROP](https://modelscope.cn/datasets/AI-ModelScope/DROP/dataPeview) and [Winogrande](https://modelscope.cn/datasets/AI-ModelScope/winogrande_val) benchmarks to assess the reasoning capabilities of models.
+ <details><summary>More</summary>
+
  - 🔥 **[2025.04.29]** Added Qwen3 Evaluation Best Practices, [welcome to read 📖](https://evalscope.readthedocs.io/en/latest/best_practice/qwen3.html)
  - 🔥 **[2025.04.27]** Support for text-to-image evaluation: Supports 8 metrics including MPS, HPSv2.1Score, etc., and evaluation benchmarks such as EvalMuse, GenAI-Bench. Refer to the [user documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/t2i.html) for more details.
  - 🔥 **[2025.04.10]** Model service stress testing tool now supports the `/v1/completions` endpoint (the default endpoint for vLLM benchmarking)
  - 🔥 **[2025.04.08]** Support for evaluating embedding model services compatible with the OpenAI API has been added. For more details, check the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html#configure-evaluation-parameters).
- <details><summary>More</summary>
-
- - 🔥 **[2025.03.27]** Added support for [AlpacaEval](https://www.modelscope.cn/datasets/AI-ModelScope/alpaca_eval/dataPeview) and [ArenaHard](https://modelscope.cn/datasets/AI-ModelScope/arena-hard-auto-v0.1/summary) evaluation benchmarks. For usage notes, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html)
+ - 🔥 **[2025.03.27]** Added support for [AlpacaEval](https://www.modelscope.cn/datasets/AI-ModelScope/alpaca_eval/dataPeview) and [ArenaHard](https://modelscope.cn/datasets/AI-ModelScope/arena-hard-auto-v0.1/summary) evaluation benchmarks. For usage notes, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html)
  - 🔥 **[2025.03.20]** The model inference service stress testing now supports generating prompts of specified length using random values. Refer to the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#using-the-random-dataset) for more details.
  - 🔥 **[2025.03.13]** Added support for the [LiveCodeBench](https://www.modelscope.cn/datasets/AI-ModelScope/code_generation_lite/summary) code evaluation benchmark, which can be used by specifying `live_code_bench`. Supports evaluating QwQ-32B on LiveCodeBench, refer to the [best practices](https://evalscope.readthedocs.io/en/latest/best_practice/eval_qwq.html).
  - 🔥 **[2025.03.11]** Added support for the [SimpleQA](https://modelscope.cn/datasets/AI-ModelScope/SimpleQA/summary) and [Chinese SimpleQA](https://modelscope.cn/datasets/AI-ModelScope/Chinese-SimpleQA/summary) evaluation benchmarks. These are used to assess the factual accuracy of models, and you can specify `simple_qa` and `chinese_simpleqa` for use. Support for specifying a judge model is also available. For more details, refer to the [relevant parameter documentation](https://evalscope.readthedocs.io/en/latest/get_started/parameters.html).
@@ -159,58 +168,87 @@ Please scan the QR code below to join our community groups:
 
  </details>
 
- ## 🛠️ Installation
- ### Method 1: Install Using pip
- We recommend using conda to manage your environment and installing dependencies with pip:
+ ## 🛠️ Environment Setup
+
+ ### Method 1. Install via pip
+
+ We recommend using conda to manage your environment and pip to install dependencies. This allows you to use the latest evalscope PyPI package.
 
  1. Create a conda environment (optional)
+ ```shell
+ # Python 3.10 is recommended
+ conda create -n evalscope python=3.10
+
+ # Activate the conda environment
+ conda activate evalscope
+ ```
+ 2. Install dependencies via pip
+ ```shell
+ pip install evalscope
+ ```
+ 3. Install additional dependencies (optional)
+ - To use model service inference benchmarking features, install the perf dependency:
  ```shell
- # It is recommended to use Python 3.10
- conda create -n evalscope python=3.10
- # Activate the conda environment
- conda activate evalscope
+ pip install 'evalscope[perf]'
  ```
-
- 2. Install dependencies using pip
+ - To use visualization features, install the app dependency:
  ```shell
- pip install evalscope # Install Native backend (default)
- # Additional options
- pip install 'evalscope[opencompass]' # Install OpenCompass backend
- pip install 'evalscope[vlmeval]' # Install VLMEvalKit backend
- pip install 'evalscope[rag]' # Install RAGEval backend
- pip install 'evalscope[perf]' # Install dependencies for the model performance testing module
- pip install 'evalscope[app]' # Install dependencies for visualization
- pip install 'evalscope[all]' # Install all backends (Native, OpenCompass, VLMEvalKit, RAGEval)
+ pip install 'evalscope[app]'
+ ```
+ - If you need to use other evaluation backends, you can install OpenCompass, VLMEvalKit, or RAGEval as needed:
+ ```shell
+ pip install 'evalscope[opencompass]'
+ pip install 'evalscope[vlmeval]'
+ pip install 'evalscope[rag]'
+ ```
+ - To install all dependencies:
+ ```shell
+ pip install 'evalscope[all]'
  ```
 
- > [!WARNING]
- > As the project has been renamed to `evalscope`, for versions `v0.4.3` or earlier, you can install using the following command:
+ > [!NOTE]
+ > The project has been renamed to `evalscope`. For version `v0.4.3` or earlier, you can install it with:
  > ```shell
- > pip install llmuses<=0.4.3
+ > pip install llmuses<=0.4.3
  > ```
- > To import relevant dependencies using `llmuses`:
- > ``` python
+ > Then, import related dependencies using `llmuses`:
+ > ```python
  > from llmuses import ...
  > ```
 
- ### Method 2: Install from Source
- 1. Download the source code
- ```shell
- git clone https://github.com/modelscope/evalscope.git
- ```
+ ### Method 2. Install from source
+
+ Installing from source allows you to use the latest code and makes it easier for further development and debugging.
 
+ 1. Clone the source code
+ ```shell
+ git clone https://github.com/modelscope/evalscope.git
+ ```
  2. Install dependencies
- ```shell
- cd evalscope/
- pip install -e . # Install Native backend
- # Additional options
- pip install -e '.[opencompass]' # Install OpenCompass backend
- pip install -e '.[vlmeval]' # Install VLMEvalKit backend
- pip install -e '.[rag]' # Install RAGEval backend
- pip install -e '.[perf]' # Install Perf dependencies
- pip install -e '.[app]' # Install visualization dependencies
- pip install -e '.[all]' # Install all backends (Native, OpenCompass, VLMEvalKit, RAGEval)
- ```
+ ```shell
+ cd evalscope/
+
+ pip install -e .
+ ```
+ 3. Install additional dependencies
+ - To use model service inference benchmarking features, install the perf dependency:
+ ```shell
+ pip install '.[perf]'
+ ```
+ - To use visualization features, install the app dependency:
+ ```shell
+ pip install '.[app]'
+ ```
+ - If you need to use other evaluation backends, you can install OpenCompass, VLMEvalKit, or RAGEval as needed:
+ ```shell
+ pip install '.[opencompass]'
+ pip install '.[vlmeval]'
+ pip install '.[rag]'
+ ```
+ - To install all dependencies:
+ ```shell
+ pip install '.[all]'
+ ```
 
 
  ## 🚀 Quick Start
@@ -297,7 +335,7 @@ run_task(task_cfg="config.json")
 
  ### Basic Parameter
  - `--model`: Specifies the `model_id` of the model in [ModelScope](https://modelscope.cn/), which can be automatically downloaded, e.g., [Qwen/Qwen2.5-0.5B-Instruct](https://modelscope.cn/models/Qwen/Qwen2.5-0.5B-Instruct/summary); or use the local path of the model, e.g., `/path/to/model`
- - `--datasets`: Dataset names, supports inputting multiple datasets separated by spaces. Datasets will be automatically downloaded from modelscope. For supported datasets, refer to the [Dataset List](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html)
+ - `--datasets`: Dataset names, supports inputting multiple datasets separated by spaces. Datasets will be automatically downloaded from modelscope. For supported datasets, refer to the [Dataset List](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html)
  - `--limit`: Maximum amount of evaluation data for each dataset. If not specified, it defaults to evaluating all data. Can be used for quick validation
 
  ### Output Results
@@ -386,7 +424,7 @@ For more customized evaluations, such as customizing model parameters or dataset
  evalscope eval \
  --model Qwen/Qwen3-0.6B \
  --model-args '{"revision": "master", "precision": "torch.float16", "device_map": "auto"}' \
- --generation-config '{"do_sample":true,"temperature":0.6,"max_new_tokens":512,"chat_template_kwargs":{"enable_thinking": false}}' \
+ --generation-config '{"do_sample":true,"temperature":0.6,"max_tokens":512,"chat_template_kwargs":{"enable_thinking": false}}' \
  --dataset-args '{"gsm8k": {"few_shot_num": 0, "few_shot_random": false}}' \
  --datasets gsm8k \
  --limit 10
@@ -400,7 +438,7 @@ evalscope eval \
  - `--generation-config`: Generation parameters, passed as a JSON string and parsed as a dictionary:
  - `do_sample`: Whether to use sampling
  - `temperature`: Generation temperature
- - `max_new_tokens`: Maximum length of generated tokens
+ - `max_tokens`: Maximum length of generated tokens
  - `chat_template_kwargs`: Model inference template parameters
  - `--dataset-args`: Settings for the evaluation dataset, passed as a JSON string where the key is the dataset name and the value is the parameters. Note that these need to correspond one-to-one with the values in the `--datasets` parameter:
  - `few_shot_num`: Number of few-shot examples
@@ -0,0 +1,8 @@
+ # Copyright (c) Alibaba, Inc. and its affiliates.
+ from evalscope.benchmarks import * # registered benchmarks
+ from evalscope.config import TaskConfig
+ from evalscope.filters import extraction, selection # registered filters
+ from evalscope.metrics import metric # registered metrics
+ from evalscope.models import model_apis # need for register model apis
+ from evalscope.run import run_task
+ from .version import __release_datetime__, __version__
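As the inline comments in this new `__init__.py` indicate, importing the package registers benchmarks, filters, metrics, and model APIs as side effects, after which an evaluation can be driven from a task configuration. A minimal usage sketch, assuming only the names exported above and the `run_task(task_cfg="config.json")` call shown earlier in this diff:

```python
# Sketch: a plain import of evalscope pulls in the registered components via
# the imports in __init__.py above; run_task then executes a configured task.
from evalscope import run_task

# "config.json" is a placeholder path to a task configuration file.
run_task(task_cfg='config.json')
```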
@@ -0,0 +1,3 @@
+ from .adapters import DefaultDataAdapter, ImageEditAdapter, MultiChoiceAdapter, Text2ImageAdapter, VisionLanguageAdapter
+ from .benchmark import DataAdapter
+ from .meta import BenchmarkMeta
@@ -0,0 +1,5 @@
+ from .default_data_adapter import DefaultDataAdapter
+ from .image_edit_adapter import ImageEditAdapter
+ from .multi_choice_adapter import MultiChoiceAdapter
+ from .text2image_adapter import Text2ImageAdapter
+ from .vision_language_adapter import VisionLanguageAdapter
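These adapters back the registry-based design described in the version 1.0 note above. Purely as an illustration of the intended extension point, a custom benchmark would subclass one of them; only the class names `DefaultDataAdapter` and `BenchmarkMeta` come from this diff, while the import path is assumed from this release's `evalscope/api` layout and every method, field, and registration call not shown here is hypothetical.

```python
# Illustrative sketch only: DefaultDataAdapter and BenchmarkMeta are class
# names added in this release; the module path is assumed, and anything not
# visible in this diff is hypothetical.
from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter


class MyBenchmarkAdapter(DefaultDataAdapter):
    """Hypothetical adapter reusing the default data-loading and scoring flow."""


# Hypothetical: describe and register the benchmark with a BenchmarkMeta
# instance; the actual constructor arguments are not shown in this diff.
# meta = BenchmarkMeta(name='my_benchmark', ...)
```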