evalscope-1.0.0.tar.gz → evalscope-1.0.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (529)
  1. {evalscope-1.0.0/evalscope.egg-info → evalscope-1.0.1}/PKG-INFO +9 -7
  2. {evalscope-1.0.0 → evalscope-1.0.1}/README.md +8 -6
  3. evalscope-1.0.1/evalscope/api/benchmark/__init__.py +3 -0
  4. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/benchmark/adapters/__init__.py +2 -0
  5. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/benchmark/adapters/default_data_adapter.py +1 -0
  6. evalscope-1.0.1/evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
  7. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/benchmark/adapters/text2image_adapter.py +7 -6
  8. evalscope-1.0.1/evalscope/api/benchmark/adapters/vision_language_adapter.py +6 -0
  9. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/benchmark/benchmark.py +35 -0
  10. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/benchmark/meta.py +6 -0
  11. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/dataset/dataset.py +6 -6
  12. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/dataset/loader.py +2 -1
  13. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/evaluator/cache.py +24 -1
  14. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/evaluator/state.py +12 -1
  15. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/messages/__init__.py +1 -0
  16. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/messages/chat_message.py +47 -2
  17. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/metric/scorer.py +15 -7
  18. evalscope-1.0.1/evalscope/api/mixin/__init__.py +1 -0
  19. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/model/generate_config.py +1 -3
  20. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/model/model.py +4 -1
  21. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/app/app.py +3 -0
  22. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/app/ui/single_model.py +3 -3
  23. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/app/utils/data_utils.py +7 -7
  24. evalscope-1.0.1/evalscope/app/utils/env_utils.py +12 -0
  25. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/app/utils/text_utils.py +14 -12
  26. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/arguments.py +2 -4
  27. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/opencompass/backend_manager.py +0 -2
  28. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/utils/embedding.py +9 -1
  29. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bfcl/bfcl_adapter.py +2 -6
  30. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bfcl/generation.py +2 -2
  31. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/ceval/ceval_adapter.py +1 -2
  32. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/data_collection/data_collection_adapter.py +23 -19
  33. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/frames/frames_adapter.py +2 -1
  34. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/general_arena/general_arena_adapter.py +5 -1
  35. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/ifeval/instructions_util.py +2 -3
  36. evalscope-1.0.1/evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
  37. evalscope-1.0.1/evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
  38. evalscope-1.0.1/evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
  39. evalscope-1.0.1/evalscope/benchmarks/math_vista/math_vista_adapter.py +129 -0
  40. evalscope-1.0.1/evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
  41. evalscope-1.0.1/evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +129 -0
  42. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +5 -1
  43. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/tau_bench/generation.py +1 -1
  44. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/tau_bench/tau_bench_adapter.py +15 -19
  45. {evalscope-1.0.0/evalscope/benchmarks/aigc/t2i → evalscope-1.0.1/evalscope/benchmarks/text2image}/evalmuse_adapter.py +3 -1
  46. {evalscope-1.0.0/evalscope/benchmarks/aigc/t2i → evalscope-1.0.1/evalscope/benchmarks/text2image}/genai_bench_adapter.py +2 -2
  47. {evalscope-1.0.0/evalscope/benchmarks/aigc/t2i → evalscope-1.0.1/evalscope/benchmarks/text2image}/general_t2i_adapter.py +1 -1
  48. {evalscope-1.0.0/evalscope/benchmarks/aigc/t2i → evalscope-1.0.1/evalscope/benchmarks/text2image}/hpdv2_adapter.py +7 -2
  49. {evalscope-1.0.0/evalscope/benchmarks/aigc/t2i → evalscope-1.0.1/evalscope/benchmarks/text2image}/tifa_adapter.py +1 -0
  50. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +1 -2
  51. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/cli/start_app.py +7 -1
  52. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/cli/start_perf.py +7 -1
  53. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/config.py +72 -13
  54. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/constants.py +8 -0
  55. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/evaluator/evaluator.py +6 -4
  56. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/llm_judge.py +19 -7
  57. {evalscope-1.0.0/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models → evalscope-1.0.1/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model}/__init__.py +0 -0
  58. {evalscope-1.0.0/evalscope/third_party/thinkbench/tools → evalscope-1.0.1/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models}/__init__.py +0 -0
  59. evalscope-1.0.1/evalscope/models/image_edit_model.py +125 -0
  60. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/models/model_apis.py +20 -0
  61. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/models/openai_compatible.py +3 -0
  62. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/models/text2image_model.py +2 -2
  63. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/models/utils/openai.py +7 -4
  64. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/benchmark.py +2 -0
  65. evalscope-1.0.1/evalscope/perf/utils/__init__.py +0 -0
  66. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/utils/benchmark_util.py +8 -5
  67. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/utils/local_server.py +3 -0
  68. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/report/__init__.py +0 -1
  69. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/report/generator.py +8 -87
  70. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/run.py +9 -5
  71. evalscope-1.0.1/evalscope/third_party/thinkbench/tools/__init__.py +0 -0
  72. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -4
  73. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/utils/chat_service.py +1 -1
  74. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/utils/import_utils.py +23 -1
  75. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/utils/io_utils.py +42 -1
  76. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/utils/model_utils.py +4 -3
  77. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/utils/multi_choices.py +23 -6
  78. evalscope-1.0.1/evalscope/version.py +4 -0
  79. {evalscope-1.0.0 → evalscope-1.0.1/evalscope.egg-info}/PKG-INFO +9 -7
  80. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope.egg-info/SOURCES.txt +26 -12
  81. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope.egg-info/requires.txt +3 -8
  82. {evalscope-1.0.0 → evalscope-1.0.1}/requirements/aigc.txt +1 -0
  83. {evalscope-1.0.0 → evalscope-1.0.1}/requirements/framework.txt +0 -4
  84. {evalscope-1.0.0 → evalscope-1.0.1}/requirements/rag.txt +1 -0
  85. {evalscope-1.0.0 → evalscope-1.0.1}/tests/benchmark/test_eval.py +30 -31
  86. evalscope-1.0.1/tests/benchmark/test_image_edit.py +65 -0
  87. evalscope-1.0.1/tests/benchmark/test_vlm.py +80 -0
  88. {evalscope-1.0.0 → evalscope-1.0.1}/tests/cli/test_all.py +83 -43
  89. {evalscope-1.0.0 → evalscope-1.0.1}/tests/cli/test_collection.py +8 -5
  90. evalscope-1.0.1/tests/cli/test_reasoning.py +81 -0
  91. evalscope-1.0.1/tests/common.py +73 -0
  92. {evalscope-1.0.0 → evalscope-1.0.1}/tests/perf/test_perf.py +4 -2
  93. evalscope-1.0.1/tests/rag/__init__.py +0 -0
  94. {evalscope-1.0.0 → evalscope-1.0.1}/tests/rag/test_clip_benchmark.py +0 -3
  95. evalscope-1.0.0/evalscope/api/benchmark/__init__.py +0 -3
  96. evalscope-1.0.0/evalscope/api/mixin/__init__.py +0 -2
  97. evalscope-1.0.0/evalscope/api/mixin/dataset_mixin.py +0 -105
  98. evalscope-1.0.0/evalscope/benchmarks/aigc/i2i/general_i2i_adapter.py +0 -44
  99. evalscope-1.0.0/evalscope/version.py +0 -4
  100. evalscope-1.0.0/tests/vlm/__init__.py +0 -1
  101. {evalscope-1.0.0 → evalscope-1.0.1}/LICENSE +0 -0
  102. {evalscope-1.0.0 → evalscope-1.0.1}/MANIFEST.in +0 -0
  103. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/__init__.py +0 -0
  104. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/__init__.py +0 -0
  105. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/benchmark/adapters/multi_choice_adapter.py +0 -0
  106. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/dataset/__init__.py +0 -0
  107. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/dataset/utils.py +0 -0
  108. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/evaluator/__init__.py +0 -0
  109. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/evaluator/evaluator.py +0 -0
  110. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/filter/__init__.py +0 -0
  111. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/filter/filter.py +0 -0
  112. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/messages/content.py +0 -0
  113. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/messages/utils.py +0 -0
  114. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/metric/__init__.py +0 -0
  115. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/metric/metric.py +0 -0
  116. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/mixin/llm_judge_mixin.py +0 -0
  117. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/model/__init__.py +0 -0
  118. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/model/model_output.py +0 -0
  119. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/registry.py +0 -0
  120. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/tool/__init__.py +0 -0
  121. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/tool/tool_call.py +0 -0
  122. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/tool/tool_info.py +0 -0
  123. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/api/tool/utils.py +0 -0
  124. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/app/__init__.py +0 -0
  125. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/app/arguments.py +0 -0
  126. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/app/constants.py +0 -0
  127. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/app/ui/__init__.py +0 -0
  128. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/app/ui/app_ui.py +0 -0
  129. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/app/ui/multi_model.py +0 -0
  130. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/app/ui/sidebar.py +0 -0
  131. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/app/ui/visualization.py +0 -0
  132. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/app/utils/localization.py +0 -0
  133. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/app/utils/visualization.py +0 -0
  134. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/__init__.py +0 -0
  135. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/base.py +0 -0
  136. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/opencompass/__init__.py +0 -0
  137. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/opencompass/api_meta_template.py +0 -0
  138. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/opencompass/tasks/__init__.py +0 -0
  139. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/opencompass/tasks/eval_api.py +0 -0
  140. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/opencompass/tasks/eval_datasets.py +0 -0
  141. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/__init__.py +0 -0
  142. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/backend_manager.py +0 -0
  143. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/__init__.py +0 -0
  144. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/arguments.py +0 -0
  145. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +0 -0
  146. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/task_template.py +0 -0
  147. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/tasks/__init__.py +0 -0
  148. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/tasks/image_caption.py +0 -0
  149. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/tasks/zeroshot_classification.py +0 -0
  150. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/tasks/zeroshot_retrieval.py +0 -0
  151. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/utils/webdataset_convert.py +0 -0
  152. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/clip_benchmark/utils/webdatasets.txt +0 -0
  153. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/__init__.py +0 -0
  154. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/arguments.py +0 -0
  155. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/base.py +0 -0
  156. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/task_template.py +0 -0
  157. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/tasks/Classification.py +0 -0
  158. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/tasks/Clustering.py +0 -0
  159. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/tasks/CustomTask.py +0 -0
  160. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/tasks/PairClassification.py +0 -0
  161. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/tasks/Reranking.py +0 -0
  162. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/tasks/Retrieval.py +0 -0
  163. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/tasks/STS.py +0 -0
  164. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/cmteb/tasks/__init__.py +0 -0
  165. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/__init__.py +0 -0
  166. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/arguments.py +0 -0
  167. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/prompts/persona_prompt.py +0 -0
  168. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/task_template.py +0 -0
  169. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/tasks/__init__.py +0 -0
  170. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/tasks/build_distribution.py +0 -0
  171. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/tasks/build_transform.py +0 -0
  172. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +0 -0
  173. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +0 -0
  174. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/utils/__init__.py +0 -0
  175. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/utils/clip.py +0 -0
  176. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/utils/llm.py +0 -0
  177. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/rag_eval/utils/tools.py +0 -0
  178. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/vlm_eval_kit/__init__.py +0 -0
  179. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/backend/vlm_eval_kit/backend_manager.py +0 -0
  180. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/__init__.py +0 -0
  181. {evalscope-1.0.0/evalscope/benchmarks/aigc → evalscope-1.0.1/evalscope/benchmarks/aime}/__init__.py +0 -0
  182. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/aime/aime24_adapter.py +0 -0
  183. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/aime/aime25_adapter.py +0 -0
  184. {evalscope-1.0.0/evalscope/benchmarks/aigc/i2i → evalscope-1.0.1/evalscope/benchmarks/alpaca_eval}/__init__.py +0 -0
  185. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +0 -0
  186. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/arc/__init__.py +0 -0
  187. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/arc/arc_adapter.py +0 -0
  188. {evalscope-1.0.0/evalscope/benchmarks/aigc/t2i → evalscope-1.0.1/evalscope/benchmarks/arena_hard}/__init__.py +0 -0
  189. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/arena_hard/arena_hard_adapter.py +0 -0
  190. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/arena_hard/utils.py +0 -0
  191. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/__init__.py +0 -0
  192. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/bbh_adapter.py +0 -0
  193. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/boolean_expressions.txt +0 -0
  194. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/causal_judgement.txt +0 -0
  195. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/date_understanding.txt +0 -0
  196. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/disambiguation_qa.txt +0 -0
  197. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/dyck_languages.txt +0 -0
  198. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/formal_fallacies.txt +0 -0
  199. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/geometric_shapes.txt +0 -0
  200. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/hyperbaton.txt +0 -0
  201. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_five_objects.txt +0 -0
  202. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_seven_objects.txt +0 -0
  203. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_three_objects.txt +0 -0
  204. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/movie_recommendation.txt +0 -0
  205. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/multistep_arithmetic_two.txt +0 -0
  206. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/navigate.txt +0 -0
  207. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/object_counting.txt +0 -0
  208. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/penguins_in_a_table.txt +0 -0
  209. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/reasoning_about_colored_objects.txt +0 -0
  210. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/ruin_names.txt +0 -0
  211. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/salient_translation_error_detection.txt +0 -0
  212. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/snarks.txt +0 -0
  213. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/sports_understanding.txt +0 -0
  214. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/temporal_sequences.txt +0 -0
  215. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_five_objects.txt +0 -0
  216. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_seven_objects.txt +0 -0
  217. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_three_objects.txt +0 -0
  218. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/web_of_lies.txt +0 -0
  219. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/bbh/cot_prompts/word_sorting.txt +0 -0
  220. {evalscope-1.0.0/evalscope/benchmarks/aime → evalscope-1.0.1/evalscope/benchmarks/bfcl}/__init__.py +0 -0
  221. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/ceval/__init__.py +0 -0
  222. {evalscope-1.0.0/evalscope/benchmarks/alpaca_eval → evalscope-1.0.1/evalscope/benchmarks/chinese_simple_qa}/__init__.py +0 -0
  223. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +0 -0
  224. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/cmmlu/__init__.py +0 -0
  225. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/cmmlu/cmmlu_adapter.py +0 -0
  226. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/competition_math/__init__.py +0 -0
  227. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/competition_math/competition_math_adapter.py +0 -0
  228. {evalscope-1.0.0/evalscope/benchmarks/arena_hard → evalscope-1.0.1/evalscope/benchmarks/data_collection}/__init__.py +0 -0
  229. {evalscope-1.0.0/evalscope/benchmarks/bfcl → evalscope-1.0.1/evalscope/benchmarks/docmath}/__init__.py +0 -0
  230. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/docmath/docmath_adapter.py +0 -0
  231. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/docmath/utils.py +0 -0
  232. {evalscope-1.0.0/evalscope/benchmarks/chinese_simple_qa → evalscope-1.0.1/evalscope/benchmarks/drop}/__init__.py +0 -0
  233. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/drop/drop_adapter.py +0 -0
  234. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/drop/utils.py +0 -0
  235. {evalscope-1.0.0/evalscope/benchmarks/data_collection → evalscope-1.0.1/evalscope/benchmarks/frames}/__init__.py +0 -0
  236. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/frames/utils.py +0 -0
  237. {evalscope-1.0.0/evalscope/benchmarks/docmath → evalscope-1.0.1/evalscope/benchmarks/general_arena}/__init__.py +0 -0
  238. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/general_arena/utils.py +0 -0
  239. {evalscope-1.0.0/evalscope/benchmarks/drop → evalscope-1.0.1/evalscope/benchmarks/general_mcq}/__init__.py +0 -0
  240. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/general_mcq/general_mcq_adapter.py +0 -0
  241. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/general_qa/__init__.py +0 -0
  242. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/general_qa/general_qa_adapter.py +0 -0
  243. {evalscope-1.0.0/evalscope/benchmarks/frames → evalscope-1.0.1/evalscope/benchmarks/gpqa}/__init__.py +0 -0
  244. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/gpqa/gpqa_adapter.py +0 -0
  245. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/gpqa/prompt.py +0 -0
  246. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/gsm8k/__init__.py +0 -0
  247. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/gsm8k/gsm8k_adapter.py +0 -0
  248. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/hellaswag/__init__.py +0 -0
  249. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/hellaswag/hellaswag_adapter.py +0 -0
  250. {evalscope-1.0.0/evalscope/benchmarks/general_arena → evalscope-1.0.1/evalscope/benchmarks/hle}/__init__.py +0 -0
  251. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/hle/hle_adapter.py +0 -0
  252. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/humaneval/__init__.py +0 -0
  253. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/humaneval/humaneval_adapter.py +0 -0
  254. {evalscope-1.0.0/evalscope/benchmarks/general_mcq → evalscope-1.0.1/evalscope/benchmarks/ifeval}/__init__.py +0 -0
  255. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/ifeval/ifeval_adapter.py +0 -0
  256. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/ifeval/instructions.py +0 -0
  257. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/ifeval/instructions_registry.py +0 -0
  258. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/ifeval/utils.py +0 -0
  259. {evalscope-1.0.0/evalscope/benchmarks/gpqa → evalscope-1.0.1/evalscope/benchmarks/image_edit}/__init__.py +0 -0
  260. {evalscope-1.0.0/evalscope/benchmarks/hle → evalscope-1.0.1/evalscope/benchmarks/image_edit/gedit}/__init__.py +0 -0
  261. {evalscope-1.0.0/evalscope/benchmarks/ifeval → evalscope-1.0.1/evalscope/benchmarks/iquiz}/__init__.py +0 -0
  262. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/iquiz/iquiz_adapter.py +0 -0
  263. {evalscope-1.0.0/evalscope/benchmarks/iquiz → evalscope-1.0.1/evalscope/benchmarks/live_code_bench}/__init__.py +0 -0
  264. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/live_code_bench/evaluate_utils.py +0 -0
  265. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/live_code_bench/extract_utils.py +0 -0
  266. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +0 -0
  267. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/live_code_bench/load_utils.py +0 -0
  268. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/live_code_bench/pass_k_utils.py +0 -0
  269. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/live_code_bench/prompts.py +0 -0
  270. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/live_code_bench/testing_util.py +0 -0
  271. {evalscope-1.0.0/evalscope/benchmarks/live_code_bench → evalscope-1.0.1/evalscope/benchmarks/maritime_bench}/__init__.py +0 -0
  272. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py +0 -0
  273. {evalscope-1.0.0/evalscope/benchmarks/maritime_bench → evalscope-1.0.1/evalscope/benchmarks/math_500}/__init__.py +0 -0
  274. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/math_500/math_500_adapter.py +0 -0
  275. {evalscope-1.0.0/evalscope/benchmarks/math_500 → evalscope-1.0.1/evalscope/benchmarks/math_vista}/__init__.py +0 -0
  276. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/mmlu/__init__.py +0 -0
  277. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/mmlu/mmlu_adapter.py +0 -0
  278. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/mmlu_pro/__init__.py +0 -0
  279. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +0 -0
  280. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/mmlu_redux/__init__.py +0 -0
  281. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/mmlu_redux/mmlu_redux_adapter.py +0 -0
  282. {evalscope-1.0.0/evalscope/benchmarks/musr → evalscope-1.0.1/evalscope/benchmarks/mmmu}/__init__.py +0 -0
  283. {evalscope-1.0.0/evalscope/benchmarks/needle_haystack → evalscope-1.0.1/evalscope/benchmarks/mmmu_pro}/__init__.py +0 -0
  284. {evalscope-1.0.0/evalscope/benchmarks/process_bench → evalscope-1.0.1/evalscope/benchmarks/musr}/__init__.py +0 -0
  285. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/musr/musr_adapter.py +0 -0
  286. {evalscope-1.0.0/evalscope/benchmarks/simple_qa → evalscope-1.0.1/evalscope/benchmarks/needle_haystack}/__init__.py +0 -0
  287. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/needle_haystack/utils.py +0 -0
  288. {evalscope-1.0.0/evalscope/benchmarks/super_gpqa → evalscope-1.0.1/evalscope/benchmarks/process_bench}/__init__.py +0 -0
  289. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/process_bench/process_bench_adapter.py +0 -0
  290. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/race/__init__.py +0 -0
  291. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/race/race_adapter.py +0 -0
  292. {evalscope-1.0.0/evalscope/benchmarks/tau_bench → evalscope-1.0.1/evalscope/benchmarks/simple_qa}/__init__.py +0 -0
  293. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/simple_qa/simple_qa_adapter.py +0 -0
  294. {evalscope-1.0.0/evalscope/benchmarks/tool_bench → evalscope-1.0.1/evalscope/benchmarks/super_gpqa}/__init__.py +0 -0
  295. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/super_gpqa/prompt.py +0 -0
  296. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +0 -0
  297. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/super_gpqa/utils.py +0 -0
  298. {evalscope-1.0.0/evalscope/benchmarks/winogrande → evalscope-1.0.1/evalscope/benchmarks/tau_bench}/__init__.py +0 -0
  299. {evalscope-1.0.0/evalscope/metrics/t2v_metrics → evalscope-1.0.1/evalscope/benchmarks/text2image}/__init__.py +0 -0
  300. {evalscope-1.0.0/evalscope/metrics/t2v_metrics/models → evalscope-1.0.1/evalscope/benchmarks/tool_bench}/__init__.py +0 -0
  301. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/tool_bench/tool_bench_adapter.py +0 -0
  302. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/tool_bench/utils.py +0 -0
  303. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/trivia_qa/__init__.py +0 -0
  304. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/trivia_qa/samples.jsonl +0 -0
  305. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +0 -0
  306. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/truthful_qa/__init__.py +0 -0
  307. {evalscope-1.0.0/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model → evalscope-1.0.1/evalscope/benchmarks/winogrande}/__init__.py +0 -0
  308. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/benchmarks/winogrande/winogrande_adapter.py +0 -0
  309. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/cli/__init__.py +0 -0
  310. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/cli/base.py +0 -0
  311. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/cli/cli.py +0 -0
  312. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/cli/start_eval.py +0 -0
  313. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/cli/start_server.py +0 -0
  314. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/collections/__init__.py +0 -0
  315. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/collections/sampler.py +0 -0
  316. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/collections/schema.py +0 -0
  317. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/evaluator/__init__.py +0 -0
  318. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/filters/__init__.py +0 -0
  319. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/filters/extraction.py +0 -0
  320. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/filters/selection.py +0 -0
  321. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/__init__.py +0 -0
  322. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/bundled_rouge_score/__init__.py +0 -0
  323. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/bundled_rouge_score/rouge_scorer.py +0 -0
  324. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/math_parser.py +0 -0
  325. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/metric.py +0 -0
  326. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/metrics.py +0 -0
  327. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/rouge_metric.py +0 -0
  328. {evalscope-1.0.0/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward → evalscope-1.0.1/evalscope/metrics/t2v_metrics}/__init__.py +0 -0
  329. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/clipscore.py +0 -0
  330. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/constants.py +0 -0
  331. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/itmscore.py +0 -0
  332. {evalscope-1.0.0/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5 → evalscope-1.0.1/evalscope/metrics/t2v_metrics/models}/__init__.py +0 -0
  333. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/__init__.py +0 -0
  334. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/base_model.py +0 -0
  335. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/clip_model.py +0 -0
  336. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/cross_modeling.py +0 -0
  337. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/clip_model.py +0 -0
  338. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/hpsv2_model.py +0 -0
  339. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/mps_model.py +0 -0
  340. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py +0 -0
  341. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/itmscore_models/__init__.py +0 -0
  342. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/itmscore_models/blip2_itm_model.py +0 -0
  343. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py +0 -0
  344. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/ImageReward.py +0 -0
  345. {evalscope-1.0.0/evalscope/perf → evalscope-1.0.1/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward}/__init__.py +0 -0
  346. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py +0 -0
  347. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward_model.py +0 -0
  348. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/model.py +0 -0
  349. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/utils.py +0 -0
  350. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/__init__.py +0 -0
  351. {evalscope-1.0.0/evalscope/perf/utils → evalscope-1.0.1/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5}/__init__.py +0 -0
  352. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/__init__.py +0 -0
  353. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/language_model/clip_t5.py +0 -0
  354. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/builder.py +0 -0
  355. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/clip_encoder.py +0 -0
  356. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_projector/builder.py +0 -0
  357. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5_model.py +0 -0
  358. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/gpt4v_model.py +0 -0
  359. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/__init__.py +0 -0
  360. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +0 -0
  361. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +0 -0
  362. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +0 -0
  363. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +0 -0
  364. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +0 -0
  365. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +0 -0
  366. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +0 -0
  367. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +0 -0
  368. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +0 -0
  369. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +0 -0
  370. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/default.yaml +0 -0
  371. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_flant5xl.yaml +0 -0
  372. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt2.7b.yaml +0 -0
  373. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt6.7b.yaml +0 -0
  374. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_coco.yaml +0 -0
  375. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xl.yaml +0 -0
  376. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xxl.yaml +0 -0
  377. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna13b.yaml +0 -0
  378. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml +0 -0
  379. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain.yaml +0 -0
  380. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl.yaml +0 -0
  381. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_no_prefix.yaml +0 -0
  382. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_prefix.yaml +0 -0
  383. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_vitL.yaml +0 -0
  384. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xxl.yaml +0 -0
  385. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt2.7b.yaml +0 -0
  386. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt6.7b.yaml +0 -0
  387. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_vitL.yaml +0 -0
  388. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna13b.yaml +0 -0
  389. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna7b.yaml +0 -0
  390. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config.json +0 -0
  391. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config_albef.json +0 -0
  392. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_large_config.json +0 -0
  393. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/__init__.py +0 -0
  394. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/base_model.py +0 -0
  395. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +0 -0
  396. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2.py +0 -0
  397. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_image_text_matching.py +0 -0
  398. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_qformer.py +0 -0
  399. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5.py +0 -0
  400. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5_instruct.py +0 -0
  401. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/fga_blip2.py +0 -0
  402. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_llama.py +0 -0
  403. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_t5.py +0 -0
  404. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/__init__.py +0 -0
  405. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py +0 -0
  406. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_caption.py +0 -0
  407. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_classification.py +0 -0
  408. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_feature_extractor.py +0 -0
  409. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_image_text_matching.py +0 -0
  410. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_nlvr.py +0 -0
  411. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_outputs.py +0 -0
  412. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_pretrain.py +0 -0
  413. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_vqa.py +0 -0
  414. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +0 -0
  415. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/clip_vit.py +0 -0
  416. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/eva_vit.py +0 -0
  417. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +0 -0
  418. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/vit.py +0 -0
  419. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/__init__.py +0 -0
  420. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/base_processor.py +0 -0
  421. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/blip_processors.py +0 -0
  422. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/randaugment.py +0 -0
  423. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py +0 -0
  424. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/vqa_model.py +0 -0
  425. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/score.py +0 -0
  426. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/metrics/t2v_metrics/vqascore.py +0 -0
  427. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/models/__init__.py +0 -0
  428. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/models/mockllm.py +0 -0
  429. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/models/modelscope.py +0 -0
  430. {evalscope-1.0.0/tests/rag → evalscope-1.0.1/evalscope/perf}/__init__.py +0 -0
  431. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/arguments.py +0 -0
  432. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/http_client.py +0 -0
  433. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/main.py +0 -0
  434. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/plugin/__init__.py +0 -0
  435. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/plugin/api/__init__.py +0 -0
  436. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/plugin/api/base.py +0 -0
  437. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/plugin/api/custom_api.py +0 -0
  438. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/plugin/api/dashscope_api.py +0 -0
  439. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/plugin/api/default_api.py +0 -0
  440. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/plugin/api/openai_api.py +0 -0
  441. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/__init__.py +0 -0
  442. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/base.py +0 -0
  443. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/custom.py +0 -0
  444. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/flickr8k.py +0 -0
  445. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/kontext_bench.py +0 -0
  446. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/line_by_line.py +0 -0
  447. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/longalpaca.py +0 -0
  448. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/openqa.py +0 -0
  449. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/random_dataset.py +0 -0
  450. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/random_vl_dataset.py +0 -0
  451. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/plugin/datasets/speed_benchmark.py +0 -0
  452. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/plugin/registry.py +0 -0
  453. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/utils/analysis_result.py +0 -0
  454. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/utils/db_util.py +0 -0
  455. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/utils/handler.py +0 -0
  456. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/utils/log_utils.py +0 -0
  457. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/perf/utils/rich_display.py +0 -0
  458. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/report/combinator.py +0 -0
  459. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/report/report.py +0 -0
  460. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/summarizer.py +0 -0
  461. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/__init__.py +0 -0
  462. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/longbench_write/README.md +0 -0
  463. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/longbench_write/__init__.py +0 -0
  464. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/longbench_write/default_task.json +0 -0
  465. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/longbench_write/default_task.yaml +0 -0
  466. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/longbench_write/eval.py +0 -0
  467. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/longbench_write/infer.py +0 -0
  468. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/longbench_write/longbench_write.py +0 -0
  469. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/longbench_write/resources/__init__.py +0 -0
  470. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/longbench_write/resources/judge.txt +0 -0
  471. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/longbench_write/resources/longbench_write.jsonl +0 -0
  472. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/longbench_write/resources/longbench_write_en.jsonl +0 -0
  473. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/longbench_write/resources/longwrite_ruler.jsonl +0 -0
  474. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/longbench_write/tools/__init__.py +0 -0
  475. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/longbench_write/tools/data_etl.py +0 -0
  476. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/longbench_write/tools/openai_api.py +0 -0
  477. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/longbench_write/utils.py +0 -0
  478. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/thinkbench/__init__.py +0 -0
  479. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/thinkbench/eval.py +0 -0
  480. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/thinkbench/infer.py +0 -0
  481. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/thinkbench/resources/critique_template.txt +0 -0
  482. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/thinkbench/resources/reformat_template.txt +0 -0
  483. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/thinkbench/tools/llm.py +0 -0
  484. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/thinkbench/tools/utils.py +0 -0
  485. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/README.md +0 -0
  486. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/__init__.py +0 -0
  487. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/config_default.json +0 -0
  488. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/config_default.yaml +0 -0
  489. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/eval.py +0 -0
  490. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/infer.py +0 -0
  491. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/llm/__init__.py +0 -0
  492. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/requirements.txt +0 -0
  493. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/third_party/toolbench_static/toolbench_static.py +0 -0
  494. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/utils/__init__.py +0 -0
  495. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/utils/argument_utils.py +0 -0
  496. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/utils/deprecation_utils.py +0 -0
  497. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/utils/function_utils.py +0 -0
  498. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/utils/json_schema.py +0 -0
  499. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/utils/logger.py +0 -0
  500. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope/utils/url_utils.py +0 -0
  501. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope.egg-info/dependency_links.txt +0 -0
  502. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope.egg-info/entry_points.txt +0 -0
  503. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope.egg-info/not-zip-safe +0 -0
  504. {evalscope-1.0.0 → evalscope-1.0.1}/evalscope.egg-info/top_level.txt +0 -0
  505. {evalscope-1.0.0 → evalscope-1.0.1}/requirements/app.txt +0 -0
  506. {evalscope-1.0.0 → evalscope-1.0.1}/requirements/dev.txt +0 -0
  507. {evalscope-1.0.0 → evalscope-1.0.1}/requirements/docs.txt +0 -0
  508. {evalscope-1.0.0 → evalscope-1.0.1}/requirements/opencompass.txt +0 -0
  509. {evalscope-1.0.0 → evalscope-1.0.1}/requirements/perf.txt +0 -0
  510. {evalscope-1.0.0 → evalscope-1.0.1}/requirements/vlmeval.txt +0 -0
  511. {evalscope-1.0.0 → evalscope-1.0.1}/requirements.txt +0 -0
  512. {evalscope-1.0.0 → evalscope-1.0.1}/setup.cfg +0 -0
  513. {evalscope-1.0.0 → evalscope-1.0.1}/setup.py +0 -0
  514. {evalscope-1.0.0 → evalscope-1.0.1}/tests/__init__.py +0 -0
  515. {evalscope-1.0.0/tests/aigc → evalscope-1.0.1/tests/benchmark}/__init__.py +0 -0
  516. {evalscope-1.0.0/tests/aigc → evalscope-1.0.1/tests/benchmark}/test_t2i.py +0 -0
  517. {evalscope-1.0.0/tests/benchmark → evalscope-1.0.1/tests/cli}/__init__.py +0 -0
  518. {evalscope-1.0.0 → evalscope-1.0.1}/tests/cli/test_custom.py +0 -0
  519. {evalscope-1.0.0/tests/cli → evalscope-1.0.1/tests/perf}/__init__.py +0 -0
  520. {evalscope-1.0.0 → evalscope-1.0.1}/tests/rag/test_mteb.py +0 -0
  521. {evalscope-1.0.0 → evalscope-1.0.1}/tests/rag/test_ragas.py +0 -0
  522. {evalscope-1.0.0/tests/perf → evalscope-1.0.1/tests/swift}/__init__.py +0 -0
  523. {evalscope-1.0.0 → evalscope-1.0.1}/tests/swift/test_run_swift_eval.py +0 -0
  524. {evalscope-1.0.0 → evalscope-1.0.1}/tests/swift/test_run_swift_vlm_eval.py +0 -0
  525. {evalscope-1.0.0 → evalscope-1.0.1}/tests/swift/test_run_swift_vlm_jugde_eval.py +0 -0
  526. {evalscope-1.0.0 → evalscope-1.0.1}/tests/test_run_all.py +0 -0
  527. {evalscope-1.0.0 → evalscope-1.0.1}/tests/utils.py +0 -0
  528. {evalscope-1.0.0/tests/swift → evalscope-1.0.1/tests/vlm}/__init__.py +0 -0
  529. {evalscope-1.0.0 → evalscope-1.0.1}/tests/vlm/test_vlmeval.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: evalscope
3
- Version: 1.0.0
3
+ Version: 1.0.1
4
4
  Summary: EvalScope: Lightweight LLMs Evaluation Framework
5
5
  Home-page: https://github.com/modelscope/evalscope
6
6
  Author: ModelScope team
@@ -146,7 +146,9 @@ Please scan the QR code below to join our community groups:
146
146
  >
147
147
  > Version 1.0 introduces a major overhaul of the evaluation framework, establishing a new, more modular and extensible API layer under `evalscope/api`. Key improvements include standardized data models for benchmarks, samples, and results; a registry-based design for components such as benchmarks and metrics; and a rewritten core evaluator that orchestrates the new architecture. Existing benchmark adapters have been migrated to this API, resulting in cleaner, more consistent, and easier-to-maintain implementations.
148
148
 
149
- - 🔥 **[2025.08.22]** Version 1.0 Refactoring.
149
+ - 🔥 **[2025.09.05]** Added support for vision-language multimodal model evaluation tasks, such as MathVista and MMMU. For more supported datasets, please [refer to the documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/vlm.html).
150
+ - 🔥 **[2025.09.04]** Added support for image editing task evaluation, including the [GEdit-Bench](https://modelscope.cn/datasets/stepfun-ai/GEdit-Bench) benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/image_edit.html).
151
+ - 🔥 **[2025.08.22]** Version 1.0 Refactoring. Break changes, please [refer to](https://evalscope.readthedocs.io/en/latest/get_started/basic_usage.html#switching-to-version-v1-0).
150
152
  - 🔥 **[2025.07.18]** The model stress testing now supports randomly generating image-text data for multimodal model evaluation. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#id4).
151
153
  - 🔥 **[2025.07.16]** Support for [τ-bench](https://github.com/sierra-research/tau-bench) has been added, enabling the evaluation of AI Agent performance and reliability in real-world scenarios involving dynamic user and tool interactions. For usage instructions, please refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/llm.html#bench).
152
154
  - 🔥 **[2025.07.14]** Support for "Humanity's Last Exam" ([Humanity's-Last-Exam](https://modelscope.cn/datasets/cais/hle)), a highly challenging evaluation benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/llm.html#humanity-s-last-exam).
@@ -154,7 +156,7 @@ Please scan the QR code below to join our community groups:
  - 🔥 **[2025.06.28]** Optimized custom dataset evaluation: now supports evaluation without reference answers. Enhanced LLM judge usage, with built-in modes for "scoring directly without reference answers" and "checking answer consistency with reference answers". See [reference](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset/llm.html#qa) for details.
  - 🔥 **[2025.06.19]** Added support for the [BFCL-v3](https://modelscope.cn/datasets/AI-ModelScope/bfcl_v3) benchmark, designed to evaluate model function-calling capabilities across various scenarios. For more information, refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/third_party/bfcl_v3.html).
  - 🔥 **[2025.06.02]** Added support for the Needle-in-a-Haystack test. Simply specify `needle_haystack` to conduct the test, and a corresponding heatmap will be generated in the `outputs/reports` folder, providing a visual representation of the model's performance. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/needle_haystack.html) for more details.
- - 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html).
+ - 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html).
  - 🔥 **[2025.05.16]** Model service performance stress testing now supports setting various levels of concurrency and outputs a performance test report. [Reference example](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/quick_start.html#id3).
  - 🔥 **[2025.05.13]** Added support for the [ToolBench-Static](https://modelscope.cn/datasets/AI-ModelScope/ToolBench-Static) dataset to evaluate model's tool-calling capabilities. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) for usage instructions. Also added support for the [DROP](https://modelscope.cn/datasets/AI-ModelScope/DROP/dataPeview) and [Winogrande](https://modelscope.cn/datasets/AI-ModelScope/winogrande_val) benchmarks to assess the reasoning capabilities of models.
  <details><summary>More</summary>
@@ -163,7 +165,7 @@ Please scan the QR code below to join our community groups:
  - 🔥 **[2025.04.27]** Support for text-to-image evaluation: Supports 8 metrics including MPS, HPSv2.1Score, etc., and evaluation benchmarks such as EvalMuse, GenAI-Bench. Refer to the [user documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/t2i.html) for more details.
  - 🔥 **[2025.04.10]** Model service stress testing tool now supports the `/v1/completions` endpoint (the default endpoint for vLLM benchmarking)
  - 🔥 **[2025.04.08]** Support for evaluating embedding model services compatible with the OpenAI API has been added. For more details, check the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html#configure-evaluation-parameters).
- - 🔥 **[2025.03.27]** Added support for [AlpacaEval](https://www.modelscope.cn/datasets/AI-ModelScope/alpaca_eval/dataPeview) and [ArenaHard](https://modelscope.cn/datasets/AI-ModelScope/arena-hard-auto-v0.1/summary) evaluation benchmarks. For usage notes, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html)
+ - 🔥 **[2025.03.27]** Added support for [AlpacaEval](https://www.modelscope.cn/datasets/AI-ModelScope/alpaca_eval/dataPeview) and [ArenaHard](https://modelscope.cn/datasets/AI-ModelScope/arena-hard-auto-v0.1/summary) evaluation benchmarks. For usage notes, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html)
  - 🔥 **[2025.03.20]** The model inference service stress testing now supports generating prompts of specified length using random values. Refer to the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#using-the-random-dataset) for more details.
  - 🔥 **[2025.03.13]** Added support for the [LiveCodeBench](https://www.modelscope.cn/datasets/AI-ModelScope/code_generation_lite/summary) code evaluation benchmark, which can be used by specifying `live_code_bench`. Supports evaluating QwQ-32B on LiveCodeBench, refer to the [best practices](https://evalscope.readthedocs.io/en/latest/best_practice/eval_qwq.html).
  - 🔥 **[2025.03.11]** Added support for the [SimpleQA](https://modelscope.cn/datasets/AI-ModelScope/SimpleQA/summary) and [Chinese SimpleQA](https://modelscope.cn/datasets/AI-ModelScope/Chinese-SimpleQA/summary) evaluation benchmarks. These are used to assess the factual accuracy of models, and you can specify `simple_qa` and `chinese_simpleqa` for use. Support for specifying a judge model is also available. For more details, refer to the [relevant parameter documentation](https://evalscope.readthedocs.io/en/latest/get_started/parameters.html).
@@ -362,7 +364,7 @@ run_task(task_cfg="config.json")

  ### Basic Parameter
  - `--model`: Specifies the `model_id` of the model in [ModelScope](https://modelscope.cn/), which can be automatically downloaded, e.g., [Qwen/Qwen2.5-0.5B-Instruct](https://modelscope.cn/models/Qwen/Qwen2.5-0.5B-Instruct/summary); or use the local path of the model, e.g., `/path/to/model`
- - `--datasets`: Dataset names, supports inputting multiple datasets separated by spaces. Datasets will be automatically downloaded from modelscope. For supported datasets, refer to the [Dataset List](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html)
+ - `--datasets`: Dataset names, supports inputting multiple datasets separated by spaces. Datasets will be automatically downloaded from modelscope. For supported datasets, refer to the [Dataset List](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html)
  - `--limit`: Maximum amount of evaluation data for each dataset. If not specified, it defaults to evaluating all data. Can be used for quick validation

  ### Output Results
@@ -451,7 +453,7 @@ For more customized evaluations, such as customizing model parameters or dataset
  evalscope eval \
  --model Qwen/Qwen3-0.6B \
  --model-args '{"revision": "master", "precision": "torch.float16", "device_map": "auto"}' \
- --generation-config '{"do_sample":true,"temperature":0.6,"max_new_tokens":512,"chat_template_kwargs":{"enable_thinking": false}}' \
+ --generation-config '{"do_sample":true,"temperature":0.6,"max_tokens":512,"chat_template_kwargs":{"enable_thinking": false}}' \
  --dataset-args '{"gsm8k": {"few_shot_num": 0, "few_shot_random": false}}' \
  --datasets gsm8k \
  --limit 10
@@ -465,7 +467,7 @@ evalscope eval \
  - `--generation-config`: Generation parameters, passed as a JSON string and parsed as a dictionary:
  - `do_sample`: Whether to use sampling
  - `temperature`: Generation temperature
- - `max_new_tokens`: Maximum length of generated tokens
+ - `max_tokens`: Maximum length of generated tokens
  - `chat_template_kwargs`: Model inference template parameters
  - `--dataset-args`: Settings for the evaluation dataset, passed as a JSON string where the key is the dataset name and the value is the parameters. Note that these need to correspond one-to-one with the values in the `--datasets` parameter:
  - `few_shot_num`: Number of few-shot examples
@@ -117,7 +117,9 @@ Please scan the QR code below to join our community groups:
  >
  > Version 1.0 introduces a major overhaul of the evaluation framework, establishing a new, more modular and extensible API layer under `evalscope/api`. Key improvements include standardized data models for benchmarks, samples, and results; a registry-based design for components such as benchmarks and metrics; and a rewritten core evaluator that orchestrates the new architecture. Existing benchmark adapters have been migrated to this API, resulting in cleaner, more consistent, and easier-to-maintain implementations.

- - 🔥 **[2025.08.22]** Version 1.0 Refactoring.
+ - 🔥 **[2025.09.05]** Added support for vision-language multimodal model evaluation tasks, such as MathVista and MMMU. For more supported datasets, please [refer to the documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/vlm.html).
+ - 🔥 **[2025.09.04]** Added support for image editing task evaluation, including the [GEdit-Bench](https://modelscope.cn/datasets/stepfun-ai/GEdit-Bench) benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/image_edit.html).
+ - 🔥 **[2025.08.22]** Version 1.0 Refactoring. This release introduces breaking changes; please [refer to the documentation](https://evalscope.readthedocs.io/en/latest/get_started/basic_usage.html#switching-to-version-v1-0).
  - 🔥 **[2025.07.18]** The model stress testing now supports randomly generating image-text data for multimodal model evaluation. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#id4).
  - 🔥 **[2025.07.16]** Support for [τ-bench](https://github.com/sierra-research/tau-bench) has been added, enabling the evaluation of AI Agent performance and reliability in real-world scenarios involving dynamic user and tool interactions. For usage instructions, please refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/llm.html#bench).
  - 🔥 **[2025.07.14]** Support for "Humanity's Last Exam" ([Humanity's-Last-Exam](https://modelscope.cn/datasets/cais/hle)), a highly challenging evaluation benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/llm.html#humanity-s-last-exam).
@@ -125,7 +127,7 @@ Please scan the QR code below to join our community groups:
  - 🔥 **[2025.06.28]** Optimized custom dataset evaluation: now supports evaluation without reference answers. Enhanced LLM judge usage, with built-in modes for "scoring directly without reference answers" and "checking answer consistency with reference answers". See [reference](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset/llm.html#qa) for details.
  - 🔥 **[2025.06.19]** Added support for the [BFCL-v3](https://modelscope.cn/datasets/AI-ModelScope/bfcl_v3) benchmark, designed to evaluate model function-calling capabilities across various scenarios. For more information, refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/third_party/bfcl_v3.html).
  - 🔥 **[2025.06.02]** Added support for the Needle-in-a-Haystack test. Simply specify `needle_haystack` to conduct the test, and a corresponding heatmap will be generated in the `outputs/reports` folder, providing a visual representation of the model's performance. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/needle_haystack.html) for more details.
- - 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html).
+ - 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html).
  - 🔥 **[2025.05.16]** Model service performance stress testing now supports setting various levels of concurrency and outputs a performance test report. [Reference example](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/quick_start.html#id3).
  - 🔥 **[2025.05.13]** Added support for the [ToolBench-Static](https://modelscope.cn/datasets/AI-ModelScope/ToolBench-Static) dataset to evaluate model's tool-calling capabilities. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) for usage instructions. Also added support for the [DROP](https://modelscope.cn/datasets/AI-ModelScope/DROP/dataPeview) and [Winogrande](https://modelscope.cn/datasets/AI-ModelScope/winogrande_val) benchmarks to assess the reasoning capabilities of models.
  <details><summary>More</summary>
@@ -134,7 +136,7 @@ Please scan the QR code below to join our community groups:
  - 🔥 **[2025.04.27]** Support for text-to-image evaluation: Supports 8 metrics including MPS, HPSv2.1Score, etc., and evaluation benchmarks such as EvalMuse, GenAI-Bench. Refer to the [user documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/t2i.html) for more details.
  - 🔥 **[2025.04.10]** Model service stress testing tool now supports the `/v1/completions` endpoint (the default endpoint for vLLM benchmarking)
  - 🔥 **[2025.04.08]** Support for evaluating embedding model services compatible with the OpenAI API has been added. For more details, check the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html#configure-evaluation-parameters).
- - 🔥 **[2025.03.27]** Added support for [AlpacaEval](https://www.modelscope.cn/datasets/AI-ModelScope/alpaca_eval/dataPeview) and [ArenaHard](https://modelscope.cn/datasets/AI-ModelScope/arena-hard-auto-v0.1/summary) evaluation benchmarks. For usage notes, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html)
+ - 🔥 **[2025.03.27]** Added support for [AlpacaEval](https://www.modelscope.cn/datasets/AI-ModelScope/alpaca_eval/dataPeview) and [ArenaHard](https://modelscope.cn/datasets/AI-ModelScope/arena-hard-auto-v0.1/summary) evaluation benchmarks. For usage notes, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html)
  - 🔥 **[2025.03.20]** The model inference service stress testing now supports generating prompts of specified length using random values. Refer to the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#using-the-random-dataset) for more details.
  - 🔥 **[2025.03.13]** Added support for the [LiveCodeBench](https://www.modelscope.cn/datasets/AI-ModelScope/code_generation_lite/summary) code evaluation benchmark, which can be used by specifying `live_code_bench`. Supports evaluating QwQ-32B on LiveCodeBench, refer to the [best practices](https://evalscope.readthedocs.io/en/latest/best_practice/eval_qwq.html).
  - 🔥 **[2025.03.11]** Added support for the [SimpleQA](https://modelscope.cn/datasets/AI-ModelScope/SimpleQA/summary) and [Chinese SimpleQA](https://modelscope.cn/datasets/AI-ModelScope/Chinese-SimpleQA/summary) evaluation benchmarks. These are used to assess the factual accuracy of models, and you can specify `simple_qa` and `chinese_simpleqa` for use. Support for specifying a judge model is also available. For more details, refer to the [relevant parameter documentation](https://evalscope.readthedocs.io/en/latest/get_started/parameters.html).
@@ -333,7 +335,7 @@ run_task(task_cfg="config.json")

  ### Basic Parameter
  - `--model`: Specifies the `model_id` of the model in [ModelScope](https://modelscope.cn/), which can be automatically downloaded, e.g., [Qwen/Qwen2.5-0.5B-Instruct](https://modelscope.cn/models/Qwen/Qwen2.5-0.5B-Instruct/summary); or use the local path of the model, e.g., `/path/to/model`
- - `--datasets`: Dataset names, supports inputting multiple datasets separated by spaces. Datasets will be automatically downloaded from modelscope. For supported datasets, refer to the [Dataset List](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html)
+ - `--datasets`: Dataset names, supports inputting multiple datasets separated by spaces. Datasets will be automatically downloaded from modelscope. For supported datasets, refer to the [Dataset List](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html)
  - `--limit`: Maximum amount of evaluation data for each dataset. If not specified, it defaults to evaluating all data. Can be used for quick validation

  ### Output Results
@@ -422,7 +424,7 @@ For more customized evaluations, such as customizing model parameters or dataset
  evalscope eval \
  --model Qwen/Qwen3-0.6B \
  --model-args '{"revision": "master", "precision": "torch.float16", "device_map": "auto"}' \
- --generation-config '{"do_sample":true,"temperature":0.6,"max_new_tokens":512,"chat_template_kwargs":{"enable_thinking": false}}' \
+ --generation-config '{"do_sample":true,"temperature":0.6,"max_tokens":512,"chat_template_kwargs":{"enable_thinking": false}}' \
  --dataset-args '{"gsm8k": {"few_shot_num": 0, "few_shot_random": false}}' \
  --datasets gsm8k \
  --limit 10
@@ -436,7 +438,7 @@ evalscope eval \
  - `--generation-config`: Generation parameters, passed as a JSON string and parsed as a dictionary:
  - `do_sample`: Whether to use sampling
  - `temperature`: Generation temperature
- - `max_new_tokens`: Maximum length of generated tokens
+ - `max_tokens`: Maximum length of generated tokens
  - `chat_template_kwargs`: Model inference template parameters
  - `--dataset-args`: Settings for the evaluation dataset, passed as a JSON string where the key is the dataset name and the value is the parameters. Note that these need to correspond one-to-one with the values in the `--datasets` parameter:
  - `few_shot_num`: Number of few-shot examples
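For reference, a roughly equivalent configuration can be expressed in Python via `run_task`. This is a minimal sketch, assuming the commonly documented `TaskConfig` import path and field names; note that `max_tokens` replaces the pre-1.0 `max_new_tokens` key.

```python
# Minimal sketch of the CLI call above expressed with run_task (assumed API surface).
from evalscope import TaskConfig, run_task

task = TaskConfig(
    model='Qwen/Qwen3-0.6B',
    datasets=['gsm8k'],
    dataset_args={'gsm8k': {'few_shot_num': 0, 'few_shot_random': False}},
    generation_config={
        'do_sample': True,
        'temperature': 0.6,
        'max_tokens': 512,  # renamed from max_new_tokens in 1.x
        'chat_template_kwargs': {'enable_thinking': False},
    },
    limit=10,
)
run_task(task_cfg=task)
```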
@@ -0,0 +1,3 @@
+ from .adapters import DefaultDataAdapter, ImageEditAdapter, MultiChoiceAdapter, Text2ImageAdapter, VisionLanguageAdapter
+ from .benchmark import DataAdapter
+ from .meta import BenchmarkMeta
@@ -1,3 +1,5 @@
  from .default_data_adapter import DefaultDataAdapter
+ from .image_edit_adapter import ImageEditAdapter
  from .multi_choice_adapter import MultiChoiceAdapter
  from .text2image_adapter import Text2ImageAdapter
+ from .vision_language_adapter import VisionLanguageAdapter
@@ -241,6 +241,7 @@ class DefaultDataAdapter(DataAdapter):
  filter_func=self.sample_filter,
  limit=self.limit if not self.reformat_subset else None, # Limit number of samples if specified
  repeats=self.repeats, # Number of repetitions for each sample
+ shuffle=self.shuffle, # Shuffle dataset if enabled
  shuffle_choices=self.shuffle_choices, # Shuffle choices if requested
  data_source=self.dataset_hub, # Data source configuration
  )
@@ -0,0 +1,82 @@
+ import os
+ from typing import Optional
+
+ from evalscope.constants import EvalType, FileConstants
+ from evalscope.utils import get_logger
+ from evalscope.utils.function_utils import thread_safe
+ from evalscope.utils.io_utils import jsonl_to_list
+ from .text2image_adapter import Text2ImageAdapter
+
+ logger = get_logger()
+
+
+ class ImageEditAdapter(Text2ImageAdapter):
+     """
+     Support two methods:
+     1. Inference using modelscope pipeline
+     2. Load local inference jsonl file with key to corresponding prompt
+     """
+
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+
+         self.local_file = self.extra_params.get('local_file', None)
+         self.id_key = self.extra_params.get('id_key', FileConstants.ID)
+         self.image_key = self.extra_params.get('image_key', FileConstants.IMAGE_PATH)
+         self.local_data = self.load_local_file()
+
+     def load_local_file(self) -> Optional[dict]:
+         if not self.local_file:
+             return None
+
+         # Load file and check
+         data_list = jsonl_to_list(self.local_file)
+         data_dict = {}
+         for record in data_list:
+             if self.image_key not in record:
+                 raise ValueError(f"Image key '{self.image_key}' not found in record: {record}, file {self.local_file}")
+             if self.id_key not in record:
+                 raise ValueError(f"ID key '{self.id_key}' not found in record: {record}, file {self.local_file}")
+
+             image_path = record[self.image_key]
+             if not os.path.isabs(image_path):
+                 image_path = os.path.join(os.path.dirname(self.local_file), image_path)
+             if not os.path.exists(image_path):
+                 raise FileNotFoundError(f"Image file '{image_path}' not found.")
+
+             data_dict[record[self.id_key]] = record
+         return data_dict
+
+     def get_image_path_from_id(self, image_id) -> Optional[str]:
+         if not self.local_file:
+             return None
+
+         record = self.local_data.get(image_id)
+         if not record:
+             return None
+
+         return record[self.image_key]
+
+     def _post_process_samples(self):
+         super()._post_process_samples()
+
+         # Add local image path if exists
+         for subset in self.test_dataset.keys():
+             for sample in self.test_dataset[subset]:
+                 local_image_path = self.get_image_path_from_id(sample.metadata.get(FileConstants.ID))
+                 if local_image_path:
+                     sample.metadata[FileConstants.IMAGE_PATH] = local_image_path
+
+     def sample_filter(self, sample) -> bool:
+         """
+         Filter samples based on metadata availability.
+         If local file is not available, all samples are considered valid.
+         Otherwise, only samples with valid metadata and image path are kept.
+         """
+         if not self.local_data:
+             return True
+         else:
+             sample_id = sample.metadata.get(FileConstants.ID)
+             if (not sample_id) or (not self.get_image_path_from_id(sample_id)):
+                 return False
+             return True
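Judging from the adapter above, the local-file mode expects a JSONL file whose records carry the configured id and image-path keys. Below is a minimal sketch of the `extra_params` and file layout; the key names default to `FileConstants.ID` and `FileConstants.IMAGE_PATH` (assumed here to be `id` and `image_path`), and all paths are illustrative.

```python
# Hypothetical extra_params for ImageEditAdapter's local-file mode.
extra_params = {
    'local_file': 'outputs/edited_images.jsonl',  # one JSON object per line
    'id_key': 'id',             # key holding the sample id
    'image_key': 'image_path',  # key holding the path of the pre-generated edited image
}

# outputs/edited_images.jsonl might look like (relative paths are resolved against
# the directory containing the JSONL file, per load_local_file above):
#   {"id": "sample_001", "image_path": "images/sample_001.png"}
#   {"id": "sample_002", "image_path": "images/sample_002.png"}
```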
@@ -8,7 +8,7 @@ from evalscope.api.messages.content import ContentImage
  from evalscope.api.metric import Score
  from evalscope.api.model import ChatCompletionChoice, Model, ModelOutput
  from evalscope.api.registry import get_metric
- from evalscope.constants import EvalType
+ from evalscope.constants import EvalType, FileConstants
  from evalscope.utils import get_logger
  from evalscope.utils.function_utils import thread_safe
  from .default_data_adapter import DefaultDataAdapter
@@ -27,11 +27,12 @@ class Text2ImageAdapter(DefaultDataAdapter):
  return Sample(
  input=[ChatMessageUser(content=record['prompt'])],
  metadata={
- 'id': record['id'],
  'prompt': record['prompt'],
  'category': record.get('category', ''),
  'tags': record.get('tags', []),
- 'image_path': record.get('image_path', ''), # Optional field for existing image path
+ FileConstants.ID: record[FileConstants.ID],
+ FileConstants.IMAGE_PATH: record.get(FileConstants.IMAGE_PATH,
+ ''), # Optional field for existing image path
  }
  )

@@ -83,7 +84,7 @@ class Text2ImageAdapter(DefaultDataAdapter):
  completed=True,
  )
  else:
- image_id = f"{sample.metadata.get('id',sample.id)}_{sample.group_id}"
+ image_id = f'{sample.metadata.get(FileConstants.ID, sample.id)}_{sample.group_id}'
  output_path = os.path.join(output_dir, 'images', f'{image_id}.png')
  if not os.path.exists(os.path.dirname(output_path)):
  os.makedirs(os.path.dirname(output_path))
@@ -96,7 +97,7 @@ class Text2ImageAdapter(DefaultDataAdapter):
  with open(output_path, 'wb') as f:
  f.write(base64.b64decode(image_base64))

- sample.metadata['image_path'] = output_path
+ sample.metadata[FileConstants.IMAGE_PATH] = output_path
  return TaskState(
  model=model.name,
  sample=sample,
@@ -111,7 +112,7 @@ class Text2ImageAdapter(DefaultDataAdapter):
  self, original_prediction: str, filtered_prediction: str, reference: str, task_state: TaskState
  ) -> Score:
  # Get prediction and prompt from task state
- image_path = task_state.metadata.get('image_path', original_prediction)
+ image_path = task_state.metadata.get(FileConstants.IMAGE_PATH, original_prediction)
  prompt = task_state.input[0].content
  meta = task_state.metadata

@@ -0,0 +1,6 @@
+ from .default_data_adapter import DefaultDataAdapter
+
+
+ class VisionLanguageAdapter(DefaultDataAdapter):
+     """Adapter for vision-language benchmarks. e.g., image captioning, visual question answering, etc."""
+     pass
@@ -170,6 +170,13 @@ class DataAdapter(LLMJudgeMixin, ABC):
  """
  return self._benchmark_meta.default_subset

+ @default_subset.setter
+ def default_subset(self, value: str):
+ """
+ Set the default subset of the benchmark.
+ """
+ self._benchmark_meta.default_subset = value
+
  @property
  def few_shot_num(self) -> int:
  """
@@ -299,6 +306,34 @@
  """
  return self._task_config.seed

+ @property
+ def shuffle(self) -> bool:
+ """
+ Return whether to shuffle the dataset before evaluation.
+ """
+ return self._benchmark_meta.shuffle
+
+ @shuffle.setter
+ def shuffle(self, value: bool):
+ """
+ Set whether to shuffle the dataset before evaluation.
+ """
+ self._benchmark_meta.shuffle = value
+
+ @property
+ def shuffle_choices(self) -> bool:
+ """
+ Return whether to shuffle the choices in multiple-choice datasets.
+ """
+ return self._benchmark_meta.shuffle_choices
+
+ @shuffle_choices.setter
+ def shuffle_choices(self, value: bool):
+ """
+ Set whether to shuffle the choices in multiple-choice datasets.
+ """
+ self._benchmark_meta.shuffle_choices = value
+
  @contextlib.contextmanager
  def _temporary_attribute(self, attr_name: str, new_value):
  """
@@ -73,6 +73,12 @@ class BenchmarkMeta:
  aggregation: str = 'mean'
  """ Aggregation function for the metrics. Default is 'mean'. Can be 'mean', 'pass@<k>' or a custom function name."""

+ shuffle: bool = False
+ """Whether to shuffle the dataset before evaluation."""
+
+ shuffle_choices: bool = False
+ """Whether to shuffle the choices in multiple-choice datasets."""
+
  extra_params: Dict = field(default_factory=dict)
  """ Additional parameters for the benchmark."""

@@ -5,9 +5,8 @@ from dataclasses import dataclass, field
  from pydantic import BaseModel, Field
  from typing import Any, Callable, Dict, Iterator, List, Optional, Sequence, Union

- from evalscope.api.messages import ChatMessage, messages_pretty_str
+ from evalscope.api.messages import ChatMessage, messages_to_markdown
  from evalscope.api.tool import ToolInfo
- from evalscope.utils.multi_choices import answer_character, answer_index


  class Sample(BaseModel):
@@ -31,9 +30,6 @@ class Sample(BaseModel):
  tools: Optional[List[ToolInfo]] = None
  """List of tools available to the model during inference (optional)."""

- category: Optional[str] = None
- """Category of the sample (optional)."""
-
  subset_key: Optional[str] = None
  """Key for the subset this sample belongs to, used for generating subsets (optional)."""

@@ -54,7 +50,7 @@
  if isinstance(self.input, str):
  input_text = self.input
  else:
- input_text = messages_pretty_str(self.input)
+ input_text = messages_to_markdown(self.input, max_length=50)
  return f'Sample ID: {self.id}\nInput: {input_text}\nTarget: {self.target}'


@@ -230,6 +226,8 @@ class MemoryDataset(Dataset):
  self._shuffled = True

  def shuffle_choices(self, seed: Optional[int] = None) -> None:
+ from evalscope.utils.multi_choices import answer_character
+
  rand = random.Random(seed)
  for sample in self.samples:
  if not sample.choices:
@@ -249,6 +247,8 @@
  sample.target = self._remap_target(sample.target, position_map=position_map)

  def _remap_target(self, target: Union[str, List[str]], position_map: Dict[int, str]) -> Union[str, List[str]]:
+ from evalscope.utils.multi_choices import answer_index
+
  if isinstance(target, list):
  return [position_map[answer_index(t)] for t in target]
  else:
@@ -126,7 +126,8 @@ class RemoteDataLoader(DataLoader):
  self.limit = int(len(dataset) * self.limit)
  elif isinstance(self.limit, int) and self.limit < 0:
  raise ValueError('Limit must be a non-negative integer or a float between 0 and 1.')
- dataset = dataset.select(range(self.limit))
+ if len(dataset) > self.limit:
+ dataset = dataset.select(range(self.limit))

  # convert to list
  dataset = dataset.to_list()
@@ -299,6 +299,15 @@ class ModelResult(BaseModel):
  completed=True, # Mark as completed since it was cached
  )

+ def pretty_print(self) -> str:
+ """
+ Generate a pretty-printed string representation of the model result.
+
+ Returns:
+ A string representation of the model result
+ """
+ return self.model_dump_json(indent=2)
+

  class ReviewResult(BaseModel):
  """
@@ -340,7 +349,7 @@ class ReviewResult(BaseModel):

  return cls(
  index=state.sample_id,
- input=state.input_text,
+ input=state.input_markdown,
  target=state.target,
  sample_score=sample_score,
  )
@@ -353,3 +362,17 @@
  The sample score object
  """
  return self.sample_score
+
+ def pretty_print(self) -> str:
+ """
+ Generate a pretty-printed string representation of the review result.
+
+ Returns:
+ A string representation of the review result
+ """
+ output = [
+ f'Review Result for Sample {self.index}:',
+ f'Target: {self.target}',
+ f'Score: {self.sample_score.model_dump_json(indent=2)}',
+ ]
+ return '\n'.join(output)
@@ -3,7 +3,7 @@ from random import Random
  from typing import Any, Dict, List, Optional, Sequence, Union, overload

  from evalscope.api.dataset import Sample
- from evalscope.api.messages import ChatMessage, ChatMessageUser, messages_pretty_str
+ from evalscope.api.messages import ChatMessage, ChatMessageUser, messages_pretty_str, messages_to_markdown
  from evalscope.api.model import ModelOutput


@@ -188,6 +188,17 @@ class TaskState:
  else:
  return messages_pretty_str(self._input)

+ @property
+ def input_markdown(self) -> str:
+ """Get the input text as markdown.
+
+ For multi-modal content, images will be represented in markdown format.
+ """
+ if isinstance(self._input, str):
+ return self._input
+ else:
+ return messages_to_markdown(self._input)
+
  @property
  def choices(self) -> Choices:
  """Choices for the sample, if applicable."""
@@ -6,6 +6,7 @@ from .chat_message import (
  ChatMessageUser,
  dict_to_chat_message,
  messages_pretty_str,
+ messages_to_markdown,
  )
  from .content import Content, ContentAudio, ContentData, ContentImage, ContentReasoning, ContentText, ContentVideo
  from .utils import parse_content_with_reasoning
@@ -3,7 +3,7 @@ from pydantic import BaseModel, Field, JsonValue, model_validator
  from typing import Any, Dict, List, Literal, Optional, Type, Union

  from evalscope.api.tool import ToolCall, ToolCallError
- from .content import Content, ContentReasoning, ContentText
+ from .content import Content, ContentImage, ContentReasoning, ContentText
  from .utils import parse_content_with_reasoning


@@ -184,7 +184,7 @@ def dict_to_chat_message(data: Dict[str, Any]) -> ChatMessage:


  def messages_pretty_str(messages: List[ChatMessage]) -> str:
- """Pretty print a list of chat messages."""
+ """Pretty print a list of chat messages. Without images or other multi-modal contents."""
  output = []
  for message in messages:
  role = message.role.capitalize()
@@ -196,3 +196,48 @@ def messages_pretty_str(messages: List[ChatMessage]) -> str:
  content += f'\nFunction: {message.function}'
  output.append(f'**{role}**: {content}')
  return '\n\n'.join(output)
+
+
+ def messages_to_markdown(messages: List[ChatMessage], max_length: Optional[int] = None) -> str:
+ """Convert a list of chat messages to markdown format.
+
+ Args:
+ messages (List[ChatMessage]): The list of chat messages to convert.
+ max_length (Optional[int]): If provided, truncates the base64 string of images to this length.
+ """
+ output = []
+ for message in messages:
+ role = message.role.capitalize()
+
+ # Start with role header
+ content_parts = [f'**{role}**: ']
+
+ # Handle content based on type
+ if isinstance(message.content, str):
+ content_parts.append(message.content)
+ else:
+ for content_item in message.content:
+ if isinstance(content_item, ContentText):
+ content_parts.append(content_item.text)
+ elif isinstance(content_item, ContentImage):
+ # Use markdown image syntax
+ image_base64 = content_item.image
+ if max_length and len(image_base64) > max_length:
+ image_base64 = image_base64[:max_length]
+ content_parts.append(f'![image]({image_base64})')
+ elif isinstance(content_item, ContentReasoning):
+ content_parts.append(f'**Reasoning:** {content_item.reasoning}')
+
+ # Add tool-specific information
+ if isinstance(message, ChatMessageTool):
+ if message.error:
+ content_parts.append(f'**Error:** {message.error.message}')
+ if message.function:
+ content_parts.append(f'**Function:** {message.function}')
+ elif isinstance(message, ChatMessageAssistant) and message.tool_calls:
+ for tool_call in message.tool_calls:
+ content_parts.append(f'**Tool Call:** {tool_call.function}')
+
+ output.append('\n'.join(content_parts))
+
+ return '\n\n'.join(output)
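A small usage sketch of the new helper follows; the constructor field names for the content types are inferred from the code above, and the base64 payload is a placeholder rather than real data.

```python
# Sketch: rendering a multimodal user message as markdown with a truncated image payload.
from evalscope.api.messages import ChatMessageUser, ContentImage, ContentText, messages_to_markdown

messages = [
    ChatMessageUser(content=[
        ContentText(text='Describe this picture.'),
        ContentImage(image='data:image/png;base64,iVBORw0KGgoAAAANS...'),  # placeholder payload
    ])
]

# max_length keeps the (potentially huge) base64 string short in logs and reports.
print(messages_to_markdown(messages, max_length=50))
```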
@@ -35,20 +35,28 @@ class Score(BaseModel):
  """Main score value."""
  if self.main_score_name and self.main_score_name in self.value:
  return self.value[self.main_score_name]
- return next(iter(self.value.values()), None)
+ elif self.value:
+ # If main_score_name is not set or not found, use the first value and update main_score_name
+ first_key = next(iter(self.value))
+ self.main_score_name = first_key
+ return self.value[first_key]
+ return None

  @main_value.setter
  def main_value(self, value: Union[int, float, bool]):
  """Set the main score value."""
  if self.main_score_name:
+ # If main_score_name is already set, use it
  self.value[self.main_score_name] = value
+ elif self.value:
+ # If no main_score_name but value dict exists, use the first key
+ first_key = next(iter(self.value))
+ self.main_score_name = first_key
+ self.value[first_key] = value
  else:
- # If no main score name is set, just update the first value
- if self.value:
- first_key = next(iter(self.value))
- self.value[first_key] = value
- else:
- self.value['default'] = value
+ # If neither main_score_name nor value dict exists, initialize both
+ self.main_score_name = 'default'
+ self.value[self.main_score_name] = value


  class SampleScore(BaseModel):
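The behavioral change here is that `main_value` now records which key it fell back to. A small sketch of that behavior, assuming only that `value` and `main_score_name` are optional constructor arguments of `Score`:

```python
# Sketch: main_score_name is now filled in when it was not provided explicitly.
from evalscope.api.metric import Score

score = Score(value={'bleu': 0.41, 'rouge_l': 0.38})
print(score.main_value)       # 0.41 (the first entry)
print(score.main_score_name)  # 'bleu', recorded by the getter as a side effect

empty = Score(value={})
empty.main_value = 1.0        # initializes both the name and the dict
print(empty.main_score_name)  # 'default'
print(empty.value)            # {'default': 1.0}
```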
@@ -0,0 +1 @@
+ from .llm_judge_mixin import LLMJudgeMixin
@@ -25,9 +25,7 @@ class ResponseSchema(BaseModel):

  class GenerateConfig(BaseModel):
  """Model generation options."""
-
- max_retries: Optional[int] = Field(default=None)
- """Maximum number of times to retry request (defaults to unlimited)."""
+ model_config = {'extra': 'allow'}

  timeout: Optional[int] = Field(default=None)
  """Request timeout (in seconds)."""
@@ -318,7 +318,7 @@ def get_model_with_task_config(task_config: 'TaskConfig') -> Model:

  @thread_safe
  def get_model(
- model: str,
+ model: Union[str, Model, ModelAPI],
  eval_type: str,
  base_url: Optional[str] = None,
  api_key: Optional[str] = None,
@@ -346,6 +346,9 @@
  if isinstance(model, Model):
  return model

+ if isinstance(model, ModelAPI):
+ return Model(model, config, model_args)
+
  # see if we can return a memoized model instance
  # (exclude mockllm since custom_outputs is an infinite generator)
  model_cache_key: str = ''