evalscope 1.1.0__tar.gz → 1.1.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of evalscope might be problematic. Click here for more details.

Files changed (599) hide show
  1. {evalscope-1.1.0/evalscope.egg-info → evalscope-1.1.1}/PKG-INFO +288 -323
  2. {evalscope-1.1.0 → evalscope-1.1.1}/README.md +187 -318
  3. evalscope-1.1.1/evalscope/api/benchmark/__init__.py +10 -0
  4. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/benchmark/adapters/__init__.py +1 -0
  5. evalscope-1.1.1/evalscope/api/benchmark/adapters/ner_adapter.py +212 -0
  6. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/benchmark/benchmark.py +14 -0
  7. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/dataset/dataset.py +21 -0
  8. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/dataset/loader.py +6 -2
  9. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/mixin/sandbox_mixin.py +32 -54
  10. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/model/generate_config.py +6 -0
  11. evalscope-1.1.1/evalscope/benchmarks/aa_lcr/aa_lcr_adapter.py +205 -0
  12. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bfcl/bfcl_adapter.py +1 -1
  13. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/data_collection/data_collection_adapter.py +2 -1
  14. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/general_arena/general_arena_adapter.py +1 -1
  15. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/general_mcq/general_mcq_adapter.py +1 -1
  16. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/general_qa/general_qa_adapter.py +1 -1
  17. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/gsm8k/gsm8k_adapter.py +23 -4
  18. evalscope-1.1.1/evalscope/benchmarks/hallusion_bench/hallusion_bench_adapter.py +158 -0
  19. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/humaneval/humaneval_adapter.py +2 -1
  20. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +3 -1
  21. evalscope-1.1.1/evalscope/benchmarks/math_verse/math_verse_adapter.py +100 -0
  22. evalscope-1.1.1/evalscope/benchmarks/math_vision/math_vision_adapter.py +111 -0
  23. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/math_vista/math_vista_adapter.py +6 -26
  24. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +1 -1
  25. evalscope-1.1.1/evalscope/benchmarks/ner/broad_twitter_corpus_adapter.py +52 -0
  26. evalscope-1.1.1/evalscope/benchmarks/ner/conll2003_adapter.py +48 -0
  27. evalscope-1.1.1/evalscope/benchmarks/ner/copious_adapter.py +85 -0
  28. evalscope-1.1.1/evalscope/benchmarks/ner/cross_ner_adapter.py +120 -0
  29. evalscope-1.1.1/evalscope/benchmarks/ner/cross_ner_entities/ai.py +54 -0
  30. evalscope-1.1.1/evalscope/benchmarks/ner/cross_ner_entities/literature.py +36 -0
  31. evalscope-1.1.1/evalscope/benchmarks/ner/cross_ner_entities/music.py +39 -0
  32. evalscope-1.1.1/evalscope/benchmarks/ner/cross_ner_entities/politics.py +37 -0
  33. evalscope-1.1.1/evalscope/benchmarks/ner/cross_ner_entities/science.py +58 -0
  34. evalscope-1.1.1/evalscope/benchmarks/ner/genia_ner_adapter.py +66 -0
  35. evalscope-1.1.1/evalscope/benchmarks/ner/harvey_ner_adapter.py +58 -0
  36. evalscope-1.1.1/evalscope/benchmarks/ner/mit_movie_trivia_adapter.py +74 -0
  37. evalscope-1.1.1/evalscope/benchmarks/ner/mit_restaurant_adapter.py +66 -0
  38. evalscope-1.1.1/evalscope/benchmarks/ner/ontonotes5_adapter.py +87 -0
  39. evalscope-1.1.1/evalscope/benchmarks/ner/wnut2017_adapter.py +61 -0
  40. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/ocr_bench_v2/utils.py +1 -0
  41. evalscope-1.1.1/evalscope/benchmarks/omnidoc_bench/end2end_eval.py +349 -0
  42. evalscope-1.1.1/evalscope/benchmarks/omnidoc_bench/metrics.py +547 -0
  43. evalscope-1.1.1/evalscope/benchmarks/omnidoc_bench/omnidoc_bench_adapter.py +135 -0
  44. evalscope-1.1.1/evalscope/benchmarks/omnidoc_bench/utils.py +1937 -0
  45. evalscope-1.1.1/evalscope/benchmarks/poly_math/poly_math_adapter.py +127 -0
  46. evalscope-1.1.1/evalscope/benchmarks/poly_math/utils/instruction.py +105 -0
  47. evalscope-1.1.1/evalscope/benchmarks/pope/pope_adapter.py +111 -0
  48. evalscope-1.1.1/evalscope/benchmarks/seed_bench_2_plus/seed_bench_2_plus_adapter.py +72 -0
  49. {evalscope-1.1.0/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models → evalscope-1.1.1/evalscope/benchmarks/simple_vqa}/__init__.py +0 -0
  50. evalscope-1.1.1/evalscope/benchmarks/simple_vqa/simple_vqa_adapter.py +169 -0
  51. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/tau_bench/tau_bench_adapter.py +1 -1
  52. evalscope-1.1.1/evalscope/benchmarks/tool_bench/__init__.py +0 -0
  53. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/tool_bench/tool_bench_adapter.py +1 -1
  54. evalscope-1.1.1/evalscope/benchmarks/visu_logic/__init__.py +0 -0
  55. evalscope-1.1.1/evalscope/benchmarks/visu_logic/visu_logic_adapter.py +75 -0
  56. evalscope-1.1.1/evalscope/benchmarks/winogrande/__init__.py +0 -0
  57. evalscope-1.1.1/evalscope/benchmarks/zerobench/__init__.py +0 -0
  58. evalscope-1.1.1/evalscope/benchmarks/zerobench/zerobench_adapter.py +64 -0
  59. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/constants.py +4 -0
  60. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/evaluator/evaluator.py +72 -79
  61. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/math_parser.py +14 -0
  62. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/metric.py +1 -1
  63. evalscope-1.1.1/evalscope/metrics/t2v_metrics/__init__.py +0 -0
  64. evalscope-1.1.1/evalscope/metrics/t2v_metrics/models/__init__.py +0 -0
  65. evalscope-1.1.1/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/__init__.py +0 -0
  66. evalscope-1.1.1/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/__init__.py +0 -0
  67. evalscope-1.1.1/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/__init__.py +0 -0
  68. evalscope-1.1.1/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/__init__.py +0 -0
  69. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/models/utils/openai.py +4 -0
  70. evalscope-1.1.1/evalscope/perf/__init__.py +0 -0
  71. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/arguments.py +24 -4
  72. evalscope-1.1.1/evalscope/perf/benchmark.py +194 -0
  73. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/http_client.py +31 -16
  74. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/main.py +15 -2
  75. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/plugin/api/base.py +9 -7
  76. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/plugin/api/custom_api.py +13 -58
  77. evalscope-1.1.1/evalscope/perf/plugin/api/default_api.py +205 -0
  78. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/plugin/api/openai_api.py +4 -3
  79. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/plugin/datasets/base.py +21 -0
  80. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/plugin/datasets/custom.py +2 -3
  81. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/plugin/datasets/line_by_line.py +2 -3
  82. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/plugin/datasets/longalpaca.py +2 -3
  83. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/plugin/datasets/openqa.py +2 -4
  84. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/plugin/datasets/random_dataset.py +1 -3
  85. evalscope-1.1.1/evalscope/perf/utils/__init__.py +0 -0
  86. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/utils/benchmark_util.py +36 -22
  87. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/utils/db_util.py +14 -19
  88. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/utils/local_server.py +0 -44
  89. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/utils/log_utils.py +21 -6
  90. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/report/__init__.py +2 -1
  91. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/run.py +4 -0
  92. evalscope-1.1.1/evalscope/third_party/thinkbench/tools/__init__.py +0 -0
  93. evalscope-1.1.1/evalscope/utils/function_utils.py +253 -0
  94. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/utils/io_utils.py +74 -0
  95. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/utils/logger.py +49 -17
  96. evalscope-1.1.1/evalscope/utils/ner.py +377 -0
  97. evalscope-1.1.1/evalscope/version.py +4 -0
  98. {evalscope-1.1.0 → evalscope-1.1.1/evalscope.egg-info}/PKG-INFO +288 -323
  99. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope.egg-info/SOURCES.txt +57 -1
  100. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope.egg-info/requires.txt +3 -0
  101. {evalscope-1.1.0 → evalscope-1.1.1}/pyproject.toml +2 -2
  102. evalscope-1.1.1/requirements/aigc.txt +8 -0
  103. evalscope-1.1.1/requirements/app.txt +2 -0
  104. evalscope-1.1.1/requirements/dev.txt +3 -0
  105. evalscope-1.1.1/requirements/docs.txt +6 -0
  106. evalscope-1.1.1/requirements/framework.txt +29 -0
  107. evalscope-1.1.1/requirements/opencompass.txt +1 -0
  108. evalscope-1.1.1/requirements/perf.txt +11 -0
  109. evalscope-1.1.1/requirements/rag.txt +8 -0
  110. evalscope-1.1.1/requirements/sandbox.txt +1 -0
  111. evalscope-1.1.1/requirements/vlmeval.txt +1 -0
  112. evalscope-1.1.1/tests/test_run_all.py +12 -0
  113. evalscope-1.1.0/evalscope/api/benchmark/__init__.py +0 -3
  114. evalscope-1.1.0/evalscope/perf/benchmark.py +0 -209
  115. evalscope-1.1.0/evalscope/perf/plugin/api/default_api.py +0 -105
  116. evalscope-1.1.0/evalscope/utils/function_utils.py +0 -70
  117. evalscope-1.1.0/evalscope/version.py +0 -4
  118. {evalscope-1.1.0 → evalscope-1.1.1}/LICENSE +0 -0
  119. {evalscope-1.1.0 → evalscope-1.1.1}/MANIFEST.in +0 -0
  120. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/__init__.py +0 -0
  121. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/__init__.py +0 -0
  122. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/benchmark/adapters/default_data_adapter.py +0 -0
  123. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/benchmark/adapters/image_edit_adapter.py +0 -0
  124. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/benchmark/adapters/multi_choice_adapter.py +0 -0
  125. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/benchmark/adapters/text2image_adapter.py +0 -0
  126. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/benchmark/adapters/vision_language_adapter.py +0 -0
  127. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/benchmark/meta.py +0 -0
  128. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/dataset/__init__.py +0 -0
  129. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/dataset/utils.py +0 -0
  130. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/evaluator/__init__.py +0 -0
  131. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/evaluator/cache.py +0 -0
  132. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/evaluator/evaluator.py +0 -0
  133. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/evaluator/state.py +0 -0
  134. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/filter/__init__.py +0 -0
  135. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/filter/filter.py +0 -0
  136. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/messages/__init__.py +0 -0
  137. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/messages/chat_message.py +0 -0
  138. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/messages/content.py +0 -0
  139. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/messages/utils.py +0 -0
  140. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/metric/__init__.py +0 -0
  141. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/metric/metric.py +0 -0
  142. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/metric/scorer.py +0 -0
  143. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/mixin/__init__.py +0 -0
  144. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/mixin/llm_judge_mixin.py +0 -0
  145. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/model/__init__.py +0 -0
  146. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/model/model.py +0 -0
  147. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/model/model_output.py +0 -0
  148. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/registry.py +0 -0
  149. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/tool/__init__.py +0 -0
  150. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/tool/tool_call.py +0 -0
  151. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/tool/tool_info.py +0 -0
  152. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/api/tool/utils.py +0 -0
  153. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/app/__init__.py +0 -0
  154. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/app/app.py +0 -0
  155. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/app/arguments.py +0 -0
  156. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/app/constants.py +0 -0
  157. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/app/ui/__init__.py +0 -0
  158. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/app/ui/app_ui.py +0 -0
  159. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/app/ui/multi_model.py +0 -0
  160. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/app/ui/sidebar.py +0 -0
  161. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/app/ui/single_model.py +0 -0
  162. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/app/ui/visualization.py +0 -0
  163. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/app/utils/data_utils.py +0 -0
  164. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/app/utils/env_utils.py +0 -0
  165. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/app/utils/localization.py +0 -0
  166. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/app/utils/text_utils.py +0 -0
  167. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/app/utils/visualization.py +0 -0
  168. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/arguments.py +0 -0
  169. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/__init__.py +0 -0
  170. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/base.py +0 -0
  171. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/opencompass/__init__.py +0 -0
  172. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/opencompass/api_meta_template.py +0 -0
  173. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/opencompass/backend_manager.py +0 -0
  174. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/opencompass/tasks/__init__.py +0 -0
  175. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/opencompass/tasks/eval_api.py +0 -0
  176. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/opencompass/tasks/eval_datasets.py +0 -0
  177. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/__init__.py +0 -0
  178. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/backend_manager.py +0 -0
  179. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/clip_benchmark/__init__.py +0 -0
  180. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/clip_benchmark/arguments.py +0 -0
  181. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +0 -0
  182. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/clip_benchmark/task_template.py +0 -0
  183. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/clip_benchmark/tasks/__init__.py +0 -0
  184. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/clip_benchmark/tasks/image_caption.py +0 -0
  185. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/clip_benchmark/tasks/zeroshot_classification.py +0 -0
  186. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/clip_benchmark/tasks/zeroshot_retrieval.py +0 -0
  187. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/clip_benchmark/utils/webdataset_convert.py +0 -0
  188. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/clip_benchmark/utils/webdatasets.txt +0 -0
  189. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/cmteb/__init__.py +0 -0
  190. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/cmteb/arguments.py +0 -0
  191. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/cmteb/base.py +0 -0
  192. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/cmteb/task_template.py +0 -0
  193. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/cmteb/tasks/Classification.py +0 -0
  194. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/cmteb/tasks/Clustering.py +0 -0
  195. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/cmteb/tasks/CustomTask.py +0 -0
  196. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/cmteb/tasks/PairClassification.py +0 -0
  197. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/cmteb/tasks/Reranking.py +0 -0
  198. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/cmteb/tasks/Retrieval.py +0 -0
  199. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/cmteb/tasks/STS.py +0 -0
  200. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/cmteb/tasks/__init__.py +0 -0
  201. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/ragas/__init__.py +0 -0
  202. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/ragas/arguments.py +0 -0
  203. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/ragas/prompts/persona_prompt.py +0 -0
  204. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/ragas/task_template.py +0 -0
  205. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/ragas/tasks/__init__.py +0 -0
  206. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/ragas/tasks/build_distribution.py +0 -0
  207. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/ragas/tasks/build_transform.py +0 -0
  208. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +0 -0
  209. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +0 -0
  210. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/utils/__init__.py +0 -0
  211. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/utils/clip.py +0 -0
  212. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/utils/embedding.py +0 -0
  213. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/utils/llm.py +0 -0
  214. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/rag_eval/utils/tools.py +0 -0
  215. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/vlm_eval_kit/__init__.py +0 -0
  216. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/backend/vlm_eval_kit/backend_manager.py +0 -0
  217. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/__init__.py +0 -0
  218. {evalscope-1.1.0/evalscope/benchmarks/ai2d → evalscope-1.1.1/evalscope/benchmarks/aa_lcr}/__init__.py +0 -0
  219. {evalscope-1.1.0/evalscope/benchmarks/aime → evalscope-1.1.1/evalscope/benchmarks/ai2d}/__init__.py +0 -0
  220. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/ai2d/ai2d_adapter.py +0 -0
  221. {evalscope-1.1.0/evalscope/benchmarks/alpaca_eval → evalscope-1.1.1/evalscope/benchmarks/aime}/__init__.py +0 -0
  222. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/aime/aime24_adapter.py +0 -0
  223. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/aime/aime25_adapter.py +0 -0
  224. {evalscope-1.1.0/evalscope/benchmarks/amc → evalscope-1.1.1/evalscope/benchmarks/alpaca_eval}/__init__.py +0 -0
  225. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +0 -0
  226. {evalscope-1.1.0/evalscope/benchmarks/arena_hard → evalscope-1.1.1/evalscope/benchmarks/amc}/__init__.py +0 -0
  227. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/amc/amc_adapter.py +0 -0
  228. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/arc/__init__.py +0 -0
  229. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/arc/arc_adapter.py +0 -0
  230. {evalscope-1.1.0/evalscope/benchmarks/bfcl → evalscope-1.1.1/evalscope/benchmarks/arena_hard}/__init__.py +0 -0
  231. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/arena_hard/arena_hard_adapter.py +0 -0
  232. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/arena_hard/utils.py +0 -0
  233. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/__init__.py +0 -0
  234. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/bbh_adapter.py +0 -0
  235. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/boolean_expressions.txt +0 -0
  236. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/causal_judgement.txt +0 -0
  237. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/date_understanding.txt +0 -0
  238. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/disambiguation_qa.txt +0 -0
  239. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/dyck_languages.txt +0 -0
  240. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/formal_fallacies.txt +0 -0
  241. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/geometric_shapes.txt +0 -0
  242. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/hyperbaton.txt +0 -0
  243. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_five_objects.txt +0 -0
  244. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_seven_objects.txt +0 -0
  245. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_three_objects.txt +0 -0
  246. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/movie_recommendation.txt +0 -0
  247. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/multistep_arithmetic_two.txt +0 -0
  248. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/navigate.txt +0 -0
  249. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/object_counting.txt +0 -0
  250. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/penguins_in_a_table.txt +0 -0
  251. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/reasoning_about_colored_objects.txt +0 -0
  252. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/ruin_names.txt +0 -0
  253. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/salient_translation_error_detection.txt +0 -0
  254. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/snarks.txt +0 -0
  255. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/sports_understanding.txt +0 -0
  256. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/temporal_sequences.txt +0 -0
  257. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_five_objects.txt +0 -0
  258. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_seven_objects.txt +0 -0
  259. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_three_objects.txt +0 -0
  260. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/web_of_lies.txt +0 -0
  261. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bbh/cot_prompts/word_sorting.txt +0 -0
  262. {evalscope-1.1.0/evalscope/benchmarks/blink → evalscope-1.1.1/evalscope/benchmarks/bfcl}/__init__.py +0 -0
  263. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/bfcl/generation.py +0 -0
  264. {evalscope-1.1.0/evalscope/benchmarks/chartqa → evalscope-1.1.1/evalscope/benchmarks/blink}/__init__.py +0 -0
  265. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/blink/blink_adapter.py +0 -0
  266. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/ceval/__init__.py +0 -0
  267. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/ceval/ceval_adapter.py +0 -0
  268. {evalscope-1.1.0/evalscope/benchmarks/chinese_simple_qa → evalscope-1.1.1/evalscope/benchmarks/chartqa}/__init__.py +0 -0
  269. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/chartqa/chartqa_adapter.py +0 -0
  270. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/chartqa/utils.py +0 -0
  271. {evalscope-1.1.0/evalscope/benchmarks/data_collection → evalscope-1.1.1/evalscope/benchmarks/chinese_simple_qa}/__init__.py +0 -0
  272. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +0 -0
  273. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/cmmlu/__init__.py +0 -0
  274. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/cmmlu/cmmlu_adapter.py +0 -0
  275. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/competition_math/__init__.py +0 -0
  276. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/competition_math/competition_math_adapter.py +0 -0
  277. {evalscope-1.1.0/evalscope/benchmarks/docmath → evalscope-1.1.1/evalscope/benchmarks/data_collection}/__init__.py +0 -0
  278. {evalscope-1.1.0/evalscope/benchmarks/docvqa → evalscope-1.1.1/evalscope/benchmarks/docmath}/__init__.py +0 -0
  279. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/docmath/docmath_adapter.py +0 -0
  280. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/docmath/utils.py +0 -0
  281. {evalscope-1.1.0/evalscope/benchmarks/drop → evalscope-1.1.1/evalscope/benchmarks/docvqa}/__init__.py +0 -0
  282. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/docvqa/docvqa_adapter.py +0 -0
  283. {evalscope-1.1.0/evalscope/benchmarks/frames → evalscope-1.1.1/evalscope/benchmarks/drop}/__init__.py +0 -0
  284. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/drop/drop_adapter.py +0 -0
  285. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/drop/utils.py +0 -0
  286. {evalscope-1.1.0/evalscope/benchmarks/general_arena → evalscope-1.1.1/evalscope/benchmarks/frames}/__init__.py +0 -0
  287. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/frames/frames_adapter.py +0 -0
  288. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/frames/utils.py +0 -0
  289. {evalscope-1.1.0/evalscope/benchmarks/general_mcq → evalscope-1.1.1/evalscope/benchmarks/general_arena}/__init__.py +0 -0
  290. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/general_arena/utils.py +0 -0
  291. {evalscope-1.1.0/evalscope/benchmarks/gpqa → evalscope-1.1.1/evalscope/benchmarks/general_mcq}/__init__.py +0 -0
  292. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/general_qa/__init__.py +0 -0
  293. {evalscope-1.1.0/evalscope/benchmarks/healthbench → evalscope-1.1.1/evalscope/benchmarks/gpqa}/__init__.py +0 -0
  294. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/gpqa/gpqa_adapter.py +0 -0
  295. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/gpqa/prompt.py +0 -0
  296. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/gsm8k/__init__.py +0 -0
  297. {evalscope-1.1.0/evalscope/benchmarks/hle → evalscope-1.1.1/evalscope/benchmarks/hallusion_bench}/__init__.py +0 -0
  298. {evalscope-1.1.0/evalscope/benchmarks/ifeval → evalscope-1.1.1/evalscope/benchmarks/healthbench}/__init__.py +0 -0
  299. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/healthbench/healthbench_adapter.py +0 -0
  300. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/healthbench/utils.py +0 -0
  301. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/hellaswag/__init__.py +0 -0
  302. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/hellaswag/hellaswag_adapter.py +0 -0
  303. {evalscope-1.1.0/evalscope/benchmarks/image_edit → evalscope-1.1.1/evalscope/benchmarks/hle}/__init__.py +0 -0
  304. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/hle/hle_adapter.py +0 -0
  305. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/humaneval/__init__.py +0 -0
  306. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/humaneval/utils.py +0 -0
  307. {evalscope-1.1.0/evalscope/benchmarks/image_edit/gedit → evalscope-1.1.1/evalscope/benchmarks/ifeval}/__init__.py +0 -0
  308. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/ifeval/ifeval_adapter.py +0 -0
  309. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/ifeval/instructions.py +0 -0
  310. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/ifeval/instructions_registry.py +0 -0
  311. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/ifeval/instructions_util.py +0 -0
  312. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/ifeval/utils.py +0 -0
  313. {evalscope-1.1.0/evalscope/benchmarks/infovqa → evalscope-1.1.1/evalscope/benchmarks/image_edit}/__init__.py +0 -0
  314. {evalscope-1.1.0/evalscope/benchmarks/iquiz → evalscope-1.1.1/evalscope/benchmarks/image_edit/gedit}/__init__.py +0 -0
  315. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +0 -0
  316. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/image_edit/gedit/utils.py +0 -0
  317. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/image_edit/gedit/vie_prompts.py +0 -0
  318. {evalscope-1.1.0/evalscope/benchmarks/live_code_bench → evalscope-1.1.1/evalscope/benchmarks/infovqa}/__init__.py +0 -0
  319. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/infovqa/infovqa_adapter.py +0 -0
  320. {evalscope-1.1.0/evalscope/benchmarks/maritime_bench → evalscope-1.1.1/evalscope/benchmarks/iquiz}/__init__.py +0 -0
  321. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/iquiz/iquiz_adapter.py +0 -0
  322. {evalscope-1.1.0/evalscope/benchmarks/math_500 → evalscope-1.1.1/evalscope/benchmarks/live_code_bench}/__init__.py +0 -0
  323. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/live_code_bench/evaluate_utils.py +0 -0
  324. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/live_code_bench/extract_utils.py +0 -0
  325. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/live_code_bench/load_utils.py +0 -0
  326. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/live_code_bench/pass_k_utils.py +0 -0
  327. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/live_code_bench/prompts.py +0 -0
  328. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +0 -0
  329. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/live_code_bench/testing_util.py +0 -0
  330. {evalscope-1.1.0/evalscope/benchmarks/math_vista → evalscope-1.1.1/evalscope/benchmarks/maritime_bench}/__init__.py +0 -0
  331. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py +0 -0
  332. {evalscope-1.1.0/evalscope/benchmarks/minerva_math → evalscope-1.1.1/evalscope/benchmarks/math_500}/__init__.py +0 -0
  333. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/math_500/math_500_adapter.py +0 -0
  334. {evalscope-1.1.0/evalscope/benchmarks/mm_bench → evalscope-1.1.1/evalscope/benchmarks/math_verse}/__init__.py +0 -0
  335. {evalscope-1.1.0/evalscope/benchmarks/mm_star → evalscope-1.1.1/evalscope/benchmarks/math_vision}/__init__.py +0 -0
  336. {evalscope-1.1.0/evalscope/benchmarks/mmlu_pro → evalscope-1.1.1/evalscope/benchmarks/math_vista}/__init__.py +0 -0
  337. {evalscope-1.1.0/evalscope/benchmarks/mmlu_redux → evalscope-1.1.1/evalscope/benchmarks/minerva_math}/__init__.py +0 -0
  338. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/minerva_math/minerva_math_adapter.py +0 -0
  339. {evalscope-1.1.0/evalscope/benchmarks/mmmu → evalscope-1.1.1/evalscope/benchmarks/mm_bench}/__init__.py +0 -0
  340. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/mm_bench/mm_bench_adapter.py +0 -0
  341. {evalscope-1.1.0/evalscope/benchmarks/mmmu_pro → evalscope-1.1.1/evalscope/benchmarks/mm_star}/__init__.py +0 -0
  342. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/mm_star/mm_star_adapter.py +0 -0
  343. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/mmlu/__init__.py +0 -0
  344. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/mmlu/mmlu_adapter.py +0 -0
  345. {evalscope-1.1.0/evalscope/benchmarks/multi_if → evalscope-1.1.1/evalscope/benchmarks/mmlu_pro}/__init__.py +0 -0
  346. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +0 -0
  347. {evalscope-1.1.0/evalscope/benchmarks/musr → evalscope-1.1.1/evalscope/benchmarks/mmlu_redux}/__init__.py +0 -0
  348. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/mmlu_redux/mmlu_redux_adapter.py +0 -0
  349. {evalscope-1.1.0/evalscope/benchmarks/needle_haystack → evalscope-1.1.1/evalscope/benchmarks/mmmu}/__init__.py +0 -0
  350. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/mmmu/mmmu_adapter.py +0 -0
  351. {evalscope-1.1.0/evalscope/benchmarks/ocr_bench → evalscope-1.1.1/evalscope/benchmarks/mmmu_pro}/__init__.py +0 -0
  352. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +0 -0
  353. {evalscope-1.1.0/evalscope/benchmarks/ocr_bench_v2 → evalscope-1.1.1/evalscope/benchmarks/multi_if}/__init__.py +0 -0
  354. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/multi_if/ifeval.py +0 -0
  355. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/multi_if/metrics.py +0 -0
  356. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/multi_if/multi_if_adapter.py +0 -0
  357. {evalscope-1.1.0/evalscope/benchmarks/ocr_bench_v2/spotting_eval → evalscope-1.1.1/evalscope/benchmarks/musr}/__init__.py +0 -0
  358. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/musr/musr_adapter.py +0 -0
  359. {evalscope-1.1.0/evalscope/benchmarks/olympiad_bench → evalscope-1.1.1/evalscope/benchmarks/needle_haystack}/__init__.py +0 -0
  360. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/needle_haystack/utils.py +0 -0
  361. {evalscope-1.1.0/evalscope/benchmarks/omni_bench → evalscope-1.1.1/evalscope/benchmarks/ner}/__init__.py +0 -0
  362. {evalscope-1.1.0/evalscope/benchmarks/process_bench → evalscope-1.1.1/evalscope/benchmarks/ner/cross_ner_entities}/__init__.py +0 -0
  363. {evalscope-1.1.0/evalscope/benchmarks/real_world_qa → evalscope-1.1.1/evalscope/benchmarks/ocr_bench}/__init__.py +0 -0
  364. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/ocr_bench/ocr_bench_adapter.py +0 -0
  365. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/ocr_bench_v2/IoUscore_metric.py +0 -0
  366. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/ocr_bench_v2/TEDS_metric.py +0 -0
  367. {evalscope-1.1.0/evalscope/benchmarks/simple_qa → evalscope-1.1.1/evalscope/benchmarks/ocr_bench_v2}/__init__.py +0 -0
  368. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/ocr_bench_v2/ocr_bench_v2_adapter.py +0 -0
  369. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/ocr_bench_v2/page_ocr_metric.py +0 -0
  370. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/ocr_bench_v2/parallel.py +0 -0
  371. {evalscope-1.1.0/evalscope/benchmarks/super_gpqa → evalscope-1.1.1/evalscope/benchmarks/ocr_bench_v2/spotting_eval}/__init__.py +0 -0
  372. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/ocr_bench_v2/spotting_eval/readme.txt +0 -0
  373. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/ocr_bench_v2/spotting_eval/rrc_evaluation_funcs_1_1.py +0 -0
  374. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/ocr_bench_v2/spotting_eval/script.py +0 -0
  375. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/ocr_bench_v2/spotting_metric.py +0 -0
  376. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/ocr_bench_v2/vqa_metric.py +0 -0
  377. {evalscope-1.1.0/evalscope/benchmarks/tau_bench → evalscope-1.1.1/evalscope/benchmarks/olympiad_bench}/__init__.py +0 -0
  378. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +0 -0
  379. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/olympiad_bench/utils.py +0 -0
  380. {evalscope-1.1.0/evalscope/benchmarks/text2image → evalscope-1.1.1/evalscope/benchmarks/omni_bench}/__init__.py +0 -0
  381. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/omni_bench/omni_bench_adapter.py +0 -0
  382. {evalscope-1.1.0/evalscope/benchmarks/tool_bench → evalscope-1.1.1/evalscope/benchmarks/omnidoc_bench}/__init__.py +0 -0
  383. {evalscope-1.1.0/evalscope/benchmarks/winogrande → evalscope-1.1.1/evalscope/benchmarks/poly_math}/__init__.py +0 -0
  384. {evalscope-1.1.0/evalscope/metrics/t2v_metrics → evalscope-1.1.1/evalscope/benchmarks/pope}/__init__.py +0 -0
  385. {evalscope-1.1.0/evalscope/metrics/t2v_metrics/models → evalscope-1.1.1/evalscope/benchmarks/process_bench}/__init__.py +0 -0
  386. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/process_bench/process_bench_adapter.py +0 -0
  387. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/race/__init__.py +0 -0
  388. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/race/race_adapter.py +0 -0
  389. {evalscope-1.1.0/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model → evalscope-1.1.1/evalscope/benchmarks/real_world_qa}/__init__.py +0 -0
  390. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +0 -0
  391. {evalscope-1.1.0/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward → evalscope-1.1.1/evalscope/benchmarks/seed_bench_2_plus}/__init__.py +0 -0
  392. {evalscope-1.1.0/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5 → evalscope-1.1.1/evalscope/benchmarks/simple_qa}/__init__.py +0 -0
  393. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/simple_qa/simple_qa_adapter.py +0 -0
  394. {evalscope-1.1.0/evalscope/perf → evalscope-1.1.1/evalscope/benchmarks/super_gpqa}/__init__.py +0 -0
  395. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/super_gpqa/prompt.py +0 -0
  396. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +0 -0
  397. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/super_gpqa/utils.py +0 -0
  398. {evalscope-1.1.0/evalscope/perf/utils → evalscope-1.1.1/evalscope/benchmarks/tau_bench}/__init__.py +0 -0
  399. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/tau_bench/generation.py +0 -0
  400. {evalscope-1.1.0/evalscope/third_party/thinkbench/tools → evalscope-1.1.1/evalscope/benchmarks/text2image}/__init__.py +0 -0
  401. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/text2image/evalmuse_adapter.py +0 -0
  402. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/text2image/genai_bench_adapter.py +0 -0
  403. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/text2image/general_t2i_adapter.py +0 -0
  404. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/text2image/hpdv2_adapter.py +0 -0
  405. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/text2image/tifa_adapter.py +0 -0
  406. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/tool_bench/utils.py +0 -0
  407. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/trivia_qa/__init__.py +0 -0
  408. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/trivia_qa/samples.jsonl +0 -0
  409. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +0 -0
  410. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/truthful_qa/__init__.py +0 -0
  411. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +0 -0
  412. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/benchmarks/winogrande/winogrande_adapter.py +0 -0
  413. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/cli/__init__.py +0 -0
  414. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/cli/base.py +0 -0
  415. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/cli/cli.py +0 -0
  416. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/cli/start_app.py +0 -0
  417. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/cli/start_eval.py +0 -0
  418. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/cli/start_perf.py +0 -0
  419. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/cli/start_server.py +0 -0
  420. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/collections/__init__.py +0 -0
  421. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/collections/sampler.py +0 -0
  422. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/collections/schema.py +0 -0
  423. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/config.py +0 -0
  424. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/evaluator/__init__.py +0 -0
  425. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/filters/__init__.py +0 -0
  426. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/filters/extraction.py +0 -0
  427. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/filters/selection.py +0 -0
  428. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/__init__.py +0 -0
  429. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/bundled_rouge_score/__init__.py +0 -0
  430. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/bundled_rouge_score/rouge_scorer.py +0 -0
  431. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/llm_judge.py +0 -0
  432. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/metrics.py +0 -0
  433. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/rouge_metric.py +0 -0
  434. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/clipscore.py +0 -0
  435. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/constants.py +0 -0
  436. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/itmscore.py +0 -0
  437. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/__init__.py +0 -0
  438. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/base_model.py +0 -0
  439. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/clip_model.py +0 -0
  440. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/cross_modeling.py +0 -0
  441. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/clip_model.py +0 -0
  442. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/hpsv2_model.py +0 -0
  443. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/mps_model.py +0 -0
  444. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py +0 -0
  445. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/itmscore_models/__init__.py +0 -0
  446. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/itmscore_models/blip2_itm_model.py +0 -0
  447. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py +0 -0
  448. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/ImageReward.py +0 -0
  449. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py +0 -0
  450. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward_model.py +0 -0
  451. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/model.py +0 -0
  452. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/utils.py +0 -0
  453. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/__init__.py +0 -0
  454. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/__init__.py +0 -0
  455. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/language_model/clip_t5.py +0 -0
  456. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/builder.py +0 -0
  457. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/clip_encoder.py +0 -0
  458. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_projector/builder.py +0 -0
  459. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5_model.py +0 -0
  460. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/gpt4v_model.py +0 -0
  461. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/__init__.py +0 -0
  462. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +0 -0
  463. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +0 -0
  464. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +0 -0
  465. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +0 -0
  466. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +0 -0
  467. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +0 -0
  468. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +0 -0
  469. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +0 -0
  470. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +0 -0
  471. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +0 -0
  472. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/default.yaml +0 -0
  473. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_flant5xl.yaml +0 -0
  474. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt2.7b.yaml +0 -0
  475. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt6.7b.yaml +0 -0
  476. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_coco.yaml +0 -0
  477. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xl.yaml +0 -0
  478. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xxl.yaml +0 -0
  479. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna13b.yaml +0 -0
  480. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml +0 -0
  481. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain.yaml +0 -0
  482. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl.yaml +0 -0
  483. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_no_prefix.yaml +0 -0
  484. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_prefix.yaml +0 -0
  485. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_vitL.yaml +0 -0
  486. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xxl.yaml +0 -0
  487. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt2.7b.yaml +0 -0
  488. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt6.7b.yaml +0 -0
  489. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_vitL.yaml +0 -0
  490. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna13b.yaml +0 -0
  491. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna7b.yaml +0 -0
  492. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config.json +0 -0
  493. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config_albef.json +0 -0
  494. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_large_config.json +0 -0
  495. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/__init__.py +0 -0
  496. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/base_model.py +0 -0
  497. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +0 -0
  498. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2.py +0 -0
  499. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_image_text_matching.py +0 -0
  500. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_qformer.py +0 -0
  501. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5.py +0 -0
  502. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5_instruct.py +0 -0
  503. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/fga_blip2.py +0 -0
  504. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_llama.py +0 -0
  505. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_t5.py +0 -0
  506. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/__init__.py +0 -0
  507. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py +0 -0
  508. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_caption.py +0 -0
  509. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_classification.py +0 -0
  510. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_feature_extractor.py +0 -0
  511. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_image_text_matching.py +0 -0
  512. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_nlvr.py +0 -0
  513. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_outputs.py +0 -0
  514. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_pretrain.py +0 -0
  515. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_vqa.py +0 -0
  516. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +0 -0
  517. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/clip_vit.py +0 -0
  518. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/eva_vit.py +0 -0
  519. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +0 -0
  520. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/vit.py +0 -0
  521. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/__init__.py +0 -0
  522. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/base_processor.py +0 -0
  523. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/blip_processors.py +0 -0
  524. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/randaugment.py +0 -0
  525. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py +0 -0
  526. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/models/vqascore_models/vqa_model.py +0 -0
  527. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/score.py +0 -0
  528. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/metrics/t2v_metrics/vqascore.py +0 -0
  529. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/models/__init__.py +0 -0
  530. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/models/image_edit_model.py +0 -0
  531. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/models/mockllm.py +0 -0
  532. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/models/model_apis.py +0 -0
  533. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/models/modelscope.py +0 -0
  534. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/models/openai_compatible.py +0 -0
  535. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/models/text2image_model.py +0 -0
  536. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/plugin/__init__.py +0 -0
  537. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/plugin/api/__init__.py +0 -0
  538. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/plugin/api/dashscope_api.py +0 -0
  539. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/plugin/datasets/__init__.py +0 -0
  540. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/plugin/datasets/flickr8k.py +0 -0
  541. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/plugin/datasets/kontext_bench.py +0 -0
  542. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/plugin/datasets/random_vl_dataset.py +0 -0
  543. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/plugin/datasets/speed_benchmark.py +0 -0
  544. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/plugin/registry.py +0 -0
  545. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/utils/analysis_result.py +0 -0
  546. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/utils/handler.py +0 -0
  547. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/perf/utils/rich_display.py +0 -0
  548. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/report/combinator.py +0 -0
  549. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/report/generator.py +0 -0
  550. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/report/report.py +0 -0
  551. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/summarizer.py +0 -0
  552. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/__init__.py +0 -0
  553. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/longbench_write/README.md +0 -0
  554. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/longbench_write/__init__.py +0 -0
  555. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/longbench_write/default_task.json +0 -0
  556. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/longbench_write/default_task.yaml +0 -0
  557. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/longbench_write/eval.py +0 -0
  558. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/longbench_write/infer.py +0 -0
  559. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/longbench_write/longbench_write.py +0 -0
  560. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/longbench_write/resources/__init__.py +0 -0
  561. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/longbench_write/resources/judge.txt +0 -0
  562. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/longbench_write/resources/longbench_write.jsonl +0 -0
  563. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/longbench_write/resources/longbench_write_en.jsonl +0 -0
  564. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/longbench_write/resources/longwrite_ruler.jsonl +0 -0
  565. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/longbench_write/tools/__init__.py +0 -0
  566. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/longbench_write/tools/data_etl.py +0 -0
  567. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/longbench_write/tools/openai_api.py +0 -0
  568. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/longbench_write/utils.py +0 -0
  569. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/thinkbench/__init__.py +0 -0
  570. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/thinkbench/eval.py +0 -0
  571. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/thinkbench/infer.py +0 -0
  572. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/thinkbench/resources/critique_template.txt +0 -0
  573. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/thinkbench/resources/reformat_template.txt +0 -0
  574. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/thinkbench/tools/llm.py +0 -0
  575. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/thinkbench/tools/utils.py +0 -0
  576. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/toolbench_static/README.md +0 -0
  577. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/toolbench_static/__init__.py +0 -0
  578. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/toolbench_static/config_default.json +0 -0
  579. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/toolbench_static/config_default.yaml +0 -0
  580. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/toolbench_static/eval.py +0 -0
  581. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/toolbench_static/infer.py +0 -0
  582. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/toolbench_static/llm/__init__.py +0 -0
  583. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -0
  584. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/toolbench_static/requirements.txt +0 -0
  585. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/third_party/toolbench_static/toolbench_static.py +0 -0
  586. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/utils/__init__.py +0 -0
  587. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/utils/argument_utils.py +0 -0
  588. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/utils/chat_service.py +0 -0
  589. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/utils/deprecation_utils.py +0 -0
  590. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/utils/import_utils.py +0 -0
  591. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/utils/json_schema.py +0 -0
  592. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/utils/model_utils.py +0 -0
  593. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/utils/multi_choices.py +0 -0
  594. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope/utils/url_utils.py +0 -0
  595. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope.egg-info/dependency_links.txt +0 -0
  596. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope.egg-info/entry_points.txt +0 -0
  597. {evalscope-1.1.0 → evalscope-1.1.1}/evalscope.egg-info/top_level.txt +0 -0
  598. {evalscope-1.1.0 → evalscope-1.1.1}/setup.cfg +0 -0
  599. {evalscope-1.1.0 → evalscope-1.1.1}/setup.py +0 -0
@@ -1,6 +1,6 @@
1
- Metadata-Version: 2.1
1
+ Metadata-Version: 2.4
2
2
  Name: evalscope
3
- Version: 1.1.0
3
+ Version: 1.1.1
4
4
  Summary: EvalScope: Lightweight LLMs Evaluation Framework
5
5
  Author: ModelScope team
6
6
  Author-email: contact@modelscope.cn
@@ -10,23 +10,119 @@ Keywords: python,llm,evaluation
10
10
  Classifier: Development Status :: 4 - Beta
11
11
  Classifier: Operating System :: OS Independent
12
12
  Classifier: Programming Language :: Python :: 3
13
- Classifier: Programming Language :: Python :: 3.9
14
13
  Classifier: Programming Language :: Python :: 3.10
15
14
  Classifier: Programming Language :: Python :: 3.11
16
15
  Classifier: Programming Language :: Python :: 3.12
17
16
  Classifier: License :: OSI Approved :: Apache Software License
18
- Requires-Python: >=3.9
17
+ Requires-Python: >=3.10
19
18
  Description-Content-Type: text/markdown
19
+ License-File: LICENSE
20
+ Requires-Dist: colorlog
21
+ Requires-Dist: datasets==3.6.0
22
+ Requires-Dist: docstring_parser
23
+ Requires-Dist: dotenv
24
+ Requires-Dist: jieba
25
+ Requires-Dist: jsonlines
26
+ Requires-Dist: langdetect
27
+ Requires-Dist: latex2sympy2_extended[antlr4_9_3]
28
+ Requires-Dist: matplotlib
29
+ Requires-Dist: modelscope[framework]>=1.27
30
+ Requires-Dist: nltk>=3.9
31
+ Requires-Dist: openai
32
+ Requires-Dist: overrides
33
+ Requires-Dist: pandas
34
+ Requires-Dist: pillow
35
+ Requires-Dist: pydantic
36
+ Requires-Dist: pyyaml>=5.1
37
+ Requires-Dist: requests
38
+ Requires-Dist: rich
39
+ Requires-Dist: rouge-chinese
40
+ Requires-Dist: rouge-score>=0.1.0
41
+ Requires-Dist: sacrebleu
42
+ Requires-Dist: scikit-learn
43
+ Requires-Dist: seaborn
44
+ Requires-Dist: sympy
45
+ Requires-Dist: tabulate
46
+ Requires-Dist: tqdm
47
+ Requires-Dist: transformers>=4.33
48
+ Requires-Dist: word2number
20
49
  Provides-Extra: opencompass
50
+ Requires-Dist: ms-opencompass>=0.1.6; extra == "opencompass"
21
51
  Provides-Extra: vlmeval
52
+ Requires-Dist: ms-vlmeval>=0.0.17; extra == "vlmeval"
22
53
  Provides-Extra: rag
54
+ Requires-Dist: langchain<0.4.0,>=0.3.0; extra == "rag"
55
+ Requires-Dist: langchain-community<0.4.0,>=0.3.0; extra == "rag"
56
+ Requires-Dist: langchain-core<0.4.0,>=0.3.0; extra == "rag"
57
+ Requires-Dist: langchain-openai<0.4.0,>=0.3.0; extra == "rag"
58
+ Requires-Dist: mteb==1.38.20; extra == "rag"
59
+ Requires-Dist: ragas==0.2.14; extra == "rag"
60
+ Requires-Dist: torch; extra == "rag"
61
+ Requires-Dist: webdataset>0.2.0; extra == "rag"
23
62
  Provides-Extra: perf
63
+ Requires-Dist: aiohttp; extra == "perf"
64
+ Requires-Dist: fastapi; extra == "perf"
65
+ Requires-Dist: jinja2; extra == "perf"
66
+ Requires-Dist: numpy; extra == "perf"
67
+ Requires-Dist: rich; extra == "perf"
68
+ Requires-Dist: sse_starlette; extra == "perf"
69
+ Requires-Dist: transformers; extra == "perf"
70
+ Requires-Dist: uvicorn; extra == "perf"
24
71
  Provides-Extra: app
72
+ Requires-Dist: gradio==5.4.0; extra == "app"
73
+ Requires-Dist: plotly<6.0.0,>=5.23.0; extra == "app"
25
74
  Provides-Extra: aigc
75
+ Requires-Dist: diffusers; extra == "aigc"
76
+ Requires-Dist: iopath; extra == "aigc"
77
+ Requires-Dist: omegaconf; extra == "aigc"
78
+ Requires-Dist: open_clip_torch; extra == "aigc"
79
+ Requires-Dist: opencv-python; extra == "aigc"
80
+ Requires-Dist: peft>=0.17; extra == "aigc"
81
+ Requires-Dist: torch; extra == "aigc"
82
+ Requires-Dist: torchvision; extra == "aigc"
83
+ Provides-Extra: sandbox
84
+ Requires-Dist: ms-enclave[docker]; extra == "sandbox"
26
85
  Provides-Extra: dev
86
+ Requires-Dist: pytest; extra == "dev"
87
+ Requires-Dist: pytest-cov; extra == "dev"
88
+ Requires-Dist: python-dotenv; extra == "dev"
27
89
  Provides-Extra: docs
90
+ Requires-Dist: docutils>=0.16.0; extra == "docs"
91
+ Requires-Dist: myst_parser; extra == "docs"
92
+ Requires-Dist: recommonmark; extra == "docs"
93
+ Requires-Dist: sphinx>=5.3.0; extra == "docs"
94
+ Requires-Dist: sphinx-design; extra == "docs"
95
+ Requires-Dist: sphinxawesome-theme; extra == "docs"
28
96
  Provides-Extra: all
29
- License-File: LICENSE
97
+ Requires-Dist: ms-opencompass>=0.1.6; extra == "all"
98
+ Requires-Dist: ms-vlmeval>=0.0.17; extra == "all"
99
+ Requires-Dist: langchain<0.4.0,>=0.3.0; extra == "all"
100
+ Requires-Dist: langchain-community<0.4.0,>=0.3.0; extra == "all"
101
+ Requires-Dist: langchain-core<0.4.0,>=0.3.0; extra == "all"
102
+ Requires-Dist: langchain-openai<0.4.0,>=0.3.0; extra == "all"
103
+ Requires-Dist: mteb==1.38.20; extra == "all"
104
+ Requires-Dist: ragas==0.2.14; extra == "all"
105
+ Requires-Dist: torch; extra == "all"
106
+ Requires-Dist: webdataset>0.2.0; extra == "all"
107
+ Requires-Dist: aiohttp; extra == "all"
108
+ Requires-Dist: fastapi; extra == "all"
109
+ Requires-Dist: jinja2; extra == "all"
110
+ Requires-Dist: numpy; extra == "all"
111
+ Requires-Dist: rich; extra == "all"
112
+ Requires-Dist: sse_starlette; extra == "all"
113
+ Requires-Dist: transformers; extra == "all"
114
+ Requires-Dist: uvicorn; extra == "all"
115
+ Requires-Dist: gradio==5.4.0; extra == "all"
116
+ Requires-Dist: plotly<6.0.0,>=5.23.0; extra == "all"
117
+ Requires-Dist: diffusers; extra == "all"
118
+ Requires-Dist: iopath; extra == "all"
119
+ Requires-Dist: omegaconf; extra == "all"
120
+ Requires-Dist: open_clip_torch; extra == "all"
121
+ Requires-Dist: opencv-python; extra == "all"
122
+ Requires-Dist: peft>=0.17; extra == "all"
123
+ Requires-Dist: torch; extra == "all"
124
+ Requires-Dist: torchvision; extra == "all"
125
+ Dynamic: license-file
30
126
 
31
127
  <p align="center">
32
128
  <br>
@@ -34,13 +130,12 @@ License-File: LICENSE
34
130
  <br>
35
131
  <p>
36
132
 
37
-
38
133
  <p align="center">
39
134
  <a href="README_zh.md">中文</a> &nbsp | &nbsp English &nbsp
40
135
  </p>
41
136
 
42
137
  <p align="center">
43
- <img src="https://img.shields.io/badge/python-%E2%89%A53.9-5be.svg">
138
+ <img src="https://img.shields.io/badge/python-%E2%89%A53.10-5be.svg">
44
139
  <a href="https://badge.fury.io/py/evalscope"><img src="https://badge.fury.io/py/evalscope.svg" alt="PyPI version" height="18"></a>
45
140
  <a href="https://pypi.org/project/evalscope"><img alt="PyPI - Downloads" src="https://static.pepy.tech/badge/evalscope"></a>
46
141
  <a href="https://github.com/modelscope/evalscope/pulls"><img src="https://img.shields.io/badge/PR-welcome-55EB99.svg"></a>
@@ -48,122 +143,77 @@ License-File: LICENSE
48
143
  <p>
49
144
 
50
145
  <p align="center">
51
- <a href="https://evalscope.readthedocs.io/zh-cn/latest/"> 📖 中文文档</a> &nbsp | &nbsp <a href="https://evalscope.readthedocs.io/en/latest/"> 📖 English Documents</a>
146
+ <a href="https://evalscope.readthedocs.io/zh-cn/latest/"> 📖 Chinese Documentation</a> &nbsp | &nbsp <a href="https://evalscope.readthedocs.io/en/latest/"> 📖 English Documentation</a>
52
147
  <p>
53
148
 
54
- > ⭐ If you like this project, please click the "Star" button at the top right to support us. Your support is our motivation to keep going!
55
-
56
- ## 📋 Contents
57
- - [📋 Contents](#-contents)
58
- - [📝 Introduction](#-introduction)
59
- - [☎ User Groups](#-user-groups)
60
- - [🎉 News](#-news)
61
- - [🛠️ Environment Setup](#️-environment-setup)
62
- - [Method 1. Install via pip](#method-1-install-via-pip)
63
- - [Method 2. Install from source](#method-2-install-from-source)
64
- - [🚀 Quick Start](#-quick-start)
65
- - [Method 1. Using Command Line](#method-1-using-command-line)
66
- - [Method 2. Using Python Code](#method-2-using-python-code)
67
- - [Basic Parameter](#basic-parameter)
68
- - [Output Results](#output-results)
69
- - [📈 Visualization of Evaluation Results](#-visualization-of-evaluation-results)
70
- - [🌐 Evaluation of Model API](#-evaluation-of-model-api)
71
- - [⚙️ Custom Parameter Evaluation](#️-custom-parameter-evaluation)
72
- - [Parameter Description](#parameter-description)
73
- - [🧪 Other Evaluation Backends](#-other-evaluation-backends)
74
- - [📈 Model Serving Performance Evaluation](#-model-serving-performance-evaluation)
75
- - [🖊️ Custom Dataset Evaluation](#️-custom-dataset-evaluation)
76
- - [⚔️ Arena Mode](#️-arena-mode)
77
- - [👷‍♂️ Contribution](#️-contribution)
78
- - [📚 Citation](#-citation)
79
- - [🔜 Roadmap](#-roadmap)
80
- - [⭐ Star History](#-star-history)
81
149
 
150
+ > ⭐ If you like this project, please click the "Star" button in the upper right corner to support us. Your support is our motivation to move forward!
82
151
 
83
152
  ## 📝 Introduction
84
153
 
85
- EvalScope is a comprehensive model evaluation and performance benchmarking framework meticulously crafted by the [ModelScope Community](https://modelscope.cn/), offering a one-stop solution for your model assessment needs. Regardless of the type of model you are developing, EvalScope is equipped to cater to your requirements:
154
+ EvalScope is a powerful and easily extensible model evaluation framework created by the [ModelScope Community](https://modelscope.cn/), aiming to provide a one-stop evaluation solution for large model developers.
86
155
 
87
- - 🧠 Large Language Models
88
- - 🎨 Multimodal Models
89
- - 🔍 Embedding Models
90
- - 🏆 Reranker Models
91
- - 🖼️ CLIP Models
92
- - 🎭 AIGC Models (Image-to-Text/Video)
93
- - ...and more!
156
+ Whether you want to evaluate the general capabilities of models, conduct multi-model performance comparisons, or need to stress test models, EvalScope can meet your needs.
94
157
 
95
- EvalScope is not merely an evaluation tool; it is a valuable ally in your model optimization journey:
158
+ ## Key Features
96
159
 
97
- - 🏅 Equipped with multiple industry-recognized benchmarks and evaluation metrics: MMLU, CMMLU, C-Eval, GSM8K, etc.
98
- - 📊 Model inference performance stress testing: Ensuring your model excels in real-world applications.
99
- - 🚀 Seamless integration with the [ms-swift](https://github.com/modelscope/ms-swift) training framework, enabling one-click evaluations and providing full-chain support from training to assessment for your model development.
160
+ - **📚 Comprehensive Evaluation Benchmarks**: Built-in multiple industry-recognized evaluation benchmarks including MMLU, C-Eval, GSM8K, and more.
161
+ - **🧩 Multi-modal and Multi-domain Support**: Supports evaluation of various model types including Large Language Models (LLM), Vision Language Models (VLM), Embedding, Reranker, AIGC, and more.
162
+ - **🚀 Multi-backend Integration**: Seamlessly integrates multiple evaluation backends including OpenCompass, VLMEvalKit, RAGEval to meet different evaluation needs.
163
+ - **⚡ Inference Performance Testing**: Provides powerful model service stress testing tools, supporting multiple performance metrics such as TTFT, TPOT.
164
+ - **📊 Interactive Reports**: Provides WebUI visualization interface, supporting multi-dimensional model comparison, report overview and detailed inspection.
165
+ - **⚔️ Arena Mode**: Supports multi-model battles (Pairwise Battle), intuitively ranking and evaluating models.
166
+ - **🔧 Highly Extensible**: Developers can easily add custom datasets, models and evaluation metrics.
100
167
 
101
- Below is the overall architecture diagram of EvalScope:
168
+ <details><summary>🏛️ Overall Architecture</summary>
102
169
 
103
170
  <p align="center">
104
- <img src="https://sail-moe.oss-cn-hangzhou.aliyuncs.com/yunlin/images/evalscope/doc/EvalScope%E6%9E%B6%E6%9E%84%E5%9B%BE.png" width="70%">
105
- <br>EvalScope Framework.
171
+ <img src="https://sail-moe.oss-cn-hangzhou.aliyuncs.com/yunlin/images/evalscope/doc/EvalScope%E6%9E%B6%E6%9E%84%E5%9B%BE.png" style="width: 70%;">
172
+ <br>EvalScope Overall Architecture.
106
173
  </p>
107
174
 
108
- <details><summary>Framework Description</summary>
109
-
110
- The architecture includes the following modules:
111
- 1. Input Layer
112
- - **Model Sources**: API models (OpenAI API), local models (ModelScope)
113
- - **Datasets**: Standard evaluation benchmarks (MMLU/GSM8k, etc.), custom data (MCQ/QA)
114
-
115
- 2. Core Functions
116
- - **Multi-backend Evaluation**
117
- - Native backends: Unified evaluation for LLM/VLM/Embedding/T2I models
118
- - Integrated frameworks: OpenCompass/MTEB/VLMEvalKit/RAGAS
119
-
120
- - **Performance Monitoring**
121
- - Model plugins: Supports various model service APIs
122
- - Data plugins: Supports multiple data formats
123
- - Metric tracking: TTFT/TPOP/Stability and other metrics
175
+ 1. **Input Layer**
176
+ - **Model Sources**: API models (OpenAI API), Local models (ModelScope)
177
+ - **Datasets**: Standard evaluation benchmarks (MMLU/GSM8k etc.), Custom data (MCQ/QA)
124
178
 
125
- - **Tool Extensions**
126
- - Integration: Tool-Bench/Needle-in-a-Haystack/BFCL-v3
179
+ 2. **Core Functions**
180
+ - **Multi-backend Evaluation**: Native backend, OpenCompass, MTEB, VLMEvalKit, RAGAS
181
+ - **Performance Monitoring**: Supports multiple model service APIs and data formats, tracking TTFT/TPOT and other metrics
182
+ - **Tool Extensions**: Integrates Tool-Bench, Needle-in-a-Haystack, etc.
127
183
 
128
- 3. Output Layer
129
- - **Structured Reports**: Supports JSON/Tables/Logs
130
- - **Visualization Platforms**: Supports Gradio/Wandb/SwanLab
184
+ 3. **Output Layer**
185
+ - **Structured Reports**: Supports JSON, Table, Logs
186
+ - **Visualization Platform**: Supports Gradio, Wandb, SwanLab
131
187
 
132
188
  </details>
133
189
 
134
- ## User Groups
135
-
136
- Please scan the QR code below to join our community groups:
137
-
138
- [Discord Group](https://discord.com/invite/D27yfEFVz5) | WeChat Group | DingTalk Group
139
- :-------------------------:|:-------------------------:|:-------------------------:
140
- <img src="docs/asset/discord_qr.jpg" width="160" height="160"> | <img src="docs/asset/wechat.png" width="160" height="160"> | <img src="docs/asset/dingding.png" width="160" height="160">
141
-
142
-
143
- ## 🎉 News
190
+ ## 🎉 What's New
144
191
 
145
192
  > [!IMPORTANT]
146
193
  > **Version 1.0 Refactoring**
147
194
  >
148
195
  > Version 1.0 introduces a major overhaul of the evaluation framework, establishing a new, more modular and extensible API layer under `evalscope/api`. Key improvements include standardized data models for benchmarks, samples, and results; a registry-based design for components such as benchmarks and metrics; and a rewritten core evaluator that orchestrates the new architecture. Existing benchmark adapters have been migrated to this API, resulting in cleaner, more consistent, and easier-to-maintain implementations.
196
+
197
+ - 🔥 **[2025.10.21]** Optimized sandbox environment usage in code evaluation, supporting both local and remote operation modes. For details, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/sandbox.html).
198
+ - 🔥 **[2025.10.20]** Added support for evaluation benchmarks including PolyMath, SimpleVQA, MathVerse, MathVision, AA-LCR; optimized evalscope perf performance to align with vLLM Bench. For details, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/vs_vllm_bench.html).
149
199
  - 🔥 **[2025.10.14]** Added support for OCRBench, OCRBench-v2, DocVQA, InfoVQA, ChartQA, and BLINK multimodal image-text evaluation benchmarks.
150
200
  - 🔥 **[2025.09.22]** Code evaluation benchmarks (HumanEval, LiveCodeBench) now support running in a sandbox environment. To use this feature, please install [ms-enclave](https://github.com/modelscope/ms-enclave) first.
151
201
  - 🔥 **[2025.09.19]** Added support for multimodal image-text evaluation benchmarks including RealWorldQA, AI2D, MMStar, MMBench, and OmniBench, as well as pure text evaluation benchmarks such as Multi-IF, HealthBench, and AMC.
152
- - 🔥 **[2025.09.05]** Added support for vision-language multimodal model evaluation tasks, such as MathVista and MMMU. For more supported datasets, please [refer to the documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/vlm.html).
202
+ - 🔥 **[2025.09.05]** Added support for vision-language multimodal model evaluation tasks, such as MathVista and MMMU. For more supported datasets, please [refer to the documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/vlm.html).
153
203
  - 🔥 **[2025.09.04]** Added support for image editing task evaluation, including the [GEdit-Bench](https://modelscope.cn/datasets/stepfun-ai/GEdit-Bench) benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/image_edit.html).
154
204
  - 🔥 **[2025.08.22]** Version 1.0 Refactoring. Break changes, please [refer to](https://evalscope.readthedocs.io/en/latest/get_started/basic_usage.html#switching-to-version-v1-0).
155
205
  - 🔥 **[2025.07.18]** The model stress testing now supports randomly generating image-text data for multimodal model evaluation. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#id4).
156
- - 🔥 **[2025.07.16]** Support for [τ-bench](https://github.com/sierra-research/tau-bench) has been added, enabling the evaluation of AI Agent performance and reliability in real-world scenarios involving dynamic user and tool interactions. For usage instructions, please refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/llm.html#bench).
206
+ - 🔥 **[2025.07.16]** Support for [τ-bench](https://github.com/sierra-research/tau-bench) has been added, enabling the evaluation of AI Agent performance and reliability in real-world scenarios involving dynamic user and tool interactions. For usage instructions, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/llm.html#bench).
157
207
  - 🔥 **[2025.07.14]** Support for "Humanity's Last Exam" ([Humanity's-Last-Exam](https://modelscope.cn/datasets/cais/hle)), a highly challenging evaluation benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/llm.html#humanity-s-last-exam).
158
208
  - 🔥 **[2025.07.03]** Refactored Arena Mode: now supports custom model battles, outputs a model leaderboard, and provides battle result visualization. See [reference](https://evalscope.readthedocs.io/en/latest/user_guides/arena.html) for details.
209
+ <details><summary>More</summary>
210
+
159
211
  - 🔥 **[2025.06.28]** Optimized custom dataset evaluation: now supports evaluation without reference answers. Enhanced LLM judge usage, with built-in modes for "scoring directly without reference answers" and "checking answer consistency with reference answers". See [reference](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset/llm.html#qa) for details.
160
- - 🔥 **[2025.06.19]** Added support for the [BFCL-v3](https://modelscope.cn/datasets/AI-ModelScope/bfcl_v3) benchmark, designed to evaluate model function-calling capabilities across various scenarios. For more information, refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/third_party/bfcl_v3.html).
212
+ - 🔥 **[2025.06.19]** Added support for the [BFCL-v3](https://modelscope.cn/datasets/AI-ModelScope/bfcl_v3) benchmark, designed to evaluate model function-calling capabilities across various scenarios. For more information, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/bfcl_v3.html).
161
213
  - 🔥 **[2025.06.02]** Added support for the Needle-in-a-Haystack test. Simply specify `needle_haystack` to conduct the test, and a corresponding heatmap will be generated in the `outputs/reports` folder, providing a visual representation of the model's performance. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/needle_haystack.html) for more details.
162
214
  - 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html).
163
215
  - 🔥 **[2025.05.16]** Model service performance stress testing now supports setting various levels of concurrency and outputs a performance test report. [Reference example](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/quick_start.html#id3).
164
216
  - 🔥 **[2025.05.13]** Added support for the [ToolBench-Static](https://modelscope.cn/datasets/AI-ModelScope/ToolBench-Static) dataset to evaluate model's tool-calling capabilities. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) for usage instructions. Also added support for the [DROP](https://modelscope.cn/datasets/AI-ModelScope/DROP/dataPeview) and [Winogrande](https://modelscope.cn/datasets/AI-ModelScope/winogrande_val) benchmarks to assess the reasoning capabilities of models.
165
- <details><summary>More</summary>
166
-
167
217
  - 🔥 **[2025.04.29]** Added Qwen3 Evaluation Best Practices, [welcome to read 📖](https://evalscope.readthedocs.io/en/latest/best_practice/qwen3.html)
168
218
  - 🔥 **[2025.04.27]** Support for text-to-image evaluation: Supports 8 metrics including MPS, HPSv2.1Score, etc., and evaluation benchmarks such as EvalMuse, GenAI-Bench. Refer to the [user documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/t2i.html) for more details.
169
219
  - 🔥 **[2025.04.10]** Model service stress testing tool now supports the `/v1/completions` endpoint (the default endpoint for vLLM benchmarking)
@@ -200,96 +250,71 @@ Please scan the QR code below to join our community groups:
200
250
 
201
251
  </details>
202
252
 
203
- ## 🛠️ Environment Setup
253
+ ## ❤️ Community & Support
204
254
 
205
- ### Method 1. Install via pip
255
+ Welcome to join our community to communicate with other developers and get help.
206
256
 
207
- We recommend using conda to manage your environment and pip to install dependencies. This allows you to use the latest evalscope PyPI package.
257
+ [Discord Group](https://discord.com/invite/D27yfEFVz5) | WeChat Group | DingTalk Group
258
+ :-------------------------:|:-------------------------:|:-------------------------:
259
+ <img src="docs/asset/discord_qr.jpg" width="160" height="160"> | <img src="docs/asset/wechat.png" width="160" height="160"> | <img src="docs/asset/dingding.png" width="160" height="160">
208
260
 
209
- 1. Create a conda environment (optional)
210
- ```shell
211
- # Python 3.10 is recommended
212
- conda create -n evalscope python=3.10
213
261
 
214
- # Activate the conda environment
215
- conda activate evalscope
216
- ```
217
- 2. Install dependencies via pip
218
- ```shell
219
- pip install evalscope
220
- ```
221
- 3. Install additional dependencies (optional)
222
- - To use model service inference benchmarking features, install the perf dependency:
262
+
263
+ ## 🛠️ Environment Setup
264
+
265
+ We recommend using `conda` to create a virtual environment and install with `pip`.
266
+
267
+ 1. **Create and Activate Conda Environment** (Python 3.10 recommended)
223
268
  ```shell
224
- pip install 'evalscope[perf]'
269
+ conda create -n evalscope python=3.10
270
+ conda activate evalscope
225
271
  ```
226
- - To use visualization features, install the app dependency:
272
+
273
+ 2. **Install EvalScope**
274
+
275
+ - **Method 1: Install via PyPI (Recommended)**
276
+ ```shell
277
+ pip install evalscope
278
+ ```
279
+
280
+ - **Method 2: Install from Source (For Development)**
281
+ ```shell
282
+ git clone https://github.com/modelscope/evalscope.git
283
+ cd evalscope
284
+ pip install -e .
285
+ ```
286
+
287
+ 3. **Install Additional Dependencies** (Optional)
288
+ Install corresponding feature extensions according to your needs:
227
289
  ```shell
290
+ # Performance testing
291
+ pip install 'evalscope[perf]'
292
+
293
+ # Visualization App
228
294
  pip install 'evalscope[app]'
229
- ```
230
- - If you need to use other evaluation backends, you can install OpenCompass, VLMEvalKit, or RAGEval as needed:
231
- ```shell
295
+
296
+ # Other evaluation backends
232
297
  pip install 'evalscope[opencompass]'
233
298
  pip install 'evalscope[vlmeval]'
234
299
  pip install 'evalscope[rag]'
235
- ```
236
- - To install all dependencies:
237
- ```shell
300
+
301
+ # Install all dependencies
238
302
  pip install 'evalscope[all]'
239
303
  ```
304
+ > If you installed from source, please replace `evalscope` with `.`, for example `pip install '.[perf]'`.
240
305
 
241
306
  > [!NOTE]
242
- > The project has been renamed to `evalscope`. For version `v0.4.3` or earlier, you can install it with:
243
- > ```shell
244
- > pip install llmuses<=0.4.3
245
- > ```
246
- > Then, import related dependencies using `llmuses`:
247
- > ```python
248
- > from llmuses import ...
249
- > ```
250
-
251
- ### Method 2. Install from source
252
-
253
- Installing from source allows you to use the latest code and makes it easier for further development and debugging.
254
-
255
- 1. Clone the source code
256
- ```shell
257
- git clone https://github.com/modelscope/evalscope.git
258
- ```
259
- 2. Install dependencies
260
- ```shell
261
- cd evalscope/
262
-
263
- pip install -e .
264
- ```
265
- 3. Install additional dependencies
266
- - To use model service inference benchmarking features, install the perf dependency:
267
- ```shell
268
- pip install '.[perf]'
269
- ```
270
- - To use visualization features, install the app dependency:
271
- ```shell
272
- pip install '.[app]'
273
- ```
274
- - If you need to use other evaluation backends, you can install OpenCompass, VLMEvalKit, or RAGEval as needed:
275
- ```shell
276
- pip install '.[opencompass]'
277
- pip install '.[vlmeval]'
278
- pip install '.[rag]'
279
- ```
280
- - To install all dependencies:
281
- ```shell
282
- pip install '.[all]'
283
- ```
307
+ > This project was formerly known as `llmuses`. If you need to use `v0.4.3` or earlier versions, please run `pip install llmuses<=0.4.3` and use `from llmuses import ...` for imports.
284
308
 
285
309
 
286
310
  ## 🚀 Quick Start
287
311
 
288
- To evaluate a model on specified datasets using default configurations, this framework supports two ways to initiate evaluation tasks: using the command line or using Python code.
312
+ You can start evaluation tasks in two ways: **command line** or **Python code**.
289
313
 
290
314
  ### Method 1. Using Command Line
291
315
 
292
- Execute the `eval` command in any directory:
316
+ Execute the `evalscope eval` command in any path to start evaluation. The following command will evaluate the `Qwen/Qwen2.5-0.5B-Instruct` model on `gsm8k` and `arc` datasets, taking only 5 samples from each dataset.
317
+
293
318
  ```bash
294
319
  evalscope eval \
295
320
  --model Qwen/Qwen2.5-0.5B-Instruct \
@@ -299,22 +324,23 @@ evalscope eval \
299
324
 
300
325
  ### Method 2. Using Python Code
301
326
 
302
- When using Python code for evaluation, you need to submit the evaluation task using the `run_task` function, passing a `TaskConfig` as a parameter. It can also be a Python dictionary, yaml file path, or json file path, for example:
303
-
304
- **Using `TaskConfig`**
327
+ Use the `run_task` function and `TaskConfig` object to configure and start evaluation tasks.
305
328
 
306
329
  ```python
307
330
  from evalscope import run_task, TaskConfig
308
331
 
332
+ # Configure evaluation task
309
333
  task_cfg = TaskConfig(
310
334
  model='Qwen/Qwen2.5-0.5B-Instruct',
311
335
  datasets=['gsm8k', 'arc'],
312
336
  limit=5
313
337
  )
314
338
 
315
- run_task(task_cfg=task_cfg)
339
+ # Start evaluation
340
+ run_task(task_cfg)
316
341
  ```
317
- <details><summary>More Startup Methods</summary>
342
+
343
+ <details><summary><b>💡 Tip:</b> `run_task` also supports dictionaries, YAML or JSON files as configuration.</summary>
318
344
 
319
345
  **Using Python Dictionary**
320
346
 
@@ -326,13 +352,10 @@ task_cfg = {
326
352
  'datasets': ['gsm8k', 'arc'],
327
353
  'limit': 5
328
354
  }
329
-
330
355
  run_task(task_cfg=task_cfg)
331
356
  ```
332
357
 
333
- **Using `yaml` file**
334
-
335
- `config.yaml`:
358
+ **Using YAML File** (`config.yaml`)
336
359
  ```yaml
337
360
  model: Qwen/Qwen2.5-0.5B-Instruct
338
361
  datasets:
@@ -340,37 +363,15 @@ datasets:
340
363
  - arc
341
364
  limit: 5
342
365
  ```
343
-
344
366
  ```python
345
367
  from evalscope.run import run_task
346
368
 
347
369
  run_task(task_cfg="config.yaml")
348
370
  ```
349
-
350
- **Using `json` file**
351
-
352
- `config.json`:
353
- ```json
354
- {
355
- "model": "Qwen/Qwen2.5-0.5B-Instruct",
356
- "datasets": ["gsm8k", "arc"],
357
- "limit": 5
358
- }
359
- ```
360
-
361
- ```python
362
- from evalscope.run import run_task
363
-
364
- run_task(task_cfg="config.json")
365
- ```
366
371
  </details>
367
372
 
368
- ### Basic Parameter
369
- - `--model`: Specifies the `model_id` of the model in [ModelScope](https://modelscope.cn/), which can be automatically downloaded, e.g., [Qwen/Qwen2.5-0.5B-Instruct](https://modelscope.cn/models/Qwen/Qwen2.5-0.5B-Instruct/summary); or use the local path of the model, e.g., `/path/to/model`
370
- - `--datasets`: Dataset names, supports inputting multiple datasets separated by spaces. Datasets will be automatically downloaded from modelscope. For supported datasets, refer to the [Dataset List](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html)
371
- - `--limit`: Maximum amount of evaluation data for each dataset. If not specified, it defaults to evaluating all data. Can be used for quick validation
372
-
373
373
  ### Output Results
374
+ After evaluation completion, you will see a report in the terminal in the following format:
374
375
  ```text
375
376
  +-----------------------+----------------+-----------------+-----------------+---------------+-------+---------+
376
377
  | Model Name | Dataset Name | Metric Name | Category Name | Subset Name | Num | Score |
@@ -383,164 +384,140 @@ run_task(task_cfg="config.json")
383
384
  +-----------------------+----------------+-----------------+-----------------+---------------+-------+---------+
384
385
  ```
385
386
 
386
- ## 📈 Visualization of Evaluation Results
387
-
388
- 1. Install the dependencies required for visualization, including gradio, plotly, etc.
389
- ```bash
390
- pip install 'evalscope[app]'
391
- ```
392
-
393
- 2. Start the Visualization Service
394
-
395
- Run the following command to start the visualization service.
396
- ```bash
397
- evalscope app
398
- ```
399
- You can access the visualization service in the browser if the following output appears.
400
- ```text
401
- * Running on local URL: http://127.0.0.1:7861
402
-
403
- To create a public link, set `share=True` in `launch()`.
404
- ```
405
-
406
- <table>
407
- <tr>
408
- <td style="text-align: center;">
409
- <img src="docs/en/get_started/images/setting.png" alt="Setting" style="width: 75%;" />
410
- <p>Setting Interface</p>
411
- </td>
412
- <td style="text-align: center;">
413
- <img src="docs/en/get_started/images/model_compare.png" alt="Model Compare" style="width: 100%;" />
414
- <p>Model Comparison</p>
415
- </td>
416
- </tr>
417
- <tr>
418
- <td style="text-align: center;">
419
- <img src="docs/en/get_started/images/report_overview.png" alt="Report Overview" style="width: 100%;" />
420
- <p>Report Overview</p>
421
- </td>
422
- <td style="text-align: center;">
423
- <img src="docs/en/get_started/images/report_details.png" alt="Report Details" style="width: 80%;" />
424
- <p>Report Details</p>
425
- </td>
426
- </tr>
427
- </table>
428
-
429
- For more details, refer to: [📖 Visualization of Evaluation Results](https://evalscope.readthedocs.io/en/latest/get_started/visualization.html)
430
-
431
- ## 🌐 Evaluation of Model API
387
+ ## 📈 Advanced Usage
432
388
 
433
- Specify the model API service address (api_url) and API Key (api_key) to evaluate the deployed model API service. In this case, the `eval-type` parameter must be specified as `service`, for example:
389
+ ### Custom Evaluation Parameters
434
390
 
435
- For example, to launch a model service using [vLLM](https://github.com/vllm-project/vllm):
436
-
437
- ```shell
438
- export VLLM_USE_MODELSCOPE=True && python -m vllm.entrypoints.openai.api_server --model Qwen/Qwen2.5-0.5B-Instruct --served-model-name qwen2.5 --trust_remote_code --port 8801
439
- ```
440
- Then, you can use the following command to evaluate the model API service:
441
- ```shell
442
- evalscope eval \
443
- --model qwen2.5 \
444
- --api-url http://127.0.0.1:8801/v1 \
445
- --api-key EMPTY \
446
- --eval-type service \
447
- --datasets gsm8k \
448
- --limit 10
449
- ```
450
-
451
- ## ⚙️ Custom Parameter Evaluation
452
-
453
- For more customized evaluations, such as customizing model parameters or dataset parameters, you can use the following command. The evaluation startup method is the same as simple evaluation. Below shows how to start the evaluation using the `eval` command:
391
+ You can fine-tune model loading, inference, and dataset configuration through command line parameters.
454
392
 
455
393
  ```shell
456
394
  evalscope eval \
457
395
  --model Qwen/Qwen3-0.6B \
458
396
  --model-args '{"revision": "master", "precision": "torch.float16", "device_map": "auto"}' \
459
- --generation-config '{"do_sample":true,"temperature":0.6,"max_tokens":512,"chat_template_kwargs":{"enable_thinking": false}}' \
397
+ --generation-config '{"do_sample":true,"temperature":0.6,"max_tokens":512}' \
460
398
  --dataset-args '{"gsm8k": {"few_shot_num": 0, "few_shot_random": false}}' \
461
399
  --datasets gsm8k \
462
400
  --limit 10
463
401
  ```
464
402
 
465
- ### Parameter Description
466
- - `--model-args`: Model loading parameters, passed as a JSON string:
467
- - `revision`: Model version
468
- - `precision`: Model precision
469
- - `device_map`: Device allocation for the model
470
- - `--generation-config`: Generation parameters, passed as a JSON string and parsed as a dictionary:
471
- - `do_sample`: Whether to use sampling
472
- - `temperature`: Generation temperature
473
- - `max_tokens`: Maximum length of generated tokens
474
- - `chat_template_kwargs`: Model inference template parameters
475
- - `--dataset-args`: Settings for the evaluation dataset, passed as a JSON string where the key is the dataset name and the value is the parameters. Note that these need to correspond one-to-one with the values in the `--datasets` parameter:
476
- - `few_shot_num`: Number of few-shot examples
477
- - `few_shot_random`: Whether to randomly sample few-shot data; if not set, defaults to `true`
403
+ - `--model-args`: Model loading parameters such as `revision`, `precision`, etc.
404
+ - `--generation-config`: Model generation parameters such as `temperature`, `max_tokens`, etc.
405
+ - `--dataset-args`: Dataset configuration parameters such as `few_shot_num`, etc.
478
406
 
479
- Reference: [Full Parameter Description](https://evalscope.readthedocs.io/en/latest/get_started/parameters.html)
407
+ For details, please refer to [📖 Complete Parameter Guide](https://evalscope.readthedocs.io/en/latest/get_started/parameters.html).
480
408
 
409
+ ### Evaluating Online Model APIs
481
410
 
482
- ## 🧪 Other Evaluation Backends
483
- EvalScope supports using third-party evaluation frameworks to initiate evaluation tasks, which we call Evaluation Backend. Currently supported Evaluation Backend includes:
484
- - **Native**: EvalScope's own **default evaluation framework**, supporting various evaluation modes including single model evaluation, arena mode, and baseline model comparison mode.
485
- - [OpenCompass](https://github.com/open-compass/opencompass): Initiate OpenCompass evaluation tasks through EvalScope. Lightweight, easy to customize, supports seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/opencompass_backend.html)
486
- - [VLMEvalKit](https://github.com/open-compass/VLMEvalKit): Initiate VLMEvalKit multimodal evaluation tasks through EvalScope. Supports various multimodal models and datasets, and offers seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/vlmevalkit_backend.html)
487
- - **RAGEval**: Initiate RAG evaluation tasks through EvalScope, supporting independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html): [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/index.html)
488
- - **ThirdParty**: Third-party evaluation tasks, such as [ToolBench](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) and [LongBench-Write](https://evalscope.readthedocs.io/en/latest/third_party/longwriter.html).
411
+ EvalScope supports evaluating model services deployed via APIs (such as services deployed with vLLM). Simply specify the service address and API Key.
489
412
 
413
+ 1. **Start Model Service** (using vLLM as example)
414
+ ```shell
415
+ export VLLM_USE_MODELSCOPE=True
416
+ python -m vllm.entrypoints.openai.api_server \
417
+ --model Qwen/Qwen2.5-0.5B-Instruct \
418
+ --served-model-name qwen2.5 \
419
+ --port 8801
420
+ ```
490
421
 
491
- ## 📈 Model Serving Performance Evaluation
492
- A stress testing tool focused on large language models, which can be customized to support various dataset formats and different API protocol formats.
422
+ 2. **Run Evaluation**
423
+ ```shell
424
+ evalscope eval \
425
+ --model qwen2.5 \
426
+ --eval-type service \
427
+ --api-url http://127.0.0.1:8801/v1 \
428
+ --api-key EMPTY \
429
+ --datasets gsm8k \
430
+ --limit 10
431
+ ```
493
432
 
494
- Reference: Performance Testing [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/index.html)
433
+ ### ⚔️ Arena Mode
495
434
 
496
- **Output example**
435
+ Arena mode evaluates model performance through pairwise battles between models, providing win rates and rankings, perfect for side-by-side comparison of multiple models.
497
436
 
498
- ![multi_perf](docs/en/user_guides/stress_test/images/multi_perf.png)
437
+ ```text
438
+ # Example evaluation results
439
+ Model WinRate (%) CI (%)
440
+ ------------ ------------- ---------------
441
+ qwen2.5-72b 69.3 (-13.3 / +12.2)
442
+ qwen2.5-7b 50 (+0.0 / +0.0)
443
+ qwen2.5-0.5b 4.7 (-2.5 / +4.4)
444
+ ```
445
+ For details, please refer to [📖 Arena Mode Usage Guide](https://evalscope.readthedocs.io/en/latest/user_guides/arena.html).
499
446
 
447
+ ### 🖊️ Custom Dataset Evaluation
500
448
 
501
- **Supports wandb for recording results**
449
+ EvalScope allows you to easily add and evaluate your own datasets. For details, please refer to [📖 Custom Dataset Evaluation Guide](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset/index.html).
502
450
 
503
- ![wandb sample](https://modelscope.oss-cn-beijing.aliyuncs.com/resource/wandb_sample.png)
504
451
 
505
- **Supports swanlab for recording results**
452
+ ## 🧪 Other Evaluation Backends
453
+ EvalScope supports launching evaluation tasks through third-party evaluation frameworks (we call them "backends") to meet diverse evaluation needs.
506
454
 
507
- ![swanlab sample](https://sail-moe.oss-cn-hangzhou.aliyuncs.com/yunlin/images/evalscope/swanlab.png)
455
+ - **Native**: EvalScope's default evaluation framework with comprehensive functionality.
456
+ - **OpenCompass**: Focuses on text-only evaluation. [📖 Usage Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/opencompass_backend.html)
457
+ - **VLMEvalKit**: Focuses on multi-modal evaluation. [📖 Usage Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/vlmevalkit_backend.html)
458
+ - **RAGEval**: Focuses on RAG evaluation, supporting Embedding and Reranker models. [📖 Usage Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/index.html)
459
+ - **Third-party Evaluation Tools**: Supports evaluation tasks like [ToolBench](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html).
508
460
 
509
- **Supports Speed Benchmark**
461
+ ## 📈 Inference Performance Evaluation Tool
462
+ EvalScope provides a powerful stress testing tool for evaluating the performance of large language model services.
510
463
 
511
- It supports speed testing and provides speed benchmarks similar to those found in the [official Qwen](https://qwen.readthedocs.io/en/latest/benchmark/speed_benchmark.html) reports:
464
+ - **Key Metrics**: Supports throughput (Tokens/s), first token latency (TTFT), token generation latency (TPOT), etc.
465
+ - **Result Recording**: Supports recording results to `wandb` and `swanlab`.
466
+ - **Speed Benchmarks**: Can generate speed benchmark results similar to official reports.
512
467
 
513
- ```text
514
- Speed Benchmark Results:
515
- +---------------+-----------------+----------------+
516
- | Prompt Tokens | Speed(tokens/s) | GPU Memory(GB) |
517
- +---------------+-----------------+----------------+
518
- | 1 | 50.69 | 0.97 |
519
- | 6144 | 51.36 | 1.23 |
520
- | 14336 | 49.93 | 1.59 |
521
- | 30720 | 49.56 | 2.34 |
522
- +---------------+-----------------+----------------+
523
- ```
468
+ For details, please refer to [📖 Performance Testing Usage Guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/index.html).
524
469
 
525
- ## 🖊️ Custom Dataset Evaluation
526
- EvalScope supports custom dataset evaluation. For detailed information, please refer to the Custom Dataset Evaluation [📖User Guide](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset/index.html)
470
+ Example output is shown below:
471
+ <p align="center">
472
+ <img src="docs/en/user_guides/stress_test/images/multi_perf.png" style="width: 80%;">
473
+ </p>
527
474
 
528
475
 
529
- ## ⚔️ Arena Mode
476
+ ## 📊 Visualizing Evaluation Results
530
477
 
531
- Arena mode allows you to configure multiple candidate models and specify a baseline model. Evaluation is performed by pairwise battles between each candidate model and the baseline model, with the final output including each model's win rate and ranking. This method is suitable for comparative evaluation among multiple models, providing an intuitive reflection of each model's strengths and weaknesses. Refer to: Arena Mode [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/arena.html)
478
+ EvalScope provides a Gradio-based WebUI for interactive analysis and comparison of evaluation results.
532
479
 
533
- ```text
534
- Model WinRate (%) CI (%)
535
- ------------ ------------- ---------------
536
- qwen2.5-72b 69.3 (-13.3 / +12.2)
537
- qwen2.5-7b 50 (+0.0 / +0.0)
538
- qwen2.5-0.5b 4.7 (-2.5 / +4.4)
539
- ```
480
+ 1. **Install Dependencies**
481
+ ```bash
482
+ pip install 'evalscope[app]'
483
+ ```
484
+
485
+ 2. **Start Service**
486
+ ```bash
487
+ evalscope app
488
+ ```
489
+ Visit `http://127.0.0.1:7861` to open the visualization interface.
490
+
491
+ <table>
492
+ <tr>
493
+ <td style="text-align: center;">
494
+ <img src="docs/en/get_started/images/setting.png" alt="Setting" style="width: 85%;" />
495
+ <p>Settings Interface</p>
496
+ </td>
497
+ <td style="text-align: center;">
498
+ <img src="docs/en/get_started/images/model_compare.png" alt="Model Compare" style="width: 100%;" />
499
+ <p>Model Comparison</p>
500
+ </td>
501
+ </tr>
502
+ <tr>
503
+ <td style="text-align: center;">
504
+ <img src="docs/en/get_started/images/report_overview.png" alt="Report Overview" style="width: 100%;" />
505
+ <p>Report Overview</p>
506
+ </td>
507
+ <td style="text-align: center;">
508
+ <img src="docs/en/get_started/images/report_details.png" alt="Report Details" style="width: 85%;" />
509
+ <p>Report Details</p>
510
+ </td>
511
+ </tr>
512
+ </table>
513
+
514
+ For details, please refer to [📖 Visualizing Evaluation Results](https://evalscope.readthedocs.io/en/latest/get_started/visualization.html).
515
+
516
+ ## 👷‍♂️ Contributing
540
517
 
541
- ## 👷‍♂️ Contribution
518
+ We welcome any contributions from the community! If you want to add new evaluation benchmarks, models, or features, please refer to our [Contributing Guide](https://evalscope.readthedocs.io/en/latest/advanced_guides/add_benchmark.html).
542
519
 
543
- EvalScope, as the official evaluation tool of [ModelScope](https://modelscope.cn), is continuously optimizing its benchmark evaluation features! We invite you to refer to the [Contribution Guide](https://evalscope.readthedocs.io/en/latest/advanced_guides/add_benchmark.html) to easily add your own evaluation benchmarks and share your contributions with the community. Let’s work together to support the growth of EvalScope and make our tools even better! Join us now!
520
+ Thanks to all developers who have contributed to EvalScope!
544
521
 
545
522
  <a href="https://github.com/modelscope/evalscope/graphs/contributors" target="_blank">
546
523
  <table>
@@ -552,8 +529,10 @@ EvalScope, as the official evaluation tool of [ModelScope](https://modelscope.cn
552
529
  </table>
553
530
  </a>
554
531
 
532
+
555
533
  ## 📚 Citation
556
534
 
535
+ If you use EvalScope in your research, please cite our work:
557
536
  ```bibtex
558
537
  @misc{evalscope_2024,
559
538
  title={{EvalScope}: Evaluation Framework for Large Models},
@@ -563,20 +542,6 @@ EvalScope, as the official evaluation tool of [ModelScope](https://modelscope.cn
563
542
  }
564
543
  ```
565
544
 
566
- ## 🔜 Roadmap
567
- - [x] Support for better evaluation report visualization
568
- - [x] Support for mixed evaluations across multiple datasets
569
- - [x] RAG evaluation
570
- - [x] VLM evaluation
571
- - [x] Agents evaluation
572
- - [x] vLLM
573
- - [ ] Distributed evaluating
574
- - [x] Multi-modal evaluation
575
- - [ ] Benchmarks
576
- - [x] BFCL-v3
577
- - [x] GPQA
578
- - [x] MBPP
579
-
580
545
 
581
546
  ## ⭐ Star History
582
547