evalscope 0.6.1__tar.gz → 0.7.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of evalscope might be problematic.

Files changed (312)
  1. evalscope-0.7.1/LICENSE +203 -0
  2. evalscope-0.7.1/MANIFEST.in +4 -0
  3. {evalscope-0.6.1 → evalscope-0.7.1}/PKG-INFO +93 -35
  4. {evalscope-0.6.1 → evalscope-0.7.1}/README.md +68 -24
  5. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/opencompass/tasks/eval_api.py +2 -1
  6. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/opencompass/tasks/eval_datasets.py +1 -0
  7. evalscope-0.7.1/evalscope/backend/rag_eval/clip_benchmark/utils/webdataset_convert.py +230 -0
  8. evalscope-0.7.1/evalscope/backend/rag_eval/clip_benchmark/utils/webdatasets.txt +43 -0
  9. evalscope-0.7.1/evalscope/backend/rag_eval/ragas/prompts/chinese/AnswerCorrectness/correctness_prompt_chinese.json +87 -0
  10. evalscope-0.7.1/evalscope/backend/rag_eval/ragas/prompts/chinese/AnswerCorrectness/long_form_answer_prompt_chinese.json +36 -0
  11. evalscope-0.7.1/evalscope/backend/rag_eval/ragas/prompts/chinese/AnswerRelevancy/question_generation_chinese.json +26 -0
  12. evalscope-0.7.1/evalscope/backend/rag_eval/ragas/prompts/chinese/ContextPrecision/context_precision_prompt_chinese.json +41 -0
  13. evalscope-0.7.1/evalscope/backend/rag_eval/ragas/prompts/chinese/Faithfulness/nli_statements_message_chinese.json +60 -0
  14. evalscope-0.7.1/evalscope/backend/rag_eval/ragas/prompts/chinese/Faithfulness/statement_prompt_chinese.json +36 -0
  15. evalscope-0.7.1/evalscope/backend/rag_eval/ragas/prompts/chinese/HeadlinesExtractor/prompt_chinese.json +22 -0
  16. evalscope-0.7.1/evalscope/backend/rag_eval/ragas/prompts/chinese/MultiHopAbstractQuerySynthesizer/concept_combination_prompt_chinese.json +35 -0
  17. evalscope-0.7.1/evalscope/backend/rag_eval/ragas/prompts/chinese/MultiHopAbstractQuerySynthesizer/generate_query_reference_prompt_chinese.json +7 -0
  18. evalscope-0.7.1/evalscope/backend/rag_eval/ragas/prompts/chinese/MultiHopAbstractQuerySynthesizer/theme_persona_matching_prompt_chinese.json +39 -0
  19. evalscope-0.7.1/evalscope/backend/rag_eval/ragas/prompts/chinese/MultiHopSpecificQuerySynthesizer/generate_query_reference_prompt_chinese.json +7 -0
  20. evalscope-0.7.1/evalscope/backend/rag_eval/ragas/prompts/chinese/MultiHopSpecificQuerySynthesizer/theme_persona_matching_prompt_chinese.json +39 -0
  21. evalscope-0.7.1/evalscope/backend/rag_eval/ragas/prompts/chinese/MultiModalFaithfulness/faithfulness_prompt_chinese.json +34 -0
  22. evalscope-0.7.1/evalscope/backend/rag_eval/ragas/prompts/chinese/MultiModalRelevance/relevance_prompt_chinese.json +36 -0
  23. evalscope-0.7.1/evalscope/backend/rag_eval/ragas/prompts/chinese/NERExtractor/prompt_chinese.json +25 -0
  24. evalscope-0.7.1/evalscope/backend/rag_eval/ragas/prompts/chinese/SingleHopSpecificQuerySynthesizer/generate_query_reference_prompt_chinese.json +7 -0
  25. evalscope-0.7.1/evalscope/backend/rag_eval/ragas/prompts/chinese/SingleHopSpecificQuerySynthesizer/theme_persona_matching_prompt_chinese.json +39 -0
  26. evalscope-0.7.1/evalscope/backend/rag_eval/ragas/prompts/chinese/SummaryExtractor/prompt_chinese.json +16 -0
  27. evalscope-0.7.1/evalscope/backend/rag_eval/ragas/prompts/chinese/ThemesExtractor/prompt_chinese.json +24 -0
  28. evalscope-0.7.1/evalscope/backend/rag_eval/ragas/prompts/persona_prompt.py +18 -0
  29. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/vlm_eval_kit/backend_manager.py +23 -21
  30. evalscope-0.7.1/evalscope/benchmarks/ceval/samples.jsonl +1 -0
  31. evalscope-0.7.1/evalscope/benchmarks/cmmlu/samples.jsonl +5 -0
  32. evalscope-0.7.1/evalscope/benchmarks/mmlu/samples.jsonl +5 -0
  33. evalscope-0.7.1/evalscope/benchmarks/race/samples.jsonl +5 -0
  34. evalscope-0.7.1/evalscope/benchmarks/trivia_qa/samples.jsonl +5 -0
  35. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/cli/start_perf.py +8 -11
  36. evalscope-0.7.1/evalscope/metrics/resources/gpt2-zhcn3-v4.bpe +58485 -0
  37. evalscope-0.7.1/evalscope/metrics/resources/gpt2-zhcn3-v4.json +1 -0
  38. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/metrics/rouge_metric.py +30 -15
  39. evalscope-0.7.1/evalscope/perf/arguments.py +179 -0
  40. evalscope-0.7.1/evalscope/perf/benchmark.py +245 -0
  41. evalscope-0.7.1/evalscope/perf/http_client.py +172 -0
  42. evalscope-0.7.1/evalscope/perf/main.py +35 -0
  43. evalscope-0.7.1/evalscope/perf/plugin/__init__.py +2 -0
  44. evalscope-0.7.1/evalscope/perf/plugin/api/__init__.py +3 -0
  45. evalscope-0.6.1/evalscope/perf/api_plugin_base.py → evalscope-0.7.1/evalscope/perf/plugin/api/base.py +17 -18
  46. {evalscope-0.6.1/evalscope/perf → evalscope-0.7.1/evalscope/perf/plugin/api}/custom_api.py +25 -19
  47. {evalscope-0.6.1/evalscope/perf → evalscope-0.7.1/evalscope/perf/plugin/api}/dashscope_api.py +28 -14
  48. {evalscope-0.6.1/evalscope/perf → evalscope-0.7.1/evalscope/perf/plugin/api}/openai_api.py +51 -27
  49. evalscope-0.7.1/evalscope/perf/plugin/datasets/__init__.py +6 -0
  50. evalscope-0.6.1/evalscope/perf/dataset_plugin_base.py → evalscope-0.7.1/evalscope/perf/plugin/datasets/base.py +13 -10
  51. evalscope-0.7.1/evalscope/perf/plugin/datasets/custom.py +21 -0
  52. evalscope-0.7.1/evalscope/perf/plugin/datasets/flickr8k.py +51 -0
  53. {evalscope-0.6.1/evalscope/perf → evalscope-0.7.1/evalscope/perf/plugin}/datasets/line_by_line.py +9 -5
  54. evalscope-0.7.1/evalscope/perf/plugin/datasets/longalpaca.py +28 -0
  55. evalscope-0.7.1/evalscope/perf/plugin/datasets/openqa.py +38 -0
  56. evalscope-0.7.1/evalscope/perf/plugin/datasets/speed_benchmark.py +50 -0
  57. evalscope-0.7.1/evalscope/perf/plugin/registry.py +54 -0
  58. evalscope-0.6.1/evalscope/perf/how_to_analysis_result.py → evalscope-0.7.1/evalscope/perf/utils/analysis_result.py +11 -5
  59. evalscope-0.7.1/evalscope/perf/utils/benchmark_util.py +135 -0
  60. evalscope-0.7.1/evalscope/perf/utils/chat_service.py +252 -0
  61. evalscope-0.7.1/evalscope/perf/utils/db_util.py +200 -0
  62. evalscope-0.7.1/evalscope/perf/utils/handler.py +46 -0
  63. evalscope-0.7.1/evalscope/perf/utils/local_server.py +139 -0
  64. evalscope-0.7.1/evalscope/registry/config/cfg_arena.yaml +77 -0
  65. evalscope-0.7.1/evalscope/registry/config/cfg_arena_zhihu.yaml +63 -0
  66. evalscope-0.7.1/evalscope/registry/config/cfg_pairwise_baseline.yaml +83 -0
  67. evalscope-0.7.1/evalscope/registry/config/cfg_single.yaml +78 -0
  68. evalscope-0.7.1/evalscope/registry/data/prompt_template/lmsys_v2.jsonl +8 -0
  69. evalscope-0.7.1/evalscope/registry/data/prompt_template/prompt_templates.jsonl +8 -0
  70. evalscope-0.7.1/evalscope/registry/data/qa_browser/battle.jsonl +634 -0
  71. evalscope-0.7.1/evalscope/registry/data/qa_browser/category_mapping.yaml +10 -0
  72. evalscope-0.7.1/evalscope/registry/data/question.jsonl +80 -0
  73. evalscope-0.7.1/evalscope/third_party/longbench_write/README.md +118 -0
  74. evalscope-0.7.1/evalscope/third_party/longbench_write/default_task.json +27 -0
  75. evalscope-0.7.1/evalscope/third_party/longbench_write/default_task.yaml +24 -0
  76. evalscope-0.7.1/evalscope/third_party/toolbench_static/README.md +118 -0
  77. evalscope-0.7.1/evalscope/third_party/toolbench_static/config_default.json +15 -0
  78. evalscope-0.7.1/evalscope/third_party/toolbench_static/config_default.yaml +12 -0
  79. evalscope-0.7.1/evalscope/third_party/toolbench_static/requirements.txt +2 -0
  80. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/utils/logger.py +18 -20
  81. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/utils/utils.py +41 -42
  82. evalscope-0.7.1/evalscope/version.py +4 -0
  83. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope.egg-info/PKG-INFO +93 -35
  84. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope.egg-info/SOURCES.txt +100 -18
  85. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope.egg-info/requires.txt +24 -10
  86. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope.egg-info/top_level.txt +1 -0
  87. evalscope-0.7.1/requirements/docs.txt +6 -0
  88. evalscope-0.7.1/requirements/framework.txt +36 -0
  89. evalscope-0.7.1/requirements/inner.txt +26 -0
  90. evalscope-0.7.1/requirements/opencompass.txt +1 -0
  91. evalscope-0.7.1/requirements/perf.txt +9 -0
  92. evalscope-0.7.1/requirements/rag.txt +3 -0
  93. evalscope-0.7.1/requirements/tests.txt +5 -0
  94. evalscope-0.7.1/requirements/vlmeval.txt +1 -0
  95. evalscope-0.7.1/requirements.txt +1 -0
  96. evalscope-0.7.1/setup.cfg +37 -0
  97. evalscope-0.7.1/setup.py +172 -0
  98. evalscope-0.7.1/tests/cli/__init__.py +1 -0
  99. evalscope-0.7.1/tests/cli/test_run.py +76 -0
  100. evalscope-0.7.1/tests/perf/__init__.py +1 -0
  101. evalscope-0.7.1/tests/perf/test_perf.py +96 -0
  102. evalscope-0.7.1/tests/rag/test_clip_benchmark.py +85 -0
  103. evalscope-0.7.1/tests/rag/test_mteb.py +136 -0
  104. evalscope-0.7.1/tests/rag/test_ragas.py +120 -0
  105. evalscope-0.7.1/tests/swift/__init__.py +1 -0
  106. evalscope-0.7.1/tests/swift/test_run_swift_eval.py +146 -0
  107. evalscope-0.7.1/tests/swift/test_run_swift_vlm_eval.py +128 -0
  108. evalscope-0.7.1/tests/swift/test_run_swift_vlm_jugde_eval.py +157 -0
  109. evalscope-0.7.1/tests/test_run_all.py +12 -0
  110. evalscope-0.7.1/tests/vlm/__init__.py +1 -0
  111. evalscope-0.7.1/tests/vlm/test_vlmeval.py +59 -0
  112. evalscope-0.6.1/evalscope/perf/_logging.py +0 -32
  113. evalscope-0.6.1/evalscope/perf/datasets/longalpaca_12k.py +0 -20
  114. evalscope-0.6.1/evalscope/perf/datasets/openqa.py +0 -22
  115. evalscope-0.6.1/evalscope/perf/http_client.py +0 -756
  116. evalscope-0.6.1/evalscope/perf/plugin_registry.py +0 -35
  117. evalscope-0.6.1/evalscope/perf/query_parameters.py +0 -42
  118. evalscope-0.6.1/evalscope/perf/server_sent_event.py +0 -43
  119. evalscope-0.6.1/evalscope/preprocess/tokenizers/gpt2_tokenizer.py +0 -221
  120. evalscope-0.6.1/evalscope/version.py +0 -4
  121. evalscope-0.6.1/setup.cfg +0 -4
  122. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/__init__.py +0 -0
  123. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/__init__.py +0 -0
  124. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/base.py +0 -0
  125. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/opencompass/__init__.py +0 -0
  126. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/opencompass/api_meta_template.py +0 -0
  127. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/opencompass/backend_manager.py +0 -0
  128. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/opencompass/tasks/__init__.py +0 -0
  129. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/__init__.py +0 -0
  130. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/backend_manager.py +0 -0
  131. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/clip_benchmark/__init__.py +0 -0
  132. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/clip_benchmark/arguments.py +0 -0
  133. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +0 -0
  134. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/clip_benchmark/task_template.py +0 -0
  135. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/clip_benchmark/tasks/__init__.py +0 -0
  136. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/clip_benchmark/tasks/image_caption.py +0 -0
  137. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/clip_benchmark/tasks/zeroshot_classification.py +0 -0
  138. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/clip_benchmark/tasks/zeroshot_retrieval.py +0 -0
  139. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/cmteb/__init__.py +0 -0
  140. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/cmteb/arguments.py +0 -0
  141. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/cmteb/base.py +0 -0
  142. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/cmteb/task_template.py +0 -0
  143. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/cmteb/tasks/Classification.py +0 -0
  144. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/cmteb/tasks/Clustering.py +0 -0
  145. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/cmteb/tasks/CustomTask.py +0 -0
  146. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/cmteb/tasks/PairClassification.py +0 -0
  147. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/cmteb/tasks/Reranking.py +0 -0
  148. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/cmteb/tasks/Retrieval.py +0 -0
  149. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/cmteb/tasks/STS.py +0 -0
  150. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/cmteb/tasks/__init__.py +0 -0
  151. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/ragas/__init__.py +0 -0
  152. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/ragas/arguments.py +0 -0
  153. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/ragas/metrics/__init__.py +0 -0
  154. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/ragas/metrics/multi_modal_faithfulness.py +0 -0
  155. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/ragas/metrics/multi_modal_relevance.py +0 -0
  156. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/ragas/task_template.py +0 -0
  157. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/ragas/tasks/__init__.py +0 -0
  158. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +0 -0
  159. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +0 -0
  160. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/utils/__init__.py +0 -0
  161. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/utils/clip.py +0 -0
  162. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/utils/embedding.py +0 -0
  163. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/utils/llm.py +0 -0
  164. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/rag_eval/utils/tools.py +0 -0
  165. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/vlm_eval_kit/__init__.py +0 -0
  166. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/backend/vlm_eval_kit/custom_dataset.py +0 -0
  167. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/__init__.py +0 -0
  168. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/arc/__init__.py +0 -0
  169. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/arc/ai2_arc.py +0 -0
  170. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/arc/arc_adapter.py +0 -0
  171. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/__init__.py +0 -0
  172. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/bbh_adapter.py +0 -0
  173. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/boolean_expressions.txt +0 -0
  174. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/causal_judgement.txt +0 -0
  175. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/date_understanding.txt +0 -0
  176. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/disambiguation_qa.txt +0 -0
  177. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/dyck_languages.txt +0 -0
  178. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/formal_fallacies.txt +0 -0
  179. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/geometric_shapes.txt +0 -0
  180. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/hyperbaton.txt +0 -0
  181. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_five_objects.txt +0 -0
  182. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_seven_objects.txt +0 -0
  183. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_three_objects.txt +0 -0
  184. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/movie_recommendation.txt +0 -0
  185. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/multistep_arithmetic_two.txt +0 -0
  186. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/navigate.txt +0 -0
  187. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/object_counting.txt +0 -0
  188. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/penguins_in_a_table.txt +0 -0
  189. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/reasoning_about_colored_objects.txt +0 -0
  190. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/ruin_names.txt +0 -0
  191. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/salient_translation_error_detection.txt +0 -0
  192. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/snarks.txt +0 -0
  193. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/sports_understanding.txt +0 -0
  194. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/temporal_sequences.txt +0 -0
  195. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_five_objects.txt +0 -0
  196. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_seven_objects.txt +0 -0
  197. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_three_objects.txt +0 -0
  198. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/web_of_lies.txt +0 -0
  199. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/bbh/cot_prompts/word_sorting.txt +0 -0
  200. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/benchmark.py +0 -0
  201. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/ceval/__init__.py +0 -0
  202. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/ceval/ceval_adapter.py +0 -0
  203. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/ceval/ceval_exam.py +0 -0
  204. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/cmmlu/__init__.py +0 -0
  205. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/cmmlu/cmmlu.py +0 -0
  206. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/cmmlu/cmmlu_adapter.py +0 -0
  207. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/competition_math/__init__.py +0 -0
  208. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/competition_math/competition_math.py +0 -0
  209. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/competition_math/competition_math_adapter.py +0 -0
  210. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/data_adapter.py +0 -0
  211. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/general_qa/__init__.py +0 -0
  212. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/general_qa/general_qa_adapter.py +0 -0
  213. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/gsm8k/__init__.py +0 -0
  214. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/gsm8k/gsm8k.py +0 -0
  215. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/gsm8k/gsm8k_adapter.py +0 -0
  216. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/hellaswag/__init__.py +0 -0
  217. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/hellaswag/hellaswag.py +0 -0
  218. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/hellaswag/hellaswag_adapter.py +0 -0
  219. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/humaneval/__init__.py +0 -0
  220. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/humaneval/humaneval.py +0 -0
  221. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/humaneval/humaneval_adapter.py +0 -0
  222. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/mmlu/__init__.py +0 -0
  223. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/mmlu/mmlu.py +0 -0
  224. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/mmlu/mmlu_adapter.py +0 -0
  225. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/race/__init__.py +0 -0
  226. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/race/race.py +0 -0
  227. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/race/race_adapter.py +0 -0
  228. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/trivia_qa/__init__.py +0 -0
  229. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/trivia_qa/trivia_qa.py +0 -0
  230. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +0 -0
  231. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/truthful_qa/__init__.py +0 -0
  232. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/truthful_qa/truthful_qa.py +0 -0
  233. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +0 -0
  234. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/cache.py +0 -0
  235. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/cli/__init__.py +0 -0
  236. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/cli/base.py +0 -0
  237. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/cli/cli.py +0 -0
  238. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/cli/start_server.py +0 -0
  239. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/config.py +0 -0
  240. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/constants.py +0 -0
  241. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/evaluator/__init__.py +0 -0
  242. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/evaluator/evaluator.py +0 -0
  243. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/evaluator/rating_eval.py +0 -0
  244. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/evaluator/reviewer/__init__.py +0 -0
  245. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/evaluator/reviewer/auto_reviewer.py +0 -0
  246. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/metrics/__init__.py +0 -0
  247. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/metrics/bundled_rouge_score/__init__.py +0 -0
  248. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/metrics/bundled_rouge_score/rouge_scorer.py +0 -0
  249. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/metrics/code_metric.py +0 -0
  250. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/metrics/math_accuracy.py +0 -0
  251. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/metrics/metrics.py +0 -0
  252. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/models/__init__.py +0 -0
  253. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/models/api/__init__.py +0 -0
  254. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/models/api/openai_api.py +0 -0
  255. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/models/custom/__init__.py +0 -0
  256. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/models/custom/custom_model.py +0 -0
  257. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/models/dummy_chat_model.py +0 -0
  258. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/models/model.py +0 -0
  259. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/models/model_adapter.py +0 -0
  260. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/models/openai_model.py +0 -0
  261. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/models/template.py +0 -0
  262. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/perf/__init__.py +0 -0
  263. {evalscope-0.6.1/evalscope/perf/datasets → evalscope-0.7.1/evalscope/perf/utils}/__init__.py +0 -0
  264. {evalscope-0.6.1/evalscope/preprocess → evalscope-0.7.1/evalscope/registry}/__init__.py +0 -0
  265. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/registry/tasks/arc.yaml +0 -0
  266. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/registry/tasks/bbh.yaml +0 -0
  267. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/registry/tasks/bbh_mini.yaml +0 -0
  268. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/registry/tasks/ceval.yaml +0 -0
  269. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/registry/tasks/ceval_mini.yaml +0 -0
  270. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/registry/tasks/cmmlu.yaml +0 -0
  271. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/registry/tasks/eval_qwen-7b-chat_v100.yaml +0 -0
  272. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/registry/tasks/general_qa.yaml +0 -0
  273. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/registry/tasks/gsm8k.yaml +0 -0
  274. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/registry/tasks/mmlu.yaml +0 -0
  275. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/registry/tasks/mmlu_mini.yaml +0 -0
  276. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/run.py +0 -0
  277. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/run_arena.py +0 -0
  278. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/run_ms.py +0 -0
  279. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/summarizer.py +0 -0
  280. {evalscope-0.6.1/evalscope/registry → evalscope-0.7.1/evalscope/third_party}/__init__.py +0 -0
  281. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/third_party/longbench_write/__init__.py +0 -0
  282. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/third_party/longbench_write/eval.py +0 -0
  283. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/third_party/longbench_write/infer.py +0 -0
  284. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/third_party/longbench_write/longbench_write.py +0 -0
  285. {evalscope-0.6.1/evalscope/third_party → evalscope-0.7.1/evalscope/third_party/longbench_write/resources}/__init__.py +0 -0
  286. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/third_party/longbench_write/resources/judge.txt +0 -0
  287. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/third_party/longbench_write/resources/longbench_write.jsonl +0 -0
  288. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/third_party/longbench_write/resources/longbench_write_en.jsonl +0 -0
  289. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/third_party/longbench_write/resources/longwrite_ruler.jsonl +0 -0
  290. {evalscope-0.6.1/evalscope/third_party/longbench_write/resources → evalscope-0.7.1/evalscope/third_party/longbench_write/tools}/__init__.py +0 -0
  291. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/third_party/longbench_write/tools/data_etl.py +0 -0
  292. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/third_party/longbench_write/utils.py +0 -0
  293. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/third_party/toolbench_static/__init__.py +0 -0
  294. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/third_party/toolbench_static/eval.py +0 -0
  295. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/third_party/toolbench_static/infer.py +0 -0
  296. {evalscope-0.6.1/evalscope/third_party/longbench_write/tools → evalscope-0.7.1/evalscope/third_party/toolbench_static/llm}/__init__.py +0 -0
  297. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -0
  298. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/third_party/toolbench_static/toolbench_static.py +0 -0
  299. {evalscope-0.6.1/evalscope/third_party/toolbench_static/llm → evalscope-0.7.1/evalscope/tools}/__init__.py +0 -0
  300. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/tools/combine_reports.py +0 -0
  301. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/tools/gen_mmlu_subject_mapping.py +0 -0
  302. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/tools/rewrite_eval_results.py +0 -0
  303. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/utils/__init__.py +0 -0
  304. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/utils/arena_utils.py +0 -0
  305. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/utils/completion_parsers.py +0 -0
  306. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/utils/task_cfg_parser.py +0 -0
  307. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope/utils/task_utils.py +0 -0
  308. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope.egg-info/dependency_links.txt +0 -0
  309. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope.egg-info/entry_points.txt +0 -0
  310. {evalscope-0.6.1 → evalscope-0.7.1}/evalscope.egg-info/not-zip-safe +0 -0
  311. {evalscope-0.6.1/evalscope/tools → evalscope-0.7.1/tests}/__init__.py +0 -0
  312. {evalscope-0.6.1/evalscope/preprocess/tokenizers → evalscope-0.7.1/tests/rag}/__init__.py +0 -0
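
Entries 43–57 above show the perf module being reorganized into a `plugin` package, with a central registry (`evalscope/perf/plugin/registry.py`) and base classes for API plugins (`plugin/api/base.py`) and dataset plugins (`plugin/datasets/base.py`). A registry of this kind typically maps a string name to a class so the benchmark driver can be extended without touching core code. The sketch below is illustrative only; every name in it is hypothetical and not the actual 0.7.1 API.

```python
# Illustrative-only sketch of the plugin-registry pattern implied by the new
# evalscope/perf/plugin layout (registry.py plus api/ and datasets/ bases).
# All names here are hypothetical, not the actual 0.7.1 API.
from typing import Dict, Iterator, Type


class PluginRegistry:
    """Maps a string name to the class that implements the plugin."""

    def __init__(self) -> None:
        self._plugins: Dict[str, Type] = {}

    def register(self, name: str):
        # Usable as a decorator: @registry.register('openqa')
        def decorator(cls: Type) -> Type:
            self._plugins[name] = cls
            return cls
        return decorator

    def get(self, name: str) -> Type:
        return self._plugins[name]


dataset_registry = PluginRegistry()


@dataset_registry.register('line_by_line')
class LineByLineDataset:
    """Yields one prompt per non-empty line of a text file."""

    def __init__(self, path: str) -> None:
        self.path = path

    def __iter__(self) -> Iterator[str]:
        with open(self.path, encoding='utf-8') as f:
            for line in f:
                if line.strip():
                    yield line.strip()


# A benchmark driver can then resolve the plugin named on the command line:
dataset_cls = dataset_registry.get('line_by_line')
```

Keeping API formats and dataset readers behind such a registry is what lets entries 46–48 (custom, DashScope, OpenAI APIs) and 51–56 (custom, flickr8k, line-by-line, LongAlpaca, OpenQA, speed-benchmark datasets) ship as drop-in files.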
evalscope-0.7.1/LICENSE
@@ -0,0 +1,203 @@
+ Copyright 2022-2023 Alibaba ModelScope. All rights reserved.
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2022-2023 Alibaba ModelScope.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
evalscope-0.7.1/MANIFEST.in
@@ -0,0 +1,4 @@
+ include README.md
+ include requirements.txt
+ recursive-include evalscope *
+ recursive-include requirements *
{evalscope-0.6.1 → evalscope-0.7.1}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: evalscope
- Version: 0.6.1
+ Version: 0.7.1
  Summary: EvalScope: Lightweight LLMs Evaluation Framework
  Home-page: https://github.com/modelscope/evalscope
  Author: ModelScope team
@@ -15,12 +15,13 @@ Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Requires-Python: >=3.8
  Description-Content-Type: text/markdown
- Requires-Dist: torch
+ License-File: LICENSE
  Requires-Dist: absl-py
  Requires-Dist: accelerate
  Requires-Dist: cachetools
  Requires-Dist: datasets<=3.0.1,>=3.0.0
  Requires-Dist: editdistance
+ Requires-Dist: jieba
  Requires-Dist: jsonlines
  Requires-Dist: matplotlib
  Requires-Dist: modelscope[framework]
@@ -34,6 +35,7 @@ Requires-Dist: pyyaml
  Requires-Dist: regex
  Requires-Dist: requests
  Requires-Dist: requests-toolbelt
+ Requires-Dist: rouge-chinese
  Requires-Dist: rouge-score>=0.1.0
  Requires-Dist: sacrebleu
  Requires-Dist: scikit-learn
@@ -42,19 +44,25 @@ Requires-Dist: sentencepiece
  Requires-Dist: simple-ddl-parser
  Requires-Dist: tabulate
  Requires-Dist: tiktoken
+ Requires-Dist: torch
  Requires-Dist: tqdm
  Requires-Dist: transformers>=4.33
  Requires-Dist: transformers_stream_generator
- Requires-Dist: jieba
- Requires-Dist: rouge-chinese
  Provides-Extra: opencompass
- Requires-Dist: ms-opencompass>=0.1.3; extra == "opencompass"
+ Requires-Dist: ms-opencompass>=0.1.4; extra == "opencompass"
  Provides-Extra: vlmeval
- Requires-Dist: ms-vlmeval>=0.0.5; extra == "vlmeval"
+ Requires-Dist: ms-vlmeval>=0.0.9; extra == "vlmeval"
  Provides-Extra: rag
  Requires-Dist: mteb==1.19.4; extra == "rag"
  Requires-Dist: ragas==0.2.5; extra == "rag"
  Requires-Dist: webdataset>0.2.0; extra == "rag"
+ Provides-Extra: perf
+ Requires-Dist: aiohttp; extra == "perf"
+ Requires-Dist: fastapi; extra == "perf"
+ Requires-Dist: numpy; extra == "perf"
+ Requires-Dist: sse_starlette; extra == "perf"
+ Requires-Dist: transformers; extra == "perf"
+ Requires-Dist: unicorn; extra == "perf"
  Provides-Extra: inner
  Requires-Dist: absl-py; extra == "inner"
  Requires-Dist: accelerate; extra == "inner"
@@ -83,12 +91,12 @@ Requires-Dist: tqdm; extra == "inner"
  Requires-Dist: transformers<4.43,>=4.33; extra == "inner"
  Requires-Dist: transformers_stream_generator; extra == "inner"
  Provides-Extra: all
- Requires-Dist: torch; extra == "all"
  Requires-Dist: absl-py; extra == "all"
  Requires-Dist: accelerate; extra == "all"
  Requires-Dist: cachetools; extra == "all"
  Requires-Dist: datasets<=3.0.1,>=3.0.0; extra == "all"
  Requires-Dist: editdistance; extra == "all"
+ Requires-Dist: jieba; extra == "all"
  Requires-Dist: jsonlines; extra == "all"
  Requires-Dist: matplotlib; extra == "all"
  Requires-Dist: modelscope[framework]; extra == "all"
@@ -102,6 +110,7 @@ Requires-Dist: pyyaml; extra == "all"
  Requires-Dist: regex; extra == "all"
  Requires-Dist: requests; extra == "all"
  Requires-Dist: requests-toolbelt; extra == "all"
+ Requires-Dist: rouge-chinese; extra == "all"
  Requires-Dist: rouge-score>=0.1.0; extra == "all"
  Requires-Dist: sacrebleu; extra == "all"
  Requires-Dist: scikit-learn; extra == "all"
@@ -110,16 +119,21 @@ Requires-Dist: sentencepiece; extra == "all"
  Requires-Dist: simple-ddl-parser; extra == "all"
  Requires-Dist: tabulate; extra == "all"
  Requires-Dist: tiktoken; extra == "all"
+ Requires-Dist: torch; extra == "all"
  Requires-Dist: tqdm; extra == "all"
  Requires-Dist: transformers>=4.33; extra == "all"
  Requires-Dist: transformers_stream_generator; extra == "all"
- Requires-Dist: jieba; extra == "all"
- Requires-Dist: rouge-chinese; extra == "all"
- Requires-Dist: ms-opencompass>=0.1.3; extra == "all"
- Requires-Dist: ms-vlmeval>=0.0.5; extra == "all"
+ Requires-Dist: ms-opencompass>=0.1.4; extra == "all"
+ Requires-Dist: ms-vlmeval>=0.0.9; extra == "all"
  Requires-Dist: mteb==1.19.4; extra == "all"
  Requires-Dist: ragas==0.2.5; extra == "all"
  Requires-Dist: webdataset>0.2.0; extra == "all"
+ Requires-Dist: aiohttp; extra == "all"
+ Requires-Dist: fastapi; extra == "all"
+ Requires-Dist: numpy; extra == "all"
+ Requires-Dist: sse_starlette; extra == "all"
+ Requires-Dist: transformers; extra == "all"
+ Requires-Dist: unicorn; extra == "all"



@@ -130,14 +144,15 @@ Requires-Dist: webdataset>0.2.0; extra == "all"
  </p>

  <p align="center">
- <a href="https://badge.fury.io/py/evalscope"><img src="https://badge.fury.io/py/evalscope.svg" alt="PyPI version" height="18"></a>
- <a href="https://pypi.org/project/evalscope"><img alt="PyPI - Downloads" src="https://static.pepy.tech/badge/evalscope">
- </a>
- <a href='https://evalscope.readthedocs.io/en/latest/?badge=latest'>
- <img src='https://readthedocs.org/projects/evalscope-en/badge/?version=latest' alt='Documentation Status' />
- </a>
- <br>
- <a href="https://evalscope.readthedocs.io/en/latest/">📖 Documents</a>
+ <a href="https://badge.fury.io/py/evalscope"><img src="https://badge.fury.io/py/evalscope.svg" alt="PyPI version" height="18"></a>
+ <a href="https://pypi.org/project/evalscope"><img alt="PyPI - Downloads" src="https://static.pepy.tech/badge/evalscope">
+ </a>
+ <a href="https://github.com/modelscope/evalscope/pulls"><img src="https://img.shields.io/badge/PR-welcome-55EB99.svg"></a>
+ <a href='https://evalscope.readthedocs.io/en/latest/?badge=latest'>
+ <img src='https://readthedocs.org/projects/evalscope-en/badge/?version=latest' alt='Documentation Status' />
+ </a>
+ <br>
+ <a href="https://evalscope.readthedocs.io/en/latest/">📖 Documents</a>
  <p>

  > ⭐ If you like this project, please click the "Star" button at the top right to support us. Your support is our motivation to keep going!
@@ -178,6 +193,7 @@ The architecture includes the following modules:


  ## 🎉 News
+ - 🔥 **[2024.11.26]** The model inference service performance evaluator has been completely refactored: it now supports local inference service startup and Speed Benchmark; asynchronous call error handling has been optimized. For more details, refer to the [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/index.html).
  - 🔥 **[2024.10.31]** The best practice for evaluating Multimodal-RAG has been updated, please check the [📖 Blog](https://evalscope.readthedocs.io/zh-cn/latest/blog/RAG/multimodal_RAG.html#multimodal-rag) for more details.
  - 🔥 **[2024.10.23]** Supports multimodal RAG evaluation, including the assessment of image-text retrieval using [CLIP_Benchmark](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/clip_benchmark.html), and extends [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html) to support end-to-end multimodal metrics evaluation.
  - 🔥 **[2024.10.8]** Support for RAG evaluation, including independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html).
@@ -212,7 +228,9 @@ We recommend using conda to manage your environment and installing dependencies
  # Additional options
  pip install evalscope[opencompass] # Install OpenCompass backend
  pip install evalscope[vlmeval] # Install VLMEvalKit backend
- pip install evalscope[all] # Install all backends (Native, OpenCompass, VLMEvalKit)
+ pip install evalscope[rag] # Install RAGEval backend
+ pip install evalscope[perf] # Install Perf dependencies
+ pip install evalscope[all] # Install all backends (Native, OpenCompass, VLMEvalKit, RAGEval)
  ```

  > [!WARNING]
@@ -238,7 +256,9 @@ We recommend using conda to manage your environment and installing dependencies
  # Additional options
  pip install -e '.[opencompass]' # Install OpenCompass backend
  pip install -e '.[vlmeval]' # Install VLMEvalKit backend
- pip install -e '.[all]' # Install all backends (Native, OpenCompass, VLMEvalKit)
+ pip install -e '.[rag]' # Install RAGEval backend
+ pip install -e '.[perf]' # Install Perf dependencies
+ pip install -e '.[all]' # Install all backends (Native, OpenCompass, VLMEvalKit, RAGEval)
  ```


@@ -247,31 +267,47 @@ We recommend using conda to manage your environment and installing dependencies
  ### 1. Simple Evaluation
  To evaluate a model using default settings on specified datasets, follow the process below:

- #### Install using pip
- You can execute this command from any directory:
+ #### Installation using pip
+
+ You can execute this in any directory:
  ```bash
  python -m evalscope.run \
- --model qwen/Qwen2-0.5B-Instruct \
+ --model Qwen/Qwen2.5-0.5B-Instruct \
  --template-type qwen \
- --datasets arc
+ --datasets gsm8k ceval \
+ --limit 10
  ```

- #### Install from source
- Execute this command in the `evalscope` directory:
+ #### Installation from source
+
+ You need to execute this in the `evalscope` directory:
  ```bash
  python evalscope/run.py \
- --model qwen/Qwen2-0.5B-Instruct \
+ --model Qwen/Qwen2.5-0.5B-Instruct \
  --template-type qwen \
- --datasets arc
+ --datasets gsm8k ceval \
+ --limit 10
  ```

- If prompted with `Do you wish to run the custom code? [y/N]`, please type `y`.
+ > If prompted with `Do you wish to run the custom code? [y/N]`, please type `y`.
+
+ **Results (tested with only 10 samples)**
+ ```text
+ Report table:
+ +-----------------------+--------------------+-----------------+
+ | Model                 | ceval              | gsm8k           |
+ +=======================+====================+=================+
+ | Qwen2.5-0.5B-Instruct | (ceval/acc) 0.5577 | (gsm8k/acc) 0.5 |
+ +-----------------------+--------------------+-----------------+
+ ```


  #### Basic Parameter Descriptions
  - `--model`: Specifies the `model_id` of the model on [ModelScope](https://modelscope.cn/), allowing automatic download. For example, see the [Qwen2-0.5B-Instruct model link](https://modelscope.cn/models/qwen/Qwen2-0.5B-Instruct/summary); you can also use a local path, such as `/path/to/model`.
  - `--template-type`: Specifies the template type corresponding to the model. Refer to the `Default Template` field in the [template table](https://swift.readthedocs.io/en/latest/Instruction/Supported-models-datasets.html#llm) for filling in this field.
  - `--datasets`: The dataset name, allowing multiple datasets to be specified, separated by spaces; these datasets will be automatically downloaded. Refer to the [supported datasets list](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html) for available options.
+ - `--limit`: Maximum number of evaluation samples per dataset; if not specified, all will be evaluated, which is useful for quick validation.
+

  ### 2. Parameterized Evaluation
  If you wish to conduct a more customized evaluation, such as modifying model parameters or dataset parameters, you can use the following commands:
@@ -311,7 +347,7 @@ In addition to the three [basic parameters](#basic-parameter-descriptions), the
  - `--dataset-args`: Evaluation dataset configuration parameters, provided in JSON format, where the key is the dataset name and the value is the parameter; note that these must correspond one-to-one with the values in `--datasets`.
  - `--few_shot_num`: Number of few-shot examples.
  - `--few_shot_random`: Whether to randomly sample few-shot data; if not specified, defaults to `true`.
- - `--limit`: Maximum number of evaluation samples per dataset; if not specified, all will be evaluated, which is useful for quick validation.
+

  ### 3. Use the run_task Function to Submit an Evaluation Task
  Using the `run_task` function to submit an evaluation task requires the same parameters as the command line. You need to pass a dictionary as the parameter, which includes the following fields:
@@ -356,6 +392,32 @@ EvalScope supports using third-party evaluation frameworks to initiate evaluatio
  - **RAGEval**: Initiate RAG evaluation tasks through EvalScope, supporting independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html): [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/index.html)
  - **ThirdParty**: Third-party evaluation tasks, such as [ToolBench](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) and [LongBench-Write](https://evalscope.readthedocs.io/en/latest/third_party/longwriter.html).

+
+ ## Model Serving Performance Evaluation
+ A stress testing tool focused on large language models, which can be customized to support various dataset formats and different API protocol formats.
+
+ Reference: Performance Testing [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/index.html)
+
+ **Supports wandb for recording results**
+
+ ![wandb sample](https://modelscope.oss-cn-beijing.aliyuncs.com/resource/wandb_sample.png)
+
+ **Supports Speed Benchmark**
+
+ It supports speed testing and provides speed benchmarks similar to those found in the [official Qwen](https://qwen.readthedocs.io/en/latest/benchmark/speed_benchmark.html) reports:
+
+ ```text
+ Speed Benchmark Results:
+ +---------------+-----------------+----------------+
+ | Prompt Tokens | Speed(tokens/s) | GPU Memory(GB) |
+ +---------------+-----------------+----------------+
+ | 1             | 50.69           | 0.97           |
+ | 6144          | 51.36           | 1.23           |
+ | 14336         | 49.93           | 1.59           |
+ | 30720         | 49.56           | 2.34           |
+ +---------------+-----------------+----------------+
+ ```
+
  ## Custom Dataset Evaluation
  EvalScope supports custom dataset evaluation. For detailed information, please refer to the Custom Dataset Evaluation [📖User Guide](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset.html)

@@ -370,10 +432,6 @@ The Arena mode allows multiple candidate models to be evaluated through pairwise

  Refer to: Arena Mode [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/arena.html)

- ## Model Serving Performance Evaluation
- A stress testing tool that focuses on large language models and can be customized to support various data set formats and different API protocol formats.
-
- Refer to : Model Serving Performance Evaluation [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test.html)


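The quick-start text embedded in the PKG-INFO above mentions submitting an evaluation through the `run_task` function by passing a dictionary that mirrors the CLI flags. As a rough sketch of that flow (the key names below are patterned on the CLI parameters shown above and should be treated as assumptions, not a verified 0.7.1 reference):

```python
# Hedged sketch of the run_task flow described above; the config keys mirror
# the CLI flags (--model, --template-type, --datasets, --limit) and are
# assumptions rather than a verified 0.7.1 signature.
from evalscope.run import run_task

task_cfg = {
    'model': 'Qwen/Qwen2.5-0.5B-Instruct',  # ModelScope model_id or a local path
    'template_type': 'qwen',                # chat template, as with --template-type
    'datasets': ['gsm8k', 'ceval'],         # same names accepted by --datasets
    'limit': 10,                            # cap samples per dataset for a quick check
}

run_task(task_cfg=task_cfg)
```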
@@ -7,14 +7,15 @@
  </p>
 
  <p align="center">
- <a href="https://badge.fury.io/py/evalscope"><img src="https://badge.fury.io/py/evalscope.svg" alt="PyPI version" height="18"></a>
- <a href="https://pypi.org/project/evalscope"><img alt="PyPI - Downloads" src="https://static.pepy.tech/badge/evalscope">
- </a>
- <a href='https://evalscope.readthedocs.io/en/latest/?badge=latest'>
- <img src='https://readthedocs.org/projects/evalscope-en/badge/?version=latest' alt='Documentation Status' />
- </a>
- <br>
- <a href="https://evalscope.readthedocs.io/en/latest/">📖 Documents</a>
+ <a href="https://badge.fury.io/py/evalscope"><img src="https://badge.fury.io/py/evalscope.svg" alt="PyPI version" height="18"></a>
+ <a href="https://pypi.org/project/evalscope"><img alt="PyPI - Downloads" src="https://static.pepy.tech/badge/evalscope">
+ </a>
+ <a href="https://github.com/modelscope/evalscope/pulls"><img src="https://img.shields.io/badge/PR-welcome-55EB99.svg"></a>
+ <a href='https://evalscope.readthedocs.io/en/latest/?badge=latest'>
+ <img src='https://readthedocs.org/projects/evalscope-en/badge/?version=latest' alt='Documentation Status' />
+ </a>
+ <br>
+ <a href="https://evalscope.readthedocs.io/en/latest/">📖 Documents</a>
  <p>
 
  > ⭐ If you like this project, please click the "Star" button at the top right to support us. Your support is our motivation to keep going!
@@ -55,6 +56,7 @@ The architecture includes the following modules:
 
 
  ## 🎉 News
+ - 🔥 **[2024.11.26]** The model inference service performance evaluator has been completely refactored: it now supports local inference service startup and Speed Benchmark; asynchronous call error handling has been optimized. For more details, refer to the [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/index.html).
  - 🔥 **[2024.10.31]** The best practice for evaluating Multimodal-RAG has been updated, please check the [📖 Blog](https://evalscope.readthedocs.io/zh-cn/latest/blog/RAG/multimodal_RAG.html#multimodal-rag) for more details.
  - 🔥 **[2024.10.23]** Supports multimodal RAG evaluation, including the assessment of image-text retrieval using [CLIP_Benchmark](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/clip_benchmark.html), and extends [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html) to support end-to-end multimodal metrics evaluation.
  - 🔥 **[2024.10.8]** Support for RAG evaluation, including independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html).
@@ -89,7 +91,9 @@ We recommend using conda to manage your environment and installing dependencies
89
91
  # Additional options
90
92
  pip install evalscope[opencompass] # Install OpenCompass backend
91
93
  pip install evalscope[vlmeval] # Install VLMEvalKit backend
92
- pip install evalscope[all] # Install all backends (Native, OpenCompass, VLMEvalKit)
94
+ pip install evalscope[rag] # Install RAGEval backend
95
+ pip install evalscope[perf] # Install Perf dependencies
96
+ pip install evalscope[all] # Install all backends (Native, OpenCompass, VLMEvalKit, RAGEval)
93
97
  ```
94
98
 
95
99
  > [!WARNING]
@@ -115,7 +119,9 @@ We recommend using conda to manage your environment and installing dependencies
  # Additional options
  pip install -e '.[opencompass]'   # Install OpenCompass backend
  pip install -e '.[vlmeval]'       # Install VLMEvalKit backend
- pip install -e '.[all]'           # Install all backends (Native, OpenCompass, VLMEvalKit)
+ pip install -e '.[rag]'           # Install RAGEval backend
+ pip install -e '.[perf]'          # Install Perf dependencies
+ pip install -e '.[all]'           # Install all backends (Native, OpenCompass, VLMEvalKit, RAGEval)
  ```
 
 
@@ -124,31 +130,47 @@ We recommend using conda to manage your environment and installing dependencies
  ### 1. Simple Evaluation
  To evaluate a model using default settings on specified datasets, follow the process below:
 
- #### Install using pip
- You can execute this command from any directory:
+ #### Installation using pip
+
+ You can execute this in any directory:
  ```bash
  python -m evalscope.run \
-  --model qwen/Qwen2-0.5B-Instruct \
+  --model Qwen/Qwen2.5-0.5B-Instruct \
   --template-type qwen \
-  --datasets arc
+  --datasets gsm8k ceval \
+  --limit 10
  ```
 
- #### Install from source
- Execute this command in the `evalscope` directory:
+ #### Installation from source
+
+ You need to execute this in the `evalscope` directory:
  ```bash
  python evalscope/run.py \
-  --model qwen/Qwen2-0.5B-Instruct \
+  --model Qwen/Qwen2.5-0.5B-Instruct \
   --template-type qwen \
-  --datasets arc
+  --datasets gsm8k ceval \
+  --limit 10
  ```
 
- If prompted with `Do you wish to run the custom code? [y/N]`, please type `y`.
+ > If prompted with `Do you wish to run the custom code? [y/N]`, please type `y`.
+
+ **Results (tested with only 10 samples)**
+ ```text
+ Report table:
+ +-----------------------+--------------------+-----------------+
+ | Model                 | ceval              | gsm8k           |
+ +=======================+====================+=================+
+ | Qwen2.5-0.5B-Instruct | (ceval/acc) 0.5577 | (gsm8k/acc) 0.5 |
+ +-----------------------+--------------------+-----------------+
+ ```
 
 
  #### Basic Parameter Descriptions
  - `--model`: Specifies the `model_id` of the model on [ModelScope](https://modelscope.cn/), allowing automatic download. For example, see the [Qwen2-0.5B-Instruct model link](https://modelscope.cn/models/qwen/Qwen2-0.5B-Instruct/summary); you can also use a local path, such as `/path/to/model`.
  - `--template-type`: Specifies the template type corresponding to the model. Refer to the `Default Template` field in the [template table](https://swift.readthedocs.io/en/latest/Instruction/Supported-models-datasets.html#llm) when filling in this field.
  - `--datasets`: The dataset name, allowing multiple datasets to be specified, separated by spaces; these datasets will be automatically downloaded. Refer to the [supported datasets list](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html) for available options.
+ - `--limit`: Maximum number of evaluation samples per dataset; if not specified, all samples are evaluated. Setting a small value is useful for quick validation.
+
 
  ### 2. Parameterized Evaluation
  If you wish to conduct a more customized evaluation, such as modifying model parameters or dataset parameters, you can use the following commands:
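 
  For instance, a customized run might look like the following sketch (the `--model-args` values are illustrative; see the parameter descriptions below):
 
  ```bash
  # Sketch: customized evaluation with model arguments (illustrative values)
  python evalscope/run.py \
   --model Qwen/Qwen2.5-0.5B-Instruct \
   --template-type qwen \
   --model-args revision=master,precision=torch.float16,device_map=auto \
   --datasets gsm8k ceval \
   --limit 10
  ```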
@@ -188,7 +210,7 @@ In addition to the three [basic parameters](#basic-parameter-descriptions), the
  - `--dataset-args`: Evaluation dataset configuration parameters, provided in JSON format, where the key is the dataset name and the value is the parameter; note that these must correspond one-to-one with the values in `--datasets` (see the sketch below).
  - `--few_shot_num`: Number of few-shot examples.
  - `--few_shot_random`: Whether to randomly sample few-shot data; if not specified, defaults to `true`.
- - `--limit`: Maximum number of evaluation samples per dataset; if not specified, all will be evaluated, which is useful for quick validation.
+
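 
  For example, disabling few-shot sampling for `ceval` via `--dataset-args` might look like the following sketch (illustrative values; the nested keys mirror the few-shot parameters above):
 
  ```bash
  # Sketch: per-dataset configuration via --dataset-args (illustrative values)
  python evalscope/run.py \
   --model Qwen/Qwen2.5-0.5B-Instruct \
   --template-type qwen \
   --datasets ceval \
   --dataset-args '{"ceval": {"few_shot_num": 0, "few_shot_random": false}}' \
   --limit 10
  ```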
 
  ### 3. Use the run_task Function to Submit an Evaluation Task
  Using the `run_task` function to submit an evaluation task requires the same parameters as the command line. You need to pass a dictionary as the parameter, which includes the following fields:
@@ -233,6 +255,32 @@ EvalScope supports using third-party evaluation frameworks to initiate evaluatio
  - **RAGEval**: Initiate RAG evaluation tasks through EvalScope, supporting independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html): [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/index.html)
  - **ThirdParty**: Third-party evaluation tasks, such as [ToolBench](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) and [LongBench-Write](https://evalscope.readthedocs.io/en/latest/third_party/longwriter.html).
 
+
+ ## Model Serving Performance Evaluation
+ A stress-testing tool focused on large language models, customizable to support various dataset formats and API protocols.
+
+ Reference: Performance Testing [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/index.html)
+
+ **Supports wandb for recording results**
+
+ ![wandb sample](https://modelscope.oss-cn-beijing.aliyuncs.com/resource/wandb_sample.png)
+
+ **Supports Speed Benchmark**
+
+ It supports speed testing and provides speed benchmarks similar to those found in the [official Qwen](https://qwen.readthedocs.io/en/latest/benchmark/speed_benchmark.html) reports:
+
+ ```text
+ Speed Benchmark Results:
+ +---------------+-----------------+----------------+
+ | Prompt Tokens | Speed(tokens/s) | GPU Memory(GB) |
+ +---------------+-----------------+----------------+
+ |       1       |      50.69      |      0.97      |
+ |      6144     |      51.36      |      1.23      |
+ |     14336     |      49.93      |      1.59      |
+ |     30720     |      49.56      |      2.34      |
+ +---------------+-----------------+----------------+
+ ```
+
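+ A table like the one above might be produced with an invocation along these lines (a sketch: the `--dataset speed_benchmark` option and the other flag spellings are assumptions to verify against the user guide):
+
+ ```bash
+ # Sketch: run the Speed Benchmark against a local OpenAI-compatible service
+ # URL, model name, and flag spellings are illustrative
+ evalscope perf \
+   --url "http://127.0.0.1:8000/v1/completions" \
+   --api openai \
+   --model qwen2.5 \
+   --parallel 1 \
+   --dataset speed_benchmark
+ ```
+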
  ## Custom Dataset Evaluation
  EvalScope supports custom dataset evaluation. For detailed information, please refer to the Custom Dataset Evaluation [📖User Guide](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset.html)
 
@@ -247,10 +295,6 @@ The Arena mode allows multiple candidate models to be evaluated through pairwise
 
  Refer to: Arena Mode [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/arena.html)
 
- ## Model Serving Performance Evaluation
- A stress testing tool that focuses on large language models and can be customized to support various data set formats and different API protocol formats.
-
- Refer to : Model Serving Performance Evaluation [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test.html)
 
 
 
@@ -6,7 +6,8 @@ from opencompass.tasks import OpenICLInferTask
 
 
  with read_base():
-     from opencompass.configs.summarizers.medium import summarizer
+     # from opencompass.configs.summarizers.medium import summarizer
+     # from opencompass.configs.summarizers.PMMEval import summarizer
      from evalscope.backend.opencompass.tasks.eval_datasets import datasets
 
  # 1. Get datasets