evalscope 0.5.3__tar.gz → 0.5.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (204)
  1. {evalscope-0.5.3 → evalscope-0.5.5}/PKG-INFO +46 -60
  2. {evalscope-0.5.3 → evalscope-0.5.5}/README.md +30 -51
  3. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/backend/opencompass/backend_manager.py +2 -0
  4. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/backend/opencompass/tasks/eval_datasets.py +2 -2
  5. evalscope-0.5.5/evalscope/backend/rag_eval/__init__.py +3 -0
  6. evalscope-0.5.5/evalscope/backend/rag_eval/backend_manager.py +68 -0
  7. evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/__init__.py +4 -0
  8. evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/arguments.py +59 -0
  9. evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/base.py +89 -0
  10. evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/task_template.py +83 -0
  11. evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/tasks/Classification.py +302 -0
  12. evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/tasks/Clustering.py +252 -0
  13. evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/tasks/PairClassification.py +113 -0
  14. evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/tasks/Reranking.py +153 -0
  15. evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/tasks/Retrieval.py +345 -0
  16. evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/tasks/STS.py +302 -0
  17. evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/tasks/__init__.py +64 -0
  18. evalscope-0.5.5/evalscope/backend/rag_eval/ragas/__init__.py +2 -0
  19. evalscope-0.5.5/evalscope/backend/rag_eval/ragas/arguments.py +37 -0
  20. evalscope-0.5.5/evalscope/backend/rag_eval/ragas/task_template.py +117 -0
  21. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/backend/vlm_eval_kit/backend_manager.py +1 -2
  22. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/backend/vlm_eval_kit/custom_dataset.py +1 -1
  23. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/benchmark.py +1 -1
  24. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/evaluator/evaluator.py +4 -3
  25. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/metrics/bundled_rouge_score/rouge_scorer.py +19 -0
  26. evalscope-0.5.5/evalscope/models/api/__init__.py +3 -0
  27. evalscope-0.5.5/evalscope/models/api/openai_api.py +228 -0
  28. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/perf/http_client.py +5 -5
  29. evalscope-0.5.5/evalscope/preprocess/tokenizers/__init__.py +0 -0
  30. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/run.py +4 -0
  31. evalscope-0.5.5/evalscope/third_party/longbench_write/__init__.py +3 -0
  32. evalscope-0.5.5/evalscope/third_party/longbench_write/eval.py +284 -0
  33. evalscope-0.5.5/evalscope/third_party/longbench_write/infer.py +217 -0
  34. evalscope-0.5.5/evalscope/third_party/longbench_write/longbench_write.py +88 -0
  35. evalscope-0.5.5/evalscope/third_party/longbench_write/resources/judge.txt +31 -0
  36. evalscope-0.5.5/evalscope/third_party/longbench_write/resources/longbench_write.jsonl +120 -0
  37. evalscope-0.5.5/evalscope/third_party/longbench_write/resources/longbench_write_en.jsonl +60 -0
  38. evalscope-0.5.5/evalscope/third_party/longbench_write/resources/longwrite_ruler.jsonl +48 -0
  39. evalscope-0.5.5/evalscope/third_party/longbench_write/tools/data_etl.py +155 -0
  40. evalscope-0.5.5/evalscope/third_party/longbench_write/utils.py +37 -0
  41. evalscope-0.5.5/evalscope/third_party/toolbench_static/llm/__init__.py +1 -0
  42. evalscope-0.5.5/evalscope/tools/__init__.py +1 -0
  43. evalscope-0.5.5/evalscope/utils/logger.py +94 -0
  44. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/utils/task_utils.py +3 -0
  45. evalscope-0.5.5/evalscope/version.py +4 -0
  46. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope.egg-info/PKG-INFO +46 -60
  47. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope.egg-info/SOURCES.txt +30 -0
  48. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope.egg-info/requires.txt +16 -8
  49. evalscope-0.5.3/evalscope/backend/opencompass/__init__.py +0 -3
  50. evalscope-0.5.3/evalscope/utils/logger.py +0 -64
  51. evalscope-0.5.3/evalscope/version.py +0 -4
  52. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/__init__.py +0 -0
  53. {evalscope-0.5.3/evalscope/perf → evalscope-0.5.5/evalscope/backend}/__init__.py +0 -0
  54. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/backend/base.py +0 -0
  55. {evalscope-0.5.3/evalscope/backend → evalscope-0.5.5/evalscope/backend/opencompass}/__init__.py +0 -0
  56. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/backend/opencompass/api_meta_template.py +0 -0
  57. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/backend/opencompass/tasks/__init__.py +0 -0
  58. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/backend/opencompass/tasks/eval_api.py +0 -0
  59. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/backend/vlm_eval_kit/__init__.py +0 -0
  60. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/__init__.py +0 -0
  61. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/arc/__init__.py +0 -0
  62. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/arc/ai2_arc.py +0 -0
  63. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/arc/arc_adapter.py +0 -0
  64. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/__init__.py +0 -0
  65. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/bbh_adapter.py +0 -0
  66. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/boolean_expressions.txt +0 -0
  67. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/causal_judgement.txt +0 -0
  68. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/date_understanding.txt +0 -0
  69. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/disambiguation_qa.txt +0 -0
  70. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/dyck_languages.txt +0 -0
  71. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/formal_fallacies.txt +0 -0
  72. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/geometric_shapes.txt +0 -0
  73. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/hyperbaton.txt +0 -0
  74. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_five_objects.txt +0 -0
  75. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_seven_objects.txt +0 -0
  76. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_three_objects.txt +0 -0
  77. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/movie_recommendation.txt +0 -0
  78. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/multistep_arithmetic_two.txt +0 -0
  79. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/navigate.txt +0 -0
  80. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/object_counting.txt +0 -0
  81. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/penguins_in_a_table.txt +0 -0
  82. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/reasoning_about_colored_objects.txt +0 -0
  83. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/ruin_names.txt +0 -0
  84. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/salient_translation_error_detection.txt +0 -0
  85. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/snarks.txt +0 -0
  86. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/sports_understanding.txt +0 -0
  87. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/temporal_sequences.txt +0 -0
  88. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_five_objects.txt +0 -0
  89. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_seven_objects.txt +0 -0
  90. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_three_objects.txt +0 -0
  91. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/web_of_lies.txt +0 -0
  92. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/word_sorting.txt +0 -0
  93. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/ceval/__init__.py +0 -0
  94. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/ceval/ceval_adapter.py +0 -0
  95. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/ceval/ceval_exam.py +0 -0
  96. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/cmmlu/__init__.py +0 -0
  97. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/cmmlu/cmmlu.py +0 -0
  98. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/cmmlu/cmmlu_adapter.py +0 -0
  99. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/competition_math/__init__.py +0 -0
  100. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/competition_math/competition_math.py +0 -0
  101. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/competition_math/competition_math_adapter.py +0 -0
  102. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/data_adapter.py +0 -0
  103. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/general_qa/__init__.py +0 -0
  104. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/general_qa/general_qa_adapter.py +0 -0
  105. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/gsm8k/__init__.py +0 -0
  106. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/gsm8k/gsm8k.py +0 -0
  107. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/gsm8k/gsm8k_adapter.py +0 -0
  108. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/hellaswag/__init__.py +0 -0
  109. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/hellaswag/hellaswag.py +0 -0
  110. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/hellaswag/hellaswag_adapter.py +0 -0
  111. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/humaneval/__init__.py +0 -0
  112. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/humaneval/humaneval.py +0 -0
  113. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/humaneval/humaneval_adapter.py +0 -0
  114. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/mmlu/__init__.py +0 -0
  115. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/mmlu/mmlu.py +0 -0
  116. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/mmlu/mmlu_adapter.py +0 -0
  117. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/race/__init__.py +0 -0
  118. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/race/race.py +0 -0
  119. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/race/race_adapter.py +0 -0
  120. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/trivia_qa/__init__.py +0 -0
  121. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/trivia_qa/trivia_qa.py +0 -0
  122. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +0 -0
  123. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/truthful_qa/__init__.py +0 -0
  124. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/truthful_qa/truthful_qa.py +0 -0
  125. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +0 -0
  126. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/cache.py +0 -0
  127. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/cli/__init__.py +0 -0
  128. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/cli/base.py +0 -0
  129. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/cli/cli.py +0 -0
  130. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/cli/start_perf.py +0 -0
  131. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/cli/start_server.py +0 -0
  132. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/config.py +0 -0
  133. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/constants.py +0 -0
  134. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/evaluator/__init__.py +0 -0
  135. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/evaluator/rating_eval.py +0 -0
  136. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/evaluator/reviewer/__init__.py +0 -0
  137. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/evaluator/reviewer/auto_reviewer.py +0 -0
  138. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/metrics/__init__.py +0 -0
  139. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/metrics/bundled_rouge_score/__init__.py +0 -0
  140. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/metrics/code_metric.py +0 -0
  141. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/metrics/math_accuracy.py +0 -0
  142. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/metrics/metrics.py +0 -0
  143. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/metrics/rouge_metric.py +0 -0
  144. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/models/__init__.py +0 -0
  145. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/models/custom/__init__.py +0 -0
  146. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/models/custom/custom_model.py +0 -0
  147. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/models/dummy_chat_model.py +0 -0
  148. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/models/model.py +0 -0
  149. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/models/model_adapter.py +0 -0
  150. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/models/openai_model.py +0 -0
  151. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/models/template.py +0 -0
  152. {evalscope-0.5.3/evalscope/perf/datasets → evalscope-0.5.5/evalscope/perf}/__init__.py +0 -0
  153. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/perf/_logging.py +0 -0
  154. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/perf/api_plugin_base.py +0 -0
  155. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/perf/custom_api.py +0 -0
  156. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/perf/dashscope_api.py +0 -0
  157. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/perf/dataset_plugin_base.py +0 -0
  158. {evalscope-0.5.3/evalscope/preprocess/tokenizers → evalscope-0.5.5/evalscope/perf/datasets}/__init__.py +0 -0
  159. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/perf/datasets/line_by_line.py +0 -0
  160. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/perf/datasets/longalpaca_12k.py +0 -0
  161. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/perf/datasets/openqa.py +0 -0
  162. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/perf/how_to_analysis_result.py +0 -0
  163. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/perf/openai_api.py +0 -0
  164. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/perf/plugin_registry.py +0 -0
  165. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/perf/query_parameters.py +0 -0
  166. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/perf/server_sent_event.py +0 -0
  167. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/preprocess/__init__.py +0 -0
  168. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/preprocess/tokenizers/gpt2_tokenizer.py +0 -0
  169. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/registry/__init__.py +0 -0
  170. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/registry/tasks/arc.yaml +0 -0
  171. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/registry/tasks/bbh.yaml +0 -0
  172. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/registry/tasks/bbh_mini.yaml +0 -0
  173. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/registry/tasks/ceval.yaml +0 -0
  174. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/registry/tasks/ceval_mini.yaml +0 -0
  175. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/registry/tasks/cmmlu.yaml +0 -0
  176. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/registry/tasks/eval_qwen-7b-chat_v100.yaml +0 -0
  177. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/registry/tasks/general_qa.yaml +0 -0
  178. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/registry/tasks/gsm8k.yaml +0 -0
  179. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/registry/tasks/mmlu.yaml +0 -0
  180. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/registry/tasks/mmlu_mini.yaml +0 -0
  181. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/run_arena.py +0 -0
  182. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/run_ms.py +0 -0
  183. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/summarizer.py +0 -0
  184. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/third_party/__init__.py +0 -0
  185. {evalscope-0.5.3/evalscope/third_party/toolbench_static/llm → evalscope-0.5.5/evalscope/third_party/longbench_write/resources}/__init__.py +0 -0
  186. {evalscope-0.5.3/evalscope → evalscope-0.5.5/evalscope/third_party/longbench_write}/tools/__init__.py +0 -0
  187. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/third_party/toolbench_static/__init__.py +0 -0
  188. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/third_party/toolbench_static/eval.py +0 -0
  189. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/third_party/toolbench_static/infer.py +0 -0
  190. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -0
  191. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/third_party/toolbench_static/toolbench_static.py +0 -0
  192. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/tools/combine_reports.py +0 -0
  193. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/tools/gen_mmlu_subject_mapping.py +0 -0
  194. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/tools/rewrite_eval_results.py +0 -0
  195. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/utils/__init__.py +0 -0
  196. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/utils/arena_utils.py +0 -0
  197. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/utils/completion_parsers.py +0 -0
  198. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/utils/task_cfg_parser.py +0 -0
  199. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/utils/utils.py +0 -0
  200. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope.egg-info/dependency_links.txt +0 -0
  201. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope.egg-info/entry_points.txt +0 -0
  202. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope.egg-info/not-zip-safe +0 -0
  203. {evalscope-0.5.3 → evalscope-0.5.5}/evalscope.egg-info/top_level.txt +0 -0
  204. {evalscope-0.5.3 → evalscope-0.5.5}/setup.cfg +0 -0
File: {evalscope-0.5.3 → evalscope-0.5.5}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: evalscope
- Version: 0.5.3
+ Version: 0.5.5
  Summary: EvalScope: Lightweight LLMs Evaluation Framework
  Home-page: https://github.com/modelscope/evalscope
  Author: ModelScope team
@@ -19,11 +19,12 @@ Requires-Dist: torch
  Requires-Dist: absl-py
  Requires-Dist: accelerate
  Requires-Dist: cachetools
+ Requires-Dist: datasets<3.0.0,>=2.18.0
  Requires-Dist: editdistance
  Requires-Dist: jsonlines
  Requires-Dist: matplotlib
  Requires-Dist: modelscope[framework]
- Requires-Dist: nltk
+ Requires-Dist: nltk>=3.9
  Requires-Dist: openai
  Requires-Dist: pandas
  Requires-Dist: plotly
@@ -33,7 +34,7 @@ Requires-Dist: pyyaml
  Requires-Dist: regex
  Requires-Dist: requests
  Requires-Dist: requests-toolbelt
- Requires-Dist: rouge-score
+ Requires-Dist: rouge-score>=0.1.0
  Requires-Dist: sacrebleu
  Requires-Dist: scikit-learn
  Requires-Dist: seaborn
@@ -42,14 +43,17 @@ Requires-Dist: simple-ddl-parser
  Requires-Dist: tabulate
  Requires-Dist: tiktoken
  Requires-Dist: tqdm
- Requires-Dist: transformers<4.43,>=4.33
+ Requires-Dist: transformers>=4.33
  Requires-Dist: transformers_stream_generator
  Requires-Dist: jieba
  Requires-Dist: rouge-chinese
  Provides-Extra: opencompass
- Requires-Dist: ms-opencompass>=0.1.0; extra == "opencompass"
+ Requires-Dist: ms-opencompass>=0.1.1; extra == "opencompass"
  Provides-Extra: vlmeval
  Requires-Dist: ms-vlmeval>=0.0.5; extra == "vlmeval"
+ Provides-Extra: rag
+ Requires-Dist: ragas; extra == "rag"
+ Requires-Dist: mteb>=0.14.16; extra == "rag"
  Provides-Extra: inner
  Requires-Dist: absl-py; extra == "inner"
  Requires-Dist: accelerate; extra == "inner"
@@ -82,11 +86,12 @@ Requires-Dist: torch; extra == "all"
  Requires-Dist: absl-py; extra == "all"
  Requires-Dist: accelerate; extra == "all"
  Requires-Dist: cachetools; extra == "all"
+ Requires-Dist: datasets<3.0.0,>=2.18.0; extra == "all"
  Requires-Dist: editdistance; extra == "all"
  Requires-Dist: jsonlines; extra == "all"
  Requires-Dist: matplotlib; extra == "all"
  Requires-Dist: modelscope[framework]; extra == "all"
- Requires-Dist: nltk; extra == "all"
+ Requires-Dist: nltk>=3.9; extra == "all"
  Requires-Dist: openai; extra == "all"
  Requires-Dist: pandas; extra == "all"
  Requires-Dist: plotly; extra == "all"
@@ -96,7 +101,7 @@ Requires-Dist: pyyaml; extra == "all"
  Requires-Dist: regex; extra == "all"
  Requires-Dist: requests; extra == "all"
  Requires-Dist: requests-toolbelt; extra == "all"
- Requires-Dist: rouge-score; extra == "all"
+ Requires-Dist: rouge-score>=0.1.0; extra == "all"
  Requires-Dist: sacrebleu; extra == "all"
  Requires-Dist: scikit-learn; extra == "all"
  Requires-Dist: seaborn; extra == "all"
@@ -105,12 +110,14 @@ Requires-Dist: simple-ddl-parser; extra == "all"
  Requires-Dist: tabulate; extra == "all"
  Requires-Dist: tiktoken; extra == "all"
  Requires-Dist: tqdm; extra == "all"
- Requires-Dist: transformers<4.43,>=4.33; extra == "all"
+ Requires-Dist: transformers>=4.33; extra == "all"
  Requires-Dist: transformers_stream_generator; extra == "all"
  Requires-Dist: jieba; extra == "all"
  Requires-Dist: rouge-chinese; extra == "all"
- Requires-Dist: ms-opencompass>=0.1.0; extra == "all"
+ Requires-Dist: ms-opencompass>=0.1.1; extra == "all"
  Requires-Dist: ms-vlmeval>=0.0.5; extra == "all"
+ Requires-Dist: ragas; extra == "all"
+ Requires-Dist: mteb>=0.14.16; extra == "all"

  English | [简体中文](README_zh.md)

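Note: the dependency changes above add a new `rag` extra (pulling in `ragas` and `mteb>=0.14.16`) alongside the existing `opencompass`, `vlmeval`, and `all` extras. A minimal install sketch using standard pip extras syntax (the version pin is shown for illustration):

    # base package
    pip install evalscope==0.5.5
    # with the new RAG-evaluation dependencies
    pip install "evalscope[rag]==0.5.5"
    # everything, including the opencompass and vlmeval backends
    pip install "evalscope[all]==0.5.5"
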
@@ -143,28 +150,11 @@ English | [简体中文](README_zh.md)

  ## 📝 Introduction

- Large Model (including Large Language Models, Multi-modal Large Language Models) evaluation has become a critical process for assessing and improving LLMs. To better support the evaluation of large models, we propose the EvalScope framework.
-
- ### Framework Features
- - **Benchmark Datasets**: Preloaded with several commonly used test benchmarks, including MMLU, CMMLU, C-Eval, GSM8K, ARC, HellaSwag, TruthfulQA, MATH, HumanEval, etc.
- - **Evaluation Metrics**: Implements various commonly used evaluation metrics.
- - **Model Access**: A unified model access mechanism that is compatible with the Generate and Chat interfaces of multiple model families.
- - **Automated Evaluation**: Includes automatic evaluation of objective questions and complex task evaluation using expert models.
- - **Evaluation Reports**: Automatically generates evaluation reports.
- - **Arena Mode**: Used for comparisons between models and objective evaluation of models, supporting various evaluation modes, including:
-   - **Single mode**: Scoring a single model.
-   - **Pairwise-baseline mode**: Comparing against a baseline model.
-   - **Pairwise (all) mode**: Pairwise comparison among all models.
- - **Visualization Tools**: Provides intuitive displays of evaluation results.
- - **Model Performance Evaluation**: Offers a performance testing tool for model inference services and detailed statistics, see [Model Performance Evaluation Documentation](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test.html).
- - **OpenCompass Integration**: Supports OpenCompass as the evaluation backend, providing advanced encapsulation and task simplification, allowing for easier task submission for evaluation.
- - **VLMEvalKit Integration**: Supports VLMEvalKit as the evaluation backend, facilitating the initiation of multi-modal evaluation tasks, supporting various multi-modal models and datasets.
- - **Full-Link Support**: Through seamless integration with the [ms-swift](https://github.com/modelscope/ms-swift) training framework, provides a one-stop development process for model training, model deployment, model evaluation, and report viewing, enhancing user development efficiency.
-
- ### Overall Architecture
+ EvalScope is the official model evaluation and performance benchmarking framework launched by the [ModelScope](https://modelscope.cn/) community. It comes with built-in common benchmarks and evaluation metrics, such as MMLU, CMMLU, C-Eval, GSM8K, ARC, HellaSwag, TruthfulQA, MATH, and HumanEval. EvalScope supports various types of model evaluations, including LLMs, multimodal LLMs, embedding models, and reranker models. It is also applicable to multiple evaluation scenarios, such as end-to-end RAG evaluation, arena mode, and model inference performance stress testing. Moreover, with the seamless integration of the ms-swift training framework, evaluations can be initiated with a single click, providing full end-to-end support from model training to evaluation 🚀
+
  <p align="center">
    <img src="docs/en/_static/images/evalscope_framework.png" width="70%">
- <br>Fig 1. EvalScope Framework.
+ <br>EvalScope Framework.
  </p>

  The architecture includes the following modules:
@@ -174,18 +164,25 @@ The architecture includes the following modules:
  - **Native**: EvalScope’s own **default evaluation framework**, supporting various evaluation modes, including single model evaluation, arena mode, baseline model comparison mode, etc.
  - **OpenCompass**: Supports [OpenCompass](https://github.com/open-compass/opencompass) as the evaluation backend, providing advanced encapsulation and task simplification, allowing you to submit tasks for evaluation more easily.
  - **VLMEvalKit**: Supports [VLMEvalKit](https://github.com/open-compass/VLMEvalKit) as the evaluation backend, enabling easy initiation of multi-modal evaluation tasks, supporting various multi-modal models and datasets.
+ - **RAGEval**: Supports RAG evaluation, supporting independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html).
  - **ThirdParty**: Other third-party evaluation tasks, such as ToolBench.
  4. **Performance Evaluator**: Model performance evaluation, responsible for measuring model inference service performance, including performance testing, stress testing, performance report generation, and visualization.
  5. **Evaluation Report**: The final generated evaluation report summarizes the model's performance, which can be used for decision-making and further model optimization.
  6. **Visualization**: Visualization results help users intuitively understand evaluation results, facilitating analysis and comparison of different model performances.

+
  ## 🎉 News
- - **[2024.08.09]** Simplified installation process, supporting PyPI installation for vlmeval dependencies; Optimized multi-modal models evaluation experience with pipeline that based on OpenAI API, achieving up to 10x acceleration 🚀🚀🚀
- - **[2024.07.31]** Breaking change: The sdk name has been changed from `llmuses` to `evalscope`, please update the sdk name in your code.
- - **[2024.07.26]** Supports **VLMEvalKit** as a third-party evaluation framework, initiating multimodal model evaluation tasks. 🔥🔥🔥
- - **[2024.06.29]** Supports **OpenCompass** as a third-party evaluation framework. We have provided a high-level wrapper, supporting installation via pip and simplifying the evaluation task configuration. 🔥🔥🔥
- - **[2024.06.13]** EvalScope has been updated to version 0.3.x, which supports the ModelScope SWIFT framework for LLMs evaluation. 🚀🚀🚀
- - **[2024.06.13]** We have supported the ToolBench as a third-party evaluation backend for Agents evaluation. 🚀🚀🚀
+ - 🔥 **[2024.10.8]** Support for RAG evaluation, including independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html).
+ - 🔥 **[2024.09.18]** Our documentation has been updated to include a blog module, featuring some technical research and discussions related to evaluations. We invite you to [📖 read it](https://evalscope.readthedocs.io/en/refact_readme/blog/index.html).
+ - 🔥 **[2024.09.12]** Support for LongWriter evaluation, which supports 10,000+ word generation. You can use the benchmark [LongBench-Write](evalscope/third_party/longbench_write/README.md) to measure the long output quality as well as the output length.
+ - 🔥 **[2024.08.30]** Support for custom dataset evaluations, including text datasets and multimodal image-text datasets.
+ - 🔥 **[2024.08.20]** Updated the official documentation, including getting started guides, best practices, and FAQs. Feel free to [📖read it here](https://evalscope.readthedocs.io/en/latest/)!
+ - 🔥 **[2024.08.09]** Simplified the installation process, allowing for pypi installation of vlmeval dependencies; optimized the multimodal model evaluation experience, achieving up to 10x acceleration based on the OpenAI API evaluation chain.
+ - 🔥 **[2024.07.31]** Important change: The package name `llmuses` has been changed to `evalscope`. Please update your code accordingly.
+ - 🔥 **[2024.07.26]** Support for **VLMEvalKit** as a third-party evaluation framework to initiate multimodal model evaluation tasks.
+ - 🔥 **[2024.06.29]** Support for **OpenCompass** as a third-party evaluation framework, which we have encapsulated at a higher level, supporting pip installation and simplifying evaluation task configuration.
+ - 🔥 **[2024.06.13]** EvalScope seamlessly integrates with the fine-tuning framework SWIFT, providing full-chain support from LLM training to evaluation.
+ - 🔥 **[2024.06.13]** Integrated the Agent evaluation dataset ToolBench.



@@ -265,8 +262,8 @@ If prompted with `Do you wish to run the custom code? [y/N]`, please type `y`.

  #### Basic Parameter Descriptions
  - `--model`: Specifies the `model_id` of the model on [ModelScope](https://modelscope.cn/), allowing automatic download. For example, see the [Qwen2-0.5B-Instruct model link](https://modelscope.cn/models/qwen/Qwen2-0.5B-Instruct/summary); you can also use a local path, such as `/path/to/model`.
- - `--template-type`: Specifies the template type corresponding to the model. Refer to the `Default Template` field in the [template table](https://swift.readthedocs.io/en/latest/LLM/Supported-models-datasets.html) for filling in this field.
- - `--datasets`: The dataset name, allowing multiple datasets to be specified, separated by spaces; these datasets will be automatically downloaded. Refer to the [supported datasets list](#supported-datasets-list) for available options.
+ - `--template-type`: Specifies the template type corresponding to the model. Refer to the `Default Template` field in the [template table](https://swift.readthedocs.io/en/latest/Instruction/Supported-models-datasets.html#llm) for filling in this field.
+ - `--datasets`: The dataset name, allowing multiple datasets to be specified, separated by spaces; these datasets will be automatically downloaded. Refer to the [supported datasets list](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html) for available options.

  ### 2. Parameterized Evaluation
  If you wish to conduct a more customized evaluation, such as modifying model parameters or dataset parameters, you can use the following commands:
@@ -276,8 +273,8 @@ If you wish to conduct a more customized evaluation, such as modifying model par
  python evalscope/run.py \
   --model qwen/Qwen2-0.5B-Instruct \
   --template-type qwen \
-  --model-args revision=v1.0.2,precision=torch.float16,device_map=auto \
-  --datasets mmlu ceval \
+  --model-args revision=master,precision=torch.float16,device_map=auto \
+  --datasets gsm8k ceval \
   --use-cache true \
   --limit 10
  ```
@@ -342,31 +339,14 @@ from evalscope.run import run_task
  run_task(task_cfg=your_task_cfg)
  ```

- ### Supported Datasets List
- > [!NOTE]
- > The framework currently supports the following datasets. If the dataset you need is not in the list, please submit an issue, or use the [OpenCompass backend](https://evalscope.readthedocs.io/en/latest/user_guides/opencompass_backend.html) for evaluation, or use the [VLMEvalKit backend](https://evalscope.readthedocs.io/en/latest/user_guides/vlmevalkit_backend.html) for multi-modal model evaluation.
-
- | Dataset Name | Link | Status | Note |
- |--------------------|----------------------------------------------------------------------------------------|--------|------|
- | `mmlu` | [mmlu](https://modelscope.cn/datasets/modelscope/mmlu/summary) | Active | |
- | `ceval` | [ceval](https://modelscope.cn/datasets/modelscope/ceval-exam/summary) | Active | |
- | `gsm8k` | [gsm8k](https://modelscope.cn/datasets/modelscope/gsm8k/summary) | Active | |
- | `arc` | [arc](https://modelscope.cn/datasets/modelscope/ai2_arc/summary) | Active | |
- | `hellaswag` | [hellaswag](https://modelscope.cn/datasets/modelscope/hellaswag/summary) | Active | |
- | `truthful_qa` | [truthful_qa](https://modelscope.cn/datasets/modelscope/truthful_qa/summary) | Active | |
- | `competition_math` | [competition_math](https://modelscope.cn/datasets/modelscope/competition_math/summary) | Active | |
- | `humaneval` | [humaneval](https://modelscope.cn/datasets/modelscope/humaneval/summary) | Active | |
- | `bbh` | [bbh](https://modelscope.cn/datasets/modelscope/bbh/summary) | Active | |
- | `race` | [race](https://modelscope.cn/datasets/modelscope/race/summary) | Active | |
- | `trivia_qa` | [trivia_qa](https://modelscope.cn/datasets/modelscope/trivia_qa/summary) | To be integrated | |
-

  ## Evaluation Backend
  EvalScope supports using third-party evaluation frameworks to initiate evaluation tasks, which we call Evaluation Backend. Currently supported Evaluation Backend includes:
  - **Native**: EvalScope's own **default evaluation framework**, supporting various evaluation modes including single model evaluation, arena mode, and baseline model comparison mode.
- - [OpenCompass](https://github.com/open-compass/opencompass): Initiate OpenCompass evaluation tasks through EvalScope. Lightweight, easy to customize, supports seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/opencompass_backend.html)
- - [VLMEvalKit](https://github.com/open-compass/VLMEvalKit): Initiate VLMEvalKit multimodal evaluation tasks through EvalScope. Supports various multimodal models and datasets, and offers seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/vlmevalkit_backend.html)
- - **ThirdParty**: The third-party task, e.g. [ToolBench](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html), you can contribute your own evaluation task to EvalScope as third-party backend.
+ - [OpenCompass](https://github.com/open-compass/opencompass): Initiate OpenCompass evaluation tasks through EvalScope. Lightweight, easy to customize, supports seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/opencompass_backend.html)
+ - [VLMEvalKit](https://github.com/open-compass/VLMEvalKit): Initiate VLMEvalKit multimodal evaluation tasks through EvalScope. Supports various multimodal models and datasets, and offers seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/vlmevalkit_backend.html)
+ - **RAGEval**: Initiate RAG evaluation tasks through EvalScope, supporting independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html): [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/index.html)
+ - **ThirdParty**: Third-party evaluation tasks, such as [ToolBench](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) and [LongBench-Write](https://evalscope.readthedocs.io/en/latest/third_party/longwriter.html).

  ## Custom Dataset Evaluation
  EvalScope supports custom dataset evaluation. For detailed information, please refer to the Custom Dataset Evaluation [📖User Guide](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset.html)
@@ -395,6 +375,8 @@ Refer to : [Leaderboard](https://modelscope.cn/leaderboard/58/ranking?type=free)


  ## TO-DO List
+ - [x] RAG evaluation
+ - [x] VLM evaluation
  - [x] Agents evaluation
  - [x] vLLM
  - [ ] Distributed evaluating
@@ -406,3 +388,7 @@ Refer to : [Leaderboard](https://modelscope.cn/leaderboard/58/ranking?type=free)
  - [ ] Auto-reviewer
  - [ ] Qwen-max

+
+ ## Star History
+
+ [![Star History Chart](https://api.star-history.com/svg?repos=modelscope/evalscope&type=Date)](https://star-history.com/#modelscope/evalscope&Date)
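
Note: the `run_task` call quoted in the hunks above takes a task configuration object. A hedged sketch of such a config follows; the dict keys are assumed to mirror the CLI flags documented in the README (`--model`, `--template-type`, `--datasets`, `--limit`) rather than taken from this diff, so consult the EvalScope docs for the authoritative schema:

    # Hedged sketch: programmatic equivalent of the CLI example above.
    # Key names are assumed from the CLI flags, not confirmed by this diff.
    from evalscope.run import run_task

    your_task_cfg = {
        'model': 'qwen/Qwen2-0.5B-Instruct',  # ModelScope model_id or local path
        'template_type': 'qwen',              # chat template for the model
        'datasets': ['gsm8k', 'ceval'],       # auto-downloaded benchmark datasets
        'limit': 10,                          # cap samples per dataset for a quick run
    }
    run_task(task_cfg=your_task_cfg)
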
File: {evalscope-0.5.3 → evalscope-0.5.5}/README.md

@@ -29,28 +29,11 @@ English | [简体中文](README_zh.md)

  ## 📝 Introduction

- Large Model (including Large Language Models, Multi-modal Large Language Models) evaluation has become a critical process for assessing and improving LLMs. To better support the evaluation of large models, we propose the EvalScope framework.
-
- ### Framework Features
- - **Benchmark Datasets**: Preloaded with several commonly used test benchmarks, including MMLU, CMMLU, C-Eval, GSM8K, ARC, HellaSwag, TruthfulQA, MATH, HumanEval, etc.
- - **Evaluation Metrics**: Implements various commonly used evaluation metrics.
- - **Model Access**: A unified model access mechanism that is compatible with the Generate and Chat interfaces of multiple model families.
- - **Automated Evaluation**: Includes automatic evaluation of objective questions and complex task evaluation using expert models.
- - **Evaluation Reports**: Automatically generates evaluation reports.
- - **Arena Mode**: Used for comparisons between models and objective evaluation of models, supporting various evaluation modes, including:
-   - **Single mode**: Scoring a single model.
-   - **Pairwise-baseline mode**: Comparing against a baseline model.
-   - **Pairwise (all) mode**: Pairwise comparison among all models.
- - **Visualization Tools**: Provides intuitive displays of evaluation results.
- - **Model Performance Evaluation**: Offers a performance testing tool for model inference services and detailed statistics, see [Model Performance Evaluation Documentation](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test.html).
- - **OpenCompass Integration**: Supports OpenCompass as the evaluation backend, providing advanced encapsulation and task simplification, allowing for easier task submission for evaluation.
- - **VLMEvalKit Integration**: Supports VLMEvalKit as the evaluation backend, facilitating the initiation of multi-modal evaluation tasks, supporting various multi-modal models and datasets.
- - **Full-Link Support**: Through seamless integration with the [ms-swift](https://github.com/modelscope/ms-swift) training framework, provides a one-stop development process for model training, model deployment, model evaluation, and report viewing, enhancing user development efficiency.
-
- ### Overall Architecture
+ EvalScope is the official model evaluation and performance benchmarking framework launched by the [ModelScope](https://modelscope.cn/) community. It comes with built-in common benchmarks and evaluation metrics, such as MMLU, CMMLU, C-Eval, GSM8K, ARC, HellaSwag, TruthfulQA, MATH, and HumanEval. EvalScope supports various types of model evaluations, including LLMs, multimodal LLMs, embedding models, and reranker models. It is also applicable to multiple evaluation scenarios, such as end-to-end RAG evaluation, arena mode, and model inference performance stress testing. Moreover, with the seamless integration of the ms-swift training framework, evaluations can be initiated with a single click, providing full end-to-end support from model training to evaluation 🚀
+
  <p align="center">
    <img src="docs/en/_static/images/evalscope_framework.png" width="70%">
- <br>Fig 1. EvalScope Framework.
+ <br>EvalScope Framework.
  </p>

  The architecture includes the following modules:
@@ -60,18 +43,25 @@ The architecture includes the following modules:
  - **Native**: EvalScope’s own **default evaluation framework**, supporting various evaluation modes, including single model evaluation, arena mode, baseline model comparison mode, etc.
  - **OpenCompass**: Supports [OpenCompass](https://github.com/open-compass/opencompass) as the evaluation backend, providing advanced encapsulation and task simplification, allowing you to submit tasks for evaluation more easily.
  - **VLMEvalKit**: Supports [VLMEvalKit](https://github.com/open-compass/VLMEvalKit) as the evaluation backend, enabling easy initiation of multi-modal evaluation tasks, supporting various multi-modal models and datasets.
+ - **RAGEval**: Supports RAG evaluation, supporting independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html).
  - **ThirdParty**: Other third-party evaluation tasks, such as ToolBench.
  4. **Performance Evaluator**: Model performance evaluation, responsible for measuring model inference service performance, including performance testing, stress testing, performance report generation, and visualization.
  5. **Evaluation Report**: The final generated evaluation report summarizes the model's performance, which can be used for decision-making and further model optimization.
  6. **Visualization**: Visualization results help users intuitively understand evaluation results, facilitating analysis and comparison of different model performances.

+
  ## 🎉 News
- - **[2024.08.09]** Simplified installation process, supporting PyPI installation for vlmeval dependencies; Optimized multi-modal models evaluation experience with pipeline that based on OpenAI API, achieving up to 10x acceleration 🚀🚀🚀
- - **[2024.07.31]** Breaking change: The sdk name has been changed from `llmuses` to `evalscope`, please update the sdk name in your code.
- - **[2024.07.26]** Supports **VLMEvalKit** as a third-party evaluation framework, initiating multimodal model evaluation tasks. 🔥🔥🔥
- - **[2024.06.29]** Supports **OpenCompass** as a third-party evaluation framework. We have provided a high-level wrapper, supporting installation via pip and simplifying the evaluation task configuration. 🔥🔥🔥
- - **[2024.06.13]** EvalScope has been updated to version 0.3.x, which supports the ModelScope SWIFT framework for LLMs evaluation. 🚀🚀🚀
- - **[2024.06.13]** We have supported the ToolBench as a third-party evaluation backend for Agents evaluation. 🚀🚀🚀
+ - 🔥 **[2024.10.8]** Support for RAG evaluation, including independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html).
+ - 🔥 **[2024.09.18]** Our documentation has been updated to include a blog module, featuring some technical research and discussions related to evaluations. We invite you to [📖 read it](https://evalscope.readthedocs.io/en/refact_readme/blog/index.html).
+ - 🔥 **[2024.09.12]** Support for LongWriter evaluation, which supports 10,000+ word generation. You can use the benchmark [LongBench-Write](evalscope/third_party/longbench_write/README.md) to measure the long output quality as well as the output length.
+ - 🔥 **[2024.08.30]** Support for custom dataset evaluations, including text datasets and multimodal image-text datasets.
+ - 🔥 **[2024.08.20]** Updated the official documentation, including getting started guides, best practices, and FAQs. Feel free to [📖read it here](https://evalscope.readthedocs.io/en/latest/)!
+ - 🔥 **[2024.08.09]** Simplified the installation process, allowing for pypi installation of vlmeval dependencies; optimized the multimodal model evaluation experience, achieving up to 10x acceleration based on the OpenAI API evaluation chain.
+ - 🔥 **[2024.07.31]** Important change: The package name `llmuses` has been changed to `evalscope`. Please update your code accordingly.
+ - 🔥 **[2024.07.26]** Support for **VLMEvalKit** as a third-party evaluation framework to initiate multimodal model evaluation tasks.
+ - 🔥 **[2024.06.29]** Support for **OpenCompass** as a third-party evaluation framework, which we have encapsulated at a higher level, supporting pip installation and simplifying evaluation task configuration.
+ - 🔥 **[2024.06.13]** EvalScope seamlessly integrates with the fine-tuning framework SWIFT, providing full-chain support from LLM training to evaluation.
+ - 🔥 **[2024.06.13]** Integrated the Agent evaluation dataset ToolBench.



@@ -151,8 +141,8 @@ If prompted with `Do you wish to run the custom code? [y/N]`, please type `y`.

  #### Basic Parameter Descriptions
  - `--model`: Specifies the `model_id` of the model on [ModelScope](https://modelscope.cn/), allowing automatic download. For example, see the [Qwen2-0.5B-Instruct model link](https://modelscope.cn/models/qwen/Qwen2-0.5B-Instruct/summary); you can also use a local path, such as `/path/to/model`.
- - `--template-type`: Specifies the template type corresponding to the model. Refer to the `Default Template` field in the [template table](https://swift.readthedocs.io/en/latest/LLM/Supported-models-datasets.html) for filling in this field.
- - `--datasets`: The dataset name, allowing multiple datasets to be specified, separated by spaces; these datasets will be automatically downloaded. Refer to the [supported datasets list](#supported-datasets-list) for available options.
+ - `--template-type`: Specifies the template type corresponding to the model. Refer to the `Default Template` field in the [template table](https://swift.readthedocs.io/en/latest/Instruction/Supported-models-datasets.html#llm) for filling in this field.
+ - `--datasets`: The dataset name, allowing multiple datasets to be specified, separated by spaces; these datasets will be automatically downloaded. Refer to the [supported datasets list](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html) for available options.

  ### 2. Parameterized Evaluation
  If you wish to conduct a more customized evaluation, such as modifying model parameters or dataset parameters, you can use the following commands:
@@ -162,8 +152,8 @@ If you wish to conduct a more customized evaluation, such as modifying model par
  python evalscope/run.py \
   --model qwen/Qwen2-0.5B-Instruct \
   --template-type qwen \
-  --model-args revision=v1.0.2,precision=torch.float16,device_map=auto \
-  --datasets mmlu ceval \
+  --model-args revision=master,precision=torch.float16,device_map=auto \
+  --datasets gsm8k ceval \
   --use-cache true \
   --limit 10
  ```
@@ -228,31 +218,14 @@ from evalscope.run import run_task
  run_task(task_cfg=your_task_cfg)
  ```

- ### Supported Datasets List
- > [!NOTE]
- > The framework currently supports the following datasets. If the dataset you need is not in the list, please submit an issue, or use the [OpenCompass backend](https://evalscope.readthedocs.io/en/latest/user_guides/opencompass_backend.html) for evaluation, or use the [VLMEvalKit backend](https://evalscope.readthedocs.io/en/latest/user_guides/vlmevalkit_backend.html) for multi-modal model evaluation.
-
- | Dataset Name | Link | Status | Note |
- |--------------------|----------------------------------------------------------------------------------------|--------|------|
- | `mmlu` | [mmlu](https://modelscope.cn/datasets/modelscope/mmlu/summary) | Active | |
- | `ceval` | [ceval](https://modelscope.cn/datasets/modelscope/ceval-exam/summary) | Active | |
- | `gsm8k` | [gsm8k](https://modelscope.cn/datasets/modelscope/gsm8k/summary) | Active | |
- | `arc` | [arc](https://modelscope.cn/datasets/modelscope/ai2_arc/summary) | Active | |
- | `hellaswag` | [hellaswag](https://modelscope.cn/datasets/modelscope/hellaswag/summary) | Active | |
- | `truthful_qa` | [truthful_qa](https://modelscope.cn/datasets/modelscope/truthful_qa/summary) | Active | |
- | `competition_math` | [competition_math](https://modelscope.cn/datasets/modelscope/competition_math/summary) | Active | |
- | `humaneval` | [humaneval](https://modelscope.cn/datasets/modelscope/humaneval/summary) | Active | |
- | `bbh` | [bbh](https://modelscope.cn/datasets/modelscope/bbh/summary) | Active | |
- | `race` | [race](https://modelscope.cn/datasets/modelscope/race/summary) | Active | |
- | `trivia_qa` | [trivia_qa](https://modelscope.cn/datasets/modelscope/trivia_qa/summary) | To be integrated | |
-

  ## Evaluation Backend
  EvalScope supports using third-party evaluation frameworks to initiate evaluation tasks, which we call Evaluation Backend. Currently supported Evaluation Backend includes:
  - **Native**: EvalScope's own **default evaluation framework**, supporting various evaluation modes including single model evaluation, arena mode, and baseline model comparison mode.
- - [OpenCompass](https://github.com/open-compass/opencompass): Initiate OpenCompass evaluation tasks through EvalScope. Lightweight, easy to customize, supports seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/opencompass_backend.html)
- - [VLMEvalKit](https://github.com/open-compass/VLMEvalKit): Initiate VLMEvalKit multimodal evaluation tasks through EvalScope. Supports various multimodal models and datasets, and offers seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/vlmevalkit_backend.html)
- - **ThirdParty**: The third-party task, e.g. [ToolBench](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html), you can contribute your own evaluation task to EvalScope as third-party backend.
+ - [OpenCompass](https://github.com/open-compass/opencompass): Initiate OpenCompass evaluation tasks through EvalScope. Lightweight, easy to customize, supports seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/opencompass_backend.html)
+ - [VLMEvalKit](https://github.com/open-compass/VLMEvalKit): Initiate VLMEvalKit multimodal evaluation tasks through EvalScope. Supports various multimodal models and datasets, and offers seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/vlmevalkit_backend.html)
+ - **RAGEval**: Initiate RAG evaluation tasks through EvalScope, supporting independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html): [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/index.html)
+ - **ThirdParty**: Third-party evaluation tasks, such as [ToolBench](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) and [LongBench-Write](https://evalscope.readthedocs.io/en/latest/third_party/longwriter.html).

  ## Custom Dataset Evaluation
  EvalScope supports custom dataset evaluation. For detailed information, please refer to the Custom Dataset Evaluation [📖User Guide](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset.html)
@@ -281,6 +254,8 @@ Refer to : [Leaderboard](https://modelscope.cn/leaderboard/58/ranking?type=free)


  ## TO-DO List
+ - [x] RAG evaluation
+ - [x] VLM evaluation
  - [x] Agents evaluation
  - [x] vLLM
  - [ ] Distributed evaluating
@@ -292,3 +267,7 @@ Refer to : [Leaderboard](https://modelscope.cn/leaderboard/58/ranking?type=free)
  - [ ] Auto-reviewer
  - [ ] Qwen-max

+
+ ## Star History
+
+ [![Star History Chart](https://api.star-history.com/svg?repos=modelscope/evalscope&type=Date)](https://star-history.com/#modelscope/evalscope&Date)
File: {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/backend/opencompass/backend_manager.py

@@ -242,4 +242,6 @@ if __name__ == '__main__':
          'limit': 5
      }
  )
+ all_datasets = OpenCompassBackendManager.list_datasets()
+ print(f'all_datasets: {all_datasets}')
  oc_backend_manager.run()
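
Note: the two added lines demonstrate the new `list_datasets()` helper, which reports the OpenCompass datasets registered with the backend. A hedged sketch of using it to select a dataset subset (the import path is assumed from the file's location in this package; only `list_datasets` itself appears in the diff):

    # Hedged sketch: discover registered datasets, then pick a subset.
    # Import path assumed from evalscope/backend/opencompass/backend_manager.py.
    from evalscope.backend.opencompass.backend_manager import OpenCompassBackendManager

    all_datasets = OpenCompassBackendManager.list_datasets()
    print(f'all_datasets: {all_datasets}')
    wanted = [name for name in all_datasets if name in {'gsm8k', 'winogrande', 'mbpp'}]
    print(f'selected: {wanted}')
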
File: {evalscope-0.5.3 → evalscope-0.5.5}/evalscope/backend/opencompass/tasks/eval_datasets.py

@@ -7,7 +7,7 @@ with read_base():
      from opencompass.configs.datasets.agieval.agieval_gen_64afd3 import agieval_datasets
      from opencompass.configs.datasets.GaokaoBench.GaokaoBench_gen_5cfe9e import GaokaoBench_datasets
      from opencompass.configs.datasets.humaneval.humaneval_gen_8e312c import humaneval_datasets
-     from opencompass.configs.datasets.mbpp.deprecated_mbpp_gen_1e1056 import mbpp_datasets
+     from opencompass.configs.datasets.mbpp.mbpp_gen_830460 import mbpp_datasets
      from opencompass.configs.datasets.CLUE_C3.CLUE_C3_gen_8c358f import C3_datasets
      from opencompass.configs.datasets.CLUE_CMRC.CLUE_CMRC_gen_1bd3c8 import CMRC_datasets
      from opencompass.configs.datasets.CLUE_DRCD.CLUE_DRCD_gen_1bd3c8 import DRCD_datasets
@@ -45,7 +45,7 @@ with read_base():
      from opencompass.configs.datasets.piqa.piqa_gen_1194eb import piqa_datasets
      from opencompass.configs.datasets.siqa.siqa_gen_e78df3 import siqa_datasets
      from opencompass.configs.datasets.strategyqa.strategyqa_gen_1180a7 import strategyqa_datasets
-     from opencompass.configs.datasets.winogrande.deprecated_winogrande_gen_a9ede5 import winogrande_datasets
+     from opencompass.configs.datasets.winogrande.winogrande_gen_458220 import winogrande_datasets
      from opencompass.configs.datasets.obqa.obqa_gen_9069e4 import obqa_datasets
      from opencompass.configs.datasets.nq.nq_gen_c788f6 import nq_datasets
      from opencompass.configs.datasets.triviaqa.triviaqa_gen_2121ce import triviaqa_datasets
File: evalscope-0.5.5/evalscope/backend/rag_eval/__init__.py (new file)

@@ -0,0 +1,3 @@
+ from evalscope.backend.rag_eval.utils.embedding import EmbeddingModel
+ from evalscope.backend.rag_eval.utils.llm import LLM
+ from evalscope.backend.rag_eval.backend_manager import RAGEvalBackendManager
File: evalscope-0.5.5/evalscope/backend/rag_eval/backend_manager.py (new file)

@@ -0,0 +1,68 @@
+ import os
+ from typing import Optional, Union
+ from evalscope.utils import is_module_installed, get_valid_list
+ from evalscope.backend.base import BackendManager
+ from evalscope.utils.logger import get_logger
+
+
+ logger = get_logger()
+
+
+ class RAGEvalBackendManager(BackendManager):
+     def __init__(self, config: Union[str, dict], **kwargs):
+         """BackendManager for RAG evaluation.
+
+         Args:
+             config (Union[str, dict]): the configuration yaml-file or the configuration dictionary
+         """
+         super().__init__(config, **kwargs)
+
+     @staticmethod
+     def _check_env(module_name: str):
+         if is_module_installed(module_name):
+             logger.info(f"Check `{module_name}` Installed")
+         else:
+             logger.error(f"Please install `{module_name}` first")
+
+     def run_mteb(self):
+         from evalscope.backend.rag_eval.cmteb import ModelArguments, EvalArguments
+         from evalscope.backend.rag_eval.cmteb import one_stage_eval, two_stage_eval
+
+         if len(self.model_args) > 2:
+             raise ValueError("More than two models are not supported yet")
+
+         # Convert arguments to dictionary
+         model_args_list = [ModelArguments(**args).to_dict() for args in self.model_args]
+         eval_args = EvalArguments(**self.eval_args).to_dict()
+
+         if len(model_args_list) == 1:
+             one_stage_eval(model_args_list[0], eval_args)
+         else:  # len(model_args_list) == 2
+             two_stage_eval(model_args_list[0], model_args_list[1], eval_args)
+
+     def run_ragas(self):
+         from evalscope.backend.rag_eval.ragas import rag_eval, testset_generation
+         from evalscope.backend.rag_eval.ragas import (
+             TestsetGenerationArguments,
+             EvaluationArguments,
+         )
+
+         if self.testset_args is not None:
+             testset_generation(TestsetGenerationArguments(**self.testset_args))
+         if self.eval_args is not None:
+             rag_eval(EvaluationArguments(**self.eval_args))
+
+     def run(self, *args, **kwargs):
+         tool = self.config_d.pop("tool")
+         if tool.lower() == "mteb":
+             self._check_env("mteb")
+             self.model_args = self.config_d["model"]
+             self.eval_args = self.config_d["eval"]
+             self.run_mteb()
+         elif tool.lower() == "ragas":
+             self._check_env("ragas")
+             self.testset_args = self.config_d.get("testset_generation", None)
+             self.eval_args = self.config_d.get("eval", None)
+             self.run_ragas()
+         else:
+             raise ValueError(f"Unknown tool: {tool}")
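
Note: the `run()` method above implies the configuration shape this backend expects: a top-level `tool` selector plus `model`/`eval` sections for MTEB/CMTEB, or `testset_generation`/`eval` sections for RAGAS. A hedged usage sketch follows, assuming the base `BackendManager` parses the config into `self.config_d` as `run()` implies; the model path and task name are placeholders, and the available fields are defined by the `ModelArguments`/`EvalArguments` dataclasses shown further below:

    # Hedged sketch: drive the new RAG-eval backend with a dict config.
    # Keys mirror the config_d lookups in RAGEvalBackendManager.run().
    from evalscope.backend.rag_eval import RAGEvalBackendManager

    config = {
        "tool": "mteb",  # or "ragas"
        "model": [  # one entry runs one_stage_eval; two entries run two_stage_eval
            {"model_name_or_path": "path/to/embedding-model"},  # placeholder
        ],
        "eval": {
            "tasks": ["TNews"],  # placeholder CMTEB task name
            "output_folder": "outputs",
            "limits": 100,
        },
    }
    RAGEvalBackendManager(config).run()
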
File: evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/__init__.py (new file)

@@ -0,0 +1,4 @@
+ from evalscope.backend.rag_eval.cmteb.tasks import *
+ from evalscope.backend.rag_eval.cmteb.base import *
+ from evalscope.backend.rag_eval.cmteb.arguments import ModelArguments, EvalArguments
+ from evalscope.backend.rag_eval.cmteb.task_template import one_stage_eval, two_stage_eval
File: evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/arguments.py (new file)

@@ -0,0 +1,59 @@
+ from dataclasses import dataclass, field
+ from typing import List, Optional, Union, Dict, Any
+
+
+ @dataclass
+ class ModelArguments:
+     # Arguments for embedding model: sentence transformer or cross encoder
+     model_name_or_path: str = ""  # model name or path
+     is_cross_encoder: bool = False  # whether the model is a cross encoder
+     # pooling mode: either "cls", "lasttoken", "max", "mean", "mean_sqrt_len_tokens", or "weightedmean"
+     pooling_mode: Optional[str] = None
+     max_seq_length: int = 512  # max sequence length
+     # prompt for LLM-based models
+     prompt: str = ""
+     # model kwargs
+     model_kwargs: dict = field(default_factory=lambda: {"torch_dtype": "auto"})
+     # config kwargs
+     config_kwargs: Dict[str, Any] = field(default_factory=dict)
+     # encode kwargs
+     encode_kwargs: dict = field(
+         default_factory=lambda: {"show_progress_bar": True, "batch_size": 32}
+     )
+     hub: str = "modelscope"  # modelscope or huggingface
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {
+             "model_name_or_path": self.model_name_or_path,
+             "is_cross_encoder": self.is_cross_encoder,
+             "pooling_mode": self.pooling_mode,
+             "max_seq_length": self.max_seq_length,
+             "prompt": self.prompt,
+             "model_kwargs": self.model_kwargs,
+             "config_kwargs": self.config_kwargs,
+             "encode_kwargs": self.encode_kwargs,
+             "hub": self.hub,
+         }
+
+
+ @dataclass
+ class EvalArguments:
+     # Evaluation
+     tasks: List[str] = field(default_factory=list)  # task names
+     verbosity: int = 2  # verbosity level 0-3
+     output_folder: str = "outputs"  # output folder
+     overwrite_results: bool = True  # overwrite results
+     limits: Optional[int] = None  # limit number of samples
+     hub: str = "modelscope"  # modelscope or huggingface
+     top_k: int = 5
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {
+             "tasks": self.tasks,
+             "verbosity": self.verbosity,
+             "output_folder": self.output_folder,
+             "overwrite_results": self.overwrite_results,
+             "limits": self.limits,
+             "hub": self.hub,
+             "top_k": self.top_k,
+         }
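
Note: tying the dataclasses back to `run_mteb()` in the backend manager above, both are converted with `to_dict()` and handed to `one_stage_eval`. A hedged sketch with placeholder model and task names:

    # Hedged sketch: single-stage CMTEB evaluation, mirroring run_mteb().
    from evalscope.backend.rag_eval.cmteb import ModelArguments, EvalArguments, one_stage_eval

    model_args = ModelArguments(
        model_name_or_path="path/to/embedding-model",  # placeholder
        max_seq_length=512,
        hub="modelscope",
    )
    eval_args = EvalArguments(
        tasks=["TNews"],          # placeholder task name
        output_folder="outputs",
        limits=100,               # evaluate on a 100-sample subset
    )
    one_stage_eval(model_args.to_dict(), eval_args.to_dict())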