evalscope 0.5.4__tar.gz → 0.5.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (204)
  1. {evalscope-0.5.4 → evalscope-0.5.5}/PKG-INFO +26 -32
  2. {evalscope-0.5.4 → evalscope-0.5.5}/README.md +14 -25
  3. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/backend/opencompass/tasks/eval_datasets.py +2 -2
  4. evalscope-0.5.5/evalscope/backend/rag_eval/__init__.py +3 -0
  5. evalscope-0.5.5/evalscope/backend/rag_eval/backend_manager.py +68 -0
  6. evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/__init__.py +4 -0
  7. evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/arguments.py +59 -0
  8. evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/base.py +89 -0
  9. evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/task_template.py +83 -0
  10. evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/tasks/Classification.py +302 -0
  11. evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/tasks/Clustering.py +252 -0
  12. evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/tasks/PairClassification.py +113 -0
  13. evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/tasks/Reranking.py +153 -0
  14. evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/tasks/Retrieval.py +345 -0
  15. evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/tasks/STS.py +302 -0
  16. evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/tasks/__init__.py +64 -0
  17. evalscope-0.5.5/evalscope/backend/rag_eval/ragas/__init__.py +2 -0
  18. evalscope-0.5.5/evalscope/backend/rag_eval/ragas/arguments.py +37 -0
  19. evalscope-0.5.5/evalscope/backend/rag_eval/ragas/task_template.py +117 -0
  20. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/backend/vlm_eval_kit/backend_manager.py +0 -1
  21. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/backend/vlm_eval_kit/custom_dataset.py +1 -1
  22. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/evaluator/evaluator.py +1 -0
  23. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/metrics/bundled_rouge_score/rouge_scorer.py +19 -0
  24. evalscope-0.5.5/evalscope/preprocess/tokenizers/__init__.py +0 -0
  25. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/run.py +4 -0
  26. evalscope-0.5.5/evalscope/utils/logger.py +94 -0
  27. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/utils/task_utils.py +3 -0
  28. evalscope-0.5.5/evalscope/version.py +4 -0
  29. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope.egg-info/PKG-INFO +26 -32
  30. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope.egg-info/SOURCES.txt +16 -0
  31. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope.egg-info/requires.txt +12 -6
  32. evalscope-0.5.4/evalscope/backend/opencompass/__init__.py +0 -3
  33. evalscope-0.5.4/evalscope/utils/logger.py +0 -64
  34. evalscope-0.5.4/evalscope/version.py +0 -4
  35. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/__init__.py +0 -0
  36. {evalscope-0.5.4/evalscope/perf → evalscope-0.5.5/evalscope/backend}/__init__.py +0 -0
  37. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/backend/base.py +0 -0
  38. {evalscope-0.5.4/evalscope/backend → evalscope-0.5.5/evalscope/backend/opencompass}/__init__.py +0 -0
  39. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/backend/opencompass/api_meta_template.py +0 -0
  40. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/backend/opencompass/backend_manager.py +0 -0
  41. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/backend/opencompass/tasks/__init__.py +0 -0
  42. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/backend/opencompass/tasks/eval_api.py +0 -0
  43. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/backend/vlm_eval_kit/__init__.py +0 -0
  44. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/__init__.py +0 -0
  45. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/arc/__init__.py +0 -0
  46. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/arc/ai2_arc.py +0 -0
  47. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/arc/arc_adapter.py +0 -0
  48. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/__init__.py +0 -0
  49. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/bbh_adapter.py +0 -0
  50. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/boolean_expressions.txt +0 -0
  51. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/causal_judgement.txt +0 -0
  52. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/date_understanding.txt +0 -0
  53. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/disambiguation_qa.txt +0 -0
  54. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/dyck_languages.txt +0 -0
  55. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/formal_fallacies.txt +0 -0
  56. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/geometric_shapes.txt +0 -0
  57. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/hyperbaton.txt +0 -0
  58. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_five_objects.txt +0 -0
  59. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_seven_objects.txt +0 -0
  60. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_three_objects.txt +0 -0
  61. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/movie_recommendation.txt +0 -0
  62. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/multistep_arithmetic_two.txt +0 -0
  63. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/navigate.txt +0 -0
  64. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/object_counting.txt +0 -0
  65. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/penguins_in_a_table.txt +0 -0
  66. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/reasoning_about_colored_objects.txt +0 -0
  67. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/ruin_names.txt +0 -0
  68. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/salient_translation_error_detection.txt +0 -0
  69. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/snarks.txt +0 -0
  70. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/sports_understanding.txt +0 -0
  71. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/temporal_sequences.txt +0 -0
  72. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_five_objects.txt +0 -0
  73. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_seven_objects.txt +0 -0
  74. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_three_objects.txt +0 -0
  75. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/web_of_lies.txt +0 -0
  76. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/bbh/cot_prompts/word_sorting.txt +0 -0
  77. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/benchmark.py +0 -0
  78. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/ceval/__init__.py +0 -0
  79. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/ceval/ceval_adapter.py +0 -0
  80. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/ceval/ceval_exam.py +0 -0
  81. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/cmmlu/__init__.py +0 -0
  82. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/cmmlu/cmmlu.py +0 -0
  83. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/cmmlu/cmmlu_adapter.py +0 -0
  84. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/competition_math/__init__.py +0 -0
  85. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/competition_math/competition_math.py +0 -0
  86. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/competition_math/competition_math_adapter.py +0 -0
  87. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/data_adapter.py +0 -0
  88. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/general_qa/__init__.py +0 -0
  89. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/general_qa/general_qa_adapter.py +0 -0
  90. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/gsm8k/__init__.py +0 -0
  91. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/gsm8k/gsm8k.py +0 -0
  92. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/gsm8k/gsm8k_adapter.py +0 -0
  93. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/hellaswag/__init__.py +0 -0
  94. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/hellaswag/hellaswag.py +0 -0
  95. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/hellaswag/hellaswag_adapter.py +0 -0
  96. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/humaneval/__init__.py +0 -0
  97. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/humaneval/humaneval.py +0 -0
  98. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/humaneval/humaneval_adapter.py +0 -0
  99. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/mmlu/__init__.py +0 -0
  100. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/mmlu/mmlu.py +0 -0
  101. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/mmlu/mmlu_adapter.py +0 -0
  102. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/race/__init__.py +0 -0
  103. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/race/race.py +0 -0
  104. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/race/race_adapter.py +0 -0
  105. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/trivia_qa/__init__.py +0 -0
  106. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/trivia_qa/trivia_qa.py +0 -0
  107. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +0 -0
  108. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/truthful_qa/__init__.py +0 -0
  109. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/truthful_qa/truthful_qa.py +0 -0
  110. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +0 -0
  111. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/cache.py +0 -0
  112. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/cli/__init__.py +0 -0
  113. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/cli/base.py +0 -0
  114. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/cli/cli.py +0 -0
  115. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/cli/start_perf.py +0 -0
  116. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/cli/start_server.py +0 -0
  117. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/config.py +0 -0
  118. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/constants.py +0 -0
  119. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/evaluator/__init__.py +0 -0
  120. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/evaluator/rating_eval.py +0 -0
  121. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/evaluator/reviewer/__init__.py +0 -0
  122. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/evaluator/reviewer/auto_reviewer.py +0 -0
  123. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/metrics/__init__.py +0 -0
  124. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/metrics/bundled_rouge_score/__init__.py +0 -0
  125. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/metrics/code_metric.py +0 -0
  126. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/metrics/math_accuracy.py +0 -0
  127. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/metrics/metrics.py +0 -0
  128. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/metrics/rouge_metric.py +0 -0
  129. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/models/__init__.py +0 -0
  130. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/models/api/__init__.py +0 -0
  131. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/models/api/openai_api.py +0 -0
  132. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/models/custom/__init__.py +0 -0
  133. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/models/custom/custom_model.py +0 -0
  134. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/models/dummy_chat_model.py +0 -0
  135. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/models/model.py +0 -0
  136. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/models/model_adapter.py +0 -0
  137. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/models/openai_model.py +0 -0
  138. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/models/template.py +0 -0
  139. {evalscope-0.5.4/evalscope/perf/datasets → evalscope-0.5.5/evalscope/perf}/__init__.py +0 -0
  140. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/perf/_logging.py +0 -0
  141. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/perf/api_plugin_base.py +0 -0
  142. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/perf/custom_api.py +0 -0
  143. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/perf/dashscope_api.py +0 -0
  144. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/perf/dataset_plugin_base.py +0 -0
  145. {evalscope-0.5.4/evalscope/preprocess/tokenizers → evalscope-0.5.5/evalscope/perf/datasets}/__init__.py +0 -0
  146. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/perf/datasets/line_by_line.py +0 -0
  147. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/perf/datasets/longalpaca_12k.py +0 -0
  148. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/perf/datasets/openqa.py +0 -0
  149. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/perf/how_to_analysis_result.py +0 -0
  150. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/perf/http_client.py +0 -0
  151. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/perf/openai_api.py +0 -0
  152. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/perf/plugin_registry.py +0 -0
  153. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/perf/query_parameters.py +0 -0
  154. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/perf/server_sent_event.py +0 -0
  155. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/preprocess/__init__.py +0 -0
  156. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/preprocess/tokenizers/gpt2_tokenizer.py +0 -0
  157. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/registry/__init__.py +0 -0
  158. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/registry/tasks/arc.yaml +0 -0
  159. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/registry/tasks/bbh.yaml +0 -0
  160. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/registry/tasks/bbh_mini.yaml +0 -0
  161. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/registry/tasks/ceval.yaml +0 -0
  162. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/registry/tasks/ceval_mini.yaml +0 -0
  163. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/registry/tasks/cmmlu.yaml +0 -0
  164. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/registry/tasks/eval_qwen-7b-chat_v100.yaml +0 -0
  165. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/registry/tasks/general_qa.yaml +0 -0
  166. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/registry/tasks/gsm8k.yaml +0 -0
  167. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/registry/tasks/mmlu.yaml +0 -0
  168. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/registry/tasks/mmlu_mini.yaml +0 -0
  169. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/run_arena.py +0 -0
  170. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/run_ms.py +0 -0
  171. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/summarizer.py +0 -0
  172. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/third_party/__init__.py +0 -0
  173. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/third_party/longbench_write/__init__.py +0 -0
  174. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/third_party/longbench_write/eval.py +0 -0
  175. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/third_party/longbench_write/infer.py +0 -0
  176. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/third_party/longbench_write/longbench_write.py +0 -0
  177. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/third_party/longbench_write/resources/__init__.py +0 -0
  178. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/third_party/longbench_write/resources/judge.txt +0 -0
  179. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/third_party/longbench_write/resources/longbench_write.jsonl +0 -0
  180. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/third_party/longbench_write/resources/longbench_write_en.jsonl +0 -0
  181. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/third_party/longbench_write/resources/longwrite_ruler.jsonl +0 -0
  182. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/third_party/longbench_write/tools/__init__.py +0 -0
  183. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/third_party/longbench_write/tools/data_etl.py +0 -0
  184. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/third_party/longbench_write/utils.py +0 -0
  185. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/third_party/toolbench_static/__init__.py +0 -0
  186. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/third_party/toolbench_static/eval.py +0 -0
  187. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/third_party/toolbench_static/infer.py +0 -0
  188. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/third_party/toolbench_static/llm/__init__.py +0 -0
  189. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -0
  190. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/third_party/toolbench_static/toolbench_static.py +0 -0
  191. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/tools/__init__.py +0 -0
  192. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/tools/combine_reports.py +0 -0
  193. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/tools/gen_mmlu_subject_mapping.py +0 -0
  194. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/tools/rewrite_eval_results.py +0 -0
  195. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/utils/__init__.py +0 -0
  196. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/utils/arena_utils.py +0 -0
  197. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/utils/completion_parsers.py +0 -0
  198. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/utils/task_cfg_parser.py +0 -0
  199. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope/utils/utils.py +0 -0
  200. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope.egg-info/dependency_links.txt +0 -0
  201. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope.egg-info/entry_points.txt +0 -0
  202. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope.egg-info/not-zip-safe +0 -0
  203. {evalscope-0.5.4 → evalscope-0.5.5}/evalscope.egg-info/top_level.txt +0 -0
  204. {evalscope-0.5.4 → evalscope-0.5.5}/setup.cfg +0 -0
{evalscope-0.5.4 → evalscope-0.5.5}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: evalscope
- Version: 0.5.4
+ Version: 0.5.5
  Summary: EvalScope: Lightweight LLMs Evaluation Framework
  Home-page: https://github.com/modelscope/evalscope
  Author: ModelScope team
@@ -24,7 +24,7 @@ Requires-Dist: editdistance
  Requires-Dist: jsonlines
  Requires-Dist: matplotlib
  Requires-Dist: modelscope[framework]
- Requires-Dist: nltk
+ Requires-Dist: nltk>=3.9
  Requires-Dist: openai
  Requires-Dist: pandas
  Requires-Dist: plotly
@@ -34,7 +34,7 @@ Requires-Dist: pyyaml
  Requires-Dist: regex
  Requires-Dist: requests
  Requires-Dist: requests-toolbelt
- Requires-Dist: rouge-score
+ Requires-Dist: rouge-score>=0.1.0
  Requires-Dist: sacrebleu
  Requires-Dist: scikit-learn
  Requires-Dist: seaborn
@@ -48,9 +48,12 @@ Requires-Dist: transformers_stream_generator
  Requires-Dist: jieba
  Requires-Dist: rouge-chinese
  Provides-Extra: opencompass
- Requires-Dist: ms-opencompass>=0.1.0; extra == "opencompass"
+ Requires-Dist: ms-opencompass>=0.1.1; extra == "opencompass"
  Provides-Extra: vlmeval
  Requires-Dist: ms-vlmeval>=0.0.5; extra == "vlmeval"
+ Provides-Extra: rag
+ Requires-Dist: ragas; extra == "rag"
+ Requires-Dist: mteb>=0.14.16; extra == "rag"
  Provides-Extra: inner
  Requires-Dist: absl-py; extra == "inner"
  Requires-Dist: accelerate; extra == "inner"
@@ -88,7 +91,7 @@ Requires-Dist: editdistance; extra == "all"
  Requires-Dist: jsonlines; extra == "all"
  Requires-Dist: matplotlib; extra == "all"
  Requires-Dist: modelscope[framework]; extra == "all"
- Requires-Dist: nltk; extra == "all"
+ Requires-Dist: nltk>=3.9; extra == "all"
  Requires-Dist: openai; extra == "all"
  Requires-Dist: pandas; extra == "all"
  Requires-Dist: plotly; extra == "all"
@@ -98,7 +101,7 @@ Requires-Dist: pyyaml; extra == "all"
  Requires-Dist: regex; extra == "all"
  Requires-Dist: requests; extra == "all"
  Requires-Dist: requests-toolbelt; extra == "all"
- Requires-Dist: rouge-score; extra == "all"
+ Requires-Dist: rouge-score>=0.1.0; extra == "all"
  Requires-Dist: sacrebleu; extra == "all"
  Requires-Dist: scikit-learn; extra == "all"
  Requires-Dist: seaborn; extra == "all"
@@ -111,8 +114,10 @@ Requires-Dist: transformers>=4.33; extra == "all"
  Requires-Dist: transformers_stream_generator; extra == "all"
  Requires-Dist: jieba; extra == "all"
  Requires-Dist: rouge-chinese; extra == "all"
- Requires-Dist: ms-opencompass>=0.1.0; extra == "all"
+ Requires-Dist: ms-opencompass>=0.1.1; extra == "all"
  Requires-Dist: ms-vlmeval>=0.0.5; extra == "all"
+ Requires-Dist: ragas; extra == "all"
+ Requires-Dist: mteb>=0.14.16; extra == "all"

  English | [简体中文](README_zh.md)

@@ -145,30 +150,11 @@ English | [简体中文](README_zh.md)

  ## 📝 Introduction

- Large Model (including Large Language Models, Multi-modal Large Language Models) evaluation has become a critical process for assessing and improving LLMs. To better support the evaluation of large models, we propose the EvalScope framework.
-
- ### Framework Features
- - **Benchmark Datasets**: Preloaded with several commonly used test benchmarks, including MMLU, CMMLU, C-Eval, GSM8K, ARC, HellaSwag, TruthfulQA, MATH, HumanEval, etc.
- - **Evaluation Metrics**: Implements various commonly used evaluation metrics.
- - **Model Access**: A unified model access mechanism that is compatible with the Generate and Chat interfaces of multiple model families.
- - **Automated Evaluation**: Includes automatic evaluation of objective questions and complex task evaluation using expert models.
- - **Evaluation Reports**: Automatically generates evaluation reports.
- - **Arena Mode**: Used for comparisons between models and objective evaluation of models, supporting various evaluation modes, including:
- - **Single mode**: Scoring a single model.
- - **Pairwise-baseline mode**: Comparing against a baseline model.
- - **Pairwise (all) mode**: Pairwise comparison among all models.
- - **Visualization Tools**: Provides intuitive displays of evaluation results.
- - **Model Performance Evaluation**: Offers a performance testing tool for model inference services and detailed statistics, see [Model Performance Evaluation Documentation](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test.html).
- - **OpenCompass Integration**: Supports OpenCompass as the evaluation backend, providing advanced encapsulation and task simplification, allowing for easier task submission for evaluation.
- - **VLMEvalKit Integration**: Supports VLMEvalKit as the evaluation backend, facilitating the initiation of multi-modal evaluation tasks, supporting various multi-modal models and datasets.
- - **Full-Link Support**: Through seamless integration with the [ms-swift](https://github.com/modelscope/ms-swift) training framework, provides a one-stop development process for model training, model deployment, model evaluation, and report viewing, enhancing user development efficiency.
-
-
- <details><summary>Overall Architecture</summary>
+ EvalScope is the official model evaluation and performance benchmarking framework launched by the [ModelScope](https://modelscope.cn/) community. It comes with built-in common benchmarks and evaluation metrics, such as MMLU, CMMLU, C-Eval, GSM8K, ARC, HellaSwag, TruthfulQA, MATH, and HumanEval. EvalScope supports various types of model evaluations, including LLMs, multimodal LLMs, embedding models, and reranker models. It is also applicable to multiple evaluation scenarios, such as end-to-end RAG evaluation, arena mode, and model inference performance stress testing. Moreover, with the seamless integration of the ms-swift training framework, evaluations can be initiated with a single click, providing full end-to-end support from model training to evaluation 🚀

  <p align="center">
  <img src="docs/en/_static/images/evalscope_framework.png" width="70%">
- <br>Fig 1. EvalScope Framework.
+ <br>EvalScope Framework.
  </p>

  The architecture includes the following modules:
@@ -178,14 +164,15 @@ The architecture includes the following modules:
  - **Native**: EvalScope’s own **default evaluation framework**, supporting various evaluation modes, including single model evaluation, arena mode, baseline model comparison mode, etc.
  - **OpenCompass**: Supports [OpenCompass](https://github.com/open-compass/opencompass) as the evaluation backend, providing advanced encapsulation and task simplification, allowing you to submit tasks for evaluation more easily.
  - **VLMEvalKit**: Supports [VLMEvalKit](https://github.com/open-compass/VLMEvalKit) as the evaluation backend, enabling easy initiation of multi-modal evaluation tasks, supporting various multi-modal models and datasets.
+ - **RAGEval**: Supports RAG evaluation, supporting independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html).
  - **ThirdParty**: Other third-party evaluation tasks, such as ToolBench.
  4. **Performance Evaluator**: Model performance evaluation, responsible for measuring model inference service performance, including performance testing, stress testing, performance report generation, and visualization.
  5. **Evaluation Report**: The final generated evaluation report summarizes the model's performance, which can be used for decision-making and further model optimization.
  6. **Visualization**: Visualization results help users intuitively understand evaluation results, facilitating analysis and comparison of different model performances.
- </details>


  ## 🎉 News
+ - 🔥 **[2024.10.8]** Support for RAG evaluation, including independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html).
  - 🔥 **[2024.09.18]** Our documentation has been updated to include a blog module, featuring some technical research and discussions related to evaluations. We invite you to [📖 read it](https://evalscope.readthedocs.io/en/refact_readme/blog/index.html).
  - 🔥 **[2024.09.12]** Support for LongWriter evaluation, which supports 10,000+ word generation. You can use the benchmark [LongBench-Write](evalscope/third_party/longbench_write/README.md) to measure the long output quality as well as the output length.
  - 🔥 **[2024.08.30]** Support for custom dataset evaluations, including text datasets and multimodal image-text datasets.
@@ -356,9 +343,10 @@ run_task(task_cfg=your_task_cfg)
  ## Evaluation Backend
  EvalScope supports using third-party evaluation frameworks to initiate evaluation tasks, which we call Evaluation Backend. Currently supported Evaluation Backend includes:
  - **Native**: EvalScope's own **default evaluation framework**, supporting various evaluation modes including single model evaluation, arena mode, and baseline model comparison mode.
- - [OpenCompass](https://github.com/open-compass/opencompass): Initiate OpenCompass evaluation tasks through EvalScope. Lightweight, easy to customize, supports seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/opencompass_backend.html)
- - [VLMEvalKit](https://github.com/open-compass/VLMEvalKit): Initiate VLMEvalKit multimodal evaluation tasks through EvalScope. Supports various multimodal models and datasets, and offers seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/vlmevalkit_backend.html)
- - **ThirdParty**: The third-party task, e.g. [ToolBench](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html), you can contribute your own evaluation task to EvalScope as third-party backend.
+ - [OpenCompass](https://github.com/open-compass/opencompass): Initiate OpenCompass evaluation tasks through EvalScope. Lightweight, easy to customize, supports seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/opencompass_backend.html)
+ - [VLMEvalKit](https://github.com/open-compass/VLMEvalKit): Initiate VLMEvalKit multimodal evaluation tasks through EvalScope. Supports various multimodal models and datasets, and offers seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/vlmevalkit_backend.html)
+ - **RAGEval**: Initiate RAG evaluation tasks through EvalScope, supporting independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html): [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/index.html)
+ - **ThirdParty**: Third-party evaluation tasks, such as [ToolBench](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) and [LongBench-Write](https://evalscope.readthedocs.io/en/latest/third_party/longwriter.html).

  ## Custom Dataset Evaluation
  EvalScope supports custom dataset evaluation. For detailed information, please refer to the Custom Dataset Evaluation [📖User Guide](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset.html)
@@ -387,6 +375,8 @@ Refer to : [Leaderboard](https://modelscope.cn/leaderboard/58/ranking?type=free)


  ## TO-DO List
+ - [x] RAG evaluation
+ - [x] VLM evaluation
  - [x] Agents evaluation
  - [x] vLLM
  - [ ] Distributed evaluating
@@ -398,3 +388,7 @@ Refer to : [Leaderboard](https://modelscope.cn/leaderboard/58/ranking?type=free)
  - [ ] Auto-reviewer
  - [ ] Qwen-max

+
+ ## Star History
+
+ [![Star History Chart](https://api.star-history.com/svg?repos=modelscope/evalscope&type=Date)](https://star-history.com/#modelscope/evalscope&Date)
{evalscope-0.5.4 → evalscope-0.5.5}/README.md

@@ -29,30 +29,11 @@ English | [简体中文](README_zh.md)

  ## 📝 Introduction

- Large Model (including Large Language Models, Multi-modal Large Language Models) evaluation has become a critical process for assessing and improving LLMs. To better support the evaluation of large models, we propose the EvalScope framework.
-
- ### Framework Features
- - **Benchmark Datasets**: Preloaded with several commonly used test benchmarks, including MMLU, CMMLU, C-Eval, GSM8K, ARC, HellaSwag, TruthfulQA, MATH, HumanEval, etc.
- - **Evaluation Metrics**: Implements various commonly used evaluation metrics.
- - **Model Access**: A unified model access mechanism that is compatible with the Generate and Chat interfaces of multiple model families.
- - **Automated Evaluation**: Includes automatic evaluation of objective questions and complex task evaluation using expert models.
- - **Evaluation Reports**: Automatically generates evaluation reports.
- - **Arena Mode**: Used for comparisons between models and objective evaluation of models, supporting various evaluation modes, including:
- - **Single mode**: Scoring a single model.
- - **Pairwise-baseline mode**: Comparing against a baseline model.
- - **Pairwise (all) mode**: Pairwise comparison among all models.
- - **Visualization Tools**: Provides intuitive displays of evaluation results.
- - **Model Performance Evaluation**: Offers a performance testing tool for model inference services and detailed statistics, see [Model Performance Evaluation Documentation](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test.html).
- - **OpenCompass Integration**: Supports OpenCompass as the evaluation backend, providing advanced encapsulation and task simplification, allowing for easier task submission for evaluation.
- - **VLMEvalKit Integration**: Supports VLMEvalKit as the evaluation backend, facilitating the initiation of multi-modal evaluation tasks, supporting various multi-modal models and datasets.
- - **Full-Link Support**: Through seamless integration with the [ms-swift](https://github.com/modelscope/ms-swift) training framework, provides a one-stop development process for model training, model deployment, model evaluation, and report viewing, enhancing user development efficiency.
-
-
- <details><summary>Overall Architecture</summary>
+ EvalScope is the official model evaluation and performance benchmarking framework launched by the [ModelScope](https://modelscope.cn/) community. It comes with built-in common benchmarks and evaluation metrics, such as MMLU, CMMLU, C-Eval, GSM8K, ARC, HellaSwag, TruthfulQA, MATH, and HumanEval. EvalScope supports various types of model evaluations, including LLMs, multimodal LLMs, embedding models, and reranker models. It is also applicable to multiple evaluation scenarios, such as end-to-end RAG evaluation, arena mode, and model inference performance stress testing. Moreover, with the seamless integration of the ms-swift training framework, evaluations can be initiated with a single click, providing full end-to-end support from model training to evaluation 🚀

  <p align="center">
  <img src="docs/en/_static/images/evalscope_framework.png" width="70%">
- <br>Fig 1. EvalScope Framework.
+ <br>EvalScope Framework.
  </p>

  The architecture includes the following modules:
@@ -62,14 +43,15 @@ The architecture includes the following modules:
  - **Native**: EvalScope’s own **default evaluation framework**, supporting various evaluation modes, including single model evaluation, arena mode, baseline model comparison mode, etc.
  - **OpenCompass**: Supports [OpenCompass](https://github.com/open-compass/opencompass) as the evaluation backend, providing advanced encapsulation and task simplification, allowing you to submit tasks for evaluation more easily.
  - **VLMEvalKit**: Supports [VLMEvalKit](https://github.com/open-compass/VLMEvalKit) as the evaluation backend, enabling easy initiation of multi-modal evaluation tasks, supporting various multi-modal models and datasets.
+ - **RAGEval**: Supports RAG evaluation, supporting independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html).
  - **ThirdParty**: Other third-party evaluation tasks, such as ToolBench.
  4. **Performance Evaluator**: Model performance evaluation, responsible for measuring model inference service performance, including performance testing, stress testing, performance report generation, and visualization.
  5. **Evaluation Report**: The final generated evaluation report summarizes the model's performance, which can be used for decision-making and further model optimization.
  6. **Visualization**: Visualization results help users intuitively understand evaluation results, facilitating analysis and comparison of different model performances.
- </details>


  ## 🎉 News
+ - 🔥 **[2024.10.8]** Support for RAG evaluation, including independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html).
  - 🔥 **[2024.09.18]** Our documentation has been updated to include a blog module, featuring some technical research and discussions related to evaluations. We invite you to [📖 read it](https://evalscope.readthedocs.io/en/refact_readme/blog/index.html).
  - 🔥 **[2024.09.12]** Support for LongWriter evaluation, which supports 10,000+ word generation. You can use the benchmark [LongBench-Write](evalscope/third_party/longbench_write/README.md) to measure the long output quality as well as the output length.
  - 🔥 **[2024.08.30]** Support for custom dataset evaluations, including text datasets and multimodal image-text datasets.
@@ -240,9 +222,10 @@ run_task(task_cfg=your_task_cfg)
  ## Evaluation Backend
  EvalScope supports using third-party evaluation frameworks to initiate evaluation tasks, which we call Evaluation Backend. Currently supported Evaluation Backend includes:
  - **Native**: EvalScope's own **default evaluation framework**, supporting various evaluation modes including single model evaluation, arena mode, and baseline model comparison mode.
- - [OpenCompass](https://github.com/open-compass/opencompass): Initiate OpenCompass evaluation tasks through EvalScope. Lightweight, easy to customize, supports seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/opencompass_backend.html)
- - [VLMEvalKit](https://github.com/open-compass/VLMEvalKit): Initiate VLMEvalKit multimodal evaluation tasks through EvalScope. Supports various multimodal models and datasets, and offers seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/vlmevalkit_backend.html)
- - **ThirdParty**: The third-party task, e.g. [ToolBench](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html), you can contribute your own evaluation task to EvalScope as third-party backend.
+ - [OpenCompass](https://github.com/open-compass/opencompass): Initiate OpenCompass evaluation tasks through EvalScope. Lightweight, easy to customize, supports seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/opencompass_backend.html)
+ - [VLMEvalKit](https://github.com/open-compass/VLMEvalKit): Initiate VLMEvalKit multimodal evaluation tasks through EvalScope. Supports various multimodal models and datasets, and offers seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/vlmevalkit_backend.html)
+ - **RAGEval**: Initiate RAG evaluation tasks through EvalScope, supporting independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html): [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/index.html)
+ - **ThirdParty**: Third-party evaluation tasks, such as [ToolBench](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) and [LongBench-Write](https://evalscope.readthedocs.io/en/latest/third_party/longwriter.html).

  ## Custom Dataset Evaluation
  EvalScope supports custom dataset evaluation. For detailed information, please refer to the Custom Dataset Evaluation [📖User Guide](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset.html)
@@ -271,6 +254,8 @@ Refer to : [Leaderboard](https://modelscope.cn/leaderboard/58/ranking?type=free)


  ## TO-DO List
+ - [x] RAG evaluation
+ - [x] VLM evaluation
  - [x] Agents evaluation
  - [x] vLLM
  - [ ] Distributed evaluating
@@ -282,3 +267,7 @@ Refer to : [Leaderboard](https://modelscope.cn/leaderboard/58/ranking?type=free)
  - [ ] Auto-reviewer
  - [ ] Qwen-max

+
+ ## Star History
+
+ [![Star History Chart](https://api.star-history.com/svg?repos=modelscope/evalscope&type=Date)](https://star-history.com/#modelscope/evalscope&Date)
{evalscope-0.5.4 → evalscope-0.5.5}/evalscope/backend/opencompass/tasks/eval_datasets.py

@@ -7,7 +7,7 @@ with read_base():
      from opencompass.configs.datasets.agieval.agieval_gen_64afd3 import agieval_datasets
      from opencompass.configs.datasets.GaokaoBench.GaokaoBench_gen_5cfe9e import GaokaoBench_datasets
      from opencompass.configs.datasets.humaneval.humaneval_gen_8e312c import humaneval_datasets
-     from opencompass.configs.datasets.mbpp.deprecated_mbpp_gen_1e1056 import mbpp_datasets
+     from opencompass.configs.datasets.mbpp.mbpp_gen_830460 import mbpp_datasets
      from opencompass.configs.datasets.CLUE_C3.CLUE_C3_gen_8c358f import C3_datasets
      from opencompass.configs.datasets.CLUE_CMRC.CLUE_CMRC_gen_1bd3c8 import CMRC_datasets
      from opencompass.configs.datasets.CLUE_DRCD.CLUE_DRCD_gen_1bd3c8 import DRCD_datasets
@@ -45,7 +45,7 @@ with read_base():
      from opencompass.configs.datasets.piqa.piqa_gen_1194eb import piqa_datasets
      from opencompass.configs.datasets.siqa.siqa_gen_e78df3 import siqa_datasets
      from opencompass.configs.datasets.strategyqa.strategyqa_gen_1180a7 import strategyqa_datasets
-     from opencompass.configs.datasets.winogrande.deprecated_winogrande_gen_a9ede5 import winogrande_datasets
+     from opencompass.configs.datasets.winogrande.winogrande_gen_458220 import winogrande_datasets
      from opencompass.configs.datasets.obqa.obqa_gen_9069e4 import obqa_datasets
      from opencompass.configs.datasets.nq.nq_gen_c788f6 import nq_datasets
      from opencompass.configs.datasets.triviaqa.triviaqa_gen_2121ce import triviaqa_datasets
evalscope-0.5.5/evalscope/backend/rag_eval/__init__.py (new file)

@@ -0,0 +1,3 @@
+ from evalscope.backend.rag_eval.utils.embedding import EmbeddingModel
+ from evalscope.backend.rag_eval.utils.llm import LLM
+ from evalscope.backend.rag_eval.backend_manager import RAGEvalBackendManager
evalscope-0.5.5/evalscope/backend/rag_eval/backend_manager.py (new file)

@@ -0,0 +1,68 @@
+ import os
+ from typing import Optional, Union
+ from evalscope.utils import is_module_installed, get_valid_list
+ from evalscope.backend.base import BackendManager
+ from evalscope.utils.logger import get_logger
+
+
+ logger = get_logger()
+
+
+ class RAGEvalBackendManager(BackendManager):
+     def __init__(self, config: Union[str, dict], **kwargs):
+         """BackendManager for VLM Evaluation Kit
+
+         Args:
+             config (Union[str, dict]): the configuration yaml-file or the configuration dictionary
+         """
+         super().__init__(config, **kwargs)
+
+     @staticmethod
+     def _check_env(module_name: str):
+         if is_module_installed(module_name):
+             logger.info(f"Check `{module_name}` Installed")
+         else:
+             logger.error(f"Please install `{module_name}` first")
+
+     def run_mteb(self):
+         from evalscope.backend.rag_eval.cmteb import ModelArguments, EvalArguments
+         from evalscope.backend.rag_eval.cmteb import one_stage_eval, two_stage_eval
+
+         if len(self.model_args) > 2:
+             raise ValueError("Not support multiple models yet")
+
+         # Convert arguments to dictionary
+         model_args_list = [ModelArguments(**args).to_dict() for args in self.model_args]
+         eval_args = EvalArguments(**self.eval_args).to_dict()
+
+         if len(model_args_list) == 1:
+             one_stage_eval(model_args_list[0], eval_args)
+         else: # len(model_args_list) == 2
+             two_stage_eval(model_args_list[0], model_args_list[1], eval_args)
+
+     def run_ragas(self):
+         from evalscope.backend.rag_eval.ragas import rag_eval, testset_generation
+         from evalscope.backend.rag_eval.ragas import (
+             TestsetGenerationArguments,
+             EvaluationArguments,
+         )
+
+         if self.testset_args is not None:
+             testset_generation(TestsetGenerationArguments(**self.testset_args))
+         if self.eval_args is not None:
+             rag_eval(EvaluationArguments(**self.eval_args))
+
+     def run(self, *args, **kwargs):
+         tool = self.config_d.pop("tool")
+         if tool.lower() == "mteb":
+             self._check_env("mteb")
+             self.model_args = self.config_d["model"]
+             self.eval_args = self.config_d["eval"]
+             self.run_mteb()
+         elif tool.lower() == "ragas":
+             self._check_env("ragas")
+             self.testset_args = self.config_d.get("testset_generation", None)
+             self.eval_args = self.config_d.get("eval", None)
+             self.run_ragas()
+         else:
+             raise ValueError(f"Unknown tool: {tool}")
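For orientation, the `run()` method above selects a tool and hands the rest of the configuration to the matching helper. Below is a minimal sketch of a dictionary-style configuration for the MTEB branch; the model path and the task name are placeholders, and it assumes the `BackendManager` base class accepts a plain dictionary and exposes it as `config_d`, as the docstring suggests.

```python
from evalscope.backend.rag_eval import RAGEvalBackendManager

# Hypothetical configuration: `tool` picks the branch in run(); for "mteb",
# `model` is a list of ModelArguments-style dicts and `eval` maps onto EvalArguments.
config = {
    "tool": "MTEB",
    "model": [
        {"model_name_or_path": "path/to/embedding-model"},  # placeholder model path
    ],
    "eval": {
        "tasks": ["TNews"],           # placeholder CMTEB task name
        "output_folder": "outputs",
        "limits": 100,                # cap the number of samples for a quick run
    },
}

RAGEvalBackendManager(config=config).run()
```

Supplying two entries under `model` would instead route through `two_stage_eval`, with the second entry treated as the reranking stage.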
evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/__init__.py (new file)

@@ -0,0 +1,4 @@
+ from evalscope.backend.rag_eval.cmteb.tasks import *
+ from evalscope.backend.rag_eval.cmteb.base import *
+ from evalscope.backend.rag_eval.cmteb.arguments import ModelArguments, EvalArguments
+ from evalscope.backend.rag_eval.cmteb.task_template import one_stage_eval, two_stage_eval
evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/arguments.py (new file)

@@ -0,0 +1,59 @@
+ from dataclasses import dataclass, field
+ from typing import List, Optional, Union, Dict, Any
+
+
+ @dataclass
+ class ModelArguments:
+     # Arguments for embeding model: sentence transformer or cross encoder
+     model_name_or_path: str = "" # model name or path
+     is_cross_encoder: bool = False # whether the model is a cross encoder
+     # pooling mode: Either “cls”, “lasttoken”, “max”, “mean”, “mean_sqrt_len_tokens”, or “weightedmean”.
+     pooling_mode: Optional[str] = None
+     max_seq_length: int = 512 # max sequence length
+     # prompt for llm based model
+     prompt: str = ""
+     # model kwargs
+     model_kwargs: dict = field(default_factory=lambda: {"torch_dtype": "auto"})
+     # config kwargs
+     config_kwargs: Dict[str, Any] = field(default_factory=dict)
+     # encode kwargs
+     encode_kwargs: dict = field(
+         default_factory=lambda: {"show_progress_bar": True, "batch_size": 32}
+     )
+     hub: str = "modelscope" # modelscope or huggingface
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {
+             "model_name_or_path": self.model_name_or_path,
+             "is_cross_encoder": self.is_cross_encoder,
+             "pooling_mode": self.pooling_mode,
+             "max_seq_length": self.max_seq_length,
+             "prompt": self.prompt,
+             "model_kwargs": self.model_kwargs,
+             "config_kwargs": self.config_kwargs,
+             "encode_kwargs": self.encode_kwargs,
+             "hub": self.hub,
+         }
+
+
+ @dataclass
+ class EvalArguments:
+     # Evaluation
+     tasks: List[str] = field(default_factory=list) # task names
+     verbosity: int = 2 # verbosity level 0-3
+     output_folder: str = "outputs" # output folder
+     overwrite_results: bool = True # overwrite results
+     limits: Optional[int] = None # limit number of samples
+     hub: str = "modelscope" # modelscope or huggingface
+     top_k: int = 5
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {
+             "tasks": self.tasks,
+             "verbosity": self.verbosity,
+             "output_folder": self.output_folder,
+             "overwrite_results": self.overwrite_results,
+             "limits": self.limits,
+             "hub": self.hub,
+             "top_k": 5,
+         }
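As a quick reference, this is roughly how the two dataclasses above turn into the plain dictionaries that `run_mteb()` feeds to the task templates; the model path and task name are placeholders.

```python
from evalscope.backend.rag_eval.cmteb import ModelArguments, EvalArguments

# Placeholder identifiers; unset fields keep the defaults shown above.
model_args = ModelArguments(model_name_or_path="path/to/embedding-model").to_dict()
eval_args = EvalArguments(tasks=["TNews"], limits=100).to_dict()

print(model_args["max_seq_length"])  # 512 (default)
print(eval_args["output_folder"])    # 'outputs' (default)
```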
evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/base.py (new file)

@@ -0,0 +1,89 @@
+ from collections import defaultdict
+ from typing import List
+ from mteb import AbsTask
+ from datasets import DatasetDict
+ from modelscope import MsDataset
+ import datasets
+ from evalscope.backend.rag_eval.cmteb.tasks import CLS_DICT, CLS_RETRIEVAL
+
+ __all__ = ["TaskBase"]
+
+
+
+ class TaskBase:
+
+     @staticmethod
+     def get_tasks(task_names, **kwargs) -> List[AbsTask]:
+
+         return [TaskBase.get_task(task_name, **kwargs) for task_name in task_names]
+
+     @staticmethod
+     def get_task(task_name, **kwargs) -> AbsTask:
+
+         if task_name not in CLS_DICT:
+             from mteb.overview import TASKS_REGISTRY
+
+             task_cls = TASKS_REGISTRY[task_name]
+             if task_cls.metadata.type != "Retrieval":
+                 task_cls.load_data = load_data
+         else:
+             task_cls = CLS_DICT[task_name]
+             task_cls.load_data = load_data
+         # init task instance
+         task_instance = task_cls()
+         return task_instance
+
+
+ def load_data(self, **kwargs):
+     """Load dataset from the hub, compatible with ModelScope and Hugging Face."""
+     if self.data_loaded:
+         return
+
+     limits = kwargs.get("limits", None)
+     hub = kwargs.get("hub", "modelscope")
+     name = self.metadata_dict.get("name")
+     path = self.metadata_dict["dataset"].get("path")
+
+     assert path is not None, "Path must be specified in dataset"
+
+     # Loading the dataset based on the source hub
+     if hub == "modelscope":
+         import re
+
+         path = re.sub(r"^mteb/", "MTEB/", path)
+         dataset = MsDataset.load(path)
+     else:
+         dataset = datasets.load_dataset(**self.metadata_dict["dataset"]) # type: ignore
+
+     if limits is not None:
+         dataset = {
+             split: dataset[split].select(range(min(limits, len(dataset[split]))))
+             for split in dataset.keys()
+         }
+
+     if name in CLS_RETRIEVAL:
+         self.corpus, self.queries, self.relevant_docs = load_retrieval_data(
+             dataset,
+             path,
+             self.metadata_dict["eval_splits"],
+         )
+
+     self.dataset = dataset
+     self.dataset_transform()
+     self.data_loaded = True
+
+
+ def load_retrieval_data(dataset, dataset_name: str, eval_splits: list) -> tuple:
+     eval_split = eval_splits[0]
+     qrels = MsDataset.load(dataset_name + "-qrels")[eval_split]
+
+     corpus = {e["id"]: {"text": e["text"]} for e in dataset["corpus"]}
+     queries = {e["id"]: e["text"] for e in dataset["queries"]}
+     relevant_docs = defaultdict(dict)
+     for e in qrels:
+         relevant_docs[e["qid"]][e["pid"]] = e["score"]
+
+     corpus = DatasetDict({eval_split: corpus})
+     queries = DatasetDict({eval_split: queries})
+     relevant_docs = DatasetDict({eval_split: relevant_docs})
+     return corpus, queries, relevant_docs
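A small sketch of how `TaskBase` is meant to be used: `get_task()` swaps in the ModelScope-aware `load_data` defined above, which `mteb` normally invokes during `evaluation.run(...)`. Calling it directly, as below, is only for illustration, and the task name is a placeholder.

```python
from evalscope.backend.rag_eval.cmteb import TaskBase

# Placeholder task name; get_task() patches load_data onto the task class.
task = TaskBase.get_task("TNews")
task.load_data(hub="modelscope", limits=100)  # load at most 100 samples per split
print(type(task).__name__, task.data_loaded)
```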
evalscope-0.5.5/evalscope/backend/rag_eval/cmteb/task_template.py (new file)

@@ -0,0 +1,83 @@
+ import os
+ import mteb
+ from evalscope.backend.rag_eval import EmbeddingModel
+ from evalscope.backend.rag_eval import cmteb
+ from mteb.task_selection import results_to_dataframe
+ from evalscope.utils.logger import get_logger
+
+ logger = get_logger()
+
+
+ def show_results(output_folder, model, results):
+     model_name = model.mteb_model_meta.model_name_as_path()
+     revision = model.mteb_model_meta.revision
+
+     results_df = results_to_dataframe({model_name: {revision: results}})
+
+     save_path = os.path.join(
+         output_folder,
+         model_name,
+         revision,
+     )
+     logger.info(f"Evaluation results:\n{results_df.to_markdown()}")
+     logger.info(f"Evaluation results saved in {os.path.abspath(save_path)}")
+
+
+ def one_stage_eval(
+     model_args,
+     eval_args,
+ ) -> None:
+     # load model
+     model = EmbeddingModel.load(**model_args)
+
+     # load task first to update instructions
+     tasks = cmteb.TaskBase.get_tasks(task_names=eval_args["tasks"])
+     evaluation = mteb.MTEB(tasks=tasks)
+
+     # run evaluation
+     results = evaluation.run(model, **eval_args)
+
+     # save and log results
+     show_results(eval_args["output_folder"], model, results)
+
+
+ def two_stage_eval(
+     model1_args,
+     model2_args,
+     eval_args,
+ ) -> None:
+     """a two-stage run with the second stage reading results saved from the first stage."""
+     # load model
+     dual_encoder = EmbeddingModel.load(**model1_args)
+     cross_encoder = EmbeddingModel.load(**model2_args)
+
+     first_stage_path = f"{eval_args['output_folder']}/stage1"
+     second_stage_path = f"{eval_args['output_folder']}/stage2"
+
+     tasks = cmteb.TaskBase.get_tasks(task_names=eval_args["tasks"])
+     for task in tasks:
+         evaluation = mteb.MTEB(tasks=[task])
+
+         # stage 1: run dual encoder
+         evaluation.run(
+             dual_encoder,
+             save_predictions=True,
+             output_folder=first_stage_path,
+             overwrite_results=True,
+             hub=eval_args["hub"],
+             limits=eval_args["limits"],
+         )
+         # stage 2: run cross encoder
+         results = evaluation.run(
+             cross_encoder,
+             top_k=eval_args["top_k"],
+             save_predictions=True,
+             output_folder=second_stage_path,
+             previous_results=f"{first_stage_path}/{task.metadata.name}_default_predictions.json",
+             overwrite_results=True,
+             hub=eval_args["hub"],
+             limits=eval_args["limits"],
+         )
+
+     # save and log results
+     show_results(second_stage_path, cross_encoder, results)
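To close the loop with `run_mteb()` in the backend manager, here is a hedged sketch of calling these templates directly with dictionaries produced by the argument dataclasses; the model paths and the retrieval task name are placeholders rather than verified ModelScope identifiers.

```python
from evalscope.backend.rag_eval.cmteb import (
    ModelArguments,
    EvalArguments,
    one_stage_eval,
    two_stage_eval,
)

# Placeholder model paths and task name.
embedder = ModelArguments(model_name_or_path="path/to/embedding-model").to_dict()
reranker = ModelArguments(model_name_or_path="path/to/reranker", is_cross_encoder=True).to_dict()
eval_args = EvalArguments(tasks=["T2Retrieval"], output_folder="outputs", limits=100).to_dict()

# Single stage: evaluate the embedding model on its own.
one_stage_eval(embedder, eval_args)

# Two stages: the cross encoder reranks the top_k predictions that stage 1
# saved under outputs/stage1, via the previous_results wiring above.
two_stage_eval(embedder, reranker, eval_args)
```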