evalscope 0.8.1__tar.gz → 0.8.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of evalscope might be problematic.

Files changed (302)
  1. {evalscope-0.8.1/evalscope.egg-info → evalscope-0.8.2}/PKG-INFO +15 -3
  2. {evalscope-0.8.1 → evalscope-0.8.2}/README.md +12 -0
  3. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/config.py +3 -1
  4. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/evaluator/evaluator.py +1 -0
  5. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/models/model_adapter.py +1 -1
  6. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/arguments.py +1 -0
  7. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/benchmark.py +1 -1
  8. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/main.py +3 -1
  9. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/plugin/api/openai_api.py +51 -47
  10. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/utils/local_server.py +1 -0
  11. evalscope-0.8.2/evalscope/version.py +4 -0
  12. {evalscope-0.8.1 → evalscope-0.8.2/evalscope.egg-info}/PKG-INFO +15 -3
  13. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope.egg-info/SOURCES.txt +0 -20
  14. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope.egg-info/requires.txt +2 -2
  15. {evalscope-0.8.1 → evalscope-0.8.2}/requirements/rag.txt +1 -1
  16. {evalscope-0.8.1 → evalscope-0.8.2}/tests/perf/test_perf.py +3 -3
  17. evalscope-0.8.1/evalscope/backend/rag_eval/ragas/prompts/chinese/AnswerCorrectness/correctness_prompt_chinese.json +0 -87
  18. evalscope-0.8.1/evalscope/backend/rag_eval/ragas/prompts/chinese/AnswerCorrectness/long_form_answer_prompt_chinese.json +0 -36
  19. evalscope-0.8.1/evalscope/backend/rag_eval/ragas/prompts/chinese/AnswerRelevancy/question_generation_chinese.json +0 -26
  20. evalscope-0.8.1/evalscope/backend/rag_eval/ragas/prompts/chinese/ContextPrecision/context_precision_prompt_chinese.json +0 -41
  21. evalscope-0.8.1/evalscope/backend/rag_eval/ragas/prompts/chinese/CustomNodeFilter/scoring_prompt_chinese.json +0 -7
  22. evalscope-0.8.1/evalscope/backend/rag_eval/ragas/prompts/chinese/Faithfulness/nli_statements_message_chinese.json +0 -60
  23. evalscope-0.8.1/evalscope/backend/rag_eval/ragas/prompts/chinese/Faithfulness/statement_prompt_chinese.json +0 -36
  24. evalscope-0.8.1/evalscope/backend/rag_eval/ragas/prompts/chinese/HeadlinesExtractor/prompt_chinese.json +0 -24
  25. evalscope-0.8.1/evalscope/backend/rag_eval/ragas/prompts/chinese/MultiHopAbstractQuerySynthesizer/concept_combination_prompt_chinese.json +0 -35
  26. evalscope-0.8.1/evalscope/backend/rag_eval/ragas/prompts/chinese/MultiHopAbstractQuerySynthesizer/generate_query_reference_prompt_chinese.json +0 -30
  27. evalscope-0.8.1/evalscope/backend/rag_eval/ragas/prompts/chinese/MultiHopAbstractQuerySynthesizer/theme_persona_matching_prompt_chinese.json +0 -39
  28. evalscope-0.8.1/evalscope/backend/rag_eval/ragas/prompts/chinese/MultiHopSpecificQuerySynthesizer/generate_query_reference_prompt_chinese.json +0 -30
  29. evalscope-0.8.1/evalscope/backend/rag_eval/ragas/prompts/chinese/MultiHopSpecificQuerySynthesizer/theme_persona_matching_prompt_chinese.json +0 -39
  30. evalscope-0.8.1/evalscope/backend/rag_eval/ragas/prompts/chinese/MultiModalFaithfulness/faithfulness_prompt_chinese.json +0 -34
  31. evalscope-0.8.1/evalscope/backend/rag_eval/ragas/prompts/chinese/MultiModalRelevance/relevance_prompt_chinese.json +0 -36
  32. evalscope-0.8.1/evalscope/backend/rag_eval/ragas/prompts/chinese/NERExtractor/prompt_chinese.json +0 -25
  33. evalscope-0.8.1/evalscope/backend/rag_eval/ragas/prompts/chinese/SingleHopSpecificQuerySynthesizer/generate_query_reference_prompt_chinese.json +0 -24
  34. evalscope-0.8.1/evalscope/backend/rag_eval/ragas/prompts/chinese/SingleHopSpecificQuerySynthesizer/theme_persona_matching_prompt_chinese.json +0 -39
  35. evalscope-0.8.1/evalscope/backend/rag_eval/ragas/prompts/chinese/SummaryExtractor/prompt_chinese.json +0 -16
  36. evalscope-0.8.1/evalscope/backend/rag_eval/ragas/prompts/chinese/ThemesExtractor/prompt_chinese.json +0 -24
  37. evalscope-0.8.1/evalscope/version.py +0 -4
  38. {evalscope-0.8.1 → evalscope-0.8.2}/LICENSE +0 -0
  39. {evalscope-0.8.1 → evalscope-0.8.2}/MANIFEST.in +0 -0
  40. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/__init__.py +0 -0
  41. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/arguments.py +0 -0
  42. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/__init__.py +0 -0
  43. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/base.py +0 -0
  44. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/opencompass/__init__.py +0 -0
  45. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/opencompass/api_meta_template.py +0 -0
  46. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/opencompass/backend_manager.py +0 -0
  47. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/opencompass/tasks/__init__.py +0 -0
  48. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/opencompass/tasks/eval_api.py +0 -0
  49. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/opencompass/tasks/eval_datasets.py +0 -0
  50. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/__init__.py +0 -0
  51. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/backend_manager.py +0 -0
  52. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/clip_benchmark/__init__.py +0 -0
  53. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/clip_benchmark/arguments.py +0 -0
  54. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +0 -0
  55. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/clip_benchmark/task_template.py +0 -0
  56. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/clip_benchmark/tasks/__init__.py +0 -0
  57. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/clip_benchmark/tasks/image_caption.py +0 -0
  58. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/clip_benchmark/tasks/zeroshot_classification.py +0 -0
  59. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/clip_benchmark/tasks/zeroshot_retrieval.py +0 -0
  60. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/clip_benchmark/utils/webdataset_convert.py +0 -0
  61. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/clip_benchmark/utils/webdatasets.txt +0 -0
  62. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/cmteb/__init__.py +0 -0
  63. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/cmteb/arguments.py +0 -0
  64. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/cmteb/base.py +0 -0
  65. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/cmteb/task_template.py +0 -0
  66. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/cmteb/tasks/Classification.py +0 -0
  67. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/cmteb/tasks/Clustering.py +0 -0
  68. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/cmteb/tasks/CustomTask.py +0 -0
  69. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/cmteb/tasks/PairClassification.py +0 -0
  70. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/cmteb/tasks/Reranking.py +0 -0
  71. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/cmteb/tasks/Retrieval.py +0 -0
  72. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/cmteb/tasks/STS.py +0 -0
  73. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/cmteb/tasks/__init__.py +0 -0
  74. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/ragas/__init__.py +0 -0
  75. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/ragas/arguments.py +0 -0
  76. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/ragas/prompts/persona_prompt.py +0 -0
  77. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/ragas/task_template.py +0 -0
  78. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/ragas/tasks/__init__.py +0 -0
  79. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/ragas/tasks/build_distribution.py +0 -0
  80. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/ragas/tasks/build_transform.py +0 -0
  81. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +0 -0
  82. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +0 -0
  83. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/utils/__init__.py +0 -0
  84. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/utils/clip.py +0 -0
  85. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/utils/embedding.py +0 -0
  86. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/utils/llm.py +0 -0
  87. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/rag_eval/utils/tools.py +0 -0
  88. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/vlm_eval_kit/__init__.py +0 -0
  89. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/vlm_eval_kit/backend_manager.py +0 -0
  90. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/backend/vlm_eval_kit/custom_dataset.py +0 -0
  91. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/__init__.py +0 -0
  92. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/arc/__init__.py +0 -0
  93. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/arc/ai2_arc.py +0 -0
  94. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/arc/arc_adapter.py +0 -0
  95. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/__init__.py +0 -0
  96. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/bbh_adapter.py +0 -0
  97. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/boolean_expressions.txt +0 -0
  98. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/causal_judgement.txt +0 -0
  99. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/date_understanding.txt +0 -0
  100. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/disambiguation_qa.txt +0 -0
  101. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/dyck_languages.txt +0 -0
  102. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/formal_fallacies.txt +0 -0
  103. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/geometric_shapes.txt +0 -0
  104. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/hyperbaton.txt +0 -0
  105. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_five_objects.txt +0 -0
  106. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_seven_objects.txt +0 -0
  107. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_three_objects.txt +0 -0
  108. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/movie_recommendation.txt +0 -0
  109. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/multistep_arithmetic_two.txt +0 -0
  110. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/navigate.txt +0 -0
  111. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/object_counting.txt +0 -0
  112. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/penguins_in_a_table.txt +0 -0
  113. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/reasoning_about_colored_objects.txt +0 -0
  114. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/ruin_names.txt +0 -0
  115. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/salient_translation_error_detection.txt +0 -0
  116. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/snarks.txt +0 -0
  117. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/sports_understanding.txt +0 -0
  118. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/temporal_sequences.txt +0 -0
  119. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_five_objects.txt +0 -0
  120. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_seven_objects.txt +0 -0
  121. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_three_objects.txt +0 -0
  122. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/web_of_lies.txt +0 -0
  123. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/bbh/cot_prompts/word_sorting.txt +0 -0
  124. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/benchmark.py +0 -0
  125. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/ceval/__init__.py +0 -0
  126. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/ceval/ceval_adapter.py +0 -0
  127. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/ceval/ceval_exam.py +0 -0
  128. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/ceval/samples.jsonl +0 -0
  129. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/cmmlu/__init__.py +0 -0
  130. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/cmmlu/cmmlu.py +0 -0
  131. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/cmmlu/cmmlu_adapter.py +0 -0
  132. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/cmmlu/samples.jsonl +0 -0
  133. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/competition_math/__init__.py +0 -0
  134. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/competition_math/competition_math.py +0 -0
  135. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/competition_math/competition_math_adapter.py +0 -0
  136. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/data_adapter.py +0 -0
  137. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/general_qa/__init__.py +0 -0
  138. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/general_qa/general_qa_adapter.py +0 -0
  139. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/gsm8k/__init__.py +0 -0
  140. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/gsm8k/gsm8k.py +0 -0
  141. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/gsm8k/gsm8k_adapter.py +0 -0
  142. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/hellaswag/__init__.py +0 -0
  143. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/hellaswag/hellaswag.py +0 -0
  144. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/hellaswag/hellaswag_adapter.py +0 -0
  145. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/humaneval/__init__.py +0 -0
  146. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/humaneval/humaneval.py +0 -0
  147. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/humaneval/humaneval_adapter.py +0 -0
  148. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/mmlu/__init__.py +0 -0
  149. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/mmlu/mmlu.py +0 -0
  150. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/mmlu/mmlu_adapter.py +0 -0
  151. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/mmlu/samples.jsonl +0 -0
  152. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/race/__init__.py +0 -0
  153. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/race/race.py +0 -0
  154. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/race/race_adapter.py +0 -0
  155. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/race/samples.jsonl +0 -0
  156. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/trivia_qa/__init__.py +0 -0
  157. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/trivia_qa/samples.jsonl +0 -0
  158. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/trivia_qa/trivia_qa.py +0 -0
  159. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +0 -0
  160. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/truthful_qa/__init__.py +0 -0
  161. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/truthful_qa/truthful_qa.py +0 -0
  162. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +0 -0
  163. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/cli/__init__.py +0 -0
  164. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/cli/base.py +0 -0
  165. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/cli/cli.py +0 -0
  166. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/cli/start_eval.py +0 -0
  167. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/cli/start_perf.py +0 -0
  168. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/cli/start_server.py +0 -0
  169. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/constants.py +0 -0
  170. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/evaluator/__init__.py +0 -0
  171. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/evaluator/rating_eval.py +0 -0
  172. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/evaluator/reviewer/__init__.py +0 -0
  173. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/evaluator/reviewer/auto_reviewer.py +0 -0
  174. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/metrics/__init__.py +0 -0
  175. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/metrics/bundled_rouge_score/__init__.py +0 -0
  176. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/metrics/bundled_rouge_score/rouge_scorer.py +0 -0
  177. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/metrics/code_metric.py +0 -0
  178. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/metrics/math_accuracy.py +0 -0
  179. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/metrics/metrics.py +0 -0
  180. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/metrics/resources/gpt2-zhcn3-v4.bpe +0 -0
  181. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/metrics/resources/gpt2-zhcn3-v4.json +0 -0
  182. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/metrics/rouge_metric.py +0 -0
  183. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/models/__init__.py +0 -0
  184. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/models/api/__init__.py +0 -0
  185. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/models/api/openai_api.py +0 -0
  186. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/models/custom/__init__.py +0 -0
  187. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/models/custom/custom_model.py +0 -0
  188. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/models/dummy_chat_model.py +0 -0
  189. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/models/model.py +0 -0
  190. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/models/openai_model.py +0 -0
  191. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/__init__.py +0 -0
  192. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/http_client.py +0 -0
  193. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/plugin/__init__.py +0 -0
  194. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/plugin/api/__init__.py +0 -0
  195. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/plugin/api/base.py +0 -0
  196. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/plugin/api/custom_api.py +0 -0
  197. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/plugin/api/dashscope_api.py +0 -0
  198. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/plugin/datasets/__init__.py +0 -0
  199. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/plugin/datasets/base.py +0 -0
  200. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/plugin/datasets/custom.py +0 -0
  201. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/plugin/datasets/flickr8k.py +0 -0
  202. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/plugin/datasets/line_by_line.py +0 -0
  203. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/plugin/datasets/longalpaca.py +0 -0
  204. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/plugin/datasets/openqa.py +0 -0
  205. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/plugin/datasets/speed_benchmark.py +0 -0
  206. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/plugin/registry.py +0 -0
  207. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/utils/__init__.py +0 -0
  208. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/utils/analysis_result.py +0 -0
  209. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/utils/benchmark_util.py +0 -0
  210. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/utils/db_util.py +0 -0
  211. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/utils/handler.py +0 -0
  212. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/registry/__init__.py +0 -0
  213. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/registry/config/cfg_arena.yaml +0 -0
  214. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/registry/config/cfg_arena_zhihu.yaml +0 -0
  215. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/registry/config/cfg_pairwise_baseline.yaml +0 -0
  216. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/registry/config/cfg_single.yaml +0 -0
  217. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/registry/data/prompt_template/lmsys_v2.jsonl +0 -0
  218. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/registry/data/prompt_template/prompt_templates.jsonl +0 -0
  219. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/registry/data/qa_browser/battle.jsonl +0 -0
  220. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/registry/data/qa_browser/category_mapping.yaml +0 -0
  221. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/registry/data/question.jsonl +0 -0
  222. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/registry/tasks/arc.yaml +0 -0
  223. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/registry/tasks/bbh.yaml +0 -0
  224. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/registry/tasks/bbh_mini.yaml +0 -0
  225. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/registry/tasks/ceval.yaml +0 -0
  226. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/registry/tasks/ceval_mini.yaml +0 -0
  227. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/registry/tasks/cmmlu.yaml +0 -0
  228. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/registry/tasks/eval_qwen-7b-chat_v100.yaml +0 -0
  229. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/registry/tasks/general_qa.yaml +0 -0
  230. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/registry/tasks/gsm8k.yaml +0 -0
  231. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/registry/tasks/mmlu.yaml +0 -0
  232. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/registry/tasks/mmlu_mini.yaml +0 -0
  233. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/run.py +0 -0
  234. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/run_arena.py +0 -0
  235. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/summarizer.py +0 -0
  236. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/__init__.py +0 -0
  237. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/longbench_write/README.md +0 -0
  238. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/longbench_write/__init__.py +0 -0
  239. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/longbench_write/default_task.json +0 -0
  240. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/longbench_write/default_task.yaml +0 -0
  241. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/longbench_write/eval.py +0 -0
  242. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/longbench_write/infer.py +0 -0
  243. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/longbench_write/longbench_write.py +0 -0
  244. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/longbench_write/resources/__init__.py +0 -0
  245. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/longbench_write/resources/judge.txt +0 -0
  246. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/longbench_write/resources/longbench_write.jsonl +0 -0
  247. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/longbench_write/resources/longbench_write_en.jsonl +0 -0
  248. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/longbench_write/resources/longwrite_ruler.jsonl +0 -0
  249. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/longbench_write/tools/__init__.py +0 -0
  250. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/longbench_write/tools/data_etl.py +0 -0
  251. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/longbench_write/utils.py +0 -0
  252. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/toolbench_static/README.md +0 -0
  253. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/toolbench_static/__init__.py +0 -0
  254. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/toolbench_static/config_default.json +0 -0
  255. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/toolbench_static/config_default.yaml +0 -0
  256. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/toolbench_static/eval.py +0 -0
  257. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/toolbench_static/infer.py +0 -0
  258. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/toolbench_static/llm/__init__.py +0 -0
  259. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -0
  260. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/toolbench_static/requirements.txt +0 -0
  261. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/third_party/toolbench_static/toolbench_static.py +0 -0
  262. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/tools/__init__.py +0 -0
  263. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/tools/combine_reports.py +0 -0
  264. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/tools/gen_mmlu_subject_mapping.py +0 -0
  265. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/tools/rewrite_eval_results.py +0 -0
  266. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/utils/__init__.py +0 -0
  267. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/utils/arena_utils.py +0 -0
  268. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/utils/chat_service.py +0 -0
  269. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/utils/completion_parsers.py +0 -0
  270. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/utils/io_utils.py +0 -0
  271. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/utils/logger.py +0 -0
  272. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/utils/model_utils.py +0 -0
  273. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope/utils/utils.py +0 -0
  274. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope.egg-info/dependency_links.txt +0 -0
  275. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope.egg-info/entry_points.txt +0 -0
  276. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope.egg-info/not-zip-safe +0 -0
  277. {evalscope-0.8.1 → evalscope-0.8.2}/evalscope.egg-info/top_level.txt +0 -0
  278. {evalscope-0.8.1 → evalscope-0.8.2}/requirements/docs.txt +0 -0
  279. {evalscope-0.8.1 → evalscope-0.8.2}/requirements/framework.txt +0 -0
  280. {evalscope-0.8.1 → evalscope-0.8.2}/requirements/inner.txt +0 -0
  281. {evalscope-0.8.1 → evalscope-0.8.2}/requirements/opencompass.txt +0 -0
  282. {evalscope-0.8.1 → evalscope-0.8.2}/requirements/perf.txt +0 -0
  283. {evalscope-0.8.1 → evalscope-0.8.2}/requirements/tests.txt +0 -0
  284. {evalscope-0.8.1 → evalscope-0.8.2}/requirements/vlmeval.txt +0 -0
  285. {evalscope-0.8.1 → evalscope-0.8.2}/requirements.txt +0 -0
  286. {evalscope-0.8.1 → evalscope-0.8.2}/setup.cfg +0 -0
  287. {evalscope-0.8.1 → evalscope-0.8.2}/setup.py +0 -0
  288. {evalscope-0.8.1 → evalscope-0.8.2}/tests/__init__.py +0 -0
  289. {evalscope-0.8.1 → evalscope-0.8.2}/tests/cli/__init__.py +0 -0
  290. {evalscope-0.8.1 → evalscope-0.8.2}/tests/cli/test_run.py +0 -0
  291. {evalscope-0.8.1 → evalscope-0.8.2}/tests/perf/__init__.py +0 -0
  292. {evalscope-0.8.1 → evalscope-0.8.2}/tests/rag/__init__.py +0 -0
  293. {evalscope-0.8.1 → evalscope-0.8.2}/tests/rag/test_clip_benchmark.py +0 -0
  294. {evalscope-0.8.1 → evalscope-0.8.2}/tests/rag/test_mteb.py +0 -0
  295. {evalscope-0.8.1 → evalscope-0.8.2}/tests/rag/test_ragas.py +0 -0
  296. {evalscope-0.8.1 → evalscope-0.8.2}/tests/swift/__init__.py +0 -0
  297. {evalscope-0.8.1 → evalscope-0.8.2}/tests/swift/test_run_swift_eval.py +0 -0
  298. {evalscope-0.8.1 → evalscope-0.8.2}/tests/swift/test_run_swift_vlm_eval.py +0 -0
  299. {evalscope-0.8.1 → evalscope-0.8.2}/tests/swift/test_run_swift_vlm_jugde_eval.py +0 -0
  300. {evalscope-0.8.1 → evalscope-0.8.2}/tests/test_run_all.py +0 -0
  301. {evalscope-0.8.1 → evalscope-0.8.2}/tests/vlm/__init__.py +0 -0
  302. {evalscope-0.8.1 → evalscope-0.8.2}/tests/vlm/test_vlmeval.py +0 -0
{evalscope-0.8.1/evalscope.egg-info → evalscope-0.8.2}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: evalscope
- Version: 0.8.1
+ Version: 0.8.2
  Summary: EvalScope: Lightweight LLMs Evaluation Framework
  Home-page: https://github.com/modelscope/evalscope
  Author: ModelScope team
@@ -54,7 +54,7 @@ Provides-Extra: vlmeval
  Requires-Dist: ms-vlmeval>=0.0.9; extra == "vlmeval"
  Provides-Extra: rag
  Requires-Dist: mteb==1.19.4; extra == "rag"
- Requires-Dist: ragas==0.2.7; extra == "rag"
+ Requires-Dist: ragas==0.2.9; extra == "rag"
  Requires-Dist: webdataset>0.2.0; extra == "rag"
  Provides-Extra: perf
  Requires-Dist: aiohttp; extra == "perf"
@@ -125,7 +125,7 @@ Requires-Dist: transformers_stream_generator; extra == "all"
  Requires-Dist: ms-opencompass>=0.1.4; extra == "all"
  Requires-Dist: ms-vlmeval>=0.0.9; extra == "all"
  Requires-Dist: mteb==1.19.4; extra == "all"
- Requires-Dist: ragas==0.2.7; extra == "all"
+ Requires-Dist: ragas==0.2.9; extra == "all"
  Requires-Dist: webdataset>0.2.0; extra == "all"
  Requires-Dist: aiohttp; extra == "all"
  Requires-Dist: fastapi; extra == "all"
@@ -181,6 +181,8 @@ The framework accommodates multiple evaluation scenarios such as end-to-end RAG
  <br>EvalScope Framework.
  </p>

+ <details><summary>Framework Description</summary>
+
  The architecture includes the following modules:
  1. **Model Adapter**: The model adapter is used to convert the outputs of specific models into the format required by the framework, supporting both API call models and locally run models.
  2. **Data Adapter**: The data adapter is responsible for converting and processing input data to meet various evaluation needs and formats.
@@ -194,6 +196,16 @@ The architecture includes the following modules:
  5. **Evaluation Report**: The final generated evaluation report summarizes the model's performance, which can be used for decision-making and further model optimization.
  6. **Visualization**: Visualization results help users intuitively understand evaluation results, facilitating analysis and comparison of different model performances.

+ </details>
+
+ ## ☎ User Groups
+
+ Please scan the QR code below to join our community groups:
+
+ [Discord Group](https://discord.com/invite/D27yfEFVz5) | WeChat Group | DingTalk Group
+ :-------------------------:|:-------------------------:|:-------------------------:
+ <img src="docs/asset/discord_qr.jpg" width="160" height="160"> | <img src="docs/asset/wechat.png" width="160" height="160"> | <img src="docs/asset/dingding.png" width="160" height="160">
+

  ## 🎉 News
  - 🔥 **[2024.12.13]** Model evaluation optimization: no need to pass the `--template-type` parameter anymore; supports starting evaluation with `evalscope eval --args`. Refer to the [📖 User Guide](https://evalscope.readthedocs.io/en/latest/get_started/basic_usage.html) for more details.
{evalscope-0.8.1 → evalscope-0.8.2}/README.md
@@ -45,6 +45,8 @@ The framework accommodates multiple evaluation scenarios such as end-to-end RAG
  <br>EvalScope Framework.
  </p>

+ <details><summary>Framework Description</summary>
+
  The architecture includes the following modules:
  1. **Model Adapter**: The model adapter is used to convert the outputs of specific models into the format required by the framework, supporting both API call models and locally run models.
  2. **Data Adapter**: The data adapter is responsible for converting and processing input data to meet various evaluation needs and formats.
@@ -58,6 +60,16 @@ The architecture includes the following modules:
  5. **Evaluation Report**: The final generated evaluation report summarizes the model's performance, which can be used for decision-making and further model optimization.
  6. **Visualization**: Visualization results help users intuitively understand evaluation results, facilitating analysis and comparison of different model performances.

+ </details>
+
+ ## ☎ User Groups
+
+ Please scan the QR code below to join our community groups:
+
+ [Discord Group](https://discord.com/invite/D27yfEFVz5) | WeChat Group | DingTalk Group
+ :-------------------------:|:-------------------------:|:-------------------------:
+ <img src="docs/asset/discord_qr.jpg" width="160" height="160"> | <img src="docs/asset/wechat.png" width="160" height="160"> | <img src="docs/asset/dingding.png" width="160" height="160">
+

  ## 🎉 News
  - 🔥 **[2024.12.13]** Model evaluation optimization: no need to pass the `--template-type` parameter anymore; supports starting evaluation with `evalscope eval --args`. Refer to the [📖 User Guide](https://evalscope.readthedocs.io/en/latest/get_started/basic_usage.html) for more details.
{evalscope-0.8.1 → evalscope-0.8.2}/evalscope/config.py
@@ -114,7 +114,9 @@ class TaskConfig:
  def from_args(args: Namespace):
  # Convert Namespace to a dictionary and filter out None values
  args_dict = {k: v for k, v in vars(args).items() if v is not None}
- del args_dict['func'] # Note: compat CLI arguments
+
+ if 'func' in args_dict:
+ del args_dict['func'] # Note: compat CLI arguments

  return TaskConfig.from_dict(args_dict)

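For context on the `from_args` change above, here is a minimal, self-contained sketch (not evalscope code; `namespace_to_dict` is a made-up helper mirroring the hunk) of why the guard matters: argparse only attaches a `func` entry when a sub-command handler set one, so an unconditional `del` raises `KeyError` for Namespaces built programmatically.

```python
from argparse import Namespace

# Hypothetical helper mirroring TaskConfig.from_args from the hunk above.
def namespace_to_dict(args: Namespace) -> dict:
    # Drop None values, exactly as the method does.
    args_dict = {k: v for k, v in vars(args).items() if v is not None}
    # 'func' only exists when an argparse sub-parser attached a handler;
    # guarding the deletion keeps programmatically built Namespaces working.
    if 'func' in args_dict:
        del args_dict['func']
    return args_dict

print(namespace_to_dict(Namespace(model='qwen2.5', limit=None)))  # {'model': 'qwen2.5'}
```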
{evalscope-0.8.1 → evalscope-0.8.2}/evalscope/evaluator/evaluator.py
@@ -86,6 +86,7 @@ class Evaluator(object):
  **kwargs)

  # Get prompts from dataset
+ # TODO: support sampler
  self.prompts = self.data_adapter.gen_prompts(data_dict=self.dataset)
  del self.dataset

{evalscope-0.8.1 → evalscope-0.8.2}/evalscope/models/model_adapter.py
@@ -429,7 +429,7 @@ class ChatGenerationModelAdapter(BaseModelAdapter):
  fix_do_sample_warning(self.generation_config)

  # Run inference
- output_ids = self.model.generate(**inputs, generation_config=self.generation_config)
+ output_ids = self.model.generate(input_ids, generation_config=self.generation_config)

  response = self.tokenizer.decode(output_ids[0, len(input_ids[0]):], skip_special_tokens=True)
  return response
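A plausible reading of the one-line change above, shown as an isolated sketch with a small Hugging Face model (`gpt2` is used only for illustration): `generate()` accepts the input tensor directly, whereas unpacking the full tokenizer dict can forward extra keys (for example `token_type_ids`) that some causal LMs reject.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('gpt2')   # illustrative model only
model = AutoModelForCausalLM.from_pretrained('gpt2')

inputs = tokenizer('Hello, EvalScope', return_tensors='pt')
input_ids = inputs['input_ids']

# Pass the tensor itself, as the updated adapter does, instead of **inputs.
output_ids = model.generate(input_ids, max_new_tokens=8)

# Decode only the newly generated tokens, matching the adapter's slicing.
response = tokenizer.decode(output_ids[0, input_ids.shape[1]:], skip_special_tokens=True)
print(response)
```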
{evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/arguments.py
@@ -68,6 +68,7 @@ class Arguments:
  model=args.model,
  attn_implementation=args.attn_implementation,
  url=args.url,
+ port=args.port,
  api_key=args.api_key,
  connect_timeout=args.connect_timeout,
  read_timeout=args.read_timeout,
{evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/benchmark.py
@@ -157,7 +157,7 @@ async def statistic_benchmark_metric_worker(benchmark_data_queue: asyncio.Queue,
  while not (data_process_completed_event.is_set() and benchmark_data_queue.empty()):
  try:
  # Attempt to get benchmark data from the queue with a timeout
- benchmark_data = await asyncio.wait_for(benchmark_data_queue.get(), timeout=1)
+ benchmark_data = await asyncio.wait_for(benchmark_data_queue.get(), timeout=0.01)
  benchmark_data_queue.task_done()
  except asyncio.TimeoutError:
  # If timeout, continue to the next iteration
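The timeout change in the hunk above tightens a polling loop. The sketch below shows the same pattern in isolation (plain asyncio, names invented for the example): a short `wait_for` timeout lets the consumer re-check the completion event every 10 ms instead of blocking for up to a second after the producer has finished.

```python
import asyncio

async def consumer(queue: asyncio.Queue, done: asyncio.Event):
    # Mirror of the worker loop: keep draining until the producer is done
    # and the queue is empty, re-checking frequently thanks to the short timeout.
    while not (done.is_set() and queue.empty()):
        try:
            item = await asyncio.wait_for(queue.get(), timeout=0.01)
            queue.task_done()
            print('processed', item)
        except asyncio.TimeoutError:
            continue  # re-check the loop condition

async def main():
    queue, done = asyncio.Queue(), asyncio.Event()
    for i in range(3):
        queue.put_nowait(i)
    worker = asyncio.create_task(consumer(queue, done))
    await queue.join()   # wait until every item is marked done
    done.set()           # signal that no more data is coming
    await worker

asyncio.run(main())
```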
{evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/main.py
@@ -19,7 +19,9 @@ def run_perf_benchmark(args):
  args = Arguments(**args)
  elif isinstance(args, Namespace):
  args = Arguments.from_args(args)
- seed_everything(args.seed)
+
+ if args.seed is not None:
+ seed_everything(args.seed)

  # Setup logger and output
  args.outputs_dir = get_output_path(args)
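The seed guard above follows the same defensive pattern as the `config.py` change. A rough stand-in for `seed_everything` (the real helper lives in evalscope; this sketch only shows the intent) makes clear why skipping it when `seed` is `None` is useful: callers can opt out of deterministic seeding entirely.

```python
import random

def seed_everything(seed: int) -> None:
    """Hypothetical stand-in for evalscope's helper: seed the common RNGs."""
    random.seed(seed)
    try:
        import numpy as np
        np.random.seed(seed)
    except ImportError:
        pass
    try:
        import torch
        torch.manual_seed(seed)
    except ImportError:
        pass

seed = None  # e.g. Arguments.seed left unset on the command line
if seed is not None:
    seed_everything(seed)
```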
{evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/plugin/api/openai_api.py
@@ -96,60 +96,64 @@ class OpenaiPlugin(ApiPluginBase):

  def parse_responses(self, responses, request: Any = None, **kwargs) -> Dict:
  """Parser responses and return number of request and response tokens.
- sample of the output delta:
- {"id":"4","object":"chat.completion.chunk","created":1714030870,"model":"llama3","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
+ Only one response for non-stream, multiple responses for stream.
+ """

+ # when stream, the last response is the full usage
+ # when non-stream, the last response is the first response
+ last_response_js = json.loads(responses[-1])
+ if 'usage' in last_response_js and last_response_js['usage']:
+ input_tokens = last_response_js['usage']['prompt_tokens']
+ output_tokens = last_response_js['usage']['completion_tokens']
+ return input_tokens, output_tokens

- Args:
- responses (List[bytes]): List of http response body, for stream output,
- there are multiple responses, for general only one.
- kwargs: (Any): The command line --parameter content.
- Returns:
- Tuple: Return number of prompt token and number of completion tokens.
- """
- full_response_content = ''
+ # no usage information in the response, parse the response to get the tokens
  delta_contents = {}
- input_tokens = None
- output_tokens = None
  for response in responses:
  js = json.loads(response)
- if js['object'] == 'chat.completion':
- for choice in js['choices']:
- delta_contents[choice['index']] = [choice['message']['content']]
- input_tokens = js['usage']['prompt_tokens']
- output_tokens = js['usage']['completion_tokens']
- elif js['object'] == 'text_completion':
- for choice in js['choices']:
- delta_contents[choice['index']] = [choice['text']]
- input_tokens = js['usage']['prompt_tokens']
- output_tokens = js['usage']['completion_tokens']
- elif js['object'] == 'chat.completion.chunk':
- if 'choices' in js:
- for choice in js['choices']:
- if 'delta' in choice and 'index' in choice:
- delta = choice['delta']
- idx = choice['index']
- if 'content' in delta:
- delta_content = delta['content']
- if idx in delta_contents:
- delta_contents[idx].append(delta_content)
- else:
- delta_contents[idx] = [delta_content]
- # usage in chunk: {"id":"","object":"chat.completion.chunk","created":1718269986,"model":"llama3",
- # "choices":[],"usage":{"prompt_tokens":32,"total_tokens":384,"completion_tokens":352}}
- if 'usage' in js and js['usage']:
- input_tokens = js['usage']['prompt_tokens']
- output_tokens = js['usage']['completion_tokens']
- if (input_tokens is None and output_tokens is None and self.tokenizer is not None):
- input_tokens = 0
- output_tokens = 0
+ if 'object' in js:
+ self.__process_response_object(js, delta_contents)
+ else:
+ self.__process_no_object(js, delta_contents)
+
+ input_tokens, output_tokens = self.__calculate_tokens_from_content(request, delta_contents)
+ return input_tokens, output_tokens
+
+ def __process_response_object(self, js, delta_contents):
+ if js['object'] == 'chat.completion':
+ for choice in js['choices']:
+ delta_contents[choice['index']] = [choice['message']['content']]
+ elif js['object'] == 'text_completion':
+ for choice in js['choices']:
+ delta_contents[choice['index']] = [choice['text']]
+ elif js['object'] == 'chat.completion.chunk':
+ for choice in js.get('choices', []):
+ if 'delta' in choice and 'index' in choice:
+ delta = choice['delta']
+ idx = choice['index']
+ if 'content' in delta:
+ delta_content = delta['content']
+ delta_contents.setdefault(idx, []).append(delta_content)
+
+ def __process_no_object(self, js, delta_contents):
+ # assume the response is a single choice
+ for choice in js['choices']:
+ if 'delta' in choice:
+ delta = choice['delta']
+ idx = choice['index']
+ if 'content' in delta:
+ delta_content = delta['content']
+ delta_contents.setdefault(idx, []).append(delta_content)
+ else:
+ delta_contents[choice['index']] = [choice['message']['content']]
+
+ def __calculate_tokens_from_content(self, request, delta_contents):
+ input_tokens = output_tokens = 0
+ if self.tokenizer is not None:
  for idx, choice_contents in delta_contents.items():
- full_response_content = ''.join([m for m in choice_contents])
+ full_response_content = ''.join(choice_contents)
  input_tokens += len(self.tokenizer.encode(request['messages'][0]['content']))
  output_tokens += len(self.tokenizer.encode(full_response_content))
- elif input_tokens is None and output_tokens is None: # no usage info get.
- input_tokens = 0
- output_tokens = 0
+ else:
  logger.warning('No usage information found. Please specify `--tokenizer-path` to generate usage details.')
-
  return input_tokens, output_tokens
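To make the refactor above concrete, here is an illustrative, self-contained sketch of the new parsing strategy (not the plugin itself; `count_tokens` and the character-level `encode` stand-in are invented for the example): prefer the server-reported `usage` carried by the final response, and only fall back to stitching streamed deltas together and re-tokenizing them.

```python
import json

def count_tokens(responses, encode=list):
    # Prefer the usage block in the last response (final stream chunk or the
    # single non-stream body), as the refactored parse_responses does.
    last = json.loads(responses[-1])
    if last.get('usage'):
        return last['usage']['prompt_tokens'], last['usage']['completion_tokens']
    # Fallback: accumulate streamed delta contents per choice index.
    pieces = {}
    for raw in responses:
        for choice in json.loads(raw).get('choices', []):
            delta = choice.get('delta', {})
            if 'content' in delta:
                pieces.setdefault(choice['index'], []).append(delta['content'])
    # 'encode' is a character-level stand-in for a real tokenizer.encode();
    # the actual plugin also encodes the request prompt to get input_tokens.
    return 0, sum(len(encode(''.join(parts))) for parts in pieces.values())

stream = [
    '{"object":"chat.completion.chunk","choices":[{"index":0,"delta":{"content":"Hi"}}]}',
    '{"object":"chat.completion.chunk","choices":[],"usage":{"prompt_tokens":5,"completion_tokens":2}}',
]
print(count_tokens(stream))  # -> (5, 2)
```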
{evalscope-0.8.1 → evalscope-0.8.2}/evalscope/perf/utils/local_server.py
@@ -103,6 +103,7 @@ def start_app(args: Arguments):
  elif args.api == 'local_vllm':
  os.environ['VLLM_USE_MODELSCOPE'] = 'True'
  os.environ['VLLM_ALLOW_LONG_MAX_MODEL_LEN'] = '1'
+ os.environ['VLLM_WORKER_MULTIPROC_METHOD'] = 'spawn'
  # yapf: disable
  proc = subprocess.Popen([
  'python', '-m', 'vllm.entrypoints.openai.api_server',
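For the environment-variable addition above, a rough sketch of the launch pattern (the model id and port are placeholders, and the real code sets `os.environ` directly rather than a copied env): forcing `spawn` for vLLM worker processes can help avoid CUDA state being inherited through `fork`.

```python
import os
import subprocess

env = os.environ.copy()
env['VLLM_USE_MODELSCOPE'] = 'True'
env['VLLM_ALLOW_LONG_MAX_MODEL_LEN'] = '1'
# 'spawn' workers can help avoid CUDA re-initialization problems under fork.
env['VLLM_WORKER_MULTIPROC_METHOD'] = 'spawn'

# Launch vLLM's OpenAI-compatible server as a subprocess (placeholder arguments).
proc = subprocess.Popen(
    ['python', '-m', 'vllm.entrypoints.openai.api_server',
     '--model', 'Qwen/Qwen2.5-0.5B-Instruct',
     '--port', '8001'],
    env=env,
)
```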
evalscope-0.8.2/evalscope/version.py
@@ -0,0 +1,4 @@
+ # Copyright (c) Alibaba, Inc. and its affiliates.
+
+ __version__ = '0.8.2'
+ __release_datetime__ = '2024-12-26 20:00:00'
{evalscope-0.8.1 → evalscope-0.8.2/evalscope.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: evalscope
- Version: 0.8.1
+ Version: 0.8.2
  Summary: EvalScope: Lightweight LLMs Evaluation Framework
  Home-page: https://github.com/modelscope/evalscope
  Author: ModelScope team
@@ -54,7 +54,7 @@ Provides-Extra: vlmeval
  Requires-Dist: ms-vlmeval>=0.0.9; extra == "vlmeval"
  Provides-Extra: rag
  Requires-Dist: mteb==1.19.4; extra == "rag"
- Requires-Dist: ragas==0.2.7; extra == "rag"
+ Requires-Dist: ragas==0.2.9; extra == "rag"
  Requires-Dist: webdataset>0.2.0; extra == "rag"
  Provides-Extra: perf
  Requires-Dist: aiohttp; extra == "perf"
@@ -125,7 +125,7 @@ Requires-Dist: transformers_stream_generator; extra == "all"
  Requires-Dist: ms-opencompass>=0.1.4; extra == "all"
  Requires-Dist: ms-vlmeval>=0.0.9; extra == "all"
  Requires-Dist: mteb==1.19.4; extra == "all"
- Requires-Dist: ragas==0.2.7; extra == "all"
+ Requires-Dist: ragas==0.2.9; extra == "all"
  Requires-Dist: webdataset>0.2.0; extra == "all"
  Requires-Dist: aiohttp; extra == "all"
  Requires-Dist: fastapi; extra == "all"
@@ -181,6 +181,8 @@ The framework accommodates multiple evaluation scenarios such as end-to-end RAG
  <br>EvalScope Framework.
  </p>

+ <details><summary>Framework Description</summary>
+
  The architecture includes the following modules:
  1. **Model Adapter**: The model adapter is used to convert the outputs of specific models into the format required by the framework, supporting both API call models and locally run models.
  2. **Data Adapter**: The data adapter is responsible for converting and processing input data to meet various evaluation needs and formats.
@@ -194,6 +196,16 @@ The architecture includes the following modules:
  5. **Evaluation Report**: The final generated evaluation report summarizes the model's performance, which can be used for decision-making and further model optimization.
  6. **Visualization**: Visualization results help users intuitively understand evaluation results, facilitating analysis and comparison of different model performances.

+ </details>
+
+ ## ☎ User Groups
+
+ Please scan the QR code below to join our community groups:
+
+ [Discord Group](https://discord.com/invite/D27yfEFVz5) | WeChat Group | DingTalk Group
+ :-------------------------:|:-------------------------:|:-------------------------:
+ <img src="docs/asset/discord_qr.jpg" width="160" height="160"> | <img src="docs/asset/wechat.png" width="160" height="160"> | <img src="docs/asset/dingding.png" width="160" height="160">
+

  ## 🎉 News
  - 🔥 **[2024.12.13]** Model evaluation optimization: no need to pass the `--template-type` parameter anymore; supports starting evaluation with `evalscope eval --args`. Refer to the [📖 User Guide](https://evalscope.readthedocs.io/en/latest/get_started/basic_usage.html) for more details.
{evalscope-0.8.1 → evalscope-0.8.2}/evalscope.egg-info/SOURCES.txt
@@ -55,26 +55,6 @@ evalscope/backend/rag_eval/ragas/__init__.py
  evalscope/backend/rag_eval/ragas/arguments.py
  evalscope/backend/rag_eval/ragas/task_template.py
  evalscope/backend/rag_eval/ragas/prompts/persona_prompt.py
- evalscope/backend/rag_eval/ragas/prompts/chinese/AnswerCorrectness/correctness_prompt_chinese.json
- evalscope/backend/rag_eval/ragas/prompts/chinese/AnswerCorrectness/long_form_answer_prompt_chinese.json
- evalscope/backend/rag_eval/ragas/prompts/chinese/AnswerRelevancy/question_generation_chinese.json
- evalscope/backend/rag_eval/ragas/prompts/chinese/ContextPrecision/context_precision_prompt_chinese.json
- evalscope/backend/rag_eval/ragas/prompts/chinese/CustomNodeFilter/scoring_prompt_chinese.json
- evalscope/backend/rag_eval/ragas/prompts/chinese/Faithfulness/nli_statements_message_chinese.json
- evalscope/backend/rag_eval/ragas/prompts/chinese/Faithfulness/statement_prompt_chinese.json
- evalscope/backend/rag_eval/ragas/prompts/chinese/HeadlinesExtractor/prompt_chinese.json
- evalscope/backend/rag_eval/ragas/prompts/chinese/MultiHopAbstractQuerySynthesizer/concept_combination_prompt_chinese.json
- evalscope/backend/rag_eval/ragas/prompts/chinese/MultiHopAbstractQuerySynthesizer/generate_query_reference_prompt_chinese.json
- evalscope/backend/rag_eval/ragas/prompts/chinese/MultiHopAbstractQuerySynthesizer/theme_persona_matching_prompt_chinese.json
- evalscope/backend/rag_eval/ragas/prompts/chinese/MultiHopSpecificQuerySynthesizer/generate_query_reference_prompt_chinese.json
- evalscope/backend/rag_eval/ragas/prompts/chinese/MultiHopSpecificQuerySynthesizer/theme_persona_matching_prompt_chinese.json
- evalscope/backend/rag_eval/ragas/prompts/chinese/MultiModalFaithfulness/faithfulness_prompt_chinese.json
- evalscope/backend/rag_eval/ragas/prompts/chinese/MultiModalRelevance/relevance_prompt_chinese.json
- evalscope/backend/rag_eval/ragas/prompts/chinese/NERExtractor/prompt_chinese.json
- evalscope/backend/rag_eval/ragas/prompts/chinese/SingleHopSpecificQuerySynthesizer/generate_query_reference_prompt_chinese.json
- evalscope/backend/rag_eval/ragas/prompts/chinese/SingleHopSpecificQuerySynthesizer/theme_persona_matching_prompt_chinese.json
- evalscope/backend/rag_eval/ragas/prompts/chinese/SummaryExtractor/prompt_chinese.json
- evalscope/backend/rag_eval/ragas/prompts/chinese/ThemesExtractor/prompt_chinese.json
  evalscope/backend/rag_eval/ragas/tasks/__init__.py
  evalscope/backend/rag_eval/ragas/tasks/build_distribution.py
  evalscope/backend/rag_eval/ragas/tasks/build_transform.py
{evalscope-0.8.1 → evalscope-0.8.2}/evalscope.egg-info/requires.txt
@@ -67,7 +67,7 @@ transformers_stream_generator
  ms-opencompass>=0.1.4
  ms-vlmeval>=0.0.9
  mteb==1.19.4
- ragas==0.2.7
+ ragas==0.2.9
  webdataset>0.2.0
  aiohttp
  fastapi
@@ -116,7 +116,7 @@ unicorn

  [rag]
  mteb==1.19.4
- ragas==0.2.7
+ ragas==0.2.9
  webdataset>0.2.0

  [vlmeval]
{evalscope-0.8.1 → evalscope-0.8.2}/requirements/rag.txt
@@ -1,3 +1,3 @@
  mteb==1.19.4
- ragas==0.2.7
+ ragas==0.2.9
  webdataset>0.2.0
{evalscope-0.8.1 → evalscope-0.8.2}/tests/perf/test_perf.py
@@ -19,13 +19,13 @@ class TestPerf(unittest.TestCase):
  @unittest.skipUnless(0 in test_level_list(), 'skip test in current test level')
  def test_run_perf(self):
  task_cfg = {
- 'url': 'http://127.0.0.1:8000/v1/chat/completions',
+ 'url': 'http://127.0.0.1:8001/v1/chat/completions',
  'parallel': 1,
  'model': 'qwen2.5',
  'number': 15,
  'api': 'openai',
  'dataset': 'openqa',
- 'stream': True,
+ # 'stream': True,
  'debug': True,
  }
  run_perf_benchmark(task_cfg)
@@ -47,7 +47,7 @@ class TestPerf(unittest.TestCase):
  @unittest.skipUnless(0 in test_level_list(), 'skip test in current test level')
  def test_run_perf_speed_benchmark(self):
  task_cfg = {
- 'url': 'http://127.0.0.1:8801/v1/completions',
+ 'url': 'http://127.0.0.1:8001/v1/completions',
  'parallel': 1,
  'model': 'qwen2.5',
  'api': 'openai',
evalscope-0.8.1/evalscope/backend/rag_eval/ragas/prompts/chinese/AnswerCorrectness/correctness_prompt_chinese.json
@@ -1,87 +0,0 @@
- {
- "ragas_version": "0.2.7",
- "original_hash": -492257975294377194,
- "language": "chinese",
- "instruction": "给定一个真实情况和一个答案陈述,分析每个陈述并将其分类为以下类别之一:TP(真正):答案中存在的陈述也直接由一个或多个真实情况中的陈述支持,FP(假正):答案中存在的陈述但没有被任何真实情况中的陈述直接支持,FN(假负):在真实情况中发现但在答案中不存在的陈述。每个陈述只能属于其中一个类别。为每个分类提供理由。",
- "examples": [
- {
- "input": {
- "question": "是什么为太阳提供能量,它的主要功能是什么?",
- "answer": [
- "太阳的能量来自核裂变,类似于地球上的核反应堆。",
- "太阳的主要功能是为太阳系提供光。"
- ],
- "ground_truth": [
- "太阳的能量来自核聚变,其中氢原子融合形成氦。",
- "太阳核心的这种聚变过程释放出巨大的能量。",
- "来自太阳的能量提供热量和光,这对地球上的生命至关重要。",
- "太阳的光在地球的气候系统中起着关键作用。",
- "阳光有助于驱动天气和海洋洋流。"
- ]
- },
- "output": {
- "TP": [
- {
- "statement": "太阳的主要功能是为太阳系提供光。",
- "reason": "这一说法在某种程度上得到了地面事实的支持,提到太阳提供光和它的作用,尽管它更广泛地关注太阳的能量。"
- }
- ],
- "FP": [
- {
- "statement": "太阳的能量来自核裂变,类似于地球上的核反应堆。",
- "reason": "这一说法是不正确的,与地面事实相矛盾,地面事实指出太阳的能量来自核聚变。"
- }
- ],
- "FN": [
- {
- "statement": "太阳的能量来自核聚变,其中氢原子融合形成氦。",
- "reason": "这种对太阳能量来源的准确描述没有包含在答案中。"
- },
- {
- "statement": "太阳核心的这种聚变过程释放出巨大的能量。",
- "reason": "这个过程及其重要性没有在答案中提到。"
- },
- {
- "statement": "来自太阳的能量提供热量和光,这对地球上的生命至关重要。",
- "reason": "答案中只提到了光,忽略了热量及其对生命的必要性,这些在地面事实中都有涵盖。"
- },
- {
- "statement": "太阳的光在地球的气候系统中起着关键作用。",
- "reason": "太阳光对地球气候系统的这种更广泛的影响没有在答案中提到。"
- },
- {
- "statement": "阳光有助于驱动天气和海洋洋流。",
- "reason": "答案中省略了阳光对天气模式和海洋洋流的影响。"
- }
- ]
- }
- },
- {
- "input": {
- "question": "水的沸点是多少?",
- "answer": [
- "水的沸点在海平面上是100摄氏度。"
- ],
- "ground_truth": [
- "水的沸点在海平面上是100摄氏度(212华氏度)。",
- "水的沸点会随着海拔的变化而变化。"
- ]
- },
- "output": {
- "TP": [
- {
- "statement": "水的沸点在海平面上是100摄氏度。",
- "reason": "这一说法直接得到了地面事实的支持,地面事实具体说明了水的沸点在海平面上是100摄氏度。"
- }
- ],
- "FP": [],
- "FN": [
- {
- "statement": "水的沸点会随着海拔的变化而变化。",
- "reason": "关于水的沸点如何随海拔变化的额外信息没有在答案中提到。"
- }
- ]
- }
- }
- ]
- }
evalscope-0.8.1/evalscope/backend/rag_eval/ragas/prompts/chinese/AnswerCorrectness/long_form_answer_prompt_chinese.json
@@ -1,36 +0,0 @@
- {
- "ragas_version": "0.2.7",
- "original_hash": -8546983388246528139,
- "language": "chinese",
- "instruction": "给定一个问题、一个答案和答案中的句子,分析在“句子”下给出的每个句子的复杂性,并将每个句子分解为一个或多个完全可理解的陈述,同时确保每个陈述中不使用代词。将输出格式化为JSON。",
- "examples": [
- {
- "input": {
- "question": "阿尔伯特·爱因斯坦是谁,他以什么而闻名?",
- "answer": "他是一位出生于德国的理论物理学家,被广泛认为是有史以来最伟大和最有影响力的物理学家之一。他最著名的是发展了相对论,他还对量子力学理论的发展做出了重要贡献。",
- "sentences": {
- "0": "他是一位出生于德国的理论物理学家,被广泛认为是有史以来最伟大和最有影响力的物理学家之一。",
- "1": "他最著名的是发展了相对论,他还对量子力学理论的发展做出了重要贡献。"
- }
- },
- "output": {
- "sentences": [
- {
- "sentence_index": 0,
- "simpler_statements": [
- "阿尔伯特·爱因斯坦是一位出生于德国的理论物理学家。",
- "阿尔伯特·爱因斯坦被认为是有史以来最伟大和最有影响力的物理学家之一。"
- ]
- },
- {
- "sentence_index": 1,
- "simpler_statements": [
- "阿尔伯特·爱因斯坦最著名的是发展了相对论。",
- "阿尔伯特·爱因斯坦还对量子力学理论的发展做出了重要贡献。"
- ]
- }
- ]
- }
- }
- ]
- }
evalscope-0.8.1/evalscope/backend/rag_eval/ragas/prompts/chinese/AnswerRelevancy/question_generation_chinese.json
@@ -1,26 +0,0 @@
- {
- "ragas_version": "0.2.7",
- "original_hash": 7951911230338252816,
- "language": "chinese",
- "instruction": "为给定的答案生成一个问题,并识别答案是否含糊不清。如果答案含糊不清,则给出1;如果答案明确,则给出0。含糊不清的答案是指那些回避的、模糊的或不明确的答案。例如,“我不知道”或“我不确定”是含糊不清的答案。",
- "examples": [
- {
- "input": {
- "response": "阿尔伯特·爱因斯坦出生在德国。"
- },
- "output": {
- "question": "阿尔伯特·爱因斯坦出生在哪里?",
- "noncommittal": 0
- }
- },
- {
- "input": {
- "response": "我不知道2023年发明的智能手机的突破性功能,因为我对2022年以后的信息不了解。"
- },
- "output": {
- "question": "2023年发明的智能手机的突破性功能是什么?",
- "noncommittal": 1
- }
- }
- ]
- }
evalscope-0.8.1/evalscope/backend/rag_eval/ragas/prompts/chinese/ContextPrecision/context_precision_prompt_chinese.json
@@ -1,41 +0,0 @@
- {
- "ragas_version": "0.2.7",
- "original_hash": -5318808809674890018,
- "language": "chinese",
- "instruction": "给定问题、答案和背景,验证背景在得出给定答案时是否有用。如果有用,判定为“1”,如果没有用,判定为“0”,并以json格式输出。",
- "examples": [
- {
- "input": {
- "question": "你能告诉我关于阿尔伯特·爱因斯坦的什么?",
- "context": "阿尔伯特·爱因斯坦(1879年3月14日-1955年4月18日)是一位德国出生的理论物理学家,被广泛认为是有史以来最伟大和最有影响力的科学家之一。他因发展相对论而闻名,同时也对量子力学做出了重要贡献,因此在20世纪前几十年现代物理学对自然科学理解的革命性重塑中起到了核心作用。他的质能等价公式E=mc²,源于相对论,被称为“世界上最著名的方程”。他因“对理论物理学的贡献,特别是发现光电效应定律”而获得1921年诺贝尔物理学奖,这是量子理论发展的关键一步。他的工作也因其对科学哲学的影响而闻名。在1999年由英国《物理世界》杂志对全球130位顶尖物理学家的调查中,爱因斯坦被评为有史以来最伟大的物理学家。他的智力成就和原创性使爱因斯坦成为天才的代名词。",
- "answer": "阿尔伯特·爱因斯坦,生于1879年3月14日,是一位德国出生的理论物理学家,被广泛认为是有史以来最伟大和最有影响力的科学家之一。他因对理论物理学的贡献而获得1921年诺贝尔物理学奖。"
- },
- "output": {
- "reason": "提供的背景确实有助于得出给定的答案。背景包括关于阿尔伯特·爱因斯坦的生活和贡献的关键信息,这些信息在答案中得到了反映。",
- "verdict": 1
- }
- },
- {
- "input": {
- "question": "谁赢得了2020年ICC世界杯?",
- "context": "2022年ICC男子T20世界杯于2022年10月16日至11月13日在澳大利亚举行,是该赛事的第八届。原定于2020年举行,但因COVID-19大流行而推迟。英格兰在决赛中以五个小门击败巴基斯坦,赢得了他们的第二个ICC男子T20世界杯冠军。",
- "answer": "英格兰"
- },
- "output": {
- "reason": "背景有助于澄清关于2020年ICC世界杯的情况,并指出英格兰是原定于2020年举行但实际上在2022年举行的比赛的获胜者。",
- "verdict": 1
- }
- },
- {
- "input": {
- "question": "世界上最高的山是什么?",
- "context": "安第斯山脉是世界上最长的大陆山脉,位于南美洲。它横跨七个国家,拥有西半球许多最高的山峰。该山脉以其多样的生态系统而闻名,包括高海拔的安第斯高原和亚马逊雨林。",
- "answer": "珠穆朗玛峰。"
- },
- "output": {
- "reason": "提供的背景讨论了安第斯山脉,虽然令人印象深刻,但不包括珠穆朗玛峰,也与关于世界最高山的问题没有直接关系。",
- "verdict": 0
- }
- }
- ]
- }
evalscope-0.8.1/evalscope/backend/rag_eval/ragas/prompts/chinese/CustomNodeFilter/scoring_prompt_chinese.json
@@ -1,7 +0,0 @@
- {
- "ragas_version": "0.2.7",
- "original_hash": -1333942410710431097,
- "language": "chinese",
- "instruction": "给定文档摘要和节点内容,将节点内容评分在1到5的范围内。",
- "examples": []
- }