evalscope-0.5.5rc1-py3-none-any.whl → evalscope-0.6.0rc0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of evalscope might be problematic; see the package page for details.

Files changed (48)
  1. evalscope/backend/__init__.py +0 -3
  2. evalscope/backend/opencompass/tasks/eval_datasets.py +1 -0
  3. evalscope/backend/rag_eval/__init__.py +4 -0
  4. evalscope/backend/rag_eval/backend_manager.py +80 -0
  5. evalscope/backend/rag_eval/clip_benchmark/__init__.py +2 -0
  6. evalscope/backend/rag_eval/clip_benchmark/arguments.py +34 -0
  7. evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +277 -0
  8. evalscope/backend/rag_eval/clip_benchmark/task_template.py +119 -0
  9. evalscope/backend/rag_eval/clip_benchmark/tasks/__init__.py +0 -0
  10. evalscope/backend/rag_eval/clip_benchmark/tasks/image_caption.py +83 -0
  11. evalscope/backend/rag_eval/clip_benchmark/tasks/zeroshot_classification.py +247 -0
  12. evalscope/backend/rag_eval/clip_benchmark/tasks/zeroshot_retrieval.py +170 -0
  13. evalscope/backend/rag_eval/cmteb/__init__.py +4 -0
  14. evalscope/backend/rag_eval/cmteb/arguments.py +61 -0
  15. evalscope/backend/rag_eval/cmteb/base.py +91 -0
  16. evalscope/backend/rag_eval/cmteb/task_template.py +85 -0
  17. evalscope/backend/rag_eval/cmteb/tasks/Classification.py +302 -0
  18. evalscope/backend/rag_eval/cmteb/tasks/Clustering.py +252 -0
  19. evalscope/backend/rag_eval/cmteb/tasks/CustomTask.py +61 -0
  20. evalscope/backend/rag_eval/cmteb/tasks/PairClassification.py +113 -0
  21. evalscope/backend/rag_eval/cmteb/tasks/Reranking.py +150 -0
  22. evalscope/backend/rag_eval/cmteb/tasks/Retrieval.py +345 -0
  23. evalscope/backend/rag_eval/cmteb/tasks/STS.py +302 -0
  24. evalscope/backend/rag_eval/cmteb/tasks/__init__.py +70 -0
  25. evalscope/backend/rag_eval/ragas/__init__.py +2 -0
  26. evalscope/backend/rag_eval/ragas/arguments.py +47 -0
  27. evalscope/backend/rag_eval/ragas/metrics/__init__.py +2 -0
  28. evalscope/backend/rag_eval/ragas/metrics/multi_modal_faithfulness.py +91 -0
  29. evalscope/backend/rag_eval/ragas/metrics/multi_modal_relevance.py +99 -0
  30. evalscope/backend/rag_eval/ragas/task_template.py +61 -0
  31. evalscope/backend/rag_eval/ragas/tasks/__init__.py +2 -0
  32. evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +263 -0
  33. evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +72 -0
  34. evalscope/backend/vlm_eval_kit/backend_manager.py +0 -1
  35. evalscope/backend/vlm_eval_kit/custom_dataset.py +1 -1
  36. evalscope/evaluator/evaluator.py +1 -0
  37. evalscope/models/api/openai_api.py +2 -2
  38. evalscope/perf/http_client.py +1 -1
  39. evalscope/perf/openai_api.py +2 -0
  40. evalscope/run.py +4 -0
  41. evalscope/utils/logger.py +44 -14
  42. evalscope/utils/task_utils.py +3 -0
  43. evalscope/version.py +2 -2
  44. {evalscope-0.5.5rc1.dist-info → evalscope-0.6.0rc0.dist-info}/METADATA +40 -44
  45. {evalscope-0.5.5rc1.dist-info → evalscope-0.6.0rc0.dist-info}/RECORD +48 -17
  46. {evalscope-0.5.5rc1.dist-info → evalscope-0.6.0rc0.dist-info}/WHEEL +0 -0
  47. {evalscope-0.5.5rc1.dist-info → evalscope-0.6.0rc0.dist-info}/entry_points.txt +0 -0
  48. {evalscope-0.5.5rc1.dist-info → evalscope-0.6.0rc0.dist-info}/top_level.txt +0 -0
{evalscope-0.5.5rc1.dist-info → evalscope-0.6.0rc0.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: evalscope
-Version: 0.5.5rc1
+Version: 0.6.0rc0
 Summary: EvalScope: Lightweight LLMs Evaluation Framework
 Home-page: https://github.com/modelscope/evalscope
 Author: ModelScope team
@@ -19,22 +19,22 @@ Requires-Dist: torch
 Requires-Dist: absl-py
 Requires-Dist: accelerate
 Requires-Dist: cachetools
-Requires-Dist: datasets (<3.0.0,>=2.18.0)
+Requires-Dist: datasets (<=3.0.1,>=3.0.0)
 Requires-Dist: editdistance
 Requires-Dist: jsonlines
 Requires-Dist: matplotlib
 Requires-Dist: modelscope[framework]
-Requires-Dist: nltk
+Requires-Dist: nltk (>=3.9)
 Requires-Dist: openai
 Requires-Dist: pandas
 Requires-Dist: plotly
-Requires-Dist: pyarrow
+Requires-Dist: pyarrow (<=17.0.0)
 Requires-Dist: pympler
 Requires-Dist: pyyaml
 Requires-Dist: regex
 Requires-Dist: requests
 Requires-Dist: requests-toolbelt
-Requires-Dist: rouge-score
+Requires-Dist: rouge-score (>=0.1.0)
 Requires-Dist: sacrebleu
 Requires-Dist: scikit-learn
 Requires-Dist: seaborn
@@ -52,22 +52,22 @@ Requires-Dist: torch ; extra == 'all'
 Requires-Dist: absl-py ; extra == 'all'
 Requires-Dist: accelerate ; extra == 'all'
 Requires-Dist: cachetools ; extra == 'all'
-Requires-Dist: datasets (<3.0.0,>=2.18.0) ; extra == 'all'
+Requires-Dist: datasets (<=3.0.1,>=3.0.0) ; extra == 'all'
 Requires-Dist: editdistance ; extra == 'all'
 Requires-Dist: jsonlines ; extra == 'all'
 Requires-Dist: matplotlib ; extra == 'all'
 Requires-Dist: modelscope[framework] ; extra == 'all'
-Requires-Dist: nltk ; extra == 'all'
+Requires-Dist: nltk (>=3.9) ; extra == 'all'
 Requires-Dist: openai ; extra == 'all'
 Requires-Dist: pandas ; extra == 'all'
 Requires-Dist: plotly ; extra == 'all'
-Requires-Dist: pyarrow ; extra == 'all'
+Requires-Dist: pyarrow (<=17.0.0) ; extra == 'all'
 Requires-Dist: pympler ; extra == 'all'
 Requires-Dist: pyyaml ; extra == 'all'
 Requires-Dist: regex ; extra == 'all'
 Requires-Dist: requests ; extra == 'all'
 Requires-Dist: requests-toolbelt ; extra == 'all'
-Requires-Dist: rouge-score ; extra == 'all'
+Requires-Dist: rouge-score (>=0.1.0) ; extra == 'all'
 Requires-Dist: sacrebleu ; extra == 'all'
 Requires-Dist: scikit-learn ; extra == 'all'
 Requires-Dist: seaborn ; extra == 'all'
@@ -80,8 +80,11 @@ Requires-Dist: transformers (>=4.33) ; extra == 'all'
 Requires-Dist: transformers-stream-generator ; extra == 'all'
 Requires-Dist: jieba ; extra == 'all'
 Requires-Dist: rouge-chinese ; extra == 'all'
-Requires-Dist: ms-opencompass (>=0.1.1) ; extra == 'all'
+Requires-Dist: ms-opencompass (>=0.1.3) ; extra == 'all'
 Requires-Dist: ms-vlmeval (>=0.0.5) ; extra == 'all'
+Requires-Dist: mteb (==1.19.4) ; extra == 'all'
+Requires-Dist: ragas (==0.2.3) ; extra == 'all'
+Requires-Dist: webdataset (>0.2.0) ; extra == 'all'
 Provides-Extra: inner
 Requires-Dist: absl-py ; extra == 'inner'
 Requires-Dist: accelerate ; extra == 'inner'
@@ -109,15 +112,22 @@ Requires-Dist: tqdm ; extra == 'inner'
 Requires-Dist: transformers (<4.43,>=4.33) ; extra == 'inner'
 Requires-Dist: transformers-stream-generator ; extra == 'inner'
 Provides-Extra: opencompass
-Requires-Dist: ms-opencompass (>=0.1.1) ; extra == 'opencompass'
+Requires-Dist: ms-opencompass (>=0.1.3) ; extra == 'opencompass'
+Provides-Extra: rag
+Requires-Dist: mteb (==1.19.4) ; extra == 'rag'
+Requires-Dist: ragas (==0.2.3) ; extra == 'rag'
+Requires-Dist: webdataset (>0.2.0) ; extra == 'rag'
 Provides-Extra: vlmeval
 Requires-Dist: ms-vlmeval (>=0.0.5) ; extra == 'vlmeval'
 
-English | [简体中文](README_zh.md)
 
 
 ![](docs/en/_static/images/evalscope_logo.png)
 
+<p align="center">
+English | <a href="README_zh.md">简体中文</a>
+</p>
+
 <p align="center">
 <a href="https://badge.fury.io/py/evalscope"><img src="https://badge.fury.io/py/evalscope.svg" alt="PyPI version" height="18"></a>
 <a href="https://pypi.org/project/evalscope"><img alt="PyPI - Downloads" src="https://static.pepy.tech/badge/evalscope">
@@ -126,7 +136,7 @@ English | [简体中文](README_zh.md)
 <img src='https://readthedocs.org/projects/evalscope-en/badge/?version=latest' alt='Documentation Status' />
 </a>
 <br>
-<a href="https://evalscope.readthedocs.io/en/latest/"><span style="font-size: 16px;">📖 Documents</span></a> &nbsp | &nbsp<a href="https://evalscope.readthedocs.io/zh-cn/latest/"><span style="font-size: 16px;"> 📖 中文文档</span></a>
+<a href="https://evalscope.readthedocs.io/en/latest/">📖 Documents</a>
 <p>
 
 
@@ -140,34 +150,15 @@ English | [简体中文](README_zh.md)
 - [Offline Evaluation](#offline-evaluation)
 - [Arena Mode](#arena-mode)
 - [Model Serving Performance Evaluation](#Model-Serving-Performance-Evaluation)
-- [Leaderboard](#leaderboard)
-
-## 📝 Introduction
-
-Large Model (including Large Language Models, Multi-modal Large Language Models) evaluation has become a critical process for assessing and improving LLMs. To better support the evaluation of large models, we propose the EvalScope framework.
 
-### Framework Features
-- **Benchmark Datasets**: Preloaded with several commonly used test benchmarks, including MMLU, CMMLU, C-Eval, GSM8K, ARC, HellaSwag, TruthfulQA, MATH, HumanEval, etc.
-- **Evaluation Metrics**: Implements various commonly used evaluation metrics.
-- **Model Access**: A unified model access mechanism that is compatible with the Generate and Chat interfaces of multiple model families.
-- **Automated Evaluation**: Includes automatic evaluation of objective questions and complex task evaluation using expert models.
-- **Evaluation Reports**: Automatically generates evaluation reports.
-- **Arena Mode**: Used for comparisons between models and objective evaluation of models, supporting various evaluation modes, including:
-  - **Single mode**: Scoring a single model.
-  - **Pairwise-baseline mode**: Comparing against a baseline model.
-  - **Pairwise (all) mode**: Pairwise comparison among all models.
-- **Visualization Tools**: Provides intuitive displays of evaluation results.
-- **Model Performance Evaluation**: Offers a performance testing tool for model inference services and detailed statistics, see [Model Performance Evaluation Documentation](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test.html).
-- **OpenCompass Integration**: Supports OpenCompass as the evaluation backend, providing advanced encapsulation and task simplification, allowing for easier task submission for evaluation.
-- **VLMEvalKit Integration**: Supports VLMEvalKit as the evaluation backend, facilitating the initiation of multi-modal evaluation tasks, supporting various multi-modal models and datasets.
-- **Full-Link Support**: Through seamless integration with the [ms-swift](https://github.com/modelscope/ms-swift) training framework, provides a one-stop development process for model training, model deployment, model evaluation, and report viewing, enhancing user development efficiency.
 
+## 📝 Introduction
 
-<details><summary>Overall Architecture</summary>
+EvalScope is the official model evaluation and performance benchmarking framework launched by the [ModelScope](https://modelscope.cn/) community. It comes with built-in common benchmarks and evaluation metrics, such as MMLU, CMMLU, C-Eval, GSM8K, ARC, HellaSwag, TruthfulQA, MATH, and HumanEval. EvalScope supports various types of model evaluations, including LLMs, multimodal LLMs, embedding models, and reranker models. It is also applicable to multiple evaluation scenarios, such as end-to-end RAG evaluation, arena mode, and model inference performance stress testing. Moreover, with the seamless integration of the ms-swift training framework, evaluations can be initiated with a single click, providing full end-to-end support from model training to evaluation 🚀
 
 <p align="center">
 <img src="docs/en/_static/images/evalscope_framework.png" width="70%">
-<br>Fig 1. EvalScope Framework.
+<br>EvalScope Framework.
 </p>
 
 The architecture includes the following modules:
@@ -177,14 +168,17 @@ The architecture includes the following modules:
 - **Native**: EvalScope’s own **default evaluation framework**, supporting various evaluation modes, including single model evaluation, arena mode, baseline model comparison mode, etc.
 - **OpenCompass**: Supports [OpenCompass](https://github.com/open-compass/opencompass) as the evaluation backend, providing advanced encapsulation and task simplification, allowing you to submit tasks for evaluation more easily.
 - **VLMEvalKit**: Supports [VLMEvalKit](https://github.com/open-compass/VLMEvalKit) as the evaluation backend, enabling easy initiation of multi-modal evaluation tasks, supporting various multi-modal models and datasets.
+- **RAGEval**: Supports RAG evaluation, supporting independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html).
 - **ThirdParty**: Other third-party evaluation tasks, such as ToolBench.
 4. **Performance Evaluator**: Model performance evaluation, responsible for measuring model inference service performance, including performance testing, stress testing, performance report generation, and visualization.
 5. **Evaluation Report**: The final generated evaluation report summarizes the model's performance, which can be used for decision-making and further model optimization.
 6. **Visualization**: Visualization results help users intuitively understand evaluation results, facilitating analysis and comparison of different model performances.
-</details>
 
 
 ## 🎉 News
+- 🔥 **[2024.10.31]** The best practice for evaluating Multimodal-RAG has been updated, please check the [📖 Blog](https://evalscope.readthedocs.io/zh-cn/latest/blog/RAG/multimodal_RAG.html#multimodal-rag) for more details.
+- 🔥 **[2024.10.23]** Supports multimodal RAG evaluation, including the assessment of image-text retrieval using [CLIP_Benchmark](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/clip_benchmark.html), and extends [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html) to support end-to-end multimodal metrics evaluation.
+- 🔥 **[2024.10.8]** Support for RAG evaluation, including independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html).
 - 🔥 **[2024.09.18]** Our documentation has been updated to include a blog module, featuring some technical research and discussions related to evaluations. We invite you to [📖 read it](https://evalscope.readthedocs.io/en/refact_readme/blog/index.html).
 - 🔥 **[2024.09.12]** Support for LongWriter evaluation, which supports 10,000+ word generation. You can use the benchmark [LongBench-Write](evalscope/third_party/longbench_write/README.md) to measure the long output quality as well as the output length.
 - 🔥 **[2024.08.30]** Support for custom dataset evaluations, including text datasets and multimodal image-text datasets.
@@ -355,9 +349,10 @@ run_task(task_cfg=your_task_cfg)
 ## Evaluation Backend
 EvalScope supports using third-party evaluation frameworks to initiate evaluation tasks, which we call Evaluation Backend. Currently supported Evaluation Backend includes:
 - **Native**: EvalScope's own **default evaluation framework**, supporting various evaluation modes including single model evaluation, arena mode, and baseline model comparison mode.
-- [OpenCompass](https://github.com/open-compass/opencompass): Initiate OpenCompass evaluation tasks through EvalScope. Lightweight, easy to customize, supports seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/opencompass_backend.html)
-- [VLMEvalKit](https://github.com/open-compass/VLMEvalKit): Initiate VLMEvalKit multimodal evaluation tasks through EvalScope. Supports various multimodal models and datasets, and offers seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/vlmevalkit_backend.html)
-- **ThirdParty**: The third-party task, e.g. [ToolBench](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html), you can contribute your own evaluation task to EvalScope as third-party backend.
+- [OpenCompass](https://github.com/open-compass/opencompass): Initiate OpenCompass evaluation tasks through EvalScope. Lightweight, easy to customize, supports seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/opencompass_backend.html)
+- [VLMEvalKit](https://github.com/open-compass/VLMEvalKit): Initiate VLMEvalKit multimodal evaluation tasks through EvalScope. Supports various multimodal models and datasets, and offers seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/vlmevalkit_backend.html)
+- **RAGEval**: Initiate RAG evaluation tasks through EvalScope, supporting independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html): [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/index.html)
+- **ThirdParty**: Third-party evaluation tasks, such as [ToolBench](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) and [LongBench-Write](https://evalscope.readthedocs.io/en/latest/third_party/longwriter.html).
 
 ## Custom Dataset Evaluation
 EvalScope supports custom dataset evaluation. For detailed information, please refer to the Custom Dataset Evaluation [📖User Guide](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset.html)
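Per the backend list above, a RAGEval run goes through the same `run_task` entry point as the other backends. A hypothetical sketch of launching a CMTEB embedding evaluation this way; the `eval_backend`/`eval_config` keys and the model/task fields are assumptions modeled on how the other backends are configured, not confirmed by this diff — consult the linked RAGEval user guide for the real schema:

```python
from evalscope.run import run_task

# Hypothetical config: the exact eval_config schema for the RAGEval backend
# is an assumption; see the RAGEval user guide for authoritative field names.
task_cfg = {
    "eval_backend": "RAGEval",
    "eval_config": {
        "tool": "MTEB",  # evaluate an embedding model on (C)MTEB tasks
        "model": [{"model_name_or_path": "your-embedding-model"}],  # placeholder
        "eval": {"tasks": ["TNews"]},  # TNews is a CMTEB classification task
    },
}

run_task(task_cfg=task_cfg)
```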
@@ -379,13 +374,10 @@ A stress testing tool that focuses on large language models and can be customize
 Refer to : Model Serving Performance Evaluation [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test.html)
 
 
-## Leaderboard
-The LLM Leaderboard aims to provide an objective and comprehensive evaluation standard and platform to help researchers and developers understand and compare the performance of models on various tasks on ModelScope.
-
-Refer to : [Leaderboard](https://modelscope.cn/leaderboard/58/ranking?type=free)
-
 
 ## TO-DO List
+- [x] RAG evaluation
+- [x] VLM evaluation
 - [x] Agents evaluation
 - [x] vLLM
 - [ ] Distributed evaluating
@@ -397,3 +389,7 @@ Refer to : [Leaderboard](https://modelscope.cn/leaderboard/58/ranking?type=free)
 - [ ] Auto-reviewer
 - [ ] Qwen-max
 
+
+## Star History
+
+[![Star History Chart](https://api.star-history.com/svg?repos=modelscope/evalscope&type=Date)](https://star-history.com/#modelscope/evalscope&Date)
{evalscope-0.5.5rc1.dist-info → evalscope-0.6.0rc0.dist-info}/RECORD

@@ -2,22 +2,53 @@ evalscope/__init__.py,sha256=3eLMMrjkAIAs3vGluXNZn5-xTSbO_vfba9yNPbkVtg8,105
 evalscope/cache.py,sha256=zpGjL9JMosqjk_dkODVwvIGiUC0WAMmMTHDNJOvBQU8,3288
 evalscope/config.py,sha256=G_rpSn5Kd1aPlFJO6asnZu5FUggZmwcYdAxxpuq0yDs,6972
 evalscope/constants.py,sha256=g8lGYlpA4Wk88HwtqId1-jJX_z8Lr2k02gWLsyofyj0,2670
-evalscope/run.py,sha256=T-2zoJpBx6YxLnLJH-iFF3UxUGYTU36PMV_DQ9e8tSM,18484
+evalscope/run.py,sha256=uAXtaxIBcR94jyfHGFAecuzn0y71oLgu-d9VOohCJAw,18738
 evalscope/run_arena.py,sha256=BCWCAiX0BQ9pLMIq08svEcd-IoFr75gFShpV88robIY,8963
 evalscope/run_ms.py,sha256=UtJoGnah64SXigTawJQWTi_TEGjr7Td0rjCTaO-htL8,6028
 evalscope/summarizer.py,sha256=rIyML8HpjQxIpXg8KvQ0CzOS6xMS-JHZh6kUZzkaRsk,6640
-evalscope/version.py,sha256=auPe2msc57SycidTCg5C82JBZdws4TSukCurfVAMuVg,121
-evalscope/backend/__init__.py,sha256=UP_TW5KBq6V_Nvqkeb7PGvGGX3rVYussT43npwCwDgE,135
+evalscope/version.py,sha256=HbExGw191bJuKShYz5RiaxbmdfvIqJQ_bjIjXZhfMDw,121
+evalscope/backend/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 evalscope/backend/base.py,sha256=5BLrDNNwxsGp35zorD-kphmN15tlBbkuuqwkz8jWZq0,876
 evalscope/backend/opencompass/__init__.py,sha256=UP_TW5KBq6V_Nvqkeb7PGvGGX3rVYussT43npwCwDgE,135
 evalscope/backend/opencompass/api_meta_template.py,sha256=sBW0XbVDOKeJ7mVUDLhmcG4e0yClw3eluazdp_8wtgQ,1753
 evalscope/backend/opencompass/backend_manager.py,sha256=_eg82FLAVxQ6t5e1OqlyuxZcngqD8rxvI5EijLUh_zI,10294
 evalscope/backend/opencompass/tasks/__init__.py,sha256=I_ANdxdcIHpkIzIXc1yKOlWwzb4oY0FwTPq1kYtgzQw,50
 evalscope/backend/opencompass/tasks/eval_api.py,sha256=12lrgDpMzZ1XBRboq5TEOovDPCMDwwGCJoRT78Ox_yo,1108
-evalscope/backend/opencompass/tasks/eval_datasets.py,sha256=bYFHkjiwZqh2FVRo1I88xEDZ6nYmZjAgG5ZODbthKFI,5241
+evalscope/backend/opencompass/tasks/eval_datasets.py,sha256=t2t3_dHZf-eMfNqpQaD2XIjWZejTN4AxVXITdj_4Y3o,5324
+evalscope/backend/rag_eval/__init__.py,sha256=8om6TVnTMmyTEQt1jBuUQA4UfIzyps-_-ih90H_Qjio,284
+evalscope/backend/rag_eval/backend_manager.py,sha256=jmO-UMu6_iOXMnl4--PrMWCsnIYEhsbiX017rtURqm0,2997
+evalscope/backend/rag_eval/clip_benchmark/__init__.py,sha256=gDXCiRUTSeGQHxd5SjQsnphMqHJ2si2jywRiHvujEOg,150
+evalscope/backend/rag_eval/clip_benchmark/arguments.py,sha256=VbB7JY4NunV83ewkZrUiM74jTzSETMPcOLlllRs7djA,1537
+evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py,sha256=vaguNflVBC5-0lk1kaU7CLTbkJuBf0hHGIdmoq4Bn8s,8474
+evalscope/backend/rag_eval/clip_benchmark/task_template.py,sha256=asEF_Nt2Xt3DtIS49J9nQKEjTdrcAkYhY4zumCDzSws,3990
+evalscope/backend/rag_eval/clip_benchmark/tasks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+evalscope/backend/rag_eval/clip_benchmark/tasks/image_caption.py,sha256=Bj2ysvM0JT-6T40v0rffeZgJIRht5KVX0GzMOiUphf0,2578
+evalscope/backend/rag_eval/clip_benchmark/tasks/zeroshot_classification.py,sha256=ZrUYDbQ75eo0vmIwXh5Bb9c4nyEwd4AO2oURaIqjIII,7502
+evalscope/backend/rag_eval/clip_benchmark/tasks/zeroshot_retrieval.py,sha256=Bcs64xece4BMNhxuaFimOwMJnlpjNxfGrdSCWOYItko,5977
+evalscope/backend/rag_eval/cmteb/__init__.py,sha256=ajVz6XP5hqPq-jm66hp2poA2qKj1V19ZGoqjrGUlO7U,279
+evalscope/backend/rag_eval/cmteb/arguments.py,sha256=wZvnVir2tSxYCV_DPR3TSDj4VxtUn3wLhBPqyMJYKno,2330
+evalscope/backend/rag_eval/cmteb/base.py,sha256=fYrIjKwOLwBAHb2rlNkEjYScjZ5Qpyv2LdMmWZYWREA,2830
+evalscope/backend/rag_eval/cmteb/task_template.py,sha256=Clyc8TZCtZrL6MjAw49rh55Xb3hf2y1C3SzLvZsorLE,2646
+evalscope/backend/rag_eval/cmteb/tasks/Classification.py,sha256=7adR40W6Uu58-QR9jCUP4k7TdAnG0oT225v4xHXah2g,10635
+evalscope/backend/rag_eval/cmteb/tasks/Clustering.py,sha256=-oJ9rXy7pgOB7Gyf68TcSlmmAUoBx5hKofcKNuIsCd8,8977
+evalscope/backend/rag_eval/cmteb/tasks/CustomTask.py,sha256=rF6dtrwOfvJoq2Y4myZg9_638M1g06qq0hWCmvxsIo0,2039
+evalscope/backend/rag_eval/cmteb/tasks/PairClassification.py,sha256=2WkaTE-jF8jqsu1UcNDqN8A4567UzW5boD_0B83j-9A,4008
+evalscope/backend/rag_eval/cmteb/tasks/Reranking.py,sha256=C34nDuya8OT3aeMxYCYjUpUtWp7w00jSfIYQSInlNAg,5329
+evalscope/backend/rag_eval/cmteb/tasks/Retrieval.py,sha256=wUxiQH5aOmWNS4YswACyHqBn5xqP5eyvsq6U9WSp5R0,11457
+evalscope/backend/rag_eval/cmteb/tasks/STS.py,sha256=6GMaoCANM-IKYLk4srHOYr_eurav3DGihHMQeJPXR6k,12054
+evalscope/backend/rag_eval/cmteb/tasks/__init__.py,sha256=eBHm_TWeh7WiwpdVBtUlegeXMAxJyVQdUHRhJERobIs,1506
+evalscope/backend/rag_eval/ragas/__init__.py,sha256=-VnStCVy7uHih2uipG_7AD4i2FQ5sVM7_NI-sEZBpRQ,170
+evalscope/backend/rag_eval/ragas/arguments.py,sha256=BriXjcXVk2FqjDNuFYpfBZsUVzrkrYH7egbO9x-jcZ4,1873
+evalscope/backend/rag_eval/ragas/task_template.py,sha256=nv2i9-NE2SXpLrVKo5zhadYYKbDFVXVVA4sfgb4ti4g,1693
+evalscope/backend/rag_eval/ragas/metrics/__init__.py,sha256=HgY5nrcNtWpQ7gBi5lCEJXJVINd_R57dsmI8ldS2rd0,160
+evalscope/backend/rag_eval/ragas/metrics/multi_modal_faithfulness.py,sha256=Uqz5qWZ76Gos95_QlhwncbATXyk0YX4wkI0LiAdPElU,3838
+evalscope/backend/rag_eval/ragas/metrics/multi_modal_relevance.py,sha256=CdLnWHq1eTna6j3F5-pncW5YusxD_v3ScjzeCsZ7mng,3967
+evalscope/backend/rag_eval/ragas/tasks/__init__.py,sha256=WO2xja0g0JSiYGdu2uAEDQgDceuFcgPWwPoqFnwDU0s,172
+evalscope/backend/rag_eval/ragas/tasks/testset_generation.py,sha256=In-2VvZJIZvXl9idGUUQBTb7Gu-o1yFLjaqj-eJkWw0,8437
+evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py,sha256=bXOqik6qKWzbrEz21ykdkqeqqPrmoUIhTwW6eRQXy0M,2222
 evalscope/backend/vlm_eval_kit/__init__.py,sha256=xTgHM95lWzh4s0W7zxLwYkgUbPAZfAb0UoGGmyyBXrs,83
-evalscope/backend/vlm_eval_kit/backend_manager.py,sha256=k52qTUqkp1kJivKn8bVrKoF8cng4xYTQLUmjnH_CWPM,6080
-evalscope/backend/vlm_eval_kit/custom_dataset.py,sha256=zC40Jw9bIqcGKuWS9oKPAlQdBARc-zY3sJlSiU-u-sI,1625
+evalscope/backend/vlm_eval_kit/backend_manager.py,sha256=ewhpE9yzsqf5ED6kqsqek2YEgg96GBQOupxtVNhaXxI,6046
+evalscope/backend/vlm_eval_kit/custom_dataset.py,sha256=Yz2A5kB1E8DYBnjuVCA6TTPtLjhg8vYKeJTh6FU_Ecw,1645
 evalscope/benchmarks/__init__.py,sha256=6TKP35wfKf7R_h870fsEtcIlIAgomKOcukNL9M-5I1Y,162
 evalscope/benchmarks/benchmark.py,sha256=EmwYyFdrAHBGMkSbsMZQOR_62Q0CSKl8zeLlr7xvJdQ,2159
 evalscope/benchmarks/data_adapter.py,sha256=eVQvOQYQOQbIl8UlvOEUqRThL3FP3aUD6DSlqF1bqO0,10395
@@ -91,7 +122,7 @@ evalscope/cli/cli.py,sha256=uZ-qC8WBsLd5-Hn94d43sSGg0UC_12RebSD4ToKjypg,844
 evalscope/cli/start_perf.py,sha256=TL6bMXYl3ln-tfs5uBmzb9x94uxz6f3PBFIt1l7g3VA,994
 evalscope/cli/start_server.py,sha256=ATGLP2TE0aImJNicpehdzBuFlNb50F7KhyL4A_ZSoGU,3885
 evalscope/evaluator/__init__.py,sha256=S6MU1O_iiNAaKxNIhO9MEmdW-BSNf_YH2l6NQ9lxVNo,103
-evalscope/evaluator/evaluator.py,sha256=gB408byOpu269Psh6MjYC9-a_uv9GvThoT7t07Oqh6w,30712
+evalscope/evaluator/evaluator.py,sha256=eSCgPPDGfIJfKu0cthhbDLFm1xMhj_869iT3ngcQkPc,30817
 evalscope/evaluator/rating_eval.py,sha256=cJbkyXIuwFUZoe7ZJZM6eUskNd9zlORgndckuon2OQ8,5768
 evalscope/evaluator/reviewer/__init__.py,sha256=I_ANdxdcIHpkIzIXc1yKOlWwzb4oY0FwTPq1kYtgzQw,50
 evalscope/evaluator/reviewer/auto_reviewer.py,sha256=JycPYti9h1j_8DRcu_rc5U0wkEASHYg-XBqrUUoiO-Q,17054
@@ -109,7 +140,7 @@ evalscope/models/model_adapter.py,sha256=Cgs68ajRwTETEo1eU-OhFiFGuSx4eS1p7-JT3jO
 evalscope/models/openai_model.py,sha256=PoQS1FIiWIxp1xBJPV7Bq81LFD9FIT3vAHUvNa22DCc,3452
 evalscope/models/template.py,sha256=Yk7-QnvjiLD0zchSZcaDSLmpW8onIeFpngSwtUOYVPk,56035
 evalscope/models/api/__init__.py,sha256=0c75K78O1KaV02BqqtEp-hhtSSClXLawb8E0c2iqN_A,105
-evalscope/models/api/openai_api.py,sha256=o-FVJFSvfk5mFJm4egXcKfR5ya1fduo5b-uqTkeRu9A,7871
+evalscope/models/api/openai_api.py,sha256=uBicJPaFLOhIrB5PKI8FE-SItb7v-fuDwBgkgns3CY0,7883
 evalscope/models/custom/__init__.py,sha256=K4Ewo7Qrs73-jBuPq4ffxd8hMnttKhic-Zj0amH3wiU,103
 evalscope/models/custom/custom_model.py,sha256=2ivxfGQs5V5HDnQEhTBi5v8KNBxJDbzPVJdNOGo3iSg,1566
 evalscope/perf/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -119,8 +150,8 @@ evalscope/perf/custom_api.py,sha256=H2IgM-LMjqXxVhbrtkXuiREb-p14zwMmllgl26a-jgw,
 evalscope/perf/dashscope_api.py,sha256=_XUF3czkYdPdVgtP7nqzRxROKxlqDjWs4DQnTyocNvM,3410
 evalscope/perf/dataset_plugin_base.py,sha256=6veUTyZ38W1Iig65vxNV9SfmqrsR8ID_UHgNiUO9Bv4,1814
 evalscope/perf/how_to_analysis_result.py,sha256=UVd_aYJ_7N5hl_wK9oIZig1vSwfgzodxW7XC6IWqbdg,1044
-evalscope/perf/http_client.py,sha256=WYHuGY_BCeeh8vHi1fm9zrAndOKpVQp4h21j1kKnM64,34535
-evalscope/perf/openai_api.py,sha256=XrH6jg8VlO9Wu0vGwZna_bHq65XMAlCfCEyqMjs8w1c,5970
+evalscope/perf/http_client.py,sha256=4ppaZAIwrajJ9nzdgdwc3EdjmGSJz1_dg7Q6wQYELgw,34537
+evalscope/perf/openai_api.py,sha256=rJSGlXtnHgMNYcgO0bJQCsSLhKChUxklTk4cI63YTMQ,6066
 evalscope/perf/plugin_registry.py,sha256=D2MG2AXDBScjuKxB4g_Hg026pSRO752dBimonYtaAzM,782
 evalscope/perf/query_parameters.py,sha256=HfGRZJSzRMVfPezWTvbWhYeprCetGNPX_M_paoDtuOY,1346
 evalscope/perf/server_sent_event.py,sha256=s2UqUr1qAMWzBG1XWCFxhulyztd6FM0tGqVvPC8jD5o,1153
@@ -169,12 +200,12 @@ evalscope/tools/rewrite_eval_results.py,sha256=ZVi2hVjiTOmR_O5IaLv6qnQNpMz6FnDb9
 evalscope/utils/__init__.py,sha256=6RjACRYUSpGj6fkZ7NzYpl0lFppQCp9KVn5ktZe626s,128
 evalscope/utils/arena_utils.py,sha256=RMkymUv9Cxs37arUntzgDY5P0Dand2jGpsb7uy6wZmg,7670
 evalscope/utils/completion_parsers.py,sha256=61l8CTh1VxHgRoMDhtznpAhuJp47MssGgS-LdEe_h80,2997
-evalscope/utils/logger.py,sha256=Ycd0W17Z_oiByPuPX3_umNrOCHjT9O_e_Kws7ZWUSvU,1855
+evalscope/utils/logger.py,sha256=cf3U400Mx1speMMNXorjwEE8noDz5Mbd-9PNgaulGeY,3013
 evalscope/utils/task_cfg_parser.py,sha256=LiNQ2X8lbZU0cODpaY_PbKyUhNoxZIC495UsLJigX64,138
-evalscope/utils/task_utils.py,sha256=Mv_u_f4Z91zcUeko6acZCmnOAPRfk61kf_dliLzG5Yk,459
+evalscope/utils/task_utils.py,sha256=IMtBSBUp3H95Ko0vn8Q55Wmz2SFZXSfjVy49tyomL_g,537
 evalscope/utils/utils.py,sha256=zHo9hfxGBUVKE2xNMR7lDoEvfRnk4V4946DEfXQhlq4,20509
-evalscope-0.5.5rc1.dist-info/METADATA,sha256=yKE-108cG8WZFEHoDyY2FoNBcFIf4BgHkwTOnNhEc7w,20708
-evalscope-0.5.5rc1.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-evalscope-0.5.5rc1.dist-info/entry_points.txt,sha256=Qr4oTgGhg_K-iUtKwVH6lWUhFHDUiH9trIqydHGTEug,56
-evalscope-0.5.5rc1.dist-info/top_level.txt,sha256=jNR-HMn3TR8Atolq7_4rW8IWVX6GhvYV5_1Y_KbJKlY,10
-evalscope-0.5.5rc1.dist-info/RECORD,,
+evalscope-0.6.0rc0.dist-info/METADATA,sha256=w2k8y1h3gVVNAI7Ey-mc4RWsaSjNBlokuu0hw4e-3aI,21242
+evalscope-0.6.0rc0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+evalscope-0.6.0rc0.dist-info/entry_points.txt,sha256=Qr4oTgGhg_K-iUtKwVH6lWUhFHDUiH9trIqydHGTEug,56
+evalscope-0.6.0rc0.dist-info/top_level.txt,sha256=jNR-HMn3TR8Atolq7_4rW8IWVX6GhvYV5_1Y_KbJKlY,10
+evalscope-0.6.0rc0.dist-info/RECORD,,
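Each `sha256=` value in RECORD is the unpadded URL-safe base64 encoding of the file's SHA-256 digest (the standard wheel RECORD convention), which is why even a two-byte change such as http_client.py (34535 → 34537 bytes) yields a completely different hash. A small sketch for recomputing such a digest; the file path is illustrative:

```python
import base64
import hashlib
from pathlib import Path

def record_digest(path: Path) -> str:
    """Wheel-RECORD style digest: unpadded URL-safe base64 of the SHA-256."""
    raw = hashlib.sha256(path.read_bytes()).digest()
    return base64.urlsafe_b64encode(raw).rstrip(b"=").decode("ascii")

# Illustrative path; point this at a file from the installed 0.6.0rc0 wheel.
print(record_digest(Path("evalscope/version.py")))
```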