evalscope 0.5.5__py3-none-any.whl → 0.5.5rc0__py3-none-any.whl

This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only and reflects the changes between the two versions.

Potentially problematic release.


This version of evalscope might be problematic.

Files changed (30)
  1. evalscope/backend/__init__.py +3 -0
  2. evalscope/backend/vlm_eval_kit/backend_manager.py +1 -0
  3. evalscope/backend/vlm_eval_kit/custom_dataset.py +1 -1
  4. evalscope/evaluator/evaluator.py +0 -1
  5. evalscope/metrics/bundled_rouge_score/rouge_scorer.py +0 -19
  6. evalscope/run.py +0 -4
  7. evalscope/utils/logger.py +14 -44
  8. evalscope/utils/task_utils.py +0 -3
  9. evalscope/version.py +2 -2
  10. {evalscope-0.5.5.dist-info → evalscope-0.5.5rc0.dist-info}/METADATA +30 -24
  11. {evalscope-0.5.5.dist-info → evalscope-0.5.5rc0.dist-info}/RECORD +14 -30
  12. evalscope/backend/rag_eval/__init__.py +0 -3
  13. evalscope/backend/rag_eval/backend_manager.py +0 -68
  14. evalscope/backend/rag_eval/cmteb/__init__.py +0 -4
  15. evalscope/backend/rag_eval/cmteb/arguments.py +0 -59
  16. evalscope/backend/rag_eval/cmteb/base.py +0 -89
  17. evalscope/backend/rag_eval/cmteb/task_template.py +0 -83
  18. evalscope/backend/rag_eval/cmteb/tasks/Classification.py +0 -302
  19. evalscope/backend/rag_eval/cmteb/tasks/Clustering.py +0 -252
  20. evalscope/backend/rag_eval/cmteb/tasks/PairClassification.py +0 -113
  21. evalscope/backend/rag_eval/cmteb/tasks/Reranking.py +0 -153
  22. evalscope/backend/rag_eval/cmteb/tasks/Retrieval.py +0 -345
  23. evalscope/backend/rag_eval/cmteb/tasks/STS.py +0 -302
  24. evalscope/backend/rag_eval/cmteb/tasks/__init__.py +0 -64
  25. evalscope/backend/rag_eval/ragas/__init__.py +0 -2
  26. evalscope/backend/rag_eval/ragas/arguments.py +0 -37
  27. evalscope/backend/rag_eval/ragas/task_template.py +0 -117
  28. {evalscope-0.5.5.dist-info → evalscope-0.5.5rc0.dist-info}/WHEEL +0 -0
  29. {evalscope-0.5.5.dist-info → evalscope-0.5.5rc0.dist-info}/entry_points.txt +0 -0
  30. {evalscope-0.5.5.dist-info → evalscope-0.5.5rc0.dist-info}/top_level.txt +0 -0
evalscope/backend/rag_eval/cmteb/base.py
@@ -1,89 +0,0 @@
-from collections import defaultdict
-from typing import List
-from mteb import AbsTask
-from datasets import DatasetDict
-from modelscope import MsDataset
-import datasets
-from evalscope.backend.rag_eval.cmteb.tasks import CLS_DICT, CLS_RETRIEVAL
-
-__all__ = ["TaskBase"]
-
-
-
-class TaskBase:
-
-    @staticmethod
-    def get_tasks(task_names, **kwargs) -> List[AbsTask]:
-
-        return [TaskBase.get_task(task_name, **kwargs) for task_name in task_names]
-
-    @staticmethod
-    def get_task(task_name, **kwargs) -> AbsTask:
-
-        if task_name not in CLS_DICT:
-            from mteb.overview import TASKS_REGISTRY
-
-            task_cls = TASKS_REGISTRY[task_name]
-            if task_cls.metadata.type != "Retrieval":
-                task_cls.load_data = load_data
-        else:
-            task_cls = CLS_DICT[task_name]
-            task_cls.load_data = load_data
-        # init task instance
-        task_instance = task_cls()
-        return task_instance
-
-
-def load_data(self, **kwargs):
-    """Load dataset from the hub, compatible with ModelScope and Hugging Face."""
-    if self.data_loaded:
-        return
-
-    limits = kwargs.get("limits", None)
-    hub = kwargs.get("hub", "modelscope")
-    name = self.metadata_dict.get("name")
-    path = self.metadata_dict["dataset"].get("path")
-
-    assert path is not None, "Path must be specified in dataset"
-
-    # Loading the dataset based on the source hub
-    if hub == "modelscope":
-        import re
-
-        path = re.sub(r"^mteb/", "MTEB/", path)
-        dataset = MsDataset.load(path)
-    else:
-        dataset = datasets.load_dataset(**self.metadata_dict["dataset"])  # type: ignore
-
-    if limits is not None:
-        dataset = {
-            split: dataset[split].select(range(min(limits, len(dataset[split]))))
-            for split in dataset.keys()
-        }
-
-    if name in CLS_RETRIEVAL:
-        self.corpus, self.queries, self.relevant_docs = load_retrieval_data(
-            dataset,
-            path,
-            self.metadata_dict["eval_splits"],
-        )
-
-    self.dataset = dataset
-    self.dataset_transform()
-    self.data_loaded = True
-
-
-def load_retrieval_data(dataset, dataset_name: str, eval_splits: list) -> tuple:
-    eval_split = eval_splits[0]
-    qrels = MsDataset.load(dataset_name + "-qrels")[eval_split]
-
-    corpus = {e["id"]: {"text": e["text"]} for e in dataset["corpus"]}
-    queries = {e["id"]: e["text"] for e in dataset["queries"]}
-    relevant_docs = defaultdict(dict)
-    for e in qrels:
-        relevant_docs[e["qid"]][e["pid"]] = e["score"]
-
-    corpus = DatasetDict({eval_split: corpus})
-    queries = DatasetDict({eval_split: queries})
-    relevant_docs = DatasetDict({eval_split: relevant_docs})
-    return corpus, queries, relevant_docs
evalscope/backend/rag_eval/cmteb/task_template.py
@@ -1,83 +0,0 @@
-import os
-import mteb
-from evalscope.backend.rag_eval import EmbeddingModel
-from evalscope.backend.rag_eval import cmteb
-from mteb.task_selection import results_to_dataframe
-from evalscope.utils.logger import get_logger
-
-logger = get_logger()
-
-
-def show_results(output_folder, model, results):
-    model_name = model.mteb_model_meta.model_name_as_path()
-    revision = model.mteb_model_meta.revision
-
-    results_df = results_to_dataframe({model_name: {revision: results}})
-
-    save_path = os.path.join(
-        output_folder,
-        model_name,
-        revision,
-    )
-    logger.info(f"Evaluation results:\n{results_df.to_markdown()}")
-    logger.info(f"Evaluation results saved in {os.path.abspath(save_path)}")
-
-
-def one_stage_eval(
-    model_args,
-    eval_args,
-) -> None:
-    # load model
-    model = EmbeddingModel.load(**model_args)
-
-    # load task first to update instructions
-    tasks = cmteb.TaskBase.get_tasks(task_names=eval_args["tasks"])
-    evaluation = mteb.MTEB(tasks=tasks)
-
-    # run evaluation
-    results = evaluation.run(model, **eval_args)
-
-    # save and log results
-    show_results(eval_args["output_folder"], model, results)
-
-
-def two_stage_eval(
-    model1_args,
-    model2_args,
-    eval_args,
-) -> None:
-    """a two-stage run with the second stage reading results saved from the first stage."""
-    # load model
-    dual_encoder = EmbeddingModel.load(**model1_args)
-    cross_encoder = EmbeddingModel.load(**model2_args)
-
-    first_stage_path = f"{eval_args['output_folder']}/stage1"
-    second_stage_path = f"{eval_args['output_folder']}/stage2"
-
-    tasks = cmteb.TaskBase.get_tasks(task_names=eval_args["tasks"])
-    for task in tasks:
-        evaluation = mteb.MTEB(tasks=[task])
-
-        # stage 1: run dual encoder
-        evaluation.run(
-            dual_encoder,
-            save_predictions=True,
-            output_folder=first_stage_path,
-            overwrite_results=True,
-            hub=eval_args["hub"],
-            limits=eval_args["limits"],
-        )
-        # stage 2: run cross encoder
-        results = evaluation.run(
-            cross_encoder,
-            top_k=eval_args["top_k"],
-            save_predictions=True,
-            output_folder=second_stage_path,
-            previous_results=f"{first_stage_path}/{task.metadata.name}_default_predictions.json",
-            overwrite_results=True,
-            hub=eval_args["hub"],
-            limits=eval_args["limits"],
-        )
-
-    # save and log results
-    show_results(second_stage_path, cross_encoder, results)
evalscope/backend/rag_eval/cmteb/tasks/Classification.py
@@ -1,302 +0,0 @@
-from mteb.abstasks.AbsTaskClassification import AbsTaskClassification
-from mteb.abstasks.TaskMetadata import TaskMetadata
-
-
-class TNews(AbsTaskClassification):
-    metadata = TaskMetadata(
-        name="TNews",
-        description="Short Text Classification for News",
-        reference="https://www.cluebenchmarks.com/introduce.html",
-        dataset={
-            "path": "C-MTEB/TNews-classification",
-            "revision": "317f262bf1e6126357bbe89e875451e4b0938fe4",
-        },
-        type="Classification",
-        category="s2s",
-        modalities=["text"],
-        eval_splits=["validation"],
-        eval_langs=["cmn-Hans"],
-        main_score="accuracy",
-        date=None,
-        domains=None,
-        task_subtypes=None,
-        license=None,
-        annotations_creators=None,
-        dialect=None,
-        sample_creation=None,
-        bibtex_citation="""@inproceedings {xu-etal-2020-clue,
-    title = "{CLUE}: A {C}hinese Language Understanding Evaluation Benchmark",
-    author = "Xu, Liang and
-      Hu, Hai and
-      Zhang, Xuanwei and
-      Li, Lu and
-      Cao, Chenjie and
-      Li, Yudong and
-      Xu, Yechen and
-      Sun, Kai and
-      Yu, Dian and
-      Yu, Cong and
-      Tian, Yin and
-      Dong, Qianqian and
-      Liu, Weitang and
-      Shi, Bo and
-      Cui, Yiming and
-      Li, Junyi and
-      Zeng, Jun and
-      Wang, Rongzhao and
-      Xie, Weijian and
-      Li, Yanting and
-      Patterson, Yina and
-      Tian, Zuoyu and
-      Zhang, Yiwen and
-      Zhou, He and
-      Liu, Shaoweihua and
-      Zhao, Zhe and
-      Zhao, Qipeng and
-      Yue, Cong and
-      Zhang, Xinrui and
-      Yang, Zhengliang and
-      Richardson, Kyle and
-      Lan, Zhenzhong ",
-    booktitle = "Proceedings of the 28th International Conference on Computational Linguistics",
-    month = dec,
-    year = "2020",
-    address = "Barcelona, Spain (Online)",
-    publisher = "International Committee on Computational Linguistics",
-    url = "https://aclanthology.org/2020.coling-main.419",
-    doi = "10.18653/v1/2020.coling-main.419",
-    pages = "4762--4772",
-}""",
-        descriptive_stats={"n_samples": None, "avg_character_length": None},
-    )
-
-    @property
-    def metadata_dict(self) -> dict[str, str]:
-        metadata_dict = super().metadata_dict
-        metadata_dict["samples_per_label"] = 32
-        return metadata_dict
-
-
-class IFlyTek(AbsTaskClassification):
-    metadata = TaskMetadata(
-        name="IFlyTek",
-        description="Long Text classification for the description of Apps",
-        reference="https://www.cluebenchmarks.com/introduce.html",
-        dataset={
-            "path": "C-MTEB/IFlyTek-classification",
-            "revision": "421605374b29664c5fc098418fe20ada9bd55f8a",
-        },
-        type="Classification",
-        category="s2s",
-        modalities=["text"],
-        eval_splits=["validation"],
-        eval_langs=["cmn-Hans"],
-        main_score="accuracy",
-        date=None,
-        domains=None,
-        task_subtypes=None,
-        license=None,
-        annotations_creators=None,
-        dialect=None,
-        sample_creation=None,
-        bibtex_citation="""@inproceedings {xu-etal-2020-clue,
-    title = "{CLUE}: A {C}hinese Language Understanding Evaluation Benchmark",
-    author = "Xu, Liang and
-      Hu, Hai and
-      Zhang, Xuanwei and
-      Li, Lu and
-      Cao, Chenjie and
-      Li, Yudong and
-      Xu, Yechen and
-      Sun, Kai and
-      Yu, Dian and
-      Yu, Cong and
-      Tian, Yin and
-      Dong, Qianqian and
-      Liu, Weitang and
-      Shi, Bo and
-      Cui, Yiming and
-      Li, Junyi and
-      Zeng, Jun and
-      Wang, Rongzhao and
-      Xie, Weijian and
-      Li, Yanting and
-      Patterson, Yina and
-      Tian, Zuoyu and
-      Zhang, Yiwen and
-      Zhou, He and
-      Liu, Shaoweihua and
-      Zhao, Zhe and
-      Zhao, Qipeng and
-      Yue, Cong and
-      Zhang, Xinrui and
-      Yang, Zhengliang and
-      Richardson, Kyle and
-      Lan, Zhenzhong ",
-    booktitle = "Proceedings of the 28th International Conference on Computational Linguistics",
-    month = dec,
-    year = "2020",
-    address = "Barcelona, Spain (Online)",
-    publisher = "International Committee on Computational Linguistics",
-    url = "https://aclanthology.org/2020.coling-main.419",
-    doi = "10.18653/v1/2020.coling-main.419",
-    pages = "4762--4772",
-    abstract = "The advent of natural language understanding (NLU) benchmarks for English, such as GLUE and SuperGLUE allows new NLU models to be evaluated across a diverse set of tasks. These comprehensive benchmarks have facilitated a broad range of research and applications in natural language processing (NLP). The problem, however, is that most such benchmarks are limited to English, which has made it difficult to replicate many of the successes in English NLU for other languages. To help remedy this issue, we introduce the first large-scale Chinese Language Understanding Evaluation (CLUE) benchmark. CLUE is an open-ended, community-driven project that brings together 9 tasks spanning several well-established single-sentence/sentence-pair classification tasks, as well as machine reading comprehension, all on original Chinese text. To establish results on these tasks, we report scores using an exhaustive set of current state-of-the-art pre-trained Chinese models (9 in total). We also introduce a number of supplementary datasets and additional tools to help facilitate further progress on Chinese NLU. Our benchmark is released at https://www.cluebenchmarks.com",
-}""",
-        descriptive_stats={"n_samples": None, "avg_character_length": None},
-    )
-
-    @property
-    def metadata_dict(self) -> dict[str, str]:
-        metadata_dict = super().metadata_dict
-        metadata_dict["samples_per_label"] = 32
-        metadata_dict["n_experiments"] = 5
-        return metadata_dict
-
-
-class MultilingualSentiment(AbsTaskClassification):
-    metadata = TaskMetadata(
-        name="MultilingualSentiment",
-        description="A collection of multilingual sentiments datasets grouped into 3 classes -- positive, neutral, negative",
-        reference="https://github.com/tyqiangz/multilingual-sentiment-datasets",
-        dataset={
-            "path": "C-MTEB/MultilingualSentiment-classification",
-            "revision": "46958b007a63fdbf239b7672c25d0bea67b5ea1a",
-        },
-        type="Classification",
-        category="s2s",
-        modalities=["text"],
-        eval_splits=["validation", "test"],
-        eval_langs=["cmn-Hans"],
-        main_score="accuracy",
-        date=None,
-        domains=None,
-        task_subtypes=None,
-        license=None,
-        annotations_creators=None,
-        dialect=None,
-        sample_creation=None,
-        bibtex_citation=None,
-        descriptive_stats={"n_samples": None, "avg_character_length": None},
-    )
-
-    @property
-    def metadata_dict(self) -> dict[str, str]:
-        metadata_dict = super().metadata_dict
-        metadata_dict["samples_per_label"] = 32
-        return metadata_dict
-
-
-class JDReview(AbsTaskClassification):
-    metadata = TaskMetadata(
-        name="JDReview",
-        description="review for iphone",
-        reference="https://aclanthology.org/2023.nodalida-1.20/",
-        dataset={
-            "path": "C-MTEB/JDReview-classification",
-            "revision": "b7c64bd89eb87f8ded463478346f76731f07bf8b",
-        },
-        type="Classification",
-        category="s2s",
-        modalities=["text"],
-        eval_splits=["test"],
-        eval_langs=["cmn-Hans"],
-        main_score="accuracy",
-        date=None,
-        domains=None,
-        task_subtypes=None,
-        license=None,
-        annotations_creators=None,
-        dialect=None,
-        sample_creation=None,
-        bibtex_citation="""@article{xiao2023c,
-  title={C-pack: Packaged resources to advance general chinese embedding},
-  author={Xiao, Shitao and Liu, Zheng and Zhang, Peitian and Muennighof, Niklas},
-  journal={arXiv preprint arXiv:2309.07597},
-  year={2023}
-}""",
-        descriptive_stats={"n_samples": None, "avg_character_length": None},
-    )
-
-    @property
-    def metadata_dict(self) -> dict[str, str]:
-        metadata_dict = super().metadata_dict
-        metadata_dict["samples_per_label"] = 32
-        return metadata_dict
-
-
-class OnlineShopping(AbsTaskClassification):
-    metadata = TaskMetadata(
-        name="OnlineShopping",
-        description="Sentiment Analysis of User Reviews on Online Shopping Websites",
-        reference="https://aclanthology.org/2023.nodalida-1.20/",
-        dataset={
-            "path": "C-MTEB/OnlineShopping-classification",
-            "revision": "e610f2ebd179a8fda30ae534c3878750a96db120",
-        },
-        type="Classification",
-        category="s2s",
-        modalities=["text"],
-        eval_splits=["test"],
-        eval_langs=["cmn-Hans"],
-        main_score="accuracy",
-        date=None,
-        domains=None,
-        task_subtypes=None,
-        license=None,
-        annotations_creators=None,
-        dialect=None,
-        sample_creation=None,
-        bibtex_citation="""@article{xiao2023c,
-  title={C-pack: Packaged resources to advance general chinese embedding},
-  author={Xiao, Shitao and Liu, Zheng and Zhang, Peitian and Muennighof, Niklas},
-  journal={arXiv preprint arXiv:2309.07597},
-  year={2023}
-}""",
-        descriptive_stats={"n_samples": None, "avg_character_length": None},
-    )
-
-    @property
-    def metadata_dict(self) -> dict[str, str]:
-        metadata_dict = super().metadata_dict
-        metadata_dict["samples_per_label"] = 32
-        return metadata_dict
-
-
-class Waimai(AbsTaskClassification):
-    metadata = TaskMetadata(
-        name="Waimai",
-        description="Sentiment Analysis of user reviews on takeaway platforms",
-        reference="https://aclanthology.org/2023.nodalida-1.20/",
-        dataset={
-            "path": "C-MTEB/waimai-classification",
-            "revision": "339287def212450dcaa9df8c22bf93e9980c7023",
-        },
-        type="Classification",
-        category="s2s",
-        modalities=["text"],
-        eval_splits=["test"],
-        eval_langs=["cmn-Hans"],
-        main_score="accuracy",
-        date=None,
-        domains=None,
-        task_subtypes=None,
-        license=None,
-        annotations_creators=None,
-        dialect=None,
-        sample_creation=None,
-        bibtex_citation="""@article{xiao2023c,
-  title={C-pack: Packaged resources to advance general chinese embedding},
-  author={Xiao, Shitao and Liu, Zheng and Zhang, Peitian and Muennighof, Niklas},
-  journal={arXiv preprint arXiv:2309.07597},
-  year={2023}
-}""",
-        descriptive_stats={"n_samples": None, "avg_character_length": None},
-    )
-
-    @property
-    def metadata_dict(self) -> dict[str, str]:
-        metadata_dict = super().metadata_dict
-        metadata_dict["samples_per_label"] = 32
-
-        return metadata_dict