evalscope 0.5.4__py3-none-any.whl → 0.5.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of evalscope might be problematic.
- evalscope/backend/__init__.py +0 -3
- evalscope/backend/opencompass/tasks/eval_datasets.py +2 -2
- evalscope/backend/rag_eval/__init__.py +3 -0
- evalscope/backend/rag_eval/backend_manager.py +68 -0
- evalscope/backend/rag_eval/cmteb/__init__.py +4 -0
- evalscope/backend/rag_eval/cmteb/arguments.py +59 -0
- evalscope/backend/rag_eval/cmteb/base.py +89 -0
- evalscope/backend/rag_eval/cmteb/task_template.py +83 -0
- evalscope/backend/rag_eval/cmteb/tasks/Classification.py +302 -0
- evalscope/backend/rag_eval/cmteb/tasks/Clustering.py +252 -0
- evalscope/backend/rag_eval/cmteb/tasks/PairClassification.py +113 -0
- evalscope/backend/rag_eval/cmteb/tasks/Reranking.py +153 -0
- evalscope/backend/rag_eval/cmteb/tasks/Retrieval.py +345 -0
- evalscope/backend/rag_eval/cmteb/tasks/STS.py +302 -0
- evalscope/backend/rag_eval/cmteb/tasks/__init__.py +64 -0
- evalscope/backend/rag_eval/ragas/__init__.py +2 -0
- evalscope/backend/rag_eval/ragas/arguments.py +37 -0
- evalscope/backend/rag_eval/ragas/task_template.py +117 -0
- evalscope/backend/vlm_eval_kit/backend_manager.py +0 -1
- evalscope/backend/vlm_eval_kit/custom_dataset.py +1 -1
- evalscope/evaluator/evaluator.py +1 -0
- evalscope/metrics/bundled_rouge_score/rouge_scorer.py +19 -0
- evalscope/run.py +4 -0
- evalscope/utils/logger.py +44 -14
- evalscope/utils/task_utils.py +3 -0
- evalscope/version.py +2 -2
- {evalscope-0.5.4.dist-info → evalscope-0.5.5.dist-info}/METADATA +26 -32
- {evalscope-0.5.4.dist-info → evalscope-0.5.5.dist-info}/RECORD +31 -15
- {evalscope-0.5.4.dist-info → evalscope-0.5.5.dist-info}/WHEEL +0 -0
- {evalscope-0.5.4.dist-info → evalscope-0.5.5.dist-info}/entry_points.txt +0 -0
- {evalscope-0.5.4.dist-info → evalscope-0.5.5.dist-info}/top_level.txt +0 -0
evalscope/backend/__init__.py CHANGED

evalscope/backend/opencompass/tasks/eval_datasets.py CHANGED

@@ -7,7 +7,7 @@ with read_base():
     from opencompass.configs.datasets.agieval.agieval_gen_64afd3 import agieval_datasets
     from opencompass.configs.datasets.GaokaoBench.GaokaoBench_gen_5cfe9e import GaokaoBench_datasets
     from opencompass.configs.datasets.humaneval.humaneval_gen_8e312c import humaneval_datasets
-    from opencompass.configs.datasets.mbpp.
+    from opencompass.configs.datasets.mbpp.mbpp_gen_830460 import mbpp_datasets
     from opencompass.configs.datasets.CLUE_C3.CLUE_C3_gen_8c358f import C3_datasets
     from opencompass.configs.datasets.CLUE_CMRC.CLUE_CMRC_gen_1bd3c8 import CMRC_datasets
     from opencompass.configs.datasets.CLUE_DRCD.CLUE_DRCD_gen_1bd3c8 import DRCD_datasets
@@ -45,7 +45,7 @@ with read_base():
     from opencompass.configs.datasets.piqa.piqa_gen_1194eb import piqa_datasets
     from opencompass.configs.datasets.siqa.siqa_gen_e78df3 import siqa_datasets
     from opencompass.configs.datasets.strategyqa.strategyqa_gen_1180a7 import strategyqa_datasets
-    from opencompass.configs.datasets.winogrande.
+    from opencompass.configs.datasets.winogrande.winogrande_gen_458220 import winogrande_datasets
     from opencompass.configs.datasets.obqa.obqa_gen_9069e4 import obqa_datasets
     from opencompass.configs.datasets.nq.nq_gen_c788f6 import nq_datasets
     from opencompass.configs.datasets.triviaqa.triviaqa_gen_2121ce import triviaqa_datasets

evalscope/backend/rag_eval/backend_manager.py ADDED

@@ -0,0 +1,68 @@
+import os
+from typing import Optional, Union
+from evalscope.utils import is_module_installed, get_valid_list
+from evalscope.backend.base import BackendManager
+from evalscope.utils.logger import get_logger
+
+
+logger = get_logger()
+
+
+class RAGEvalBackendManager(BackendManager):
+    def __init__(self, config: Union[str, dict], **kwargs):
+        """BackendManager for VLM Evaluation Kit
+
+        Args:
+            config (Union[str, dict]): the configuration yaml-file or the configuration dictionary
+        """
+        super().__init__(config, **kwargs)
+
+    @staticmethod
+    def _check_env(module_name: str):
+        if is_module_installed(module_name):
+            logger.info(f"Check `{module_name}` Installed")
+        else:
+            logger.error(f"Please install `{module_name}` first")
+
+    def run_mteb(self):
+        from evalscope.backend.rag_eval.cmteb import ModelArguments, EvalArguments
+        from evalscope.backend.rag_eval.cmteb import one_stage_eval, two_stage_eval
+
+        if len(self.model_args) > 2:
+            raise ValueError("Not support multiple models yet")
+
+        # Convert arguments to dictionary
+        model_args_list = [ModelArguments(**args).to_dict() for args in self.model_args]
+        eval_args = EvalArguments(**self.eval_args).to_dict()
+
+        if len(model_args_list) == 1:
+            one_stage_eval(model_args_list[0], eval_args)
+        else: # len(model_args_list) == 2
+            two_stage_eval(model_args_list[0], model_args_list[1], eval_args)
+
+    def run_ragas(self):
+        from evalscope.backend.rag_eval.ragas import rag_eval, testset_generation
+        from evalscope.backend.rag_eval.ragas import (
+            TestsetGenerationArguments,
+            EvaluationArguments,
+        )
+
+        if self.testset_args is not None:
+            testset_generation(TestsetGenerationArguments(**self.testset_args))
+        if self.eval_args is not None:
+            rag_eval(EvaluationArguments(**self.eval_args))
+
+    def run(self, *args, **kwargs):
+        tool = self.config_d.pop("tool")
+        if tool.lower() == "mteb":
+            self._check_env("mteb")
+            self.model_args = self.config_d["model"]
+            self.eval_args = self.config_d["eval"]
+            self.run_mteb()
+        elif tool.lower() == "ragas":
+            self._check_env("ragas")
+            self.testset_args = self.config_d.get("testset_generation", None)
+            self.eval_args = self.config_d.get("eval", None)
+            self.run_ragas()
+        else:
+            raise ValueError(f"Unknown tool: {tool}")

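For orientation, here is a minimal usage sketch for the new manager. The keys mirror what `run()` reads above (`tool`, `model`, `eval`, and optionally `testset_generation` for ragas); the model path is a placeholder, and it assumes the base `BackendManager` exposes the parsed configuration as `config_d`, as `run()` expects.

```python
# Minimal sketch, not part of this diff. Assumes BackendManager parses the dict
# into self.config_d; the model path below is a placeholder.
from evalscope.backend.rag_eval.backend_manager import RAGEvalBackendManager

config = {
    "tool": "MTEB",  # or "RAGAS"
    "model": [
        # one entry -> one_stage_eval, two entries -> two_stage_eval
        {"model_name_or_path": "/path/to/embedding-model"},
    ],
    "eval": {
        "tasks": ["TNews"],
        "output_folder": "outputs",
        "limits": 100,
    },
}

RAGEvalBackendManager(config=config).run()
```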
evalscope/backend/rag_eval/cmteb/__init__.py ADDED

@@ -0,0 +1,4 @@
+from evalscope.backend.rag_eval.cmteb.tasks import *
+from evalscope.backend.rag_eval.cmteb.base import *
+from evalscope.backend.rag_eval.cmteb.arguments import ModelArguments, EvalArguments
+from evalscope.backend.rag_eval.cmteb.task_template import one_stage_eval, two_stage_eval

evalscope/backend/rag_eval/cmteb/arguments.py ADDED

@@ -0,0 +1,59 @@
+from dataclasses import dataclass, field
+from typing import List, Optional, Union, Dict, Any
+
+
+@dataclass
+class ModelArguments:
+    # Arguments for embeding model: sentence transformer or cross encoder
+    model_name_or_path: str = ""  # model name or path
+    is_cross_encoder: bool = False  # whether the model is a cross encoder
+    # pooling mode: Either “cls”, “lasttoken”, “max”, “mean”, “mean_sqrt_len_tokens”, or “weightedmean”.
+    pooling_mode: Optional[str] = None
+    max_seq_length: int = 512  # max sequence length
+    # prompt for llm based model
+    prompt: str = ""
+    # model kwargs
+    model_kwargs: dict = field(default_factory=lambda: {"torch_dtype": "auto"})
+    # config kwargs
+    config_kwargs: Dict[str, Any] = field(default_factory=dict)
+    # encode kwargs
+    encode_kwargs: dict = field(
+        default_factory=lambda: {"show_progress_bar": True, "batch_size": 32}
+    )
+    hub: str = "modelscope"  # modelscope or huggingface
+
+    def to_dict(self) -> Dict[str, Any]:
+        return {
+            "model_name_or_path": self.model_name_or_path,
+            "is_cross_encoder": self.is_cross_encoder,
+            "pooling_mode": self.pooling_mode,
+            "max_seq_length": self.max_seq_length,
+            "prompt": self.prompt,
+            "model_kwargs": self.model_kwargs,
+            "config_kwargs": self.config_kwargs,
+            "encode_kwargs": self.encode_kwargs,
+            "hub": self.hub,
+        }
+
+
+@dataclass
+class EvalArguments:
+    # Evaluation
+    tasks: List[str] = field(default_factory=list)  # task names
+    verbosity: int = 2  # verbosity level 0-3
+    output_folder: str = "outputs"  # output folder
+    overwrite_results: bool = True  # overwrite results
+    limits: Optional[int] = None  # limit number of samples
+    hub: str = "modelscope"  # modelscope or huggingface
+    top_k: int = 5
+
+    def to_dict(self) -> Dict[str, Any]:
+        return {
+            "tasks": self.tasks,
+            "verbosity": self.verbosity,
+            "output_folder": self.output_folder,
+            "overwrite_results": self.overwrite_results,
+            "limits": self.limits,
+            "hub": self.hub,
+            "top_k": 5,
+        }

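As a quick illustration of how these dataclasses are consumed (the model path below is a placeholder, not from this diff):

```python
# Illustrative only: build the argument dicts handed to the evaluation layer.
from evalscope.backend.rag_eval.cmteb.arguments import ModelArguments, EvalArguments

margs = ModelArguments(model_name_or_path="/path/to/embedding-model")
print(margs.to_dict()["encode_kwargs"])  # {'show_progress_bar': True, 'batch_size': 32}

eargs = EvalArguments(tasks=["TNews", "Waimai"], limits=100)
print(eargs.to_dict()["top_k"])  # 5 (note: to_dict() hard-codes 5 rather than self.top_k)
```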
evalscope/backend/rag_eval/cmteb/base.py ADDED

@@ -0,0 +1,89 @@
+from collections import defaultdict
+from typing import List
+from mteb import AbsTask
+from datasets import DatasetDict
+from modelscope import MsDataset
+import datasets
+from evalscope.backend.rag_eval.cmteb.tasks import CLS_DICT, CLS_RETRIEVAL
+
+__all__ = ["TaskBase"]
+
+
+
+class TaskBase:
+
+    @staticmethod
+    def get_tasks(task_names, **kwargs) -> List[AbsTask]:
+
+        return [TaskBase.get_task(task_name, **kwargs) for task_name in task_names]
+
+    @staticmethod
+    def get_task(task_name, **kwargs) -> AbsTask:
+
+        if task_name not in CLS_DICT:
+            from mteb.overview import TASKS_REGISTRY
+
+            task_cls = TASKS_REGISTRY[task_name]
+            if task_cls.metadata.type != "Retrieval":
+                task_cls.load_data = load_data
+        else:
+            task_cls = CLS_DICT[task_name]
+            task_cls.load_data = load_data
+        # init task instance
+        task_instance = task_cls()
+        return task_instance
+
+
+def load_data(self, **kwargs):
+    """Load dataset from the hub, compatible with ModelScope and Hugging Face."""
+    if self.data_loaded:
+        return
+
+    limits = kwargs.get("limits", None)
+    hub = kwargs.get("hub", "modelscope")
+    name = self.metadata_dict.get("name")
+    path = self.metadata_dict["dataset"].get("path")
+
+    assert path is not None, "Path must be specified in dataset"
+
+    # Loading the dataset based on the source hub
+    if hub == "modelscope":
+        import re
+
+        path = re.sub(r"^mteb/", "MTEB/", path)
+        dataset = MsDataset.load(path)
+    else:
+        dataset = datasets.load_dataset(**self.metadata_dict["dataset"])  # type: ignore
+
+    if limits is not None:
+        dataset = {
+            split: dataset[split].select(range(min(limits, len(dataset[split]))))
+            for split in dataset.keys()
+        }
+
+    if name in CLS_RETRIEVAL:
+        self.corpus, self.queries, self.relevant_docs = load_retrieval_data(
+            dataset,
+            path,
+            self.metadata_dict["eval_splits"],
+        )
+
+    self.dataset = dataset
+    self.dataset_transform()
+    self.data_loaded = True
+
+
+def load_retrieval_data(dataset, dataset_name: str, eval_splits: list) -> tuple:
+    eval_split = eval_splits[0]
+    qrels = MsDataset.load(dataset_name + "-qrels")[eval_split]
+
+    corpus = {e["id"]: {"text": e["text"]} for e in dataset["corpus"]}
+    queries = {e["id"]: e["text"] for e in dataset["queries"]}
+    relevant_docs = defaultdict(dict)
+    for e in qrels:
+        relevant_docs[e["qid"]][e["pid"]] = e["score"]
+
+    corpus = DatasetDict({eval_split: corpus})
+    queries = DatasetDict({eval_split: queries})
+    relevant_docs = DatasetDict({eval_split: relevant_docs})
+    return corpus, queries, relevant_docs

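A short, illustrative sketch of the resolver above; the task names come from the Classification tasks added later in this diff, and it assumes they are registered in `CLS_DICT` by `tasks/__init__.py` (not shown here):

```python
# Illustrative only: resolve C-MTEB task classes by name. Names found in
# CLS_DICT use the classes defined in this package; anything else falls back to
# mteb's TASKS_REGISTRY, with the ModelScope-aware load_data patched onto it.
from evalscope.backend.rag_eval.cmteb.base import TaskBase

tasks = TaskBase.get_tasks(["TNews", "Waimai"])
for task in tasks:
    print(type(task).__name__, task.metadata.type)  # e.g. TNews Classification
```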
evalscope/backend/rag_eval/cmteb/task_template.py ADDED

@@ -0,0 +1,83 @@
+import os
+import mteb
+from evalscope.backend.rag_eval import EmbeddingModel
+from evalscope.backend.rag_eval import cmteb
+from mteb.task_selection import results_to_dataframe
+from evalscope.utils.logger import get_logger
+
+logger = get_logger()
+
+
+def show_results(output_folder, model, results):
+    model_name = model.mteb_model_meta.model_name_as_path()
+    revision = model.mteb_model_meta.revision
+
+    results_df = results_to_dataframe({model_name: {revision: results}})
+
+    save_path = os.path.join(
+        output_folder,
+        model_name,
+        revision,
+    )
+    logger.info(f"Evaluation results:\n{results_df.to_markdown()}")
+    logger.info(f"Evaluation results saved in {os.path.abspath(save_path)}")
+
+
+def one_stage_eval(
+    model_args,
+    eval_args,
+) -> None:
+    # load model
+    model = EmbeddingModel.load(**model_args)
+
+    # load task first to update instructions
+    tasks = cmteb.TaskBase.get_tasks(task_names=eval_args["tasks"])
+    evaluation = mteb.MTEB(tasks=tasks)
+
+    # run evaluation
+    results = evaluation.run(model, **eval_args)
+
+    # save and log results
+    show_results(eval_args["output_folder"], model, results)
+
+
+def two_stage_eval(
+    model1_args,
+    model2_args,
+    eval_args,
+) -> None:
+    """a two-stage run with the second stage reading results saved from the first stage."""
+    # load model
+    dual_encoder = EmbeddingModel.load(**model1_args)
+    cross_encoder = EmbeddingModel.load(**model2_args)
+
+    first_stage_path = f"{eval_args['output_folder']}/stage1"
+    second_stage_path = f"{eval_args['output_folder']}/stage2"
+
+    tasks = cmteb.TaskBase.get_tasks(task_names=eval_args["tasks"])
+    for task in tasks:
+        evaluation = mteb.MTEB(tasks=[task])
+
+        # stage 1: run dual encoder
+        evaluation.run(
+            dual_encoder,
+            save_predictions=True,
+            output_folder=first_stage_path,
+            overwrite_results=True,
+            hub=eval_args["hub"],
+            limits=eval_args["limits"],
+        )
+        # stage 2: run cross encoder
+        results = evaluation.run(
+            cross_encoder,
+            top_k=eval_args["top_k"],
+            save_predictions=True,
+            output_folder=second_stage_path,
+            previous_results=f"{first_stage_path}/{task.metadata.name}_default_predictions.json",
+            overwrite_results=True,
+            hub=eval_args["hub"],
+            limits=eval_args["limits"],
+        )
+
+        # save and log results
+        show_results(second_stage_path, cross_encoder, results)

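Putting the pieces together, a single-stage run might be wired like this (the model identifier is a placeholder; the call pattern mirrors `run_mteb` in the backend manager above):

```python
# Sketch of a one-stage CMTEB run using the dataclasses and template above.
from evalscope.backend.rag_eval.cmteb import ModelArguments, EvalArguments, one_stage_eval

model_args = ModelArguments(model_name_or_path="/path/to/embedding-model").to_dict()
eval_args = EvalArguments(tasks=["TNews"], output_folder="outputs", limits=100).to_dict()

one_stage_eval(model_args, eval_args)
```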
evalscope/backend/rag_eval/cmteb/tasks/Classification.py ADDED

@@ -0,0 +1,302 @@
+from mteb.abstasks.AbsTaskClassification import AbsTaskClassification
+from mteb.abstasks.TaskMetadata import TaskMetadata
+
+
+class TNews(AbsTaskClassification):
+    metadata = TaskMetadata(
+        name="TNews",
+        description="Short Text Classification for News",
+        reference="https://www.cluebenchmarks.com/introduce.html",
+        dataset={
+            "path": "C-MTEB/TNews-classification",
+            "revision": "317f262bf1e6126357bbe89e875451e4b0938fe4",
+        },
+        type="Classification",
+        category="s2s",
+        modalities=["text"],
+        eval_splits=["validation"],
+        eval_langs=["cmn-Hans"],
+        main_score="accuracy",
+        date=None,
+        domains=None,
+        task_subtypes=None,
+        license=None,
+        annotations_creators=None,
+        dialect=None,
+        sample_creation=None,
+        bibtex_citation="""@inproceedings {xu-etal-2020-clue,
+        title = "{CLUE}: A {C}hinese Language Understanding Evaluation Benchmark",
+        author = "Xu, Liang and
+          Hu, Hai and
+          Zhang, Xuanwei and
+          Li, Lu and
+          Cao, Chenjie and
+          Li, Yudong and
+          Xu, Yechen and
+          Sun, Kai and
+          Yu, Dian and
+          Yu, Cong and
+          Tian, Yin and
+          Dong, Qianqian and
+          Liu, Weitang and
+          Shi, Bo and
+          Cui, Yiming and
+          Li, Junyi and
+          Zeng, Jun and
+          Wang, Rongzhao and
+          Xie, Weijian and
+          Li, Yanting and
+          Patterson, Yina and
+          Tian, Zuoyu and
+          Zhang, Yiwen and
+          Zhou, He and
+          Liu, Shaoweihua and
+          Zhao, Zhe and
+          Zhao, Qipeng and
+          Yue, Cong and
+          Zhang, Xinrui and
+          Yang, Zhengliang and
+          Richardson, Kyle and
+          Lan, Zhenzhong ",
+        booktitle = "Proceedings of the 28th International Conference on Computational Linguistics",
+        month = dec,
+        year = "2020",
+        address = "Barcelona, Spain (Online)",
+        publisher = "International Committee on Computational Linguistics",
+        url = "https://aclanthology.org/2020.coling-main.419",
+        doi = "10.18653/v1/2020.coling-main.419",
+        pages = "4762--4772",
+        }""",
+        descriptive_stats={"n_samples": None, "avg_character_length": None},
+    )
+
+    @property
+    def metadata_dict(self) -> dict[str, str]:
+        metadata_dict = super().metadata_dict
+        metadata_dict["samples_per_label"] = 32
+        return metadata_dict
+
+
+class IFlyTek(AbsTaskClassification):
+    metadata = TaskMetadata(
+        name="IFlyTek",
+        description="Long Text classification for the description of Apps",
+        reference="https://www.cluebenchmarks.com/introduce.html",
+        dataset={
+            "path": "C-MTEB/IFlyTek-classification",
+            "revision": "421605374b29664c5fc098418fe20ada9bd55f8a",
+        },
+        type="Classification",
+        category="s2s",
+        modalities=["text"],
+        eval_splits=["validation"],
+        eval_langs=["cmn-Hans"],
+        main_score="accuracy",
+        date=None,
+        domains=None,
+        task_subtypes=None,
+        license=None,
+        annotations_creators=None,
+        dialect=None,
+        sample_creation=None,
+        bibtex_citation="""@inproceedings {xu-etal-2020-clue,
+        title = "{CLUE}: A {C}hinese Language Understanding Evaluation Benchmark",
+        author = "Xu, Liang and
+          Hu, Hai and
+          Zhang, Xuanwei and
+          Li, Lu and
+          Cao, Chenjie and
+          Li, Yudong and
+          Xu, Yechen and
+          Sun, Kai and
+          Yu, Dian and
+          Yu, Cong and
+          Tian, Yin and
+          Dong, Qianqian and
+          Liu, Weitang and
+          Shi, Bo and
+          Cui, Yiming and
+          Li, Junyi and
+          Zeng, Jun and
+          Wang, Rongzhao and
+          Xie, Weijian and
+          Li, Yanting and
+          Patterson, Yina and
+          Tian, Zuoyu and
+          Zhang, Yiwen and
+          Zhou, He and
+          Liu, Shaoweihua and
+          Zhao, Zhe and
+          Zhao, Qipeng and
+          Yue, Cong and
+          Zhang, Xinrui and
+          Yang, Zhengliang and
+          Richardson, Kyle and
+          Lan, Zhenzhong ",
+        booktitle = "Proceedings of the 28th International Conference on Computational Linguistics",
+        month = dec,
+        year = "2020",
+        address = "Barcelona, Spain (Online)",
+        publisher = "International Committee on Computational Linguistics",
+        url = "https://aclanthology.org/2020.coling-main.419",
+        doi = "10.18653/v1/2020.coling-main.419",
+        pages = "4762--4772",
+        abstract = "The advent of natural language understanding (NLU) benchmarks for English, such as GLUE and SuperGLUE allows new NLU models to be evaluated across a diverse set of tasks. These comprehensive benchmarks have facilitated a broad range of research and applications in natural language processing (NLP). The problem, however, is that most such benchmarks are limited to English, which has made it difficult to replicate many of the successes in English NLU for other languages. To help remedy this issue, we introduce the first large-scale Chinese Language Understanding Evaluation (CLUE) benchmark. CLUE is an open-ended, community-driven project that brings together 9 tasks spanning several well-established single-sentence/sentence-pair classification tasks, as well as machine reading comprehension, all on original Chinese text. To establish results on these tasks, we report scores using an exhaustive set of current state-of-the-art pre-trained Chinese models (9 in total). We also introduce a number of supplementary datasets and additional tools to help facilitate further progress on Chinese NLU. Our benchmark is released at https://www.cluebenchmarks.com",
+        }""",
+        descriptive_stats={"n_samples": None, "avg_character_length": None},
+    )
+
+    @property
+    def metadata_dict(self) -> dict[str, str]:
+        metadata_dict = super().metadata_dict
+        metadata_dict["samples_per_label"] = 32
+        metadata_dict["n_experiments"] = 5
+        return metadata_dict
+
+
+class MultilingualSentiment(AbsTaskClassification):
+    metadata = TaskMetadata(
+        name="MultilingualSentiment",
+        description="A collection of multilingual sentiments datasets grouped into 3 classes -- positive, neutral, negative",
+        reference="https://github.com/tyqiangz/multilingual-sentiment-datasets",
+        dataset={
+            "path": "C-MTEB/MultilingualSentiment-classification",
+            "revision": "46958b007a63fdbf239b7672c25d0bea67b5ea1a",
+        },
+        type="Classification",
+        category="s2s",
+        modalities=["text"],
+        eval_splits=["validation", "test"],
+        eval_langs=["cmn-Hans"],
+        main_score="accuracy",
+        date=None,
+        domains=None,
+        task_subtypes=None,
+        license=None,
+        annotations_creators=None,
+        dialect=None,
+        sample_creation=None,
+        bibtex_citation=None,
+        descriptive_stats={"n_samples": None, "avg_character_length": None},
+    )
+
+    @property
+    def metadata_dict(self) -> dict[str, str]:
+        metadata_dict = super().metadata_dict
+        metadata_dict["samples_per_label"] = 32
+        return metadata_dict
+
+
+class JDReview(AbsTaskClassification):
+    metadata = TaskMetadata(
+        name="JDReview",
+        description="review for iphone",
+        reference="https://aclanthology.org/2023.nodalida-1.20/",
+        dataset={
+            "path": "C-MTEB/JDReview-classification",
+            "revision": "b7c64bd89eb87f8ded463478346f76731f07bf8b",
+        },
+        type="Classification",
+        category="s2s",
+        modalities=["text"],
+        eval_splits=["test"],
+        eval_langs=["cmn-Hans"],
+        main_score="accuracy",
+        date=None,
+        domains=None,
+        task_subtypes=None,
+        license=None,
+        annotations_creators=None,
+        dialect=None,
+        sample_creation=None,
+        bibtex_citation="""@article{xiao2023c,
+        title={C-pack: Packaged resources to advance general chinese embedding},
+        author={Xiao, Shitao and Liu, Zheng and Zhang, Peitian and Muennighof, Niklas},
+        journal={arXiv preprint arXiv:2309.07597},
+        year={2023}
+        }""",
+        descriptive_stats={"n_samples": None, "avg_character_length": None},
+    )
+
+    @property
+    def metadata_dict(self) -> dict[str, str]:
+        metadata_dict = super().metadata_dict
+        metadata_dict["samples_per_label"] = 32
+        return metadata_dict
+
+
+class OnlineShopping(AbsTaskClassification):
+    metadata = TaskMetadata(
+        name="OnlineShopping",
+        description="Sentiment Analysis of User Reviews on Online Shopping Websites",
+        reference="https://aclanthology.org/2023.nodalida-1.20/",
+        dataset={
+            "path": "C-MTEB/OnlineShopping-classification",
+            "revision": "e610f2ebd179a8fda30ae534c3878750a96db120",
+        },
+        type="Classification",
+        category="s2s",
+        modalities=["text"],
+        eval_splits=["test"],
+        eval_langs=["cmn-Hans"],
+        main_score="accuracy",
+        date=None,
+        domains=None,
+        task_subtypes=None,
+        license=None,
+        annotations_creators=None,
+        dialect=None,
+        sample_creation=None,
+        bibtex_citation="""@article{xiao2023c,
+        title={C-pack: Packaged resources to advance general chinese embedding},
+        author={Xiao, Shitao and Liu, Zheng and Zhang, Peitian and Muennighof, Niklas},
+        journal={arXiv preprint arXiv:2309.07597},
+        year={2023}
+        }""",
+        descriptive_stats={"n_samples": None, "avg_character_length": None},
+    )
+
+    @property
+    def metadata_dict(self) -> dict[str, str]:
+        metadata_dict = super().metadata_dict
+        metadata_dict["samples_per_label"] = 32
+        return metadata_dict
+
+
+class Waimai(AbsTaskClassification):
+    metadata = TaskMetadata(
+        name="Waimai",
+        description="Sentiment Analysis of user reviews on takeaway platforms",
+        reference="https://aclanthology.org/2023.nodalida-1.20/",
+        dataset={
+            "path": "C-MTEB/waimai-classification",
+            "revision": "339287def212450dcaa9df8c22bf93e9980c7023",
+        },
+        type="Classification",
+        category="s2s",
+        modalities=["text"],
+        eval_splits=["test"],
+        eval_langs=["cmn-Hans"],
+        main_score="accuracy",
+        date=None,
+        domains=None,
+        task_subtypes=None,
+        license=None,
+        annotations_creators=None,
+        dialect=None,
+        sample_creation=None,
+        bibtex_citation="""@article{xiao2023c,
+        title={C-pack: Packaged resources to advance general chinese embedding},
+        author={Xiao, Shitao and Liu, Zheng and Zhang, Peitian and Muennighof, Niklas},
+        journal={arXiv preprint arXiv:2309.07597},
+        year={2023}
+        }""",
+        descriptive_stats={"n_samples": None, "avg_character_length": None},
+    )
+
+    @property
+    def metadata_dict(self) -> dict[str, str]:
+        metadata_dict = super().metadata_dict
+        metadata_dict["samples_per_label"] = 32
+
+        return metadata_dict