evalscope-0.5.0-py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (165)
  1. evalscope/__init__.py +3 -0
  2. evalscope/backend/__init__.py +3 -0
  3. evalscope/backend/base.py +27 -0
  4. evalscope/backend/opencompass/__init__.py +3 -0
  5. evalscope/backend/opencompass/api_meta_template.py +64 -0
  6. evalscope/backend/opencompass/backend_manager.py +247 -0
  7. evalscope/backend/opencompass/tasks/__init__.py +1 -0
  8. evalscope/backend/opencompass/tasks/eval_api.py +30 -0
  9. evalscope/backend/opencompass/tasks/eval_datasets.py +71 -0
  10. evalscope/backend/vlm_eval_kit/__init__.py +1 -0
  11. evalscope/backend/vlm_eval_kit/backend_manager.py +153 -0
  12. evalscope/benchmarks/__init__.py +4 -0
  13. evalscope/benchmarks/arc/__init__.py +5 -0
  14. evalscope/benchmarks/arc/ai2_arc.py +148 -0
  15. evalscope/benchmarks/arc/arc_adapter.py +231 -0
  16. evalscope/benchmarks/bbh/__init__.py +6 -0
  17. evalscope/benchmarks/bbh/bbh_adapter.py +308 -0
  18. evalscope/benchmarks/bbh/cot_prompts/boolean_expressions.txt +23 -0
  19. evalscope/benchmarks/bbh/cot_prompts/causal_judgement.txt +25 -0
  20. evalscope/benchmarks/bbh/cot_prompts/date_understanding.txt +33 -0
  21. evalscope/benchmarks/bbh/cot_prompts/disambiguation_qa.txt +37 -0
  22. evalscope/benchmarks/bbh/cot_prompts/dyck_languages.txt +72 -0
  23. evalscope/benchmarks/bbh/cot_prompts/formal_fallacies.txt +44 -0
  24. evalscope/benchmarks/bbh/cot_prompts/geometric_shapes.txt +78 -0
  25. evalscope/benchmarks/bbh/cot_prompts/hyperbaton.txt +28 -0
  26. evalscope/benchmarks/bbh/cot_prompts/logical_deduction_five_objects.txt +37 -0
  27. evalscope/benchmarks/bbh/cot_prompts/logical_deduction_seven_objects.txt +37 -0
  28. evalscope/benchmarks/bbh/cot_prompts/logical_deduction_three_objects.txt +37 -0
  29. evalscope/benchmarks/bbh/cot_prompts/movie_recommendation.txt +42 -0
  30. evalscope/benchmarks/bbh/cot_prompts/multistep_arithmetic_two.txt +25 -0
  31. evalscope/benchmarks/bbh/cot_prompts/navigate.txt +43 -0
  32. evalscope/benchmarks/bbh/cot_prompts/object_counting.txt +37 -0
  33. evalscope/benchmarks/bbh/cot_prompts/penguins_in_a_table.txt +41 -0
  34. evalscope/benchmarks/bbh/cot_prompts/reasoning_about_colored_objects.txt +63 -0
  35. evalscope/benchmarks/bbh/cot_prompts/ruin_names.txt +44 -0
  36. evalscope/benchmarks/bbh/cot_prompts/salient_translation_error_detection.txt +40 -0
  37. evalscope/benchmarks/bbh/cot_prompts/snarks.txt +30 -0
  38. evalscope/benchmarks/bbh/cot_prompts/sports_understanding.txt +10 -0
  39. evalscope/benchmarks/bbh/cot_prompts/temporal_sequences.txt +77 -0
  40. evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_five_objects.txt +40 -0
  41. evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_seven_objects.txt +40 -0
  42. evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_three_objects.txt +40 -0
  43. evalscope/benchmarks/bbh/cot_prompts/web_of_lies.txt +28 -0
  44. evalscope/benchmarks/bbh/cot_prompts/word_sorting.txt +17 -0
  45. evalscope/benchmarks/benchmark.py +65 -0
  46. evalscope/benchmarks/ceval/__init__.py +5 -0
  47. evalscope/benchmarks/ceval/ceval_adapter.py +340 -0
  48. evalscope/benchmarks/ceval/ceval_exam.py +159 -0
  49. evalscope/benchmarks/cmmlu/__init__.py +5 -0
  50. evalscope/benchmarks/cmmlu/cmmlu.py +166 -0
  51. evalscope/benchmarks/cmmlu/cmmlu_adapter.py +369 -0
  52. evalscope/benchmarks/competition_math/__init__.py +5 -0
  53. evalscope/benchmarks/competition_math/competition_math.py +88 -0
  54. evalscope/benchmarks/competition_math/competition_math_adapter.py +470 -0
  55. evalscope/benchmarks/data_adapter.py +263 -0
  56. evalscope/benchmarks/general_qa/__init__.py +5 -0
  57. evalscope/benchmarks/general_qa/general_qa_adapter.py +186 -0
  58. evalscope/benchmarks/gsm8k/__init__.py +5 -0
  59. evalscope/benchmarks/gsm8k/gsm8k.py +127 -0
  60. evalscope/benchmarks/gsm8k/gsm8k_adapter.py +236 -0
  61. evalscope/benchmarks/hellaswag/__init__.py +5 -0
  62. evalscope/benchmarks/hellaswag/hellaswag.py +116 -0
  63. evalscope/benchmarks/hellaswag/hellaswag_adapter.py +222 -0
  64. evalscope/benchmarks/humaneval/__init__.py +5 -0
  65. evalscope/benchmarks/humaneval/humaneval.py +82 -0
  66. evalscope/benchmarks/humaneval/humaneval_adapter.py +21 -0
  67. evalscope/benchmarks/mmlu/__init__.py +5 -0
  68. evalscope/benchmarks/mmlu/mmlu.py +174 -0
  69. evalscope/benchmarks/mmlu/mmlu_adapter.py +375 -0
  70. evalscope/benchmarks/race/__init__.py +5 -0
  71. evalscope/benchmarks/race/race.py +118 -0
  72. evalscope/benchmarks/race/race_adapter.py +229 -0
  73. evalscope/benchmarks/trivia_qa/__init__.py +5 -0
  74. evalscope/benchmarks/trivia_qa/trivia_qa.py +104 -0
  75. evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +207 -0
  76. evalscope/benchmarks/truthful_qa/__init__.py +5 -0
  77. evalscope/benchmarks/truthful_qa/truthful_qa.py +167 -0
  78. evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +351 -0
  79. evalscope/cache.py +98 -0
  80. evalscope/cli/__init__.py +1 -0
  81. evalscope/cli/base.py +20 -0
  82. evalscope/cli/cli.py +26 -0
  83. evalscope/cli/start_perf.py +37 -0
  84. evalscope/cli/start_server.py +138 -0
  85. evalscope/config.py +165 -0
  86. evalscope/constants.py +150 -0
  87. evalscope/evaluator/__init__.py +3 -0
  88. evalscope/evaluator/evaluator.py +689 -0
  89. evalscope/evaluator/rating_eval.py +178 -0
  90. evalscope/evaluator/reviewer/__init__.py +1 -0
  91. evalscope/evaluator/reviewer/auto_reviewer.py +411 -0
  92. evalscope/metrics/__init__.py +1 -0
  93. evalscope/metrics/bundled_rouge_score/__init__.py +14 -0
  94. evalscope/metrics/bundled_rouge_score/rouge_scorer.py +342 -0
  95. evalscope/metrics/code_metric.py +104 -0
  96. evalscope/metrics/math_accuracy.py +60 -0
  97. evalscope/metrics/metrics.py +405 -0
  98. evalscope/metrics/rouge_metric.py +129 -0
  99. evalscope/models/__init__.py +4 -0
  100. evalscope/models/custom/__init__.py +4 -0
  101. evalscope/models/custom/custom_model.py +53 -0
  102. evalscope/models/dummy_chat_model.py +50 -0
  103. evalscope/models/model.py +88 -0
  104. evalscope/models/model_adapter.py +586 -0
  105. evalscope/models/openai_model.py +103 -0
  106. evalscope/models/template.py +1446 -0
  107. evalscope/perf/__init__.py +0 -0
  108. evalscope/perf/_logging.py +32 -0
  109. evalscope/perf/api_plugin_base.py +60 -0
  110. evalscope/perf/custom_api.py +87 -0
  111. evalscope/perf/dashscope_api.py +84 -0
  112. evalscope/perf/dataset_plugin_base.py +64 -0
  113. evalscope/perf/datasets/__init__.py +0 -0
  114. evalscope/perf/datasets/line_by_line.py +18 -0
  115. evalscope/perf/datasets/longalpaca_12k.py +20 -0
  116. evalscope/perf/datasets/openqa.py +22 -0
  117. evalscope/perf/how_to_analysis_result.py +24 -0
  118. evalscope/perf/http_client.py +756 -0
  119. evalscope/perf/openai_api.py +130 -0
  120. evalscope/perf/plugin_registry.py +35 -0
  121. evalscope/perf/query_parameters.py +42 -0
  122. evalscope/perf/server_sent_event.py +43 -0
  123. evalscope/preprocess/__init__.py +1 -0
  124. evalscope/preprocess/tokenizers/__init__.py +0 -0
  125. evalscope/preprocess/tokenizers/gpt2_tokenizer.py +221 -0
  126. evalscope/registry/__init__.py +1 -0
  127. evalscope/registry/tasks/arc.yaml +29 -0
  128. evalscope/registry/tasks/bbh.yaml +27 -0
  129. evalscope/registry/tasks/bbh_mini.yaml +27 -0
  130. evalscope/registry/tasks/ceval.yaml +27 -0
  131. evalscope/registry/tasks/ceval_mini.yaml +27 -0
  132. evalscope/registry/tasks/cmmlu.yaml +27 -0
  133. evalscope/registry/tasks/eval_qwen-7b-chat_v100.yaml +28 -0
  134. evalscope/registry/tasks/general_qa.yaml +27 -0
  135. evalscope/registry/tasks/gsm8k.yaml +29 -0
  136. evalscope/registry/tasks/mmlu.yaml +29 -0
  137. evalscope/registry/tasks/mmlu_mini.yaml +27 -0
  138. evalscope/run.py +404 -0
  139. evalscope/run_arena.py +204 -0
  140. evalscope/run_ms.py +140 -0
  141. evalscope/summarizer.py +144 -0
  142. evalscope/third_party/__init__.py +1 -0
  143. evalscope/third_party/toolbench_static/__init__.py +3 -0
  144. evalscope/third_party/toolbench_static/eval.py +219 -0
  145. evalscope/third_party/toolbench_static/infer.py +278 -0
  146. evalscope/third_party/toolbench_static/llm/__init__.py +1 -0
  147. evalscope/third_party/toolbench_static/llm/swift_infer.py +45 -0
  148. evalscope/third_party/toolbench_static/toolbench_static.py +50 -0
  149. evalscope/tools/__init__.py +1 -0
  150. evalscope/tools/combine_reports.py +140 -0
  151. evalscope/tools/gen_mmlu_subject_mapping.py +90 -0
  152. evalscope/tools/rewrite_eval_results.py +95 -0
  153. evalscope/utils/__init__.py +4 -0
  154. evalscope/utils/arena_utils.py +247 -0
  155. evalscope/utils/completion_parsers.py +87 -0
  156. evalscope/utils/logger.py +64 -0
  157. evalscope/utils/task_cfg_parser.py +10 -0
  158. evalscope/utils/task_utils.py +19 -0
  159. evalscope/utils/utils.py +625 -0
  160. evalscope/version.py +4 -0
  161. evalscope-0.5.0.dist-info/METADATA +566 -0
  162. evalscope-0.5.0.dist-info/RECORD +165 -0
  163. evalscope-0.5.0.dist-info/WHEEL +5 -0
  164. evalscope-0.5.0.dist-info/entry_points.txt +3 -0
  165. evalscope-0.5.0.dist-info/top_level.txt +1 -0
evalscope/perf/openai_api.py
@@ -0,0 +1,130 @@
+ from typing import Any, Dict, Iterator, List, Tuple
+ import json
+ from evalscope.perf.api_plugin_base import ApiPluginBase
+ from transformers import AutoTokenizer
+ from evalscope.perf.plugin_registry import register_api
+ from evalscope.perf.query_parameters import QueryParameters
+
+ @register_api("openai")
+ class OpenaiPlugin(ApiPluginBase):
+     """Base plugin for OpenAI-compatible APIs.
+     """
+     def __init__(self, mode_path: str):
+         """Init the plugin.
+
+         Args:
+             mode_path (str): The model path; the tokenizer weights under
+                 this path are used to count the input and output
+                 tokens.
+         """
+         super().__init__(model_path=mode_path)
+         if mode_path is not None:
+             self.tokenizer = AutoTokenizer.from_pretrained(mode_path)
+         else:
+             self.tokenizer = None
+
+     def build_request(self, messages: List[Dict], param: QueryParameters) -> Dict:
+         """Build an OpenAI-format request body from the prompt/dataset messages.
+
+         Args:
+             messages (List[Dict]): The messages used to generate the query.
+             param (QueryParameters): The query parameters.
+
+         Raises:
+             Exception: NotImplemented
+
+         Returns:
+             Dict: The request body, or None if the prompt format is invalid.
+         """
+         try:
+             if param.query_template is not None:
+                 query = json.loads(param.query_template)
+                 query['messages'] = messages  # replace template messages with input messages.
+                 return self.__compose_query_from_parameter(query, param)
+             else:
+                 query = {'messages': messages}
+                 return self.__compose_query_from_parameter(query, param)
+         except Exception as e:
+             print(e)
+             return None
+
+     def __compose_query_from_parameter(self, payload: Dict, param: QueryParameters):
+         payload['model'] = param.model
+         if param.max_tokens is not None:
+             payload['max_tokens'] = param.max_tokens
+         if param.frequency_penalty is not None:
+             payload['frequency_penalty'] = param.frequency_penalty
+         if param.logprobs is not None:
+             payload['logprobs'] = param.logprobs
+         if param.n_choices is not None:
+             payload['n'] = param.n_choices
+         if param.seed is not None:
+             payload['seed'] = param.seed
+         if param.stop is not None:
+             payload['stop'] = param.stop
+         if param.stream is not None and param.stream:
+             payload['stream'] = param.stream
+             payload['stream_options'] = {"include_usage": True}
+         if param.stop_token_ids is not None:
+             payload['stop_token_ids'] = param.stop_token_ids
+         if param.temperature is not None:
+             payload['temperature'] = param.temperature
+         if param.top_p is not None:
+             payload['top_p'] = param.top_p
+         return payload
+
+     def parse_responses(self, responses, request: Any = None, **kwargs) -> Tuple:
+         """Parse responses and return the numbers of request and response tokens.
+         Sample of an output delta:
+         {"id":"4","object":"chat.completion.chunk","created":1714030870,"model":"llama3","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
+
+
+         Args:
+             responses (List[bytes]): List of HTTP response bodies; for stream output
+                 there are multiple responses, otherwise only one.
+             kwargs (Any): The command-line --parameter content.
+         Returns:
+             Tuple: The number of prompt tokens and the number of completion tokens.
+         """
+         full_response_content = ''
+         delta_contents = {}
+         input_tokens = None
+         output_tokens = None
+         for response in responses:
+             js = json.loads(response)
+             if js['object'] == 'chat.completion':
+                 for choice in js['choices']:
+                     delta_contents[choice['index']] = [choice['message']['content']]
+                 input_tokens = js['usage']['prompt_tokens']
+                 output_tokens = js['usage']['completion_tokens']
+             else:  # 'object' == "chat.completion.chunk":
+                 if 'choices' in js:
+                     for choice in js['choices']:
+                         if 'delta' in choice and 'index' in choice:
+                             delta = choice['delta']
+                             idx = choice['index']
+                             if 'content' in delta:
+                                 delta_content = delta['content']
+                                 if idx in delta_contents:
+                                     delta_contents[idx].append(delta_content)
+                                 else:
+                                     delta_contents[idx] = [delta_content]
+                 # usage in chunk: {"id":"","object":"chat.completion.chunk","created":1718269986,"model":"llama3",
+                 # "choices":[],"usage":{"prompt_tokens":32,"total_tokens":384,"completion_tokens":352}}
+                 if 'usage' in js and js['usage']:
+                     input_tokens = js['usage']['prompt_tokens']
+                     output_tokens = js['usage']['completion_tokens']
+         if input_tokens is None and output_tokens is None and self.tokenizer is not None:
+             input_tokens = 0
+             output_tokens = 0
+             for idx, choice_contents in delta_contents.items():
+                 full_response_content = ''.join([m for m in choice_contents])
+                 input_tokens += len(self.tokenizer.encode(request['messages'][0]['content']))
+                 output_tokens += len(self.tokenizer.encode(full_response_content))
+         elif input_tokens is None and output_tokens is None:  # no usage info available.
+             input_tokens = 0
+             output_tokens = 0
+
+         return input_tokens, output_tokens
+
+
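To make the token-accounting path above concrete, here is a minimal sketch that feeds one non-streaming chat.completion body through parse_responses. The response JSON is fabricated for illustration and only mirrors the OpenAI schema quoted in the docstring; passing mode_path=None leans on the None branch in __init__, so the token counts come from the usage field rather than a local tokenizer.

    import json
    from evalscope.perf.openai_api import OpenaiPlugin

    plugin = OpenaiPlugin(mode_path=None)  # no local tokenizer; counts come from 'usage'
    body = json.dumps({
        'id': '1', 'object': 'chat.completion', 'model': 'llama3',
        'choices': [{'index': 0,
                     'message': {'role': 'assistant', 'content': 'Hi!'},
                     'finish_reason': 'stop'}],
        'usage': {'prompt_tokens': 9, 'completion_tokens': 2, 'total_tokens': 11},
    })
    input_tokens, output_tokens = plugin.parse_responses([body])
    print(input_tokens, output_tokens)  # -> 9 2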
evalscope/perf/plugin_registry.py
@@ -0,0 +1,35 @@
+
+ from typing import Any
+
+
+ class PluginRegistry:
+     def __init__(self):
+         self._registry = {}
+
+     def register(self, name, cls):
+         self._registry[name] = cls
+         return cls
+
+     def get_class(self, name):
+         return self._registry[name]
+
+     def all_classes(self):
+         return list(self._registry.keys())
+
+     def __call__(self, name: str) -> Any:
+         return self.get_class(name)
+
+ dataset_registry = PluginRegistry()
+ api_registry = PluginRegistry()
+
+ def register_dataset(name: str):
+     def class_decorator(cls):
+         dataset_registry.register(name, cls)
+         return cls
+     return class_decorator
+
+ def register_api(name: str):
+     def class_decorator(cls):
+         api_registry.register(name, cls)
+         return cls
+     return class_decorator
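The registries are plain name-to-class maps, so the decorators can be exercised in isolation. A minimal sketch; the plugin name and stub class below are invented for illustration:

    from evalscope.perf.plugin_registry import dataset_registry, register_dataset

    @register_dataset('line_prompts')  # hypothetical plugin name
    class LinePromptPlugin:
        """Stub; a real plugin would subclass DatasetPluginBase."""

    assert dataset_registry.get_class('line_prompts') is LinePromptPlugin
    assert 'line_prompts' in dataset_registry.all_classes()  # returns the registered names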
evalscope/perf/query_parameters.py
@@ -0,0 +1,42 @@
+ from dataclasses import dataclass
+ from typing import Optional
+
+
+ @dataclass
+ class QueryParameters:
+     model: str
+     prompt: Optional[str]
+     dataset: Optional[str]
+     query_template: Optional[str]
+     dataset_path: Optional[str]
+     frequency_penalty: Optional[float]
+     logprobs: Optional[bool]
+     max_tokens: Optional[int]
+     n_choices: Optional[int]
+     seed: Optional[int]
+     stop: Optional[str]
+     stream: Optional[bool]
+     temperature: Optional[float]
+     top_p: Optional[float]
+     max_prompt_length: Optional[int]
+     min_prompt_length: Optional[int]
+     include_usage: Optional[bool]
+
+     def __init__(self, args):
+         self.model = args.model
+         self.prompt = args.prompt
+         self.dataset = args.dataset
+         self.query_template = args.query_template
+         self.dataset_path = args.dataset_path
+         self.frequency_penalty = args.frequency_penalty
+         self.logprobs = args.logprobs
+         self.max_tokens = args.max_tokens
+         self.n_choices = args.n_choices
+         self.seed = args.seed
+         self.stop = args.stop
+         self.stream = args.stream
+         self.temperature = args.temperature
+         self.top_p = args.top_p
+         self.max_prompt_length = args.max_prompt_length
+         self.min_prompt_length = args.min_prompt_length
+         self.stop_token_ids = args.stop_token_ids
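Because __init__ copies attributes straight off the parsed argparse namespace (note that stop_token_ids is read even though it is not declared as a field, while include_usage is declared but never assigned), any object exposing the same attribute names will do. A sketch under that assumption; every flag value below is illustrative:

    from argparse import Namespace
    from evalscope.perf.openai_api import OpenaiPlugin
    from evalscope.perf.query_parameters import QueryParameters

    # Every attribute read in __init__ must exist on the namespace, even if None.
    args = Namespace(
        model='llama3', prompt=None, dataset=None, query_template=None,
        dataset_path=None, frequency_penalty=None, logprobs=None,
        max_tokens=128, n_choices=None, seed=42, stop=None, stream=True,
        temperature=0.7, top_p=None, max_prompt_length=None,
        min_prompt_length=None, stop_token_ids=None)
    param = QueryParameters(args)

    plugin = OpenaiPlugin(mode_path=None)
    request = plugin.build_request([{'role': 'user', 'content': 'Hello'}], param)
    # -> {'messages': [...], 'model': 'llama3', 'max_tokens': 128, 'seed': 42,
    #     'stream': True, 'stream_options': {'include_usage': True}, 'temperature': 0.7}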
evalscope/perf/server_sent_event.py
@@ -0,0 +1,43 @@
+ from dataclasses import dataclass
+
+ @dataclass
+ class ServerSentEvent(object):
+     def __init__(self, data='', event=None, id=None, retry=None):
+         self.data = data
+         self.event = event
+         self.id = id
+         self.retry = retry
+
+     @classmethod
+     def decode(cls, line):
+         """Decode a line into a ServerSentEvent.
+
+
+         Args:
+             line (str): The line to decode.
+
+         Returns:
+             ServerSentEvent: The decoded object, or None for an empty line.
+
+         """
+         if not line:
+             return None
+         sse_msg = cls()
+         # format: "field:value", e.g. "data:xxx"
+         field_type, _, field_value = line.partition(":")
+         if field_value.startswith(" "):  # compatible with the OpenAI API
+             field_value = field_value[1:]
+         if field_type == "event":
+             sse_msg.event = field_value
+         elif field_type == "data":
+             field_value = field_value.rstrip()
+             sse_msg.data = field_value
+         elif field_type == "id":
+             sse_msg.id = field_value
+         elif field_type == "retry":
+             sse_msg.retry = field_value
+         else:
+             pass
+
+         return sse_msg
+
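A quick sketch of decode on typical stream lines, with the expected results in comments:

    from evalscope.perf.server_sent_event import ServerSentEvent

    sse = ServerSentEvent.decode('data: {"choices":[]}')
    print(sse.data)    # '{"choices":[]}' -- the single leading space is stripped, OpenAI style

    sse = ServerSentEvent.decode('event: done')
    print(sse.event)   # 'done'

    print(ServerSentEvent.decode(''))  # None; blank lines separate SSE events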
evalscope/preprocess/__init__.py
@@ -0,0 +1 @@
+ # Copyright (c) Alibaba, Inc. and its affiliates.

evalscope/preprocess/tokenizers/__init__.py
File without changes
evalscope/preprocess/tokenizers/gpt2_tokenizer.py
@@ -0,0 +1,221 @@
+ import logging
+ import sys
+ from functools import lru_cache
+ from typing import Sequence
+
+ import json
+ import regex as re
+
+ logger = logging.getLogger(__name__)
+
+
+ def get_pairs(word):
+     """Return the set of symbol pairs in a word.
+
+     Word is represented as a tuple of symbols (symbols being variable-length strings).
+     """
+     pairs = set()
+     prev_char = word[0]
+     for char in word[1:]:
+         pairs.add((prev_char, char))
+         prev_char = char
+     return pairs
+
+
+ @lru_cache()
+ def bytes_to_unicode():
+     """
+     Returns a list of utf-8 bytes and a corresponding list of unicode strings.
+     The reversible bpe codes work on unicode strings.
+     This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
+     When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
+     This is a significant percentage of your normal, say, 32K bpe vocab.
+     To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
+     This also avoids mapping to whitespace/control characters the bpe code barfs on.
+     """
+     bs = list(range(ord('!'),
+                     ord('~') + 1)) + list(range(
+                         ord('¡'),
+                         ord('¬') + 1)) + list(range(ord('®'),
+                                                     ord('ÿ') + 1))
+     cs = bs[:]
+     n = 0
+     for b in range(2**8):
+         if b not in bs:
+             bs.append(b)
+             cs.append(2**8 + n)
+             n += 1
+     cs = [chr(n) for n in cs]
+     return dict(zip(bs, cs))
+
+
+ class GPT2Tokenizer(object):
+     """
+     GPT-2 BPE tokenizer. Peculiarities:
+         - Byte-level BPE
+
+     [NOTE]: Copied from megatron.tokenizer.gpt2_tokenization.GPT2Tokenizer.
+     """
+
+     def __init__(self,
+                  vocab_file,
+                  merges_file,
+                  errors='replace',
+                  special_tokens=None,
+                  max_len=None):
+         assert sys.version_info[0] != 2
+
+         self.max_len = max_len if max_len is not None else int(1e12)
+         self.encoder = json.load(open(vocab_file))
+         self.decoder = {v: k for k, v in self.encoder.items()}
+         self.errors = errors  # how to handle errors in decoding
+         self.byte_encoder = bytes_to_unicode()
+         self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+         bpe_data = open(merges_file, encoding='utf-8').read().split('\n')[1:-1]
+         bpe_merges = [tuple(merge.split()) for merge in bpe_data]
+         self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
+         self.cache = {}
+
+         # Should have added re.IGNORECASE so BPE merges can happen for
+         # capitalized versions of contractions
+         self.pat = re.compile(
+             r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+"""
+         )
+
+         self.special_tokens = {}
+         self.special_tokens_decoder = {}
+         self.set_special_tokens(special_tokens)
+
+     def __len__(self):
+         return len(self.encoder) + len(self.special_tokens)
+
+     def set_special_tokens(self, special_tokens):
+         """Add a list of additional tokens to the encoder.
+         The additional tokens are indexed starting from the last index of the
+         current vocabulary, in the order of the `special_tokens` list.
+         """
+         if not special_tokens:
+             self.special_tokens = {}
+             self.special_tokens_decoder = {}
+             return
+         self.special_tokens = dict((tok, len(self.encoder) + i)
+                                    for i, tok in enumerate(special_tokens))
+         self.special_tokens_decoder = {
+             v: k
+             for k, v in self.special_tokens.items()
+         }
+         logger.info('Special tokens {}'.format(self.special_tokens))
+
+     def bpe(self, token):
+         if token in self.cache:
+             return self.cache[token]
+         word = tuple(token)
+         pairs = get_pairs(word)
+
+         if not pairs:
+             return token
+
+         while True:
+             bigram = min(
+                 pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
+             if bigram not in self.bpe_ranks:
+                 break
+             first, second = bigram
+             new_word = []
+             i = 0
+             while i < len(word):
+                 try:
+                     j = word.index(first, i)
+                     new_word.extend(word[i:j])
+                     i = j
+                 except BaseException:
+                     new_word.extend(word[i:])
+                     break
+
+                 if word[i] == first and i < len(word) - 1 and word[
+                         i + 1] == second:
+                     new_word.append(first + second)
+                     i += 2
+                 else:
+                     new_word.append(word[i])
+                     i += 1
+             new_word = tuple(new_word)
+             word = new_word
+             if len(word) == 1:
+                 break
+             else:
+                 pairs = get_pairs(word)
+         word = ' '.join(word)
+         self.cache[token] = word
+         return word
+
+     def tokenize(self, text):
+         """Tokenize a string."""
+         bpe_tokens = []
+         for token in re.findall(self.pat, text):
+             if sys.version_info[0] == 2:
+                 token = ''.join(self.byte_encoder[ord(b)] for b in token)
+             else:
+                 token = ''.join(self.byte_encoder[b]
+                                 for b in token.encode('utf-8'))
+             bpe_tokens.extend(
+                 bpe_token for bpe_token in self.bpe(token).split(' '))
+         return bpe_tokens
+
+     def convert_tokens_to_ids(self, tokens):
+         """Converts a sequence of tokens into ids using the vocab."""
+         ids = []
+         if isinstance(tokens, str):
+             if tokens in self.special_tokens:
+                 return self.special_tokens[tokens]
+             else:
+                 return self.encoder.get(tokens, 0)
+         for token in tokens:
+             if token in self.special_tokens:
+                 ids.append(self.special_tokens[token])
+             else:
+                 ids.append(self.encoder.get(token, 0))
+         if len(ids) > self.max_len:
+             logger.warning(
+                 'Token indices sequence length is longer than the specified maximum '
+                 ' sequence length for this OpenAI GPT model ({} > {}). Running this'
+                 ' sequence through the model will result in indexing errors'.
+                 format(len(ids), self.max_len))
+         return ids
+
+     def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
+         """Converts a sequence of ids into BPE tokens using the vocab."""
+         tokens = []
+         for i in ids:
+             if i in self.special_tokens_decoder:
+                 if not skip_special_tokens:
+                     tokens.append(self.special_tokens_decoder[i])
+             else:
+                 tokens.append(self.decoder[i])
+         return tokens
+
+     def encode(self, text):
+         return self.convert_tokens_to_ids(self.tokenize(text))
+
+     def decode(self, tokens):
+         text = ''.join([self.decoder[token] for token in tokens])
+         text = bytearray([self.byte_decoder[c] for c in text]).decode(
+             'utf-8', errors=self.errors)
+         return text
+
+
+ class DummyTokenizer:
+
+     def tokenize(self, text: str):
+         return text.split()
+
+
+ def get_tokenized_string(tokenizer: GPT2Tokenizer, text_list: Sequence[str]):
+     token_ids_list, tokenized_string_list = [], []
+     for text in text_list:
+         assert tokenizer is not None
+         token_ids = tokenizer.encode(text)
+         tokenized_string = ' '.join(tokenizer.convert_ids_to_tokens(token_ids))
+         token_ids_list.append(token_ids)
+         tokenized_string_list.append(tokenized_string)
+     return token_ids_list, tokenized_string_list
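A usage sketch for the bundled tokenizer; 'vocab.json' and 'merges.txt' below are hypothetical local copies of the GPT-2 vocabulary files, which do not ship with this wheel:

    from evalscope.preprocess.tokenizers.gpt2_tokenizer import (
        GPT2Tokenizer, get_tokenized_string)

    tok = GPT2Tokenizer(vocab_file='vocab.json', merges_file='merges.txt')
    ids = tok.encode('Hello world')  # byte-level BPE ids
    print(tok.decode(ids))           # -> 'Hello world'

    ids_list, strings = get_tokenized_string(tok, ['Hello world'])
    print(strings[0])                # space-joined BPE tokens, e.g. 'Hello Ġworld'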
evalscope/registry/__init__.py
@@ -0,0 +1 @@
+ # Copyright (c) Alibaba, Inc. and its affiliates.
evalscope/registry/tasks/arc.yaml
@@ -0,0 +1,29 @@
+ model_args:  # model args should follow the benchmark requirements
+   revision: default
+   precision: torch.float16
+   device_map: auto
+   # model_name_or_path: qwen/qwen-7b-chat
+ generation_config:
+   temperature: 0.3
+   max_length: 2048
+   max_new_tokens: 512
+   top_k: 50
+   top_p: 0.85
+   do_sample: false
+   num_beams: 1
+   repetition_penalty: 1.0
+   # eos_token_id: null
+   # pad_token_id: null
+ dataset_args:
+   arc:
+     prompt_template: 'The following are multiple choice questions, please output correct answer in the form of A or B or C or D, do not output explanation:'
+ dry_run: false
+ model: null  # Note: to be implemented as CustomModel
+ eval_type: custom
+ datasets:
+   - arc
+ outputs: null  # structure: configs, logs, predictions, reviews, reports  # TODO: need to parse
+ use_cache: false
+ stage: all
+ dataset_hub: ModelScope  # `Local` or `ModelScope`
+ limit: null
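These registry YAMLs are self-contained task configs. A hedged sketch of consuming one programmatically, assuming run_task in evalscope/run.py accepts a task-config dict; verify the exact entry point and signature against run.py in this wheel before relying on it:

    import yaml
    from evalscope.run import run_task

    with open('evalscope/registry/tasks/arc.yaml') as f:
        task_cfg = yaml.safe_load(f)

    task_cfg['model'] = 'qwen/qwen-7b-chat'  # fill in the model to evaluate
    run_task(task_cfg)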
evalscope/registry/tasks/bbh.yaml
@@ -0,0 +1,27 @@
+ model_args:  # model args should follow the benchmark requirements
+   revision: default
+   precision: torch.float16
+   device_map: auto
+   # model_name_or_path: qwen/qwen-7b-chat
+ generation_config:
+   temperature: 0.3
+   max_length: 2048
+   max_new_tokens: 512
+   top_k: 50
+   top_p: 0.85
+   do_sample: false
+   num_beams: 1
+   repetition_penalty: 1.0
+   # eos_token_id: null
+   # pad_token_id: null
+ dataset_args: {}
+ dry_run: false
+ model: null  # Note: to be implemented as CustomModel
+ eval_type: custom
+ datasets:
+   - bbh
+ outputs: null  # structure: configs, logs, predictions, reviews, reports  # TODO: need to parse
+ use_cache: false
+ stage: all
+ dataset_hub: ModelScope  # `Local` or `ModelScope`
+ limit: null
evalscope/registry/tasks/bbh_mini.yaml
@@ -0,0 +1,27 @@
+ model_args:  # model args should follow the benchmark requirements
+   revision: default
+   precision: torch.float16
+   device_map: auto
+   # model_name_or_path: qwen/qwen-7b-chat
+ generation_config:
+   temperature: 0.3
+   max_length: 2048
+   max_new_tokens: 512
+   top_k: 50
+   top_p: 0.85
+   do_sample: false
+   num_beams: 1
+   repetition_penalty: 1.0
+   # eos_token_id: null
+   # pad_token_id: null
+ dataset_args: {'bbh': {'subset_list': ['temporal_sequences', 'multistep_arithmetic_two']}}
+ dry_run: false
+ model: null  # Note: to be implemented as CustomModel
+ eval_type: custom
+ datasets:
+   - bbh
+ outputs: null  # structure: configs, logs, predictions, reviews, reports  # TODO: need to parse
+ use_cache: false
+ stage: all
+ dataset_hub: ModelScope  # `Local` or `ModelScope`
+ limit: null
evalscope/registry/tasks/ceval.yaml
@@ -0,0 +1,27 @@
+ model_args:  # model args should follow the benchmark requirements
+   revision: default
+   precision: torch.float16
+   device_map: auto
+   # model_name_or_path: qwen/qwen-7b-chat
+ generation_config:
+   temperature: 0.3
+   max_length: 2048
+   max_new_tokens: 512
+   top_k: 50
+   top_p: 0.85
+   do_sample: false
+   num_beams: 1
+   repetition_penalty: 1.0
+   # eos_token_id: null
+   # pad_token_id: null
+ dataset_args: {}
+ dry_run: false
+ model: null  # Note: to be implemented as CustomModel
+ eval_type: custom
+ datasets:
+   - ceval
+ outputs: null  # structure: configs, logs, predictions, reviews, reports  # TODO: need to parse
+ use_cache: false
+ stage: all
+ dataset_hub: ModelScope  # `Local` or `ModelScope`
+ limit: null
evalscope/registry/tasks/ceval_mini.yaml
@@ -0,0 +1,27 @@
+ model_args:  # model args should follow the benchmark requirements
+   revision: default
+   precision: torch.float16
+   device_map: auto
+   # model_name_or_path: qwen/qwen-7b-chat
+ generation_config:
+   temperature: 0.3
+   max_length: 2048
+   max_new_tokens: 512
+   top_k: 50
+   top_p: 0.85
+   do_sample: false
+   num_beams: 1
+   repetition_penalty: 1.0
+   # eos_token_id: null
+   # pad_token_id: null
+ dataset_args: {'ceval': {'subset_list': ['computer_network', 'operating_system']}}
+ dry_run: false
+ model: null  # Note: to be implemented as CustomModel
+ eval_type: custom
+ datasets:
+   - ceval
+ outputs: null  # structure: configs, logs, predictions, reviews, reports  # TODO: need to parse
+ use_cache: false
+ stage: all
+ dataset_hub: ModelScope  # `Local` or `ModelScope`
+ limit: null
evalscope/registry/tasks/cmmlu.yaml
@@ -0,0 +1,27 @@
+ model_args:  # model args should follow the benchmark requirements
+   revision: default
+   precision: torch.float16
+   device_map: auto
+   # model_name_or_path: qwen/qwen-7b-chat
+ generation_config:
+   temperature: 0.3
+   max_length: 2048
+   max_new_tokens: 512
+   top_k: 50
+   top_p: 0.85
+   do_sample: false
+   num_beams: 1
+   repetition_penalty: 1.0
+   # eos_token_id: null
+   # pad_token_id: null
+ dataset_args: {}
+ dry_run: false
+ model: null  # Note: to be implemented as CustomModel
+ eval_type: custom
+ datasets:
+   - cmmlu
+ outputs: null  # structure: configs, logs, predictions, reviews, reports  # TODO: need to parse
+ use_cache: false
+ stage: all
+ dataset_hub: ModelScope  # `Local` or `ModelScope`
+ limit: null
evalscope/registry/tasks/eval_qwen-7b-chat_v100.yaml
@@ -0,0 +1,28 @@
+ model_args:  # model args should follow the benchmark requirements
+   revision: v1.0.0
+   precision: torch.float16
+   device_map: auto
+   # model_name_or_path: qwen/qwen-7b-chat
+ generation_config:
+   temperature: 0.3
+   max_length: 2048
+   max_new_tokens: 512
+   top_k: 50
+   top_p: 0.85
+   do_sample: false
+   num_beams: 1
+   repetition_penalty: 1.0
+   # eos_token_id: null
+   # pad_token_id: null
+ dataset_args: {}
+ dry_run: false
+ model: null  # Note: to be implemented as CustomModel
+ eval_type: custom
+ datasets:
+   - arc
+   - gsm8k
+ outputs: ./outputs/eval_qwen-7b-chat_v100  # Directory to save the outputs; structure: logs, predictions, reviews, reports
+ use_cache: false
+ stage: all
+ dataset_hub: ModelScope  # `Local` or `ModelScope`
+ limit: 10