SinaTools-0.1.40-py2.py3-none-any.whl → SinaTools-1.0.1-py2.py3-none-any.whl
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- {SinaTools-0.1.40.dist-info → SinaTools-1.0.1.dist-info}/METADATA +1 -1
- SinaTools-1.0.1.dist-info/RECORD +73 -0
- sinatools/VERSION +1 -1
- sinatools/ner/__init__.py +5 -7
- sinatools/ner/trainers/BertNestedTrainer.py +203 -203
- sinatools/ner/trainers/BertTrainer.py +163 -163
- sinatools/ner/trainers/__init__.py +2 -2
- SinaTools-0.1.40.dist-info/RECORD +0 -123
- sinatools/arabert/arabert/__init__.py +0 -14
- sinatools/arabert/arabert/create_classification_data.py +0 -260
- sinatools/arabert/arabert/create_pretraining_data.py +0 -534
- sinatools/arabert/arabert/extract_features.py +0 -444
- sinatools/arabert/arabert/lamb_optimizer.py +0 -158
- sinatools/arabert/arabert/modeling.py +0 -1027
- sinatools/arabert/arabert/optimization.py +0 -202
- sinatools/arabert/arabert/run_classifier.py +0 -1078
- sinatools/arabert/arabert/run_pretraining.py +0 -593
- sinatools/arabert/arabert/run_squad.py +0 -1440
- sinatools/arabert/arabert/tokenization.py +0 -414
- sinatools/arabert/araelectra/__init__.py +0 -1
- sinatools/arabert/araelectra/build_openwebtext_pretraining_dataset.py +0 -103
- sinatools/arabert/araelectra/build_pretraining_dataset.py +0 -230
- sinatools/arabert/araelectra/build_pretraining_dataset_single_file.py +0 -90
- sinatools/arabert/araelectra/configure_finetuning.py +0 -172
- sinatools/arabert/araelectra/configure_pretraining.py +0 -143
- sinatools/arabert/araelectra/finetune/__init__.py +0 -14
- sinatools/arabert/araelectra/finetune/feature_spec.py +0 -56
- sinatools/arabert/araelectra/finetune/preprocessing.py +0 -173
- sinatools/arabert/araelectra/finetune/scorer.py +0 -54
- sinatools/arabert/araelectra/finetune/task.py +0 -74
- sinatools/arabert/araelectra/finetune/task_builder.py +0 -70
- sinatools/arabert/araelectra/flops_computation.py +0 -215
- sinatools/arabert/araelectra/model/__init__.py +0 -14
- sinatools/arabert/araelectra/model/modeling.py +0 -1029
- sinatools/arabert/araelectra/model/optimization.py +0 -193
- sinatools/arabert/araelectra/model/tokenization.py +0 -355
- sinatools/arabert/araelectra/pretrain/__init__.py +0 -14
- sinatools/arabert/araelectra/pretrain/pretrain_data.py +0 -160
- sinatools/arabert/araelectra/pretrain/pretrain_helpers.py +0 -229
- sinatools/arabert/araelectra/run_finetuning.py +0 -323
- sinatools/arabert/araelectra/run_pretraining.py +0 -469
- sinatools/arabert/araelectra/util/__init__.py +0 -14
- sinatools/arabert/araelectra/util/training_utils.py +0 -112
- sinatools/arabert/araelectra/util/utils.py +0 -109
- sinatools/arabert/aragpt2/__init__.py +0 -2
- sinatools/arabert/aragpt2/create_pretraining_data.py +0 -95
- sinatools/arabert/aragpt2/gpt2/__init__.py +0 -2
- sinatools/arabert/aragpt2/gpt2/lamb_optimizer.py +0 -158
- sinatools/arabert/aragpt2/gpt2/optimization.py +0 -225
- sinatools/arabert/aragpt2/gpt2/run_pretraining.py +0 -397
- sinatools/arabert/aragpt2/grover/__init__.py +0 -0
- sinatools/arabert/aragpt2/grover/dataloader.py +0 -161
- sinatools/arabert/aragpt2/grover/modeling.py +0 -803
- sinatools/arabert/aragpt2/grover/modeling_gpt2.py +0 -1196
- sinatools/arabert/aragpt2/grover/optimization_adafactor.py +0 -234
- sinatools/arabert/aragpt2/grover/train_tpu.py +0 -187
- sinatools/arabert/aragpt2/grover/utils.py +0 -234
- sinatools/arabert/aragpt2/train_bpe_tokenizer.py +0 -59
- {SinaTools-0.1.40.data → SinaTools-1.0.1.data}/data/sinatools/environment.yml +0 -0
- {SinaTools-0.1.40.dist-info → SinaTools-1.0.1.dist-info}/AUTHORS.rst +0 -0
- {SinaTools-0.1.40.dist-info → SinaTools-1.0.1.dist-info}/LICENSE +0 -0
- {SinaTools-0.1.40.dist-info → SinaTools-1.0.1.dist-info}/WHEEL +0 -0
- {SinaTools-0.1.40.dist-info → SinaTools-1.0.1.dist-info}/entry_points.txt +0 -0
- {SinaTools-0.1.40.dist-info → SinaTools-1.0.1.dist-info}/top_level.txt +0 -0
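The listing above shows that the bundled AraBERT/AraELECTRA/AraGPT2 training and preprocessing modules are dropped entirely in 1.0.1. For downstream code that imported them from the 0.1.40 wheel, a minimal guarded-import sketch (the fallback behaviour is illustrative only and not part of SinaTools):

```python
try:
    # Present in SinaTools 0.1.40, removed from the 1.0.1 wheel (see listing above).
    from sinatools.arabert.arabert import tokenization
except ImportError:
    # The training/preprocessing scripts are no longer shipped; either vendor the
    # module yourself or pin the old release: pip install "SinaTools==0.1.40"
    tokenization = None
```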
sinatools/arabert/arabert/tokenization.py
@@ -1,414 +0,0 @@
-# coding=utf-8
-# Copyright 2018 The Google AI Language Team Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Tokenization classes."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import collections
-import re
-import unicodedata
-import six
-import tensorflow as tf
-
-
-def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
-    """Checks whether the casing config is consistent with the checkpoint name."""
-
-    # The casing has to be passed in by the user and there is no explicit check
-    # as to whether it matches the checkpoint. The casing information probably
-    # should have been stored in the bert_config.json file, but it's not, so
-    # we have to heuristically detect it to validate.
-
-    if not init_checkpoint:
-        return
-
-    m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
-    if m is None:
-        return
-
-    model_name = m.group(1)
-
-    lower_models = [
-        "uncased_L-24_H-1024_A-16",
-        "uncased_L-12_H-768_A-12",
-        "multilingual_L-12_H-768_A-12",
-        "chinese_L-12_H-768_A-12",
-    ]
-
-    cased_models = [
-        "cased_L-12_H-768_A-12",
-        "cased_L-24_H-1024_A-16",
-        "multi_cased_L-12_H-768_A-12",
-    ]
-
-    is_bad_config = False
-    if model_name in lower_models and not do_lower_case:
-        is_bad_config = True
-        actual_flag = "False"
-        case_name = "lowercased"
-        opposite_flag = "True"
-
-    if model_name in cased_models and do_lower_case:
-        is_bad_config = True
-        actual_flag = "True"
-        case_name = "cased"
-        opposite_flag = "False"
-
-    if is_bad_config:
-        raise ValueError(
-            "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
-            "However, `%s` seems to be a %s model, so you "
-            "should pass in `--do_lower_case=%s` so that the fine-tuning matches "
-            "how the model was pre-training. If this error is wrong, please "
-            "just comment out this check."
-            % (actual_flag, init_checkpoint, model_name, case_name, opposite_flag)
-        )
-
-
-def convert_to_unicode(text):
-    """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
-    if six.PY3:
-        if isinstance(text, str):
-            return text
-        elif isinstance(text, bytes):
-            return text.decode("utf-8", "ignore")
-        else:
-            raise ValueError("Unsupported string type: %s" % (type(text)))
-    elif six.PY2:
-        if isinstance(text, str):
-            return text.decode("utf-8", "ignore")
-        elif isinstance(text, unicode):
-            return text
-        else:
-            raise ValueError("Unsupported string type: %s" % (type(text)))
-    else:
-        raise ValueError("Not running on Python2 or Python 3?")
-
-
-def printable_text(text):
-    """Returns text encoded in a way suitable for print or `tf.logging`."""
-
-    # These functions want `str` for both Python2 and Python3, but in one case
-    # it's a Unicode string and in the other it's a byte string.
-    if six.PY3:
-        if isinstance(text, str):
-            return text
-        elif isinstance(text, bytes):
-            return text.decode("utf-8", "ignore")
-        else:
-            raise ValueError("Unsupported string type: %s" % (type(text)))
-    elif six.PY2:
-        if isinstance(text, str):
-            return text
-        elif isinstance(text, unicode):
-            return text.encode("utf-8")
-        else:
-            raise ValueError("Unsupported string type: %s" % (type(text)))
-    else:
-        raise ValueError("Not running on Python2 or Python 3?")
-
-
-def load_vocab(vocab_file):
-    """Loads a vocabulary file into a dictionary."""
-    vocab = collections.OrderedDict()
-    index = 0
-    with tf.gfile.GFile(vocab_file, "r") as reader:
-        while True:
-            token = convert_to_unicode(reader.readline())
-            if not token:
-                break
-            token = token.strip()
-            vocab[token] = index
-            index += 1
-    return vocab
-
-
-def convert_by_vocab(vocab, items):
-    """Converts a sequence of [tokens|ids] using the vocab."""
-    output = []
-    for item in items:
-        output.append(vocab[item])
-    return output
-
-
-def convert_tokens_to_ids(vocab, tokens):
-    return convert_by_vocab(vocab, tokens)
-
-
-def convert_ids_to_tokens(inv_vocab, ids):
-    return convert_by_vocab(inv_vocab, ids)
-
-
-def whitespace_tokenize(text):
-    """Runs basic whitespace cleaning and splitting on a piece of text."""
-    text = text.strip()
-    if not text:
-        return []
-    tokens = text.split()
-    return tokens
-
-
-class FullTokenizer(object):
-    """Runs end-to-end tokenziation."""
-
-    def __init__(self, vocab_file, do_lower_case=True):
-        self.vocab = load_vocab(vocab_file)
-        self.inv_vocab = {v: k for k, v in self.vocab.items()}
-        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
-        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
-
-    def tokenize(self, text):
-        split_tokens = []
-        for token in self.basic_tokenizer.tokenize(text):
-            for sub_token in self.wordpiece_tokenizer.tokenize(token):
-                split_tokens.append(sub_token)
-
-        return split_tokens
-
-    def convert_tokens_to_ids(self, tokens):
-        return convert_by_vocab(self.vocab, tokens)
-
-    def convert_ids_to_tokens(self, ids):
-        return convert_by_vocab(self.inv_vocab, ids)
-
-
-class BasicTokenizer(object):
-    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
-
-    def __init__(self, do_lower_case=True):
-        """Constructs a BasicTokenizer.
-
-        Args:
-          do_lower_case: Whether to lower case the input.
-        """
-        self.do_lower_case = do_lower_case
-
-    def tokenize(self, text):
-        """Tokenizes a piece of text."""
-        text = convert_to_unicode(text)
-        text = self._clean_text(text)
-
-        # This was added on November 1st, 2018 for the multilingual and Chinese
-        # models. This is also applied to the English models now, but it doesn't
-        # matter since the English models were not trained on any Chinese data
-        # and generally don't have any Chinese data in them (there are Chinese
-        # characters in the vocabulary because Wikipedia does have some Chinese
-        # words in the English Wikipedia.).
-        text = self._tokenize_chinese_chars(text)
-
-        orig_tokens = whitespace_tokenize(text)
-        split_tokens = []
-        for token in orig_tokens:
-            if self.do_lower_case:
-                token = token.lower()
-                token = self._run_strip_accents(token)
-            split_tokens.extend(self._run_split_on_punc(token))
-
-        output_tokens = whitespace_tokenize(" ".join(split_tokens))
-        return output_tokens
-
-    def _run_strip_accents(self, text):
-        """Strips accents from a piece of text."""
-        text = unicodedata.normalize("NFD", text)
-        output = []
-        for char in text:
-            cat = unicodedata.category(char)
-            if cat == "Mn":
-                continue
-            output.append(char)
-        return "".join(output)
-
-    def _run_split_on_punc(self, text):
-        """Splits punctuation on a piece of text."""
-        chars = list(text)
-        i = 0
-        start_new_word = True
-        output = []
-        while i < len(chars):
-            char = chars[i]
-            if _is_punctuation(char):
-                output.append([char])
-                start_new_word = True
-            else:
-                if start_new_word:
-                    output.append([])
-                start_new_word = False
-                output[-1].append(char)
-            i += 1
-
-        return ["".join(x) for x in output]
-
-    def _tokenize_chinese_chars(self, text):
-        """Adds whitespace around any CJK character."""
-        output = []
-        for char in text:
-            cp = ord(char)
-            if self._is_chinese_char(cp):
-                output.append(" ")
-                output.append(char)
-                output.append(" ")
-            else:
-                output.append(char)
-        return "".join(output)
-
-    def _is_chinese_char(self, cp):
-        """Checks whether CP is the codepoint of a CJK character."""
-        # This defines a "chinese character" as anything in the CJK Unicode block:
-        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
-        #
-        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
-        # despite its name. The modern Korean Hangul alphabet is a different block,
-        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
-        # space-separated words, so they are not treated specially and handled
-        # like the all of the other languages.
-        if (
-            (cp >= 0x4E00 and cp <= 0x9FFF)
-            or (cp >= 0x3400 and cp <= 0x4DBF)  #
-            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
-            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
-            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
-            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
-            or (cp >= 0xF900 and cp <= 0xFAFF)
-            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
-        ):  #
-            return True
-
-        return False
-
-    def _clean_text(self, text):
-        """Performs invalid character removal and whitespace cleanup on text."""
-        output = []
-        for char in text:
-            cp = ord(char)
-            if cp == 0 or cp == 0xFFFD or _is_control(char):
-                continue
-            if _is_whitespace(char):
-                output.append(" ")
-            else:
-                output.append(char)
-        return "".join(output)
-
-
-class WordpieceTokenizer(object):
-    """Runs WordPiece tokenziation."""
-
-    def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
-        self.vocab = vocab
-        self.unk_token = unk_token
-        self.max_input_chars_per_word = max_input_chars_per_word
-
-    def tokenize(self, text):
-        """Tokenizes a piece of text into its word pieces.
-
-        This uses a greedy longest-match-first algorithm to perform tokenization
-        using the given vocabulary.
-
-        For example:
-          input = "unaffable"
-          output = ["un", "##aff", "##able"]
-
-        Args:
-          text: A single token or whitespace separated tokens. This should have
-            already been passed through `BasicTokenizer.
-
-        Returns:
-          A list of wordpiece tokens.
-        """
-
-        text = convert_to_unicode(text)
-
-        output_tokens = []
-        for token in whitespace_tokenize(text):
-            chars = list(token)
-            if len(chars) > self.max_input_chars_per_word:
-                output_tokens.append(self.unk_token)
-                continue
-
-            is_bad = False
-            start = 0
-            sub_tokens = []
-            while start < len(chars):
-                end = len(chars)
-                cur_substr = None
-                while start < end:
-                    substr = "".join(chars[start:end])
-                    if start > 0:
-                        substr = "##" + substr
-                    if substr in self.vocab:
-                        cur_substr = substr
-                        break
-                    end -= 1
-                if cur_substr is None:
-                    is_bad = True
-                    break
-                sub_tokens.append(cur_substr)
-                start = end
-
-            if is_bad:
-                output_tokens.append(self.unk_token)
-            else:
-                output_tokens.extend(sub_tokens)
-        return output_tokens
-
-
-def _is_whitespace(char):
-    """Checks whether `chars` is a whitespace character."""
-    # \t, \n, and \r are technically contorl characters but we treat them
-    # as whitespace since they are generally considered as such.
-    if char == " " or char == "\t" or char == "\n" or char == "\r":
-        return True
-    cat = unicodedata.category(char)
-    if cat == "Zs":
-        return True
-    return False
-
-
-def _is_control(char):
-    """Checks whether `chars` is a control character."""
-    # These are technically control characters but we count them as whitespace
-    # characters.
-    if char == "\t" or char == "\n" or char == "\r":
-        return False
-    cat = unicodedata.category(char)
-    if cat in ("Cc", "Cf"):
-        return True
-    return False
-
-
-def _is_punctuation(char):
-    """Checks whether `chars` is a punctuation character."""
-    cp = ord(char)
-    # We treat all non-letter/number ASCII as punctuation.
-    # Characters such as "^", "$", and "`" are not in the Unicode
-    # Punctuation class but we treat them as punctuation anyways, for
-    # consistency.
-    if (
-        cp == 91 or cp == 93 or cp == 43
-    ):  # [ and ] are not punctuation since they are used in [xx] and the +
-        return False
-
-    if (
-        (cp >= 33 and cp <= 47)
-        or (cp >= 58 and cp <= 64)
-        or (cp >= 91 and cp <= 96)
-        or (cp >= 123 and cp <= 126)
-    ):
-        return True
-    cat = unicodedata.category(char)
-    if cat.startswith("P"):
-        return True
-    return False
sinatools/arabert/araelectra/__init__.py
@@ -1 +0,0 @@
-# coding=utf-8
sinatools/arabert/araelectra/build_openwebtext_pretraining_dataset.py
@@ -1,103 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The Google Research Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Preprocessess the Open WebText corpus for ELECTRA pre-training."""
-
-import argparse
-import multiprocessing
-import os
-import random
-import tarfile
-import time
-import tensorflow as tf
-
-import build_pretraining_dataset
-from util import utils
-
-
-def write_examples(job_id, args):
-  """A single process creating and writing out pre-processed examples."""
-  job_tmp_dir = os.path.join(args.data_dir, "tmp", "job_" + str(job_id))
-  owt_dir = os.path.join(args.data_dir, "openwebtext")
-
-  def log(*args):
-    msg = " ".join(map(str, args))
-    print("Job {}:".format(job_id), msg)
-
-  log("Creating example writer")
-  example_writer = build_pretraining_dataset.ExampleWriter(
-      job_id=job_id,
-      vocab_file=os.path.join(args.data_dir, "vocab.txt"),
-      output_dir=os.path.join(args.data_dir, "pretrain_tfrecords"),
-      max_seq_length=args.max_seq_length,
-      num_jobs=args.num_processes,
-      blanks_separate_docs=False,
-      do_lower_case=args.do_lower_case
-  )
-  log("Writing tf examples")
-  fnames = sorted(tf.io.gfile.listdir(owt_dir))
-  fnames = [f for (i, f) in enumerate(fnames)
-            if i % args.num_processes == job_id]
-  random.shuffle(fnames)
-  start_time = time.time()
-  for file_no, fname in enumerate(fnames):
-    if file_no > 0 and file_no % 10 == 0:
-      elapsed = time.time() - start_time
-      log("processed {:}/{:} files ({:.1f}%), ELAPSED: {:}s, ETA: {:}s, "
-          "{:} examples written".format(
-              file_no, len(fnames), 100.0 * file_no / len(fnames), int(elapsed),
-              int((len(fnames) - file_no) / (file_no / elapsed)),
-              example_writer.n_written))
-    utils.rmkdir(job_tmp_dir)
-    with tarfile.open(os.path.join(owt_dir, fname)) as f:
-      f.extractall(job_tmp_dir)
-    extracted_files = tf.io.gfile.listdir(job_tmp_dir)
-    random.shuffle(extracted_files)
-    for txt_fname in extracted_files:
-      example_writer.write_examples(os.path.join(job_tmp_dir, txt_fname))
-  example_writer.finish()
-  log("Done!")
-
-
-def main():
-  parser = argparse.ArgumentParser(description=__doc__)
-  parser.add_argument("--data-dir", required=True,
-                      help="Location of data (vocab file, corpus, etc).")
-  parser.add_argument("--max-seq-length", default=128, type=int,
-                      help="Number of tokens per example.")
-  parser.add_argument("--num-processes", default=1, type=int,
-                      help="Parallelize across multiple processes.")
-  parser.add_argument("--do-lower-case", dest='do_lower_case',
-                      action='store_true', help="Lower case input text.")
-  parser.add_argument("--no-lower-case", dest='do_lower_case',
-                      action='store_false', help="Don't lower case input text.")
-  parser.set_defaults(do_lower_case=True)
-  args = parser.parse_args()
-
-  utils.rmkdir(os.path.join(args.data_dir, "pretrain_tfrecords"))
-  if args.num_processes == 1:
-    write_examples(0, args)
-  else:
-    jobs = []
-    for i in range(args.num_processes):
-      job = multiprocessing.Process(target=write_examples, args=(i, args))
-      jobs.append(job)
-      job.start()
-    for job in jobs:
-      job.join()
-
-
-if __name__ == "__main__":
-  main()