project-llm-trainer 0.9.3__py3-none-any.whl → 0.9.4__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Potentially problematic release.
This version of project-llm-trainer might be problematic.
- llm_trainer/tokenizer.py +13 -38
- llm_trainer/tools.py +1 -1
- {project_llm_trainer-0.9.3.dist-info → project_llm_trainer-0.9.4.dist-info}/METADATA +1 -1
- {project_llm_trainer-0.9.3.dist-info → project_llm_trainer-0.9.4.dist-info}/RECORD +13 -13
- {project_llm_trainer-0.9.3.data → project_llm_trainer-0.9.4.data}/scripts/calc_intermediate_size +0 -0
- {project_llm_trainer-0.9.3.data → project_llm_trainer-0.9.4.data}/scripts/ddp_train +0 -0
- {project_llm_trainer-0.9.3.data → project_llm_trainer-0.9.4.data}/scripts/ds_train +0 -0
- {project_llm_trainer-0.9.3.data → project_llm_trainer-0.9.4.data}/scripts/plot_loss +0 -0
- {project_llm_trainer-0.9.3.data → project_llm_trainer-0.9.4.data}/scripts/plot_lr +0 -0
- {project_llm_trainer-0.9.3.data → project_llm_trainer-0.9.4.data}/scripts/py_train +0 -0
- {project_llm_trainer-0.9.3.data → project_llm_trainer-0.9.4.data}/scripts/smart_train +0 -0
- {project_llm_trainer-0.9.3.dist-info → project_llm_trainer-0.9.4.dist-info}/WHEEL +0 -0
- {project_llm_trainer-0.9.3.dist-info → project_llm_trainer-0.9.4.dist-info}/top_level.txt +0 -0
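The headline change in 0.9.4, detailed in the llm_trainer/tokenizer.py and llm_trainer/tools.py diffs below, is that tokenizer selection by token_type is gone: Tokenizer() now always loads a Hugging Face AutoTokenizer from the directory named by the TOKEN_DIR environment variable. A minimal usage sketch, assuming TOKEN_DIR points at a directory containing a saved fast tokenizer; the path, sample text, and import path are illustrative, not part of the diff:

```python
import os

# Placeholder path: must contain a tokenizer saved in Hugging Face format.
os.environ['TOKEN_DIR'] = '/path/to/tokenizer_dir'

from llm_trainer.tokenizer import Tokenizer

tok = Tokenizer()        # 0.9.3 required a token_type argument; 0.9.4 takes none
print(tok.end, tok.pad)  # special-token ids resolved via convert_tokens_to_ids

# The wrapped Hugging Face tokenizer is exposed as tok.tokenizer, and the encode
# path in this release forwards return_attention_mask (default False) to it:
ids = tok.tokenizer('hello world', return_attention_mask=False)['input_ids']
```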
llm_trainer/tokenizer.py
CHANGED
@@ -1,22 +1,15 @@
 import os
 import warnings
 from typing import List, Dict, Union
-from transformers import
-from transformers import AddedToken
-from transformers import LlamaTokenizerFast
+from transformers import AutoTokenizer
 import torch
-
-TOKEN_TYPE_QWEN = 'qwen'
-TOKEN_TYPE_ZH_LLAMA = "zh_llama"
-
-AVAILABLE_TOKEN_TYPES = [TOKEN_TYPE_QWEN, TOKEN_TYPE_ZH_LLAMA]
+from .log import log


 class Tokenizer:
-    def __init__(self
-
-
-        self.token_type = token_type
+    def __init__(self):
+        self.tokenizer = AutoTokenizer.from_pretrained(os.environ['TOKEN_DIR'])
+        log(f'is fast tokenizer={self.tokenizer.is_fast}')

         self.text_end = '</s>'

@@ -36,31 +29,6 @@ class Tokenizer:

         self.text_image = '<image>'

-        if token_type == TOKEN_TYPE_QWEN:
-            self.tokenizer = Qwen2TokenizerFast(
-                vocab_file=f"{os.environ['TOKEN_DIR']}qwen_vocab.json",
-                merges_file=f"{os.environ['TOKEN_DIR']}qwen_merges.txt",
-                unk_token=self.text_unk,
-                eos_token=self.text_end,
-                pad_token=self.text_pad
-            )
-            additional_special_tokens = [
-                AddedToken(self.text_user, lstrip=False, rstrip=False),
-                AddedToken(self.text_assistant, lstrip=False, rstrip=False),
-                AddedToken(self.text_think_start, lstrip=False, rstrip=False),
-                AddedToken(self.text_think_end, lstrip=False, rstrip=False),
-                AddedToken(self.text_answer_start, lstrip=False, rstrip=False),
-                AddedToken(self.text_answer_end, lstrip=False, rstrip=False),
-                AddedToken(self.text_system, lstrip=False, rstrip=False),
-                AddedToken(self.text_image, lstrip=False, rstrip=False),
-            ]
-
-            self.tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
-        else:
-            self.tokenizer = LlamaTokenizerFast.from_pretrained(os.environ['TOKEN_DIR'])
-            # self.tokenizer = AutoTokenizer.from_pretrained(os.environ['TOKEN_DIR'])
-            # self.tokenizer = PreTrainedTokenizerFast.from_pretrained(os.environ['TOKEN_DIR'], trust_remote_code=True)
-
         self.end = self.tokenizer.convert_tokens_to_ids(self.text_end)

         self.pad = self.tokenizer.convert_tokens_to_ids(self.text_pad)
@@ -105,8 +73,15 @@
             padding = False,
             truncation = False,
             covert_tensor: bool = False,
+            return_attention_mask: bool = False
     ) -> Union[torch.Tensor, List[List[int]]]:
-        encoded = self.tokenizer(
+        encoded = self.tokenizer(
+            text,
+            padding=padding,
+            truncation=truncation,
+            return_attention_mask=return_attention_mask
+        )['input_ids']
+
         if covert_tensor:
             encoded = torch.tensor(encoded, dtype=torch.long)

llm_trainer/tools.py
CHANGED
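The hunk below is the only call-site change needed for the new constructor: TrainerTools simply stops passing an argument to Tokenizer. A small sketch of what that implies for callers; TrainerTools' own constructor arguments are not shown in this diff and are assumed to be none, and the path is a placeholder:

```python
import os

# TOKEN_DIR was already read in 0.9.3 and remains the only external configuration;
# what disappears in 0.9.4 is the token_type choice.
os.environ.setdefault('TOKEN_DIR', '/path/to/tokenizer_dir')

from llm_trainer.tools import TrainerTools

tools = TrainerTools()                    # builds Tokenizer() internally
print(f'pad id = {tools.tokenizer.pad}')  # the tokenizer is exposed on TrainerTools
```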
@@ -28,7 +28,7 @@ class TrainerTools:

         self.parallel = self._new_parallel()

-        self.tokenizer = Tokenizer(
+        self.tokenizer = Tokenizer()
         self.use_amp = 'cuda' in self.parallel.device and not isinstance(self.parallel, DsParallel)

         log(f'word_size={self.parallel.world_size}, use_amp={self.use_amp}')

{project_llm_trainer-0.9.3.dist-info → project_llm_trainer-0.9.4.dist-info}/RECORD
CHANGED

@@ -15,19 +15,19 @@ llm_trainer/parallel_none.py,sha256=TG6Pm829Dg-yQu-97O-EHV3FCARBlNcP47KkGFAs16E,
 llm_trainer/partition_utils.py,sha256=eEYNhfEIF4hGzZ3OLa6sEBIECz261drptEz_n7fZYtk,8396
 llm_trainer/scheduler.py,sha256=LAI_0VxClsIQkix0bRoduRD4vPfVuIZDhZgTAT_KK8k,4901
 llm_trainer/sft_trainer.py,sha256=rSOGZx53jMgOuJdztfxQASYJ62uD0dVaih4IAnSwGBc,1787
-llm_trainer/tokenizer.py,sha256=
-llm_trainer/tools.py,sha256=
+llm_trainer/tokenizer.py,sha256=_jk4zb9JSHjwiWlWAW40zUqf85AmRrwDfQ_L2jkhRoM,5955
+llm_trainer/tools.py,sha256=Yca2OkXqFto37Jw13Feu1xzAP0s1proUcVKtLvdGxrk,2592
 llm_trainer/train_configs.py,sha256=afXUZ7M_Uoj0B3c2Nwf5xE-Lv7QAZZHTdW8LBw-QeWE,7704
 llm_trainer/trainer.py,sha256=bVghqvQY4bvYAZFPgyh2ywX8WanqAC525Lkg8bNv4FQ,29721
 llm_trainer/utils.py,sha256=xC5plG-8-_Al5yIF5xIU5lroOcBBk98TEhtUJrazZPE,12305
-project_llm_trainer-0.9.
-project_llm_trainer-0.9.
-project_llm_trainer-0.9.
-project_llm_trainer-0.9.
-project_llm_trainer-0.9.
-project_llm_trainer-0.9.
-project_llm_trainer-0.9.
-project_llm_trainer-0.9.
-project_llm_trainer-0.9.
-project_llm_trainer-0.9.
-project_llm_trainer-0.9.
+project_llm_trainer-0.9.4.data/scripts/calc_intermediate_size,sha256=AggpgNHokJiJMbEtVdOnolqr_4bH3i1UYuZNEAzC2Gc,460
+project_llm_trainer-0.9.4.data/scripts/ddp_train,sha256=eZSud6KYQAoKLsYB5QB-FI2zq5AZm6Apq1azKdupV3o,477
+project_llm_trainer-0.9.4.data/scripts/ds_train,sha256=41q4rOxwbvZDUY0FDdAIpG13PEaUWBpthhvFvww8uOc,388
+project_llm_trainer-0.9.4.data/scripts/plot_loss,sha256=O9ooioAJ-79-X06LosgqF8XOqQe-beRxYm3LsLunmoU,908
+project_llm_trainer-0.9.4.data/scripts/plot_lr,sha256=w_7XR_x3KYYyboeOVAeu_I4fveLFI-C0wBmRrNlmWUI,894
+project_llm_trainer-0.9.4.data/scripts/py_train,sha256=tOp9TquORQeU8XN5H7OVIk5O0Ypwi34p_GENxTwgwdk,265
+project_llm_trainer-0.9.4.data/scripts/smart_train,sha256=N8dp2n7k6bghGczedBVwOdtf1O66oM_cNPh9QmZt0bM,914
+project_llm_trainer-0.9.4.dist-info/METADATA,sha256=OAh8orL_7cliVbV9KK_2YxDheiD0NfmdJh-YU9_G92U,195
+project_llm_trainer-0.9.4.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
+project_llm_trainer-0.9.4.dist-info/top_level.txt,sha256=LtRFg28i0QIG7iBCD2t095oSco99LCtkijibS9cMGik,12
+project_llm_trainer-0.9.4.dist-info/RECORD,,
{project_llm_trainer-0.9.3.data → project_llm_trainer-0.9.4.data}/scripts/calc_intermediate_size
RENAMED
File without changes

{project_llm_trainer-0.9.3.data → project_llm_trainer-0.9.4.data}/scripts/ddp_train
RENAMED
File without changes

{project_llm_trainer-0.9.3.data → project_llm_trainer-0.9.4.data}/scripts/ds_train
RENAMED
File without changes

{project_llm_trainer-0.9.3.data → project_llm_trainer-0.9.4.data}/scripts/plot_loss
RENAMED
File without changes

{project_llm_trainer-0.9.3.data → project_llm_trainer-0.9.4.data}/scripts/plot_lr
RENAMED
File without changes

{project_llm_trainer-0.9.3.data → project_llm_trainer-0.9.4.data}/scripts/py_train
RENAMED
File without changes

{project_llm_trainer-0.9.3.data → project_llm_trainer-0.9.4.data}/scripts/smart_train
RENAMED
File without changes

{project_llm_trainer-0.9.3.dist-info → project_llm_trainer-0.9.4.dist-info}/WHEEL
RENAMED
File without changes

{project_llm_trainer-0.9.3.dist-info → project_llm_trainer-0.9.4.dist-info}/top_level.txt
RENAMED
File without changes
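Downstream code that imported TOKEN_TYPE_QWEN, TOKEN_TYPE_ZH_LLAMA, or AVAILABLE_TOKEN_TYPES from llm_trainer.tokenizer, or that passed token_type to Tokenizer(), will break against 0.9.4, since those names and the parameter are removed in this release. A hedged sketch of a caller that tolerates both versions; the helper name and its default are hypothetical:

```python
import inspect

from llm_trainer.tokenizer import Tokenizer

def make_tokenizer(token_type: str = 'qwen') -> Tokenizer:
    """Build a Tokenizer against 0.9.3 (token_type parameter) or 0.9.4 (no arguments)."""
    params = inspect.signature(Tokenizer.__init__).parameters
    if 'token_type' in params:      # 0.9.3 and earlier
        return Tokenizer(token_type=token_type)
    return Tokenizer()              # 0.9.4: configuration comes from TOKEN_DIR
```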