project-llm-trainer 0.9.2__py3-none-any.whl → 0.9.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of project-llm-trainer has been flagged as potentially problematic.
- llm_trainer/tokenizer.py +15 -7
- {project_llm_trainer-0.9.2.dist-info → project_llm_trainer-0.9.3.dist-info}/METADATA +1 -1
- {project_llm_trainer-0.9.2.dist-info → project_llm_trainer-0.9.3.dist-info}/RECORD +12 -12
- {project_llm_trainer-0.9.2.data → project_llm_trainer-0.9.3.data}/scripts/calc_intermediate_size +0 -0
- {project_llm_trainer-0.9.2.data → project_llm_trainer-0.9.3.data}/scripts/ddp_train +0 -0
- {project_llm_trainer-0.9.2.data → project_llm_trainer-0.9.3.data}/scripts/ds_train +0 -0
- {project_llm_trainer-0.9.2.data → project_llm_trainer-0.9.3.data}/scripts/plot_loss +0 -0
- {project_llm_trainer-0.9.2.data → project_llm_trainer-0.9.3.data}/scripts/plot_lr +0 -0
- {project_llm_trainer-0.9.2.data → project_llm_trainer-0.9.3.data}/scripts/py_train +0 -0
- {project_llm_trainer-0.9.2.data → project_llm_trainer-0.9.3.data}/scripts/smart_train +0 -0
- {project_llm_trainer-0.9.2.dist-info → project_llm_trainer-0.9.3.dist-info}/WHEEL +0 -0
- {project_llm_trainer-0.9.2.dist-info → project_llm_trainer-0.9.3.dist-info}/top_level.txt +0 -0
llm_trainer/tokenizer.py CHANGED
@@ -89,21 +89,29 @@ class Tokenizer:
         # [x,x,x]
         encoded = self.tokenizer.encode(text, add_special_tokens=False)

-        # if self.token_type == TOKEN_TYPE_MISTRAL:
-        #     # Handle the issue where MISTRAL adds a 29473 token in front of every sentence
-        #     if encoded[0] == 29473:
-        #         encoded = encoded[1:]
-
         if unsqueeze:
             # tensor: [[x,x,x]]
-            return torch.tensor(encoded
+            return torch.tensor(encoded, dtype=torch.long).unsqueeze(0)
         else:
             # tensor: # [x,x,x]
             if covert_tensor:
-                return torch.tensor(encoded
+                return torch.tensor(encoded, dtype=torch.long)

             return encoded

+    def batch_encode(
+        self,
+        text: List[str],
+        padding = False,
+        truncation = False,
+        covert_tensor: bool = False,
+    ) -> Union[torch.Tensor, List[List[int]]]:
+        encoded = self.tokenizer(text, padding=padding, truncation=truncation)['input_ids']
+        if covert_tensor:
+            encoded = torch.tensor(encoded, dtype=torch.long)
+
+        return encoded
+
     def decode(
         self,
         token: Union[torch.Tensor, List[int]],
{project_llm_trainer-0.9.2.dist-info → project_llm_trainer-0.9.3.dist-info}/RECORD CHANGED
@@ -15,19 +15,19 @@ llm_trainer/parallel_none.py,sha256=TG6Pm829Dg-yQu-97O-EHV3FCARBlNcP47KkGFAs16E,
 llm_trainer/partition_utils.py,sha256=eEYNhfEIF4hGzZ3OLa6sEBIECz261drptEz_n7fZYtk,8396
 llm_trainer/scheduler.py,sha256=LAI_0VxClsIQkix0bRoduRD4vPfVuIZDhZgTAT_KK8k,4901
 llm_trainer/sft_trainer.py,sha256=rSOGZx53jMgOuJdztfxQASYJ62uD0dVaih4IAnSwGBc,1787
-llm_trainer/tokenizer.py,sha256=
+llm_trainer/tokenizer.py,sha256=23MgueOq2FtIXR8KUDgmzK9jfSUXzx0v1VmolbvR-7U,7487
 llm_trainer/tools.py,sha256=5op5qrjjkK-Lr9oes5VxIVnOVYOYGoAdlIJq9mPUf64,2637
 llm_trainer/train_configs.py,sha256=afXUZ7M_Uoj0B3c2Nwf5xE-Lv7QAZZHTdW8LBw-QeWE,7704
 llm_trainer/trainer.py,sha256=bVghqvQY4bvYAZFPgyh2ywX8WanqAC525Lkg8bNv4FQ,29721
 llm_trainer/utils.py,sha256=xC5plG-8-_Al5yIF5xIU5lroOcBBk98TEhtUJrazZPE,12305
-project_llm_trainer-0.9.
-project_llm_trainer-0.9.
-project_llm_trainer-0.9.
-project_llm_trainer-0.9.
-project_llm_trainer-0.9.
-project_llm_trainer-0.9.
-project_llm_trainer-0.9.
-project_llm_trainer-0.9.
-project_llm_trainer-0.9.
-project_llm_trainer-0.9.
-project_llm_trainer-0.9.
+project_llm_trainer-0.9.3.data/scripts/calc_intermediate_size,sha256=AggpgNHokJiJMbEtVdOnolqr_4bH3i1UYuZNEAzC2Gc,460
+project_llm_trainer-0.9.3.data/scripts/ddp_train,sha256=eZSud6KYQAoKLsYB5QB-FI2zq5AZm6Apq1azKdupV3o,477
+project_llm_trainer-0.9.3.data/scripts/ds_train,sha256=41q4rOxwbvZDUY0FDdAIpG13PEaUWBpthhvFvww8uOc,388
+project_llm_trainer-0.9.3.data/scripts/plot_loss,sha256=O9ooioAJ-79-X06LosgqF8XOqQe-beRxYm3LsLunmoU,908
+project_llm_trainer-0.9.3.data/scripts/plot_lr,sha256=w_7XR_x3KYYyboeOVAeu_I4fveLFI-C0wBmRrNlmWUI,894
+project_llm_trainer-0.9.3.data/scripts/py_train,sha256=tOp9TquORQeU8XN5H7OVIk5O0Ypwi34p_GENxTwgwdk,265
+project_llm_trainer-0.9.3.data/scripts/smart_train,sha256=N8dp2n7k6bghGczedBVwOdtf1O66oM_cNPh9QmZt0bM,914
+project_llm_trainer-0.9.3.dist-info/METADATA,sha256=MXUhvp1gftCShHCKRPRxBgadUWmKmnC6trtU0Qf_Y1k,195
+project_llm_trainer-0.9.3.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
+project_llm_trainer-0.9.3.dist-info/top_level.txt,sha256=LtRFg28i0QIG7iBCD2t095oSco99LCtkijibS9cMGik,12
+project_llm_trainer-0.9.3.dist-info/RECORD,,
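
Each RECORD row above has the form path,sha256=<digest>,size, where the digest is the urlsafe-base64-encoded SHA-256 of the file with the trailing "=" padding stripped, and size is the byte count (per the wheel spec). A small sketch to recompute an entry and check it against the hashes above, assuming the 0.9.3 wheel has been unpacked locally; the path is illustrative:

# Recompute a wheel-RECORD-style entry for a file.
import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode('ascii')},{len(data)}"

print(record_entry("llm_trainer/tokenizer.py"))
# Expected per the diff above:
# llm_trainer/tokenizer.py,sha256=23MgueOq2FtIXR8KUDgmzK9jfSUXzx0v1VmolbvR-7U,7487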
{project_llm_trainer-0.9.2.data → project_llm_trainer-0.9.3.data}/scripts/calc_intermediate_size RENAMED
File without changes
{project_llm_trainer-0.9.2.data → project_llm_trainer-0.9.3.data}/scripts/ddp_train RENAMED
File without changes
{project_llm_trainer-0.9.2.data → project_llm_trainer-0.9.3.data}/scripts/ds_train RENAMED
File without changes
{project_llm_trainer-0.9.2.data → project_llm_trainer-0.9.3.data}/scripts/plot_loss RENAMED
File without changes
{project_llm_trainer-0.9.2.data → project_llm_trainer-0.9.3.data}/scripts/plot_lr RENAMED
File without changes
{project_llm_trainer-0.9.2.data → project_llm_trainer-0.9.3.data}/scripts/py_train RENAMED
File without changes
{project_llm_trainer-0.9.2.data → project_llm_trainer-0.9.3.data}/scripts/smart_train RENAMED
File without changes
{project_llm_trainer-0.9.2.dist-info → project_llm_trainer-0.9.3.dist-info}/WHEEL RENAMED
File without changes
{project_llm_trainer-0.9.2.dist-info → project_llm_trainer-0.9.3.dist-info}/top_level.txt RENAMED
File without changes