project-llm-trainer 0.9.2-py3-none-any.whl → 0.9.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of project-llm-trainer has been flagged as potentially problematic by the registry.

llm_trainer/tokenizer.py CHANGED
@@ -1,22 +1,15 @@
  import os
  import warnings
  from typing import List, Dict, Union
- from transformers import Qwen2TokenizerFast
- from transformers import AddedToken
- from transformers import LlamaTokenizerFast
+ from transformers import AutoTokenizer
  import torch
-
- TOKEN_TYPE_QWEN = 'qwen'
- TOKEN_TYPE_ZH_LLAMA = "zh_llama"
-
- AVAILABLE_TOKEN_TYPES = [TOKEN_TYPE_QWEN, TOKEN_TYPE_ZH_LLAMA]
+ from .log import log


  class Tokenizer:
-     def __init__(self, token_type: str = TOKEN_TYPE_ZH_LLAMA):
-         super().__init__()
-         assert token_type in AVAILABLE_TOKEN_TYPES, 'token type is unavailable'
-         self.token_type = token_type
+     def __init__(self):
+         self.tokenizer = AutoTokenizer.from_pretrained(os.environ['TOKEN_DIR'])
+         log(f'is fast tokenizer={self.tokenizer.is_fast}')

          self.text_end = '</s>'

@@ -36,31 +29,6 @@ class Tokenizer:

          self.text_image = '<image>'

-         if token_type == TOKEN_TYPE_QWEN:
-             self.tokenizer = Qwen2TokenizerFast(
-                 vocab_file=f"{os.environ['TOKEN_DIR']}qwen_vocab.json",
-                 merges_file=f"{os.environ['TOKEN_DIR']}qwen_merges.txt",
-                 unk_token=self.text_unk,
-                 eos_token=self.text_end,
-                 pad_token=self.text_pad
-             )
-             additional_special_tokens = [
-                 AddedToken(self.text_user, lstrip=False, rstrip=False),
-                 AddedToken(self.text_assistant, lstrip=False, rstrip=False),
-                 AddedToken(self.text_think_start, lstrip=False, rstrip=False),
-                 AddedToken(self.text_think_end, lstrip=False, rstrip=False),
-                 AddedToken(self.text_answer_start, lstrip=False, rstrip=False),
-                 AddedToken(self.text_answer_end, lstrip=False, rstrip=False),
-                 AddedToken(self.text_system, lstrip=False, rstrip=False),
-                 AddedToken(self.text_image, lstrip=False, rstrip=False),
-             ]
-
-             self.tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
-         else:
-             self.tokenizer = LlamaTokenizerFast.from_pretrained(os.environ['TOKEN_DIR'])
-             # self.tokenizer = AutoTokenizer.from_pretrained(os.environ['TOKEN_DIR'])
-             # self.tokenizer = PreTrainedTokenizerFast.from_pretrained(os.environ['TOKEN_DIR'], trust_remote_code=True)
-
          self.end = self.tokenizer.convert_tokens_to_ids(self.text_end)

          self.pad = self.tokenizer.convert_tokens_to_ids(self.text_pad)
@@ -89,21 +57,36 @@ class Tokenizer:
          # [x,x,x]
          encoded = self.tokenizer.encode(text, add_special_tokens=False)

-         # if self.token_type == TOKEN_TYPE_MISTRAL:
-         #     # work around MISTRAL prepending a 29473 token to every sentence
-         #     if encoded[0] == 29473:
-         #         encoded = encoded[1:]
-
          if unsqueeze:
              # tensor: [[x,x,x]]
-             return torch.tensor(encoded).long().unsqueeze(0)
+             return torch.tensor(encoded, dtype=torch.long).unsqueeze(0)
          else:
              # tensor: # [x,x,x]
              if covert_tensor:
-                 return torch.tensor(encoded).long()
+                 return torch.tensor(encoded, dtype=torch.long)

          return encoded

+     def batch_encode(
+             self,
+             text: List[str],
+             padding=False,
+             truncation=False,
+             covert_tensor: bool = False,
+             return_attention_mask: bool = False
+     ) -> Union[torch.Tensor, List[List[int]]]:
+         encoded = self.tokenizer(
+             text,
+             padding=padding,
+             truncation=truncation,
+             return_attention_mask=return_attention_mask
+         )['input_ids']
+
+         if covert_tensor:
+             encoded = torch.tensor(encoded, dtype=torch.long)
+
+         return encoded
+
      def decode(
              self,
              token: Union[torch.Tensor, List[int]],
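The net effect of the tokenizer.py changes above: the class no longer selects between Qwen and zh_llama code paths, it simply restores whatever tokenizer was saved under TOKEN_DIR, and the special chat markers are expected to already be baked into that directory. A minimal sketch of preparing such a directory and driving the new API follows — the base checkpoint name, the save path, and the marker strings (other than '</s>' and '<image>', which appear in the diff) are assumptions, not values from the package:

    import os
    from transformers import AutoTokenizer

    os.environ['TOKEN_DIR'] = '/tmp/token_dir'  # placeholder path

    # One-time preparation (hypothetical): register the chat markers once,
    # then save, so Tokenizer.__init__ can restore everything in one call.
    base = AutoTokenizer.from_pretrained('Qwen/Qwen2-0.5B')  # hypothetical base
    base.add_special_tokens({'additional_special_tokens': [
        '<user>', '<assistant>', '<think>', '</think>',      # placeholder strings
        '<answer>', '</answer>', '<system>', '<image>',
    ]})
    base.save_pretrained(os.environ['TOKEN_DIR'])

    # Runtime use of the 0.9.4 class (keyword names taken from the diff):
    from llm_trainer.tokenizer import Tokenizer

    tok = Tokenizer()  # reads TOKEN_DIR, logs whether the tokenizer is fast
    single = tok.encode('hello', unsqueeze=True)            # tensor, shape [1, seq_len]
    batch = tok.batch_encode(['hello', 'world'],            # new in 0.9.4
                             padding=True, covert_tensor=True)

Note that batch_encode converts with a single torch.tensor(...) call, so covert_tensor=True (the parameter is spelled that way in the source) only works when padding produces rows of equal length.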
llm_trainer/tools.py CHANGED
@@ -28,7 +28,7 @@ class TrainerTools:

          self.parallel = self._new_parallel()

-         self.tokenizer = Tokenizer(os.environ.get('TOKENIZERS_TYPE', 'zh_llama'))
+         self.tokenizer = Tokenizer()
          self.use_amp = 'cuda' in self.parallel.device and not isinstance(self.parallel, DsParallel)

          log(f'word_size={self.parallel.world_size}, use_amp={self.use_amp}')
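The matching change in tools.py drops the last reader of the TOKENIZERS_TYPE environment variable, so as of 0.9.4 only TOKEN_DIR matters. A small sketch of the environment a training script now needs — the path is a placeholder, and constructing TrainerTools directly is an assumption based on the fragment above:

    import os

    os.environ['TOKEN_DIR'] = '/tmp/token_dir'   # required: saved tokenizer directory
    os.environ.pop('TOKENIZERS_TYPE', None)      # read in 0.9.2, ignored as of 0.9.4

    from llm_trainer.tools import TrainerTools
    tools = TrainerTools()   # now builds Tokenizer() with no arguments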
{project_llm_trainer-0.9.2.dist-info → project_llm_trainer-0.9.4.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: project_llm_trainer
- Version: 0.9.2
+ Version: 0.9.4
  Summary: LLM and VLM trainer
  Author: qibin
  Author-email: qibin0506@gmail.com
{project_llm_trainer-0.9.2.dist-info → project_llm_trainer-0.9.4.dist-info}/RECORD RENAMED
@@ -15,19 +15,19 @@ llm_trainer/parallel_none.py,sha256=TG6Pm829Dg-yQu-97O-EHV3FCARBlNcP47KkGFAs16E,
  llm_trainer/partition_utils.py,sha256=eEYNhfEIF4hGzZ3OLa6sEBIECz261drptEz_n7fZYtk,8396
  llm_trainer/scheduler.py,sha256=LAI_0VxClsIQkix0bRoduRD4vPfVuIZDhZgTAT_KK8k,4901
  llm_trainer/sft_trainer.py,sha256=rSOGZx53jMgOuJdztfxQASYJ62uD0dVaih4IAnSwGBc,1787
- llm_trainer/tokenizer.py,sha256=0-xQCMz1xiPTDAZiYsVsiECSoZ_1eIvW9XsZOoFfakQ,7250
- llm_trainer/tools.py,sha256=5op5qrjjkK-Lr9oes5VxIVnOVYOYGoAdlIJq9mPUf64,2637
+ llm_trainer/tokenizer.py,sha256=_jk4zb9JSHjwiWlWAW40zUqf85AmRrwDfQ_L2jkhRoM,5955
+ llm_trainer/tools.py,sha256=Yca2OkXqFto37Jw13Feu1xzAP0s1proUcVKtLvdGxrk,2592
  llm_trainer/train_configs.py,sha256=afXUZ7M_Uoj0B3c2Nwf5xE-Lv7QAZZHTdW8LBw-QeWE,7704
  llm_trainer/trainer.py,sha256=bVghqvQY4bvYAZFPgyh2ywX8WanqAC525Lkg8bNv4FQ,29721
  llm_trainer/utils.py,sha256=xC5plG-8-_Al5yIF5xIU5lroOcBBk98TEhtUJrazZPE,12305
- project_llm_trainer-0.9.2.data/scripts/calc_intermediate_size,sha256=AggpgNHokJiJMbEtVdOnolqr_4bH3i1UYuZNEAzC2Gc,460
- project_llm_trainer-0.9.2.data/scripts/ddp_train,sha256=eZSud6KYQAoKLsYB5QB-FI2zq5AZm6Apq1azKdupV3o,477
- project_llm_trainer-0.9.2.data/scripts/ds_train,sha256=41q4rOxwbvZDUY0FDdAIpG13PEaUWBpthhvFvww8uOc,388
- project_llm_trainer-0.9.2.data/scripts/plot_loss,sha256=O9ooioAJ-79-X06LosgqF8XOqQe-beRxYm3LsLunmoU,908
- project_llm_trainer-0.9.2.data/scripts/plot_lr,sha256=w_7XR_x3KYYyboeOVAeu_I4fveLFI-C0wBmRrNlmWUI,894
- project_llm_trainer-0.9.2.data/scripts/py_train,sha256=tOp9TquORQeU8XN5H7OVIk5O0Ypwi34p_GENxTwgwdk,265
- project_llm_trainer-0.9.2.data/scripts/smart_train,sha256=N8dp2n7k6bghGczedBVwOdtf1O66oM_cNPh9QmZt0bM,914
- project_llm_trainer-0.9.2.dist-info/METADATA,sha256=hoIO4KbvNU5xaZdzuNljSZcZSb_Iozl_Skp4miE3U6Y,195
- project_llm_trainer-0.9.2.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
- project_llm_trainer-0.9.2.dist-info/top_level.txt,sha256=LtRFg28i0QIG7iBCD2t095oSco99LCtkijibS9cMGik,12
- project_llm_trainer-0.9.2.dist-info/RECORD,,
+ project_llm_trainer-0.9.4.data/scripts/calc_intermediate_size,sha256=AggpgNHokJiJMbEtVdOnolqr_4bH3i1UYuZNEAzC2Gc,460
+ project_llm_trainer-0.9.4.data/scripts/ddp_train,sha256=eZSud6KYQAoKLsYB5QB-FI2zq5AZm6Apq1azKdupV3o,477
+ project_llm_trainer-0.9.4.data/scripts/ds_train,sha256=41q4rOxwbvZDUY0FDdAIpG13PEaUWBpthhvFvww8uOc,388
+ project_llm_trainer-0.9.4.data/scripts/plot_loss,sha256=O9ooioAJ-79-X06LosgqF8XOqQe-beRxYm3LsLunmoU,908
+ project_llm_trainer-0.9.4.data/scripts/plot_lr,sha256=w_7XR_x3KYYyboeOVAeu_I4fveLFI-C0wBmRrNlmWUI,894
+ project_llm_trainer-0.9.4.data/scripts/py_train,sha256=tOp9TquORQeU8XN5H7OVIk5O0Ypwi34p_GENxTwgwdk,265
+ project_llm_trainer-0.9.4.data/scripts/smart_train,sha256=N8dp2n7k6bghGczedBVwOdtf1O66oM_cNPh9QmZt0bM,914
+ project_llm_trainer-0.9.4.dist-info/METADATA,sha256=OAh8orL_7cliVbV9KK_2YxDheiD0NfmdJh-YU9_G92U,195
+ project_llm_trainer-0.9.4.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
+ project_llm_trainer-0.9.4.dist-info/top_level.txt,sha256=LtRFg28i0QIG7iBCD2t095oSco99LCtkijibS9cMGik,12
+ project_llm_trainer-0.9.4.dist-info/RECORD,,
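For readers spot-checking the RECORD entries above: per PEP 376 and the wheel spec, each line stores an urlsafe-base64 SHA-256 digest with the trailing '=' padding stripped, followed by the file size in bytes. A small sketch for recomputing a line locally from an unpacked wheel:

    import base64
    import hashlib

    def record_line(path: str) -> str:
        # Rebuild the "path,sha256=...,size" entry a wheel RECORD uses.
        with open(path, 'rb') as f:
            data = f.read()
        digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest())
        return f"{path},sha256={digest.rstrip(b'=').decode('ascii')},{len(data)}"

    # e.g. the 0.9.4 entry for tokenizer.py should reproduce
    # sha256=_jk4zb9JSHjwiWlWAW40zUqf85AmRrwDfQ_L2jkhRoM with size 5955
    print(record_line('llm_trainer/tokenizer.py'))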