xinference 0.13.2__py3-none-any.whl → 0.13.4__py3-none-any.whl

This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.

Potentially problematic release.


This version of xinference might be problematic.

Files changed (103)
  1. xinference/__init__.py +0 -1
  2. xinference/_version.py +3 -3
  3. xinference/api/restful_api.py +30 -5
  4. xinference/client/restful/restful_client.py +18 -3
  5. xinference/constants.py +0 -4
  6. xinference/core/chat_interface.py +2 -2
  7. xinference/core/image_interface.py +6 -3
  8. xinference/core/model.py +9 -4
  9. xinference/core/scheduler.py +4 -4
  10. xinference/core/supervisor.py +2 -0
  11. xinference/core/worker.py +7 -0
  12. xinference/deploy/utils.py +6 -0
  13. xinference/model/audio/core.py +9 -4
  14. xinference/model/audio/cosyvoice.py +136 -0
  15. xinference/model/audio/model_spec.json +24 -0
  16. xinference/model/audio/model_spec_modelscope.json +27 -0
  17. xinference/model/core.py +25 -4
  18. xinference/model/embedding/core.py +88 -13
  19. xinference/model/embedding/model_spec.json +8 -0
  20. xinference/model/embedding/model_spec_modelscope.json +8 -0
  21. xinference/model/flexible/core.py +8 -2
  22. xinference/model/flexible/launchers/__init__.py +1 -0
  23. xinference/model/flexible/launchers/image_process_launcher.py +70 -0
  24. xinference/model/image/core.py +8 -5
  25. xinference/model/image/model_spec.json +36 -5
  26. xinference/model/image/model_spec_modelscope.json +21 -3
  27. xinference/model/image/stable_diffusion/core.py +36 -28
  28. xinference/model/llm/core.py +6 -4
  29. xinference/model/llm/ggml/llamacpp.py +7 -5
  30. xinference/model/llm/llm_family.json +802 -82
  31. xinference/model/llm/llm_family.py +6 -6
  32. xinference/model/llm/llm_family_csghub.json +39 -0
  33. xinference/model/llm/llm_family_modelscope.json +295 -47
  34. xinference/model/llm/mlx/core.py +7 -0
  35. xinference/model/llm/pytorch/chatglm.py +246 -5
  36. xinference/model/llm/pytorch/cogvlm2.py +1 -1
  37. xinference/model/llm/pytorch/deepseek_vl.py +2 -1
  38. xinference/model/llm/pytorch/falcon.py +2 -1
  39. xinference/model/llm/pytorch/llama_2.py +4 -2
  40. xinference/model/llm/pytorch/omnilmm.py +2 -1
  41. xinference/model/llm/pytorch/qwen_vl.py +2 -1
  42. xinference/model/llm/pytorch/vicuna.py +2 -1
  43. xinference/model/llm/pytorch/yi_vl.py +2 -1
  44. xinference/model/llm/sglang/core.py +12 -6
  45. xinference/model/llm/utils.py +78 -1
  46. xinference/model/llm/vllm/core.py +9 -5
  47. xinference/model/rerank/core.py +4 -3
  48. xinference/thirdparty/cosyvoice/__init__.py +0 -0
  49. xinference/thirdparty/cosyvoice/bin/__init__.py +0 -0
  50. xinference/thirdparty/cosyvoice/bin/inference.py +114 -0
  51. xinference/thirdparty/cosyvoice/bin/train.py +136 -0
  52. xinference/thirdparty/cosyvoice/cli/__init__.py +0 -0
  53. xinference/thirdparty/cosyvoice/cli/cosyvoice.py +83 -0
  54. xinference/thirdparty/cosyvoice/cli/frontend.py +168 -0
  55. xinference/thirdparty/cosyvoice/cli/model.py +60 -0
  56. xinference/thirdparty/cosyvoice/dataset/__init__.py +0 -0
  57. xinference/thirdparty/cosyvoice/dataset/dataset.py +160 -0
  58. xinference/thirdparty/cosyvoice/dataset/processor.py +369 -0
  59. xinference/thirdparty/cosyvoice/flow/__init__.py +0 -0
  60. xinference/thirdparty/cosyvoice/flow/decoder.py +222 -0
  61. xinference/thirdparty/cosyvoice/flow/flow.py +135 -0
  62. xinference/thirdparty/cosyvoice/flow/flow_matching.py +138 -0
  63. xinference/thirdparty/cosyvoice/flow/length_regulator.py +49 -0
  64. xinference/thirdparty/cosyvoice/hifigan/__init__.py +0 -0
  65. xinference/thirdparty/cosyvoice/hifigan/f0_predictor.py +55 -0
  66. xinference/thirdparty/cosyvoice/hifigan/generator.py +391 -0
  67. xinference/thirdparty/cosyvoice/llm/__init__.py +0 -0
  68. xinference/thirdparty/cosyvoice/llm/llm.py +206 -0
  69. xinference/thirdparty/cosyvoice/transformer/__init__.py +0 -0
  70. xinference/thirdparty/cosyvoice/transformer/activation.py +84 -0
  71. xinference/thirdparty/cosyvoice/transformer/attention.py +326 -0
  72. xinference/thirdparty/cosyvoice/transformer/convolution.py +145 -0
  73. xinference/thirdparty/cosyvoice/transformer/decoder.py +396 -0
  74. xinference/thirdparty/cosyvoice/transformer/decoder_layer.py +132 -0
  75. xinference/thirdparty/cosyvoice/transformer/embedding.py +293 -0
  76. xinference/thirdparty/cosyvoice/transformer/encoder.py +472 -0
  77. xinference/thirdparty/cosyvoice/transformer/encoder_layer.py +236 -0
  78. xinference/thirdparty/cosyvoice/transformer/label_smoothing_loss.py +96 -0
  79. xinference/thirdparty/cosyvoice/transformer/positionwise_feed_forward.py +115 -0
  80. xinference/thirdparty/cosyvoice/transformer/subsampling.py +383 -0
  81. xinference/thirdparty/cosyvoice/utils/__init__.py +0 -0
  82. xinference/thirdparty/cosyvoice/utils/class_utils.py +70 -0
  83. xinference/thirdparty/cosyvoice/utils/common.py +103 -0
  84. xinference/thirdparty/cosyvoice/utils/executor.py +110 -0
  85. xinference/thirdparty/cosyvoice/utils/file_utils.py +41 -0
  86. xinference/thirdparty/cosyvoice/utils/frontend_utils.py +125 -0
  87. xinference/thirdparty/cosyvoice/utils/mask.py +227 -0
  88. xinference/thirdparty/cosyvoice/utils/scheduler.py +739 -0
  89. xinference/thirdparty/cosyvoice/utils/train_utils.py +289 -0
  90. xinference/web/ui/build/asset-manifest.json +3 -3
  91. xinference/web/ui/build/index.html +1 -1
  92. xinference/web/ui/build/static/js/{main.95c1d652.js → main.af906659.js} +3 -3
  93. xinference/web/ui/build/static/js/main.af906659.js.map +1 -0
  94. xinference/web/ui/node_modules/.cache/babel-loader/2cd5e4279ad7e13a1f41d486e9fca7756295bfad5bd77d90992f4ac3e10b496d.json +1 -0
  95. {xinference-0.13.2.dist-info → xinference-0.13.4.dist-info}/METADATA +39 -11
  96. {xinference-0.13.2.dist-info → xinference-0.13.4.dist-info}/RECORD +101 -57
  97. xinference/web/ui/build/static/js/main.95c1d652.js.map +0 -1
  98. xinference/web/ui/node_modules/.cache/babel-loader/709711edada3f1596b309d571285fd31f1c364d66f4425bc28723d0088cc351a.json +0 -1
  99. /xinference/web/ui/build/static/js/{main.95c1d652.js.LICENSE.txt → main.af906659.js.LICENSE.txt} +0 -0
  100. {xinference-0.13.2.dist-info → xinference-0.13.4.dist-info}/LICENSE +0 -0
  101. {xinference-0.13.2.dist-info → xinference-0.13.4.dist-info}/WHEEL +0 -0
  102. {xinference-0.13.2.dist-info → xinference-0.13.4.dist-info}/entry_points.txt +0 -0
  103. {xinference-0.13.2.dist-info → xinference-0.13.4.dist-info}/top_level.txt +0 -0

xinference/model/llm/llm_family.py
@@ -699,12 +699,12 @@ def _generate_model_file_names(
 def _merge_cached_files(
     cache_dir: str, input_file_names: List[str], output_file_name: str
 ):
-    with open(os.path.join(cache_dir, output_file_name), "wb") as output_file:
-        for file_name in input_file_names:
-            logger.info(f"Merging file {file_name} into {output_file_name} ...")
-
-            with open(os.path.join(cache_dir, file_name), "rb") as input_file:
-                shutil.copyfileobj(input_file, output_file)
+    # now llama.cpp can find the gguf parts automatically
+    # we only need to provide the first part
+    # thus we create the symlink to the first part
+    symlink_local_file(
+        os.path.join(cache_dir, input_file_names[0]), cache_dir, output_file_name
+    )

     logger.info(f"Merge complete.")

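In other words, the GGUF split parts are no longer concatenated into a single file: the helper now just links the expected output name to the first part, and llama.cpp discovers the remaining parts on its own. A minimal sketch of what a symlink helper of this shape could look like; the real symlink_local_file lives elsewhere in xinference, so the signature and body below are assumptions for illustration only.

import os


def symlink_local_file(path: str, local_dir: str, relpath: str) -> str:
    # Place a link named `relpath` inside `local_dir`, pointing at `path`.
    local_path = os.path.abspath(os.path.join(local_dir, relpath))
    if os.path.lexists(local_path):
        # Remove a stale link (or leftover merged file) before re-creating it.
        os.remove(local_path)
    os.symlink(os.path.realpath(path), local_path)
    return local_path

Either way, downstream code keeps receiving a single path under the old merged-file name, so callers of _merge_cached_files do not have to change.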

xinference/model/llm/llm_family_csghub.json
@@ -62,5 +62,44 @@
         "<|im_end|>"
       ]
     }
+  },
+  {
+    "version": 1,
+    "context_length": 32768,
+    "model_name": "csg-wukong-chat-v0.1",
+    "model_lang": [
+      "en"
+    ],
+    "model_ability": [
+      "chat"
+    ],
+    "model_description": "csg-wukong-1B is a 1 billion-parameter small language model(SLM) pretrained on 1T tokens.",
+    "model_specs": [
+      {
+        "model_format": "pytorch",
+        "model_size_in_billions": 1,
+        "quantizations": [
+          "none"
+        ],
+        "model_id": "OpenCSG/csg-wukong-1B-chat-v0.1",
+        "model_hub": "csghub"
+      }
+    ],
+    "prompt_style": {
+      "style_name": "NO_COLON_TWO",
+      "system_prompt": "<|system|>\nYou are a creative super artificial intelligence assistant, possessing all the knowledge of humankind. Your name is csg-wukong, developed by OpenCSG. You need to understand and infer the true intentions of users based on the topics discussed in the chat history, and respond to user questions correctly as required. You enjoy responding to users with accurate and insightful answers. Please pay attention to the appropriate style and format when replying, try to avoid repetitive words and sentences, and keep your responses as concise and profound as possible. You carefully consider the context of the discussion when replying to users. When the user says \"continue,\" please proceed with the continuation of the previous assistant's response.</s>\n",
+      "roles": [
+        "<|user|>\n",
+        "<|assistant|>\n"
+      ],
+      "intra_message_sep": "</s>\n",
+      "inter_message_sep": "</s>\n",
+      "stop_token_ids": [
+        2
+      ],
+      "stop": [
+        "</s>"
+      ]
+    }
   }
 ]
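The NO_COLON_TWO prompt style used above builds the prompt by appending each role tag without a colon and alternating between the two configured separators (both are "</s>\n" for this model, so the alternation is invisible here). A simplified rendering sketch, written as an assumption rather than the exact xinference implementation:

def render_no_colon_two(system_prompt, roles, seps, messages):
    # messages: list of (role_index, content); content None means
    # "leave the role tag open so the model continues from there".
    prompt = system_prompt
    for i, (role_idx, content) in enumerate(messages):
        if content is None:
            prompt += roles[role_idx]
        else:
            prompt += roles[role_idx] + content + seps[i % 2]
    return prompt


print(
    render_no_colon_two(
        system_prompt="<|system|>\nYou are csg-wukong ...</s>\n",
        roles=["<|user|>\n", "<|assistant|>\n"],
        seps=["</s>\n", "</s>\n"],
        messages=[(0, "Hello!"), (1, None)],
    )
)
# Produces roughly:
# <|system|>
# You are csg-wukong ...</s>
# <|user|>
# Hello!</s>
# <|assistant|>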

xinference/model/llm/llm_family_modelscope.json
@@ -153,6 +153,178 @@
         ],
         "model_id": "LLM-Research/Meta-Llama-3-70B-Instruct",
         "model_hub": "modelscope"
+      },
+      {
+        "model_format": "gptq",
+        "model_size_in_billions": 8,
+        "quantizations": [
+          "Int4",
+          "Int8"
+        ],
+        "model_id": "swift/Meta-Llama-3-8B-Instruct-GPTQ-{quantization}",
+        "model_hub": "modelscope"
+      },
+      {
+        "model_format": "gptq",
+        "model_size_in_billions": 70,
+        "quantizations": [
+          "Int4",
+          "Int8"
+        ],
+        "model_id": "swift/Meta-Llama-3-70B-Instruct-GPTQ-{quantization}",
+        "model_hub": "modelscope"
+      }
+    ],
+    "prompt_style": {
+      "style_name": "LLAMA3",
+      "system_prompt": "You are a helpful assistant.",
+      "roles": [
+        "user",
+        "assistant"
+      ],
+      "intra_message_sep": "\n\n",
+      "inter_message_sep": "<|eot_id|>",
+      "stop_token_ids": [
+        128001,
+        128009
+      ],
+      "stop": [
+        "<|end_of_text|>",
+        "<|eot_id|>"
+      ]
+    }
+  },
+  {
+    "version": 1,
+    "context_length": 131072,
+    "model_name": "llama-3.1",
+    "model_lang": [
+      "en",
+      "de",
+      "fr",
+      "it",
+      "pt",
+      "hi",
+      "es",
+      "th"
+    ],
+    "model_ability": [
+      "generate"
+    ],
+    "model_description": "Llama 3.1 is an auto-regressive language model that uses an optimized transformer architecture",
+    "model_specs": [
+      {
+        "model_format": "pytorch",
+        "model_size_in_billions": 8,
+        "quantizations": [
+          "4-bit",
+          "8-bit",
+          "none"
+        ],
+        "model_id": "LLM-Research/Meta-Llama-3.1-8B",
+        "model_hub": "modelscope"
+      },
+      {
+        "model_format": "pytorch",
+        "model_size_in_billions": 70,
+        "quantizations": [
+          "4-bit",
+          "8-bit",
+          "none"
+        ],
+        "model_id": "LLM-Research/Meta-Llama-3.1-70B",
+        "model_hub": "modelscope"
+      }
+    ]
+  },
+  {
+    "version": 1,
+    "context_length": 131072,
+    "model_name": "llama-3.1-instruct",
+    "model_lang": [
+      "en",
+      "de",
+      "fr",
+      "it",
+      "pt",
+      "hi",
+      "es",
+      "th"
+    ],
+    "model_ability": [
+      "chat"
+    ],
+    "model_description": "The Llama 3.1 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks..",
+    "model_specs": [
+      {
+        "model_format": "pytorch",
+        "model_size_in_billions": 8,
+        "quantizations": [
+          "none"
+        ],
+        "model_id": "LLM-Research/Meta-Llama-3.1-8B-Instruct",
+        "model_hub": "modelscope"
+      },
+      {
+        "model_format": "gptq",
+        "model_size_in_billions": 8,
+        "quantizations": [
+          "Int4"
+        ],
+        "model_id": "LLM-Research/Meta-Llama-3.1-8B-Instruct-GPTQ-INT4",
+        "model_hub": "modelscope"
+      },
+      {
+        "model_format": "awq",
+        "model_size_in_billions": 8,
+        "quantizations": [
+          "Int4"
+        ],
+        "model_id": "LLM-Research/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
+        "model_hub": "modelscope"
+      },
+      {
+        "model_format": "ggufv2",
+        "model_size_in_billions": 8,
+        "quantizations": [
+          "Q3_K_L",
+          "Q4_K_M",
+          "Q5_K_M",
+          "Q6_K",
+          "Q8_0"
+        ],
+        "model_id": "LLM-Research/Meta-Llama-3.1-8B-Instruct-GGUF",
+        "model_file_name_template": "Meta-Llama-3.1-8B-Instruct-{quantization}.gguf",
+        "model_hub": "modelscope"
+      },
+      {
+        "model_format": "pytorch",
+        "model_size_in_billions": 70,
+        "quantizations": [
+          "4-bit",
+          "8-bit",
+          "none"
+        ],
+        "model_id": "LLM-Research/Meta-Llama-3.1-70B-Instruct",
+        "model_hub": "modelscope"
+      },
+      {
+        "model_format": "gptq",
+        "model_size_in_billions": 70,
+        "quantizations": [
+          "Int4"
+        ],
+        "model_id": "LLM-Research/Meta-Llama-3.1-70B-Instruct-GPTQ-INT4",
+        "model_hub": "modelscope"
+      },
+      {
+        "model_format": "awq",
+        "model_size_in_billions": 70,
+        "quantizations": [
+          "Int4"
+        ],
+        "model_id": "LLM-Research/Meta-Llama-3.1-70B-Instruct-AWQ-INT4",
+        "model_hub": "modelscope"
       }
     ],
     "prompt_style": {
@@ -2078,6 +2250,119 @@
       ]
     }
   },
+  {
+    "version": 1,
+    "context_length": 1024000,
+    "model_name": "mistral-nemo-instruct",
+    "model_lang": [
+      "en",
+      "fr",
+      "de",
+      "es",
+      "it",
+      "pt",
+      "zh",
+      "ru",
+      "ja"
+    ],
+    "model_ability": [
+      "chat"
+    ],
+    "model_description": "The Mistral-Nemo-Instruct-2407 Large Language Model (LLM) is an instruct fine-tuned version of the Mistral-Nemo-Base-2407",
+    "model_specs": [
+      {
+        "model_format": "pytorch",
+        "model_size_in_billions": 12,
+        "quantizations": [
+          "none"
+        ],
+        "model_id": "AI-ModelScope/Mistral-Nemo-Instruct-2407",
+        "model_hub": "modelscope"
+      },
+      {
+        "model_format": "gptq",
+        "model_size_in_billions": 12,
+        "quantizations": [
+          "Int4"
+        ],
+        "model_id": "LLM-Research/Mistral-Nemo-Instruct-2407-gptq-4bit",
+        "model_hub": "modelscope"
+      }
+    ],
+    "prompt_style": {
+      "style_name": "mistral-nemo",
+      "system_prompt": "",
+      "roles": [
+        "[INST]",
+        "[/INST]"
+      ],
+      "intra_message_sep": "",
+      "inter_message_sep": "</s>",
+      "stop_token_ids": [
+        2
+      ],
+      "stop": [
+        "</s>"
+      ]
+    }
+  },
+  {
+    "version": 1,
+    "context_length": 131072,
+    "model_name": "mistral-large-instruct",
+    "model_lang": [
+      "en",
+      "fr",
+      "de",
+      "es",
+      "it",
+      "pt",
+      "zh",
+      "ru",
+      "ja",
+      "ko"
+    ],
+    "model_ability": [
+      "chat"
+    ],
+    "model_description": "Mistral-Large-Instruct-2407 is an advanced dense Large Language Model (LLM) of 123B parameters with state-of-the-art reasoning, knowledge and coding capabilities.",
+    "model_specs": [
+      {
+        "model_format": "pytorch",
+        "model_size_in_billions": 123,
+        "quantizations": [
+          "none"
+        ],
+        "model_id": "LLM-Research/Mistral-Large-Instruct-2407",
+        "model_hub": "modelscope"
+      },
+      {
+        "model_format": "pytorch",
+        "model_size_in_billions": 123,
+        "quantizations": [
+          "4-bit"
+        ],
+        "model_id": "LLM-Research/Mistral-Large-Instruct-2407-bnb-4bit",
+        "model_hub": "modelscope"
+      }
+    ],
+    "prompt_style": {
+      "style_name": "mistral-nemo",
+      "system_prompt": "",
+      "roles": [
+        "[INST]",
+        "[/INST]"
+      ],
+      "intra_message_sep": "",
+      "inter_message_sep": "</s>",
+      "stop_token_ids": [
+        2
+      ],
+      "stop": [
+        "</s>"
+      ]
+    }
+  },
   {
     "version": 1,
     "context_length": 2048,
@@ -4433,6 +4718,16 @@
         "model_id": "AI-ModelScope/c4ai-command-r-v01",
         "model_revision": "master"
       },
+      {
+        "model_format": "pytorch",
+        "model_size_in_billions": 35,
+        "quantizations": [
+          "4-bit"
+        ],
+        "model_hub": "modelscope",
+        "model_id": "mirror013/c4ai-command-r-v01-4bit",
+        "model_revision": "master"
+      },
       {
         "model_format": "ggufv2",
         "model_size_in_billions": 35,
@@ -4473,53 +4768,6 @@
       ]
     }
   },
-  {
-    "version": 1,
-    "context_length": 131072,
-    "model_name": "c4ai-command-r-v01-4bit",
-    "model_lang": [
-      "en",
-      "fr",
-      "de",
-      "es",
-      "it",
-      "pt",
-      "ja",
-      "ko",
-      "zh",
-      "ar"
-    ],
-    "model_ability": [
-      "chat"
-    ],
-    "model_description": "This model is 4bit quantized version of C4AI Command-R using bitsandbytes.",
-    "model_specs": [
-      {
-        "model_format": "pytorch",
-        "model_size_in_billions": 35,
-        "quantizations": [
-          "none"
-        ],
-        "model_hub": "modelscope",
-        "model_id": "mirror013/c4ai-command-r-v01-4bit",
-        "model_revision": "master"
-      }
-    ],
-    "prompt_style": {
-      "style_name": "c4ai-command-r",
-      "system_prompt": "You are Command-R, a brilliant, sophisticated, AI-assistant trained to assist human users by providing thorough responses. You are trained by Cohere.",
-      "roles": [
-        "<|USER_TOKEN|>",
-        "<|CHATBOT_TOKEN|>"
-      ],
-      "intra_message_sep": "",
-      "inter_message_sep": "<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|>",
-      "stop_token_ids": [
-        6,
-        255001
-      ]
-    }
-  },
   {
     "version": 1,
     "context_length": 128000,

xinference/model/llm/mlx/core.py
@@ -101,6 +101,7 @@ class MLXModel(LLM):

     def _load_model(self, **kwargs):
         try:
+            import mlx.core as mx
             from mlx_lm import load
         except ImportError:
             error_message = "Failed to import module 'mlx_lm'"
@@ -122,6 +123,11 @@ class MLXModel(LLM):
             self._model_config,
         )

+        cache_limit_gb = kwargs.get("cache_limit_gb", None)
+        if cache_limit_gb:
+            logger.debug(f"Setting cache limit to {cache_limit_gb} GB")
+            mx.metal.set_cache_limit(cache_limit_gb * 1024 * 1024 * 1024)
+
         return load(
             self.model_path,
             tokenizer_config=tokenizer_config,
@@ -134,6 +140,7 @@ class MLXModel(LLM):
             "revision", self.model_spec.model_revision
         )
         kwargs["trust_remote_code"] = self._model_config.get("trust_remote_code")
+        kwargs["cache_limit_gb"] = self._model_config.pop("cache_limit_gb", None)

         self._model, self._tokenizer = self._load_model(**kwargs)

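The MLX hunks read cache_limit_gb out of the model config in load(), forward it to _load_model, and, when set, cap the Metal buffer cache via mx.metal.set_cache_limit (which takes bytes). A hedged launch example follows; the model name and the extra-kwarg plumbing are assumptions, and the diff only guarantees that cache_limit_gb in the model config is converted to bytes and handed to MLX.

# Illustrative only: pass cache_limit_gb as an extra launch kwarg for an MLX-format
# model on Apple silicon; per the diff it ends up in mx.metal.set_cache_limit(gb * 1024**3).
from xinference.client import RESTfulClient

client = RESTfulClient("http://127.0.0.1:9997")
model_uid = client.launch_model(
    model_name="qwen2-instruct",   # assumed example; any model with an MLX spec works
    model_engine="MLX",
    model_format="mlx",
    model_size_in_billions=7,
    quantization="4-bit",
    cache_limit_gb=4,              # caps the MLX Metal buffer cache at 4 GiB
)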