xinference 0.13.1__py3-none-any.whl → 0.13.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of xinference might be problematic.

Files changed (82)
  1. xinference/__init__.py +0 -1
  2. xinference/_version.py +3 -3
  3. xinference/api/restful_api.py +99 -5
  4. xinference/client/restful/restful_client.py +98 -1
  5. xinference/core/chat_interface.py +2 -2
  6. xinference/core/model.py +85 -26
  7. xinference/core/scheduler.py +4 -4
  8. xinference/model/audio/chattts.py +40 -8
  9. xinference/model/audio/core.py +5 -2
  10. xinference/model/audio/cosyvoice.py +136 -0
  11. xinference/model/audio/model_spec.json +24 -0
  12. xinference/model/audio/model_spec_modelscope.json +27 -0
  13. xinference/model/flexible/launchers/__init__.py +1 -0
  14. xinference/model/flexible/launchers/image_process_launcher.py +70 -0
  15. xinference/model/image/core.py +3 -0
  16. xinference/model/image/model_spec.json +21 -0
  17. xinference/model/image/stable_diffusion/core.py +49 -7
  18. xinference/model/llm/llm_family.json +1065 -106
  19. xinference/model/llm/llm_family.py +26 -6
  20. xinference/model/llm/llm_family_csghub.json +39 -0
  21. xinference/model/llm/llm_family_modelscope.json +460 -47
  22. xinference/model/llm/pytorch/chatglm.py +243 -5
  23. xinference/model/llm/pytorch/cogvlm2.py +1 -1
  24. xinference/model/llm/sglang/core.py +7 -2
  25. xinference/model/llm/utils.py +78 -1
  26. xinference/model/llm/vllm/core.py +11 -0
  27. xinference/thirdparty/cosyvoice/__init__.py +0 -0
  28. xinference/thirdparty/cosyvoice/bin/__init__.py +0 -0
  29. xinference/thirdparty/cosyvoice/bin/inference.py +114 -0
  30. xinference/thirdparty/cosyvoice/bin/train.py +136 -0
  31. xinference/thirdparty/cosyvoice/cli/__init__.py +0 -0
  32. xinference/thirdparty/cosyvoice/cli/cosyvoice.py +83 -0
  33. xinference/thirdparty/cosyvoice/cli/frontend.py +168 -0
  34. xinference/thirdparty/cosyvoice/cli/model.py +60 -0
  35. xinference/thirdparty/cosyvoice/dataset/__init__.py +0 -0
  36. xinference/thirdparty/cosyvoice/dataset/dataset.py +160 -0
  37. xinference/thirdparty/cosyvoice/dataset/processor.py +369 -0
  38. xinference/thirdparty/cosyvoice/flow/__init__.py +0 -0
  39. xinference/thirdparty/cosyvoice/flow/decoder.py +222 -0
  40. xinference/thirdparty/cosyvoice/flow/flow.py +135 -0
  41. xinference/thirdparty/cosyvoice/flow/flow_matching.py +138 -0
  42. xinference/thirdparty/cosyvoice/flow/length_regulator.py +49 -0
  43. xinference/thirdparty/cosyvoice/hifigan/__init__.py +0 -0
  44. xinference/thirdparty/cosyvoice/hifigan/f0_predictor.py +55 -0
  45. xinference/thirdparty/cosyvoice/hifigan/generator.py +391 -0
  46. xinference/thirdparty/cosyvoice/llm/__init__.py +0 -0
  47. xinference/thirdparty/cosyvoice/llm/llm.py +206 -0
  48. xinference/thirdparty/cosyvoice/transformer/__init__.py +0 -0
  49. xinference/thirdparty/cosyvoice/transformer/activation.py +84 -0
  50. xinference/thirdparty/cosyvoice/transformer/attention.py +326 -0
  51. xinference/thirdparty/cosyvoice/transformer/convolution.py +145 -0
  52. xinference/thirdparty/cosyvoice/transformer/decoder.py +396 -0
  53. xinference/thirdparty/cosyvoice/transformer/decoder_layer.py +132 -0
  54. xinference/thirdparty/cosyvoice/transformer/embedding.py +293 -0
  55. xinference/thirdparty/cosyvoice/transformer/encoder.py +472 -0
  56. xinference/thirdparty/cosyvoice/transformer/encoder_layer.py +236 -0
  57. xinference/thirdparty/cosyvoice/transformer/label_smoothing_loss.py +96 -0
  58. xinference/thirdparty/cosyvoice/transformer/positionwise_feed_forward.py +115 -0
  59. xinference/thirdparty/cosyvoice/transformer/subsampling.py +383 -0
  60. xinference/thirdparty/cosyvoice/utils/__init__.py +0 -0
  61. xinference/thirdparty/cosyvoice/utils/class_utils.py +70 -0
  62. xinference/thirdparty/cosyvoice/utils/common.py +103 -0
  63. xinference/thirdparty/cosyvoice/utils/executor.py +110 -0
  64. xinference/thirdparty/cosyvoice/utils/file_utils.py +41 -0
  65. xinference/thirdparty/cosyvoice/utils/frontend_utils.py +125 -0
  66. xinference/thirdparty/cosyvoice/utils/mask.py +227 -0
  67. xinference/thirdparty/cosyvoice/utils/scheduler.py +739 -0
  68. xinference/thirdparty/cosyvoice/utils/train_utils.py +289 -0
  69. xinference/web/ui/build/asset-manifest.json +3 -3
  70. xinference/web/ui/build/index.html +1 -1
  71. xinference/web/ui/build/static/js/{main.95c1d652.js → main.2ef0cfaf.js} +3 -3
  72. xinference/web/ui/build/static/js/main.2ef0cfaf.js.map +1 -0
  73. xinference/web/ui/node_modules/.cache/babel-loader/b6807ecc0c231fea699533518a0eb2a2bf68a081ce00d452be40600dbffa17a7.json +1 -0
  74. {xinference-0.13.1.dist-info → xinference-0.13.3.dist-info}/METADATA +18 -8
  75. {xinference-0.13.1.dist-info → xinference-0.13.3.dist-info}/RECORD +80 -36
  76. xinference/web/ui/build/static/js/main.95c1d652.js.map +0 -1
  77. xinference/web/ui/node_modules/.cache/babel-loader/709711edada3f1596b309d571285fd31f1c364d66f4425bc28723d0088cc351a.json +0 -1
  78. /xinference/web/ui/build/static/js/{main.95c1d652.js.LICENSE.txt → main.2ef0cfaf.js.LICENSE.txt} +0 -0
  79. {xinference-0.13.1.dist-info → xinference-0.13.3.dist-info}/LICENSE +0 -0
  80. {xinference-0.13.1.dist-info → xinference-0.13.3.dist-info}/WHEEL +0 -0
  81. {xinference-0.13.1.dist-info → xinference-0.13.3.dist-info}/entry_points.txt +0 -0
  82. {xinference-0.13.1.dist-info → xinference-0.13.3.dist-info}/top_level.txt +0 -0
xinference/model/llm/llm_family.py
@@ -554,16 +554,36 @@ def _get_cache_dir(
             quant_suffix = q
             break
 
-    cache_dir_name = (
+    # some model name includes ".", e.g. qwen1.5-chat
+    # if the model does not require trust_remote_code, it's OK
+    # because no need to import modeling_xxx.py from the path
+    # but when the model need to trust_remote_code,
+    # e.g. internlm2.5-chat, the import will fail,
+    # but before the model may have been downloaded,
+    # thus we check it first, if exist, return it,
+    # otherwise, we replace the "." with "_" in model name
+    old_cache_dir_name = (
         f"{llm_family.model_name}-{llm_spec.model_format}"
         f"-{llm_spec.model_size_in_billions}b"
     )
     if quant_suffix:
-        cache_dir_name += f"-{quant_suffix}"
-    cache_dir = os.path.realpath(os.path.join(XINFERENCE_CACHE_DIR, cache_dir_name))
-    if create_if_not_exist and not os.path.exists(cache_dir):
-        os.makedirs(cache_dir, exist_ok=True)
-    return cache_dir
+        old_cache_dir_name += f"-{quant_suffix}"
+    old_cache_dir = os.path.realpath(
+        os.path.join(XINFERENCE_CACHE_DIR, old_cache_dir_name)
+    )
+    if os.path.exists(old_cache_dir):
+        return old_cache_dir
+    else:
+        cache_dir_name = (
+            f"{llm_family.model_name.replace('.', '_')}-{llm_spec.model_format}"
+            f"-{llm_spec.model_size_in_billions}b"
+        )
+        if quant_suffix:
+            cache_dir_name += f"-{quant_suffix}"
+        cache_dir = os.path.realpath(os.path.join(XINFERENCE_CACHE_DIR, cache_dir_name))
+        if create_if_not_exist and not os.path.exists(cache_dir):
+            os.makedirs(cache_dir, exist_ok=True)
+        return cache_dir
 
 
 def _get_meta_path(
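
The effect of this hunk is easiest to see on a dotted model name such as internlm2.5-chat: the legacy dotted directory is reused if it already exists, otherwise dots are replaced so the directory can serve as an importable package path under trust_remote_code. A minimal standalone sketch of that resolution order (the helper name and the hard-coded cache path are illustrative; the real function takes llm_family/llm_spec objects and xinference's XINFERENCE_CACHE_DIR constant):

import os

XINFERENCE_CACHE_DIR = os.path.expanduser("~/.xinference/cache")  # assumption for illustration


def resolve_cache_dir(model_name: str, model_format: str, size_b: int) -> str:
    # Legacy layout: the model name is used verbatim, dots included.
    old_name = f"{model_name}-{model_format}-{size_b}b"
    old_dir = os.path.realpath(os.path.join(XINFERENCE_CACHE_DIR, old_name))
    if os.path.exists(old_dir):
        # A previous download exists under the dotted name; keep using it.
        return old_dir
    # New layout: dots replaced so the directory works as a Python package
    # path when trust_remote_code has to import modeling_xxx.py from it.
    new_name = f"{model_name.replace('.', '_')}-{model_format}-{size_b}b"
    return os.path.realpath(os.path.join(XINFERENCE_CACHE_DIR, new_name))


# "internlm2.5-chat" resolves to ".../internlm2_5-chat-pytorch-7b"
# unless ".../internlm2.5-chat-pytorch-7b" already exists on disk.
print(resolve_cache_dir("internlm2.5-chat", "pytorch", 7))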
xinference/model/llm/llm_family_csghub.json
@@ -62,5 +62,44 @@
                 "<|im_end|>"
             ]
         }
+    },
+    {
+        "version": 1,
+        "context_length": 32768,
+        "model_name": "csg-wukong-chat-v0.1",
+        "model_lang": [
+            "en"
+        ],
+        "model_ability": [
+            "chat"
+        ],
+        "model_description": "csg-wukong-1B is a 1 billion-parameter small language model(SLM) pretrained on 1T tokens.",
+        "model_specs": [
+            {
+                "model_format": "pytorch",
+                "model_size_in_billions": 1,
+                "quantizations": [
+                    "none"
+                ],
+                "model_id": "OpenCSG/csg-wukong-1B-chat-v0.1",
+                "model_hub": "csghub"
+            }
+        ],
+        "prompt_style": {
+            "style_name": "NO_COLON_TWO",
+            "system_prompt": "<|system|>\nYou are a creative super artificial intelligence assistant, possessing all the knowledge of humankind. Your name is csg-wukong, developed by OpenCSG. You need to understand and infer the true intentions of users based on the topics discussed in the chat history, and respond to user questions correctly as required. You enjoy responding to users with accurate and insightful answers. Please pay attention to the appropriate style and format when replying, try to avoid repetitive words and sentences, and keep your responses as concise and profound as possible. You carefully consider the context of the discussion when replying to users. When the user says \"continue,\" please proceed with the continuation of the previous assistant's response.</s>\n",
+            "roles": [
+                "<|user|>\n",
+                "<|assistant|>\n"
+            ],
+            "intra_message_sep": "</s>\n",
+            "inter_message_sep": "</s>\n",
+            "stop_token_ids": [
+                2
+            ],
+            "stop": [
+                "</s>"
+            ]
+        }
     }
 ]
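
The new entry relies on the NO_COLON_TWO prompt style. Assuming xinference renders it the way FastChat's NO_COLON_TWO separator style does (system prompt first, then each role tag, the message, and alternating separators), the fields above compose roughly as in this sketch (render_no_colon_two is a hypothetical helper and the long system prompt is abbreviated):

def render_no_colon_two(system_prompt, seps, turns):
    # turns: list of (role_tag, message-or-None); None cues the model to generate.
    ret = system_prompt
    for i, (role_tag, message) in enumerate(turns):
        if message is not None:
            ret += role_tag + message + seps[i % 2]
        else:
            ret += role_tag
    return ret


print(render_no_colon_two(
    "<|system|>\nYou are csg-wukong, developed by OpenCSG. (abbreviated)</s>\n",
    ["</s>\n", "</s>\n"],  # [intra_message_sep, inter_message_sep]
    [("<|user|>\n", "Hello!"), ("<|assistant|>\n", None)],
))
# -> "<|system|>\n...(system text)...</s>\n<|user|>\nHello!</s>\n<|assistant|>\n"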
xinference/model/llm/llm_family_modelscope.json
@@ -153,6 +153,178 @@
                 ],
                 "model_id": "LLM-Research/Meta-Llama-3-70B-Instruct",
                 "model_hub": "modelscope"
+            },
+            {
+                "model_format": "gptq",
+                "model_size_in_billions": 8,
+                "quantizations": [
+                    "Int4",
+                    "Int8"
+                ],
+                "model_id": "swift/Meta-Llama-3-8B-Instruct-GPTQ-{quantization}",
+                "model_hub": "modelscope"
+            },
+            {
+                "model_format": "gptq",
+                "model_size_in_billions": 70,
+                "quantizations": [
+                    "Int4",
+                    "Int8"
+                ],
+                "model_id": "swift/Meta-Llama-3-70B-Instruct-GPTQ-{quantization}",
+                "model_hub": "modelscope"
+            }
+        ],
+        "prompt_style": {
+            "style_name": "LLAMA3",
+            "system_prompt": "You are a helpful assistant.",
+            "roles": [
+                "user",
+                "assistant"
+            ],
+            "intra_message_sep": "\n\n",
+            "inter_message_sep": "<|eot_id|>",
+            "stop_token_ids": [
+                128001,
+                128009
+            ],
+            "stop": [
+                "<|end_of_text|>",
+                "<|eot_id|>"
+            ]
+        }
+    },
+    {
+        "version": 1,
+        "context_length": 131072,
+        "model_name": "llama-3.1",
+        "model_lang": [
+            "en",
+            "de",
+            "fr",
+            "it",
+            "pt",
+            "hi",
+            "es",
+            "th"
+        ],
+        "model_ability": [
+            "generate"
+        ],
+        "model_description": "Llama 3.1 is an auto-regressive language model that uses an optimized transformer architecture",
+        "model_specs": [
+            {
+                "model_format": "pytorch",
+                "model_size_in_billions": 8,
+                "quantizations": [
+                    "4-bit",
+                    "8-bit",
+                    "none"
+                ],
+                "model_id": "LLM-Research/Meta-Llama-3.1-8B",
+                "model_hub": "modelscope"
+            },
+            {
+                "model_format": "pytorch",
+                "model_size_in_billions": 70,
+                "quantizations": [
+                    "4-bit",
+                    "8-bit",
+                    "none"
+                ],
+                "model_id": "LLM-Research/Meta-Llama-3.1-70B",
+                "model_hub": "modelscope"
+            }
+        ]
+    },
+    {
+        "version": 1,
+        "context_length": 131072,
+        "model_name": "llama-3.1-instruct",
+        "model_lang": [
+            "en",
+            "de",
+            "fr",
+            "it",
+            "pt",
+            "hi",
+            "es",
+            "th"
+        ],
+        "model_ability": [
+            "chat"
+        ],
+        "model_description": "The Llama 3.1 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks..",
+        "model_specs": [
+            {
+                "model_format": "pytorch",
+                "model_size_in_billions": 8,
+                "quantizations": [
+                    "none"
+                ],
+                "model_id": "LLM-Research/Meta-Llama-3.1-8B-Instruct",
+                "model_hub": "modelscope"
+            },
+            {
+                "model_format": "gptq",
+                "model_size_in_billions": 8,
+                "quantizations": [
+                    "Int4"
+                ],
+                "model_id": "LLM-Research/Meta-Llama-3.1-8B-Instruct-GPTQ-INT4",
+                "model_hub": "modelscope"
+            },
+            {
+                "model_format": "awq",
+                "model_size_in_billions": 8,
+                "quantizations": [
+                    "Int4"
+                ],
+                "model_id": "LLM-Research/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
+                "model_hub": "modelscope"
+            },
+            {
+                "model_format": "ggufv2",
+                "model_size_in_billions": 8,
+                "quantizations": [
+                    "Q3_K_L",
+                    "Q4_K_M",
+                    "Q5_K_M",
+                    "Q6_K",
+                    "Q8_0"
+                ],
+                "model_id": "LLM-Research/Meta-Llama-3.1-8B-Instruct-GGUF",
+                "model_file_name_template": "Meta-Llama-3.1-8B-Instruct-{quantization}.gguf",
+                "model_hub": "modelscope"
+            },
+            {
+                "model_format": "pytorch",
+                "model_size_in_billions": 70,
+                "quantizations": [
+                    "4-bit",
+                    "8-bit",
+                    "none"
+                ],
+                "model_id": "LLM-Research/Meta-Llama-3.1-70B-Instruct",
+                "model_hub": "modelscope"
+            },
+            {
+                "model_format": "gptq",
+                "model_size_in_billions": 70,
+                "quantizations": [
+                    "Int4"
+                ],
+                "model_id": "LLM-Research/Meta-Llama-3.1-70B-Instruct-GPTQ-INT4",
+                "model_hub": "modelscope"
+            },
+            {
+                "model_format": "awq",
+                "model_size_in_billions": 70,
+                "quantizations": [
+                    "Int4"
+                ],
+                "model_id": "LLM-Research/Meta-Llama-3.1-70B-Instruct-AWQ-INT4",
+                "model_hub": "modelscope"
+            }
         ],
         "prompt_style": {
@@ -688,6 +860,66 @@
             ]
         }
     },
+    {
+        "version": 1,
+        "context_length": 131072,
+        "model_name": "codegeex4",
+        "model_lang": [
+            "en",
+            "zh"
+        ],
+        "model_ability": [
+            "chat"
+        ],
+        "model_description": "the open-source version of the latest CodeGeeX4 model series",
+        "model_specs": [
+            {
+                "model_format": "pytorch",
+                "model_size_in_billions": 9,
+                "quantizations": [
+                    "4-bit",
+                    "8-bit",
+                    "none"
+                ],
+                "model_id": "ZhipuAI/codegeex4-all-9b",
+                "model_hub": "modelscope",
+                "model_revision": "master"
+            },
+            {
+                "model_format": "ggufv2",
+                "model_size_in_billions": 9,
+                "quantizations": [
+                    "IQ2_M",
+                    "IQ3_M",
+                    "Q4_K_M",
+                    "Q5_K_M",
+                    "Q6_K_L",
+                    "Q8_0"
+                ],
+                "model_file_name_template": "codegeex4-all-9b-{quantization}.gguf",
+                "model_id": "ZhipuAI/codegeex4-all-9b-GGUF",
+                "model_hub": "modelscope"
+            }
+        ],
+        "prompt_style": {
+            "style_name": "CHATGLM3",
+            "system_prompt": "",
+            "roles": [
+                "user",
+                "assistant"
+            ],
+            "stop_token_ids": [
+                151329,
+                151336,
+                151338
+            ],
+            "stop": [
+                "<|endoftext|>",
+                "<|user|>",
+                "<|observation|>"
+            ]
+        }
+    },
     {
         "version": 1,
         "context_length": 2048,
@@ -928,6 +1160,88 @@
             ]
         }
     },
+    {
+        "version": 1,
+        "context_length": 32768,
+        "model_name": "internlm2.5-chat",
+        "model_lang": [
+            "en",
+            "zh"
+        ],
+        "model_ability": [
+            "chat"
+        ],
+        "model_description": "InternLM2.5 series of the InternLM model.",
+        "model_specs": [
+            {
+                "model_format": "pytorch",
+                "model_size_in_billions": 7,
+                "quantizations": [
+                    "none"
+                ],
+                "model_id": "Shanghai_AI_Laboratory/internlm2_5-7b-chat",
+                "model_hub": "modelscope"
+            }
+        ],
+        "prompt_style": {
+            "style_name": "INTERNLM2",
+            "system_prompt": "You are InternLM (书生·浦语), a helpful, honest, and harmless AI assistant developed by Shanghai AI Laboratory (上海人工智能实验室).",
+            "roles": [
+                "<|im_start|>user",
+                "<|im_start|>assistant"
+            ],
+            "intra_message_sep": "<|im_end|>",
+            "stop_token_ids": [
+                2,
+                92542
+            ],
+            "stop": [
+                "</s>",
+                "<|im_end|>"
+            ]
+        }
+    },
+    {
+        "version": 1,
+        "context_length": 262144,
+        "model_name": "internlm2.5-chat-1m",
+        "model_lang": [
+            "en",
+            "zh"
+        ],
+        "model_ability": [
+            "chat"
+        ],
+        "model_description": "InternLM2.5 series of the InternLM model supports 1M long-context",
+        "model_specs": [
+            {
+                "model_format": "pytorch",
+                "model_size_in_billions": 7,
+                "quantizations": [
+                    "none"
+                ],
+                "model_id": "Shanghai_AI_Laboratory/internlm2_5-7b-chat-1m",
+                "model_hub": "modelscope"
+            }
+        ],
+        "prompt_style": {
+            "style_name": "INTERNLM2",
+            "system_prompt": "You are InternLM (书生·浦语), a helpful, honest, and harmless AI assistant developed by Shanghai AI Laboratory (上海人工智能实验室).",
+            "roles": [
+                "<|im_start|>user",
+                "<|im_start|>assistant"
+            ],
+            "intra_message_sep": "<|im_end|>",
+            "stop_token_ids": [
+                2,
+                92542
+            ],
+            "stop": [
+                "</s>",
+                "<|im_end|>"
+            ]
+        }
+    },
     {
         "version": 1,
         "context_length": 100000,
@@ -1936,6 +2250,119 @@
             ]
         }
     },
+    {
+        "version": 1,
+        "context_length": 1024000,
+        "model_name": "mistral-nemo-instruct",
+        "model_lang": [
+            "en",
+            "fr",
+            "de",
+            "es",
+            "it",
+            "pt",
+            "zh",
+            "ru",
+            "ja"
+        ],
+        "model_ability": [
+            "chat"
+        ],
+        "model_description": "The Mistral-Nemo-Instruct-2407 Large Language Model (LLM) is an instruct fine-tuned version of the Mistral-Nemo-Base-2407",
+        "model_specs": [
+            {
+                "model_format": "pytorch",
+                "model_size_in_billions": 12,
+                "quantizations": [
+                    "none"
+                ],
+                "model_id": "AI-ModelScope/Mistral-Nemo-Instruct-2407",
+                "model_hub": "modelscope"
+            },
+            {
+                "model_format": "gptq",
+                "model_size_in_billions": 12,
+                "quantizations": [
+                    "Int4"
+                ],
+                "model_id": "LLM-Research/Mistral-Nemo-Instruct-2407-gptq-4bit",
+                "model_hub": "modelscope"
+            }
+        ],
+        "prompt_style": {
+            "style_name": "mistral-nemo",
+            "system_prompt": "",
+            "roles": [
+                "[INST]",
+                "[/INST]"
+            ],
+            "intra_message_sep": "",
+            "inter_message_sep": "</s>",
+            "stop_token_ids": [
+                2
+            ],
+            "stop": [
+                "</s>"
+            ]
+        }
+    },
+    {
+        "version": 1,
+        "context_length": 131072,
+        "model_name": "mistral-large-instruct",
+        "model_lang": [
+            "en",
+            "fr",
+            "de",
+            "es",
+            "it",
+            "pt",
+            "zh",
+            "ru",
+            "ja",
+            "ko"
+        ],
+        "model_ability": [
+            "chat"
+        ],
+        "model_description": "Mistral-Large-Instruct-2407 is an advanced dense Large Language Model (LLM) of 123B parameters with state-of-the-art reasoning, knowledge and coding capabilities.",
+        "model_specs": [
+            {
+                "model_format": "pytorch",
+                "model_size_in_billions": 123,
+                "quantizations": [
+                    "none"
+                ],
+                "model_id": "LLM-Research/Mistral-Large-Instruct-2407",
+                "model_hub": "modelscope"
+            },
+            {
+                "model_format": "pytorch",
+                "model_size_in_billions": 123,
+                "quantizations": [
+                    "4-bit"
+                ],
+                "model_id": "LLM-Research/Mistral-Large-Instruct-2407-bnb-4bit",
+                "model_hub": "modelscope"
+            }
+        ],
+        "prompt_style": {
+            "style_name": "mistral-nemo",
+            "system_prompt": "",
+            "roles": [
+                "[INST]",
+                "[/INST]"
+            ],
+            "intra_message_sep": "",
+            "inter_message_sep": "</s>",
+            "stop_token_ids": [
+                2
+            ],
+            "stop": [
+                "</s>"
+            ]
+        }
+    },
     {
         "version": 1,
         "context_length": 2048,
3799
4226
  ],
3800
4227
  "model_id": "AI-ModelScope/gemma-2-27b-it",
3801
4228
  "model_hub": "modelscope"
4229
+ },
4230
+ {
4231
+ "model_format": "ggufv2",
4232
+ "model_size_in_billions": 9,
4233
+ "quantizations": [
4234
+ "Q2_K",
4235
+ "Q3_K_L",
4236
+ "Q3_K_M",
4237
+ "Q3_K_S",
4238
+ "Q4_K_L",
4239
+ "Q4_K_M",
4240
+ "Q4_K_S",
4241
+ "Q5_K_L",
4242
+ "Q5_K_M",
4243
+ "Q5_K_S",
4244
+ "Q6_K",
4245
+ "Q6_K_L",
4246
+ "Q8_0",
4247
+ "f32"
4248
+ ],
4249
+ "model_id": "LLM-Research/gemma-2-9b-it-GGUF",
4250
+ "model_file_name_template": "gemma-2-9b-it-{quantization}.gguf",
4251
+ "model_hub": "modelscope"
3802
4252
  }
3803
4253
  ],
3804
4254
  "prompt_style": {
@@ -4268,6 +4718,16 @@
4268
4718
  "model_id": "AI-ModelScope/c4ai-command-r-v01",
4269
4719
  "model_revision": "master"
4270
4720
  },
4721
+ {
4722
+ "model_format": "pytorch",
4723
+ "model_size_in_billions": 35,
4724
+ "quantizations": [
4725
+ "4-bit"
4726
+ ],
4727
+ "model_hub": "modelscope",
4728
+ "model_id": "mirror013/c4ai-command-r-v01-4bit",
4729
+ "model_revision": "master"
4730
+ },
4271
4731
  {
4272
4732
  "model_format": "ggufv2",
4273
4733
  "model_size_in_billions": 35,
@@ -4308,53 +4768,6 @@
4308
4768
  ]
4309
4769
  }
4310
4770
  },
4311
- {
4312
- "version": 1,
4313
- "context_length": 131072,
4314
- "model_name": "c4ai-command-r-v01-4bit",
4315
- "model_lang": [
4316
- "en",
4317
- "fr",
4318
- "de",
4319
- "es",
4320
- "it",
4321
- "pt",
4322
- "ja",
4323
- "ko",
4324
- "zh",
4325
- "ar"
4326
- ],
4327
- "model_ability": [
4328
- "chat"
4329
- ],
4330
- "model_description": "This model is 4bit quantized version of C4AI Command-R using bitsandbytes.",
4331
- "model_specs": [
4332
- {
4333
- "model_format": "pytorch",
4334
- "model_size_in_billions": 35,
4335
- "quantizations": [
4336
- "none"
4337
- ],
4338
- "model_hub": "modelscope",
4339
- "model_id": "mirror013/c4ai-command-r-v01-4bit",
4340
- "model_revision": "master"
4341
- }
4342
- ],
4343
- "prompt_style": {
4344
- "style_name": "c4ai-command-r",
4345
- "system_prompt": "You are Command-R, a brilliant, sophisticated, AI-assistant trained to assist human users by providing thorough responses. You are trained by Cohere.",
4346
- "roles": [
4347
- "<|USER_TOKEN|>",
4348
- "<|CHATBOT_TOKEN|>"
4349
- ],
4350
- "intra_message_sep": "",
4351
- "inter_message_sep": "<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|>",
4352
- "stop_token_ids": [
4353
- 6,
4354
- 255001
4355
- ]
4356
- }
4357
- },
4358
4771
  {
4359
4772
  "version": 1,
4360
4773
  "context_length": 128000,