xinference 0.13.2__py3-none-any.whl → 0.13.3__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
Potentially problematic release: this version of xinference might be problematic.
- xinference/__init__.py +0 -1
- xinference/_version.py +3 -3
- xinference/api/restful_api.py +26 -4
- xinference/client/restful/restful_client.py +16 -1
- xinference/core/chat_interface.py +2 -2
- xinference/core/model.py +8 -3
- xinference/core/scheduler.py +4 -4
- xinference/model/audio/core.py +5 -2
- xinference/model/audio/cosyvoice.py +136 -0
- xinference/model/audio/model_spec.json +24 -0
- xinference/model/audio/model_spec_modelscope.json +27 -0
- xinference/model/flexible/launchers/__init__.py +1 -0
- xinference/model/flexible/launchers/image_process_launcher.py +70 -0
- xinference/model/image/model_spec.json +7 -0
- xinference/model/image/stable_diffusion/core.py +6 -1
- xinference/model/llm/llm_family.json +802 -82
- xinference/model/llm/llm_family_csghub.json +39 -0
- xinference/model/llm/llm_family_modelscope.json +295 -47
- xinference/model/llm/pytorch/chatglm.py +243 -5
- xinference/model/llm/pytorch/cogvlm2.py +1 -1
- xinference/model/llm/utils.py +78 -1
- xinference/model/llm/vllm/core.py +8 -0
- xinference/thirdparty/cosyvoice/__init__.py +0 -0
- xinference/thirdparty/cosyvoice/bin/__init__.py +0 -0
- xinference/thirdparty/cosyvoice/bin/inference.py +114 -0
- xinference/thirdparty/cosyvoice/bin/train.py +136 -0
- xinference/thirdparty/cosyvoice/cli/__init__.py +0 -0
- xinference/thirdparty/cosyvoice/cli/cosyvoice.py +83 -0
- xinference/thirdparty/cosyvoice/cli/frontend.py +168 -0
- xinference/thirdparty/cosyvoice/cli/model.py +60 -0
- xinference/thirdparty/cosyvoice/dataset/__init__.py +0 -0
- xinference/thirdparty/cosyvoice/dataset/dataset.py +160 -0
- xinference/thirdparty/cosyvoice/dataset/processor.py +369 -0
- xinference/thirdparty/cosyvoice/flow/__init__.py +0 -0
- xinference/thirdparty/cosyvoice/flow/decoder.py +222 -0
- xinference/thirdparty/cosyvoice/flow/flow.py +135 -0
- xinference/thirdparty/cosyvoice/flow/flow_matching.py +138 -0
- xinference/thirdparty/cosyvoice/flow/length_regulator.py +49 -0
- xinference/thirdparty/cosyvoice/hifigan/__init__.py +0 -0
- xinference/thirdparty/cosyvoice/hifigan/f0_predictor.py +55 -0
- xinference/thirdparty/cosyvoice/hifigan/generator.py +391 -0
- xinference/thirdparty/cosyvoice/llm/__init__.py +0 -0
- xinference/thirdparty/cosyvoice/llm/llm.py +206 -0
- xinference/thirdparty/cosyvoice/transformer/__init__.py +0 -0
- xinference/thirdparty/cosyvoice/transformer/activation.py +84 -0
- xinference/thirdparty/cosyvoice/transformer/attention.py +326 -0
- xinference/thirdparty/cosyvoice/transformer/convolution.py +145 -0
- xinference/thirdparty/cosyvoice/transformer/decoder.py +396 -0
- xinference/thirdparty/cosyvoice/transformer/decoder_layer.py +132 -0
- xinference/thirdparty/cosyvoice/transformer/embedding.py +293 -0
- xinference/thirdparty/cosyvoice/transformer/encoder.py +472 -0
- xinference/thirdparty/cosyvoice/transformer/encoder_layer.py +236 -0
- xinference/thirdparty/cosyvoice/transformer/label_smoothing_loss.py +96 -0
- xinference/thirdparty/cosyvoice/transformer/positionwise_feed_forward.py +115 -0
- xinference/thirdparty/cosyvoice/transformer/subsampling.py +383 -0
- xinference/thirdparty/cosyvoice/utils/__init__.py +0 -0
- xinference/thirdparty/cosyvoice/utils/class_utils.py +70 -0
- xinference/thirdparty/cosyvoice/utils/common.py +103 -0
- xinference/thirdparty/cosyvoice/utils/executor.py +110 -0
- xinference/thirdparty/cosyvoice/utils/file_utils.py +41 -0
- xinference/thirdparty/cosyvoice/utils/frontend_utils.py +125 -0
- xinference/thirdparty/cosyvoice/utils/mask.py +227 -0
- xinference/thirdparty/cosyvoice/utils/scheduler.py +739 -0
- xinference/thirdparty/cosyvoice/utils/train_utils.py +289 -0
- xinference/web/ui/build/asset-manifest.json +3 -3
- xinference/web/ui/build/index.html +1 -1
- xinference/web/ui/build/static/js/{main.95c1d652.js → main.2ef0cfaf.js} +3 -3
- xinference/web/ui/build/static/js/main.2ef0cfaf.js.map +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/b6807ecc0c231fea699533518a0eb2a2bf68a081ce00d452be40600dbffa17a7.json +1 -0
- {xinference-0.13.2.dist-info → xinference-0.13.3.dist-info}/METADATA +16 -8
- {xinference-0.13.2.dist-info → xinference-0.13.3.dist-info}/RECORD +76 -32
- xinference/web/ui/build/static/js/main.95c1d652.js.map +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/709711edada3f1596b309d571285fd31f1c364d66f4425bc28723d0088cc351a.json +0 -1
- /xinference/web/ui/build/static/js/{main.95c1d652.js.LICENSE.txt → main.2ef0cfaf.js.LICENSE.txt} +0 -0
- {xinference-0.13.2.dist-info → xinference-0.13.3.dist-info}/LICENSE +0 -0
- {xinference-0.13.2.dist-info → xinference-0.13.3.dist-info}/WHEEL +0 -0
- {xinference-0.13.2.dist-info → xinference-0.13.3.dist-info}/entry_points.txt +0 -0
- {xinference-0.13.2.dist-info → xinference-0.13.3.dist-info}/top_level.txt +0 -0
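
Beyond the web-UI rebuild, the substantive changes are: a vendored CosyVoice text-to-speech stack under `xinference/thirdparty/cosyvoice/` with matching audio model specs, a new image-processing launcher for flexible models, and a large batch of new LLM registrations (Llama 3.1, Mistral Nemo/Large, csg-wukong) in the model-family JSON files diffed below. As a quick orientation, here is a minimal sketch of exercising one of the newly registered chat models through the RESTful client; the endpoint URL and the 0.13-era `chat(prompt=...)` signature are assumptions, not taken from this diff.

```python
# Minimal sketch, not from the diff: launching one of the newly registered
# chat models through xinference's RESTful client. Assumes a local
# supervisor is already running on the default port 9997.
from xinference.client import Client

client = Client("http://localhost:9997")

# "mistral-nemo-instruct" is registered by this release (see the
# llm_family_modelscope.json hunks below).
model_uid = client.launch_model(
    model_name="mistral-nemo-instruct",
    model_format="pytorch",
    model_size_in_billions=12,
    quantization="none",
)

model = client.get_model(model_uid)
# In the 0.13.x client, chat models take a plain prompt string
# (signature assumed from the 0.13-era API).
print(model.chat(prompt="Summarize this release in one sentence."))
```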
xinference/model/llm/llm_family_csghub.json (+39 -0):

```diff
@@ -62,5 +62,44 @@
         "<|im_end|>"
       ]
     }
+  },
+  {
+    "version": 1,
+    "context_length": 32768,
+    "model_name": "csg-wukong-chat-v0.1",
+    "model_lang": [
+      "en"
+    ],
+    "model_ability": [
+      "chat"
+    ],
+    "model_description": "csg-wukong-1B is a 1 billion-parameter small language model(SLM) pretrained on 1T tokens.",
+    "model_specs": [
+      {
+        "model_format": "pytorch",
+        "model_size_in_billions": 1,
+        "quantizations": [
+          "none"
+        ],
+        "model_id": "OpenCSG/csg-wukong-1B-chat-v0.1",
+        "model_hub": "csghub"
+      }
+    ],
+    "prompt_style": {
+      "style_name": "NO_COLON_TWO",
+      "system_prompt": "<|system|>\nYou are a creative super artificial intelligence assistant, possessing all the knowledge of humankind. Your name is csg-wukong, developed by OpenCSG. You need to understand and infer the true intentions of users based on the topics discussed in the chat history, and respond to user questions correctly as required. You enjoy responding to users with accurate and insightful answers. Please pay attention to the appropriate style and format when replying, try to avoid repetitive words and sentences, and keep your responses as concise and profound as possible. You carefully consider the context of the discussion when replying to users. When the user says \"continue,\" please proceed with the continuation of the previous assistant's response.</s>\n",
+      "roles": [
+        "<|user|>\n",
+        "<|assistant|>\n"
+      ],
+      "intra_message_sep": "</s>\n",
+      "inter_message_sep": "</s>\n",
+      "stop_token_ids": [
+        2
+      ],
+      "stop": [
+        "</s>"
+      ]
+    }
   }
 ]
```
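
The csg-wukong entry uses the `NO_COLON_TWO` prompt style: role tags are concatenated directly to message bodies (no `role:` colon), and the two separators alternate between turns. The actual rendering lives in `xinference/model/llm/utils.py` (also touched in this release); the sketch below is only an approximation of that logic, showing how the fields above combine into a prompt.

```python
# Approximation of NO_COLON_TWO prompt assembly; the real implementation
# is in xinference/model/llm/utils.py. Field values are taken from the
# csg-wukong-chat-v0.1 entry above.
def render_no_colon_two(system_prompt, roles, seps, messages):
    # messages: list of (role_index, text); 0 = user, 1 = assistant.
    ret = system_prompt
    for i, (role_idx, text) in enumerate(messages):
        # Role tag, message body, then the alternating separator.
        ret += roles[role_idx] + text + seps[i % 2]
    # Leave the assistant tag open so the model continues from it.
    return ret + roles[1]

prompt = render_no_colon_two(
    system_prompt="<|system|>\n...</s>\n",   # truncated for brevity
    roles=["<|user|>\n", "<|assistant|>\n"],
    seps=["</s>\n", "</s>\n"],               # intra/inter_message_sep
    messages=[(0, "Who are you?")],
)
# -> "<|system|>\n...</s>\n<|user|>\nWho are you?</s>\n<|assistant|>\n"
```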
xinference/model/llm/llm_family_modelscope.json (+295 -47):

```diff
@@ -153,6 +153,178 @@
         ],
         "model_id": "LLM-Research/Meta-Llama-3-70B-Instruct",
         "model_hub": "modelscope"
+      },
+      {
+        "model_format": "gptq",
+        "model_size_in_billions": 8,
+        "quantizations": [
+          "Int4",
+          "Int8"
+        ],
+        "model_id": "swift/Meta-Llama-3-8B-Instruct-GPTQ-{quantization}",
+        "model_hub": "modelscope"
+      },
+      {
+        "model_format": "gptq",
+        "model_size_in_billions": 70,
+        "quantizations": [
+          "Int4",
+          "Int8"
+        ],
+        "model_id": "swift/Meta-Llama-3-70B-Instruct-GPTQ-{quantization}",
+        "model_hub": "modelscope"
+      }
+    ],
+    "prompt_style": {
+      "style_name": "LLAMA3",
+      "system_prompt": "You are a helpful assistant.",
+      "roles": [
+        "user",
+        "assistant"
+      ],
+      "intra_message_sep": "\n\n",
+      "inter_message_sep": "<|eot_id|>",
+      "stop_token_ids": [
+        128001,
+        128009
+      ],
+      "stop": [
+        "<|end_of_text|>",
+        "<|eot_id|>"
+      ]
+    }
+  },
+  {
+    "version": 1,
+    "context_length": 131072,
+    "model_name": "llama-3.1",
+    "model_lang": [
+      "en",
+      "de",
+      "fr",
+      "it",
+      "pt",
+      "hi",
+      "es",
+      "th"
+    ],
+    "model_ability": [
+      "generate"
+    ],
+    "model_description": "Llama 3.1 is an auto-regressive language model that uses an optimized transformer architecture",
+    "model_specs": [
+      {
+        "model_format": "pytorch",
+        "model_size_in_billions": 8,
+        "quantizations": [
+          "4-bit",
+          "8-bit",
+          "none"
+        ],
+        "model_id": "LLM-Research/Meta-Llama-3.1-8B",
+        "model_hub": "modelscope"
+      },
+      {
+        "model_format": "pytorch",
+        "model_size_in_billions": 70,
+        "quantizations": [
+          "4-bit",
+          "8-bit",
+          "none"
+        ],
+        "model_id": "LLM-Research/Meta-Llama-3.1-70B",
+        "model_hub": "modelscope"
+      }
+    ]
+  },
+  {
+    "version": 1,
+    "context_length": 131072,
+    "model_name": "llama-3.1-instruct",
+    "model_lang": [
+      "en",
+      "de",
+      "fr",
+      "it",
+      "pt",
+      "hi",
+      "es",
+      "th"
+    ],
+    "model_ability": [
+      "chat"
+    ],
+    "model_description": "The Llama 3.1 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks..",
+    "model_specs": [
+      {
+        "model_format": "pytorch",
+        "model_size_in_billions": 8,
+        "quantizations": [
+          "none"
+        ],
+        "model_id": "LLM-Research/Meta-Llama-3.1-8B-Instruct",
+        "model_hub": "modelscope"
+      },
+      {
+        "model_format": "gptq",
+        "model_size_in_billions": 8,
+        "quantizations": [
+          "Int4"
+        ],
+        "model_id": "LLM-Research/Meta-Llama-3.1-8B-Instruct-GPTQ-INT4",
+        "model_hub": "modelscope"
+      },
+      {
+        "model_format": "awq",
+        "model_size_in_billions": 8,
+        "quantizations": [
+          "Int4"
+        ],
+        "model_id": "LLM-Research/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
+        "model_hub": "modelscope"
+      },
+      {
+        "model_format": "ggufv2",
+        "model_size_in_billions": 8,
+        "quantizations": [
+          "Q3_K_L",
+          "Q4_K_M",
+          "Q5_K_M",
+          "Q6_K",
+          "Q8_0"
+        ],
+        "model_id": "LLM-Research/Meta-Llama-3.1-8B-Instruct-GGUF",
+        "model_file_name_template": "Meta-Llama-3.1-8B-Instruct-{quantization}.gguf",
+        "model_hub": "modelscope"
+      },
+      {
+        "model_format": "pytorch",
+        "model_size_in_billions": 70,
+        "quantizations": [
+          "4-bit",
+          "8-bit",
+          "none"
+        ],
+        "model_id": "LLM-Research/Meta-Llama-3.1-70B-Instruct",
+        "model_hub": "modelscope"
+      },
+      {
+        "model_format": "gptq",
+        "model_size_in_billions": 70,
+        "quantizations": [
+          "Int4"
+        ],
+        "model_id": "LLM-Research/Meta-Llama-3.1-70B-Instruct-GPTQ-INT4",
+        "model_hub": "modelscope"
+      },
+      {
+        "model_format": "awq",
+        "model_size_in_billions": 70,
+        "quantizations": [
+          "Int4"
+        ],
+        "model_id": "LLM-Research/Meta-Llama-3.1-70B-Instruct-AWQ-INT4",
+        "model_hub": "modelscope"
       }
     ],
     "prompt_style": {
```
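
Two templating details in this hunk are worth noting. The swift GPTQ entries carry a `{quantization}` placeholder in `model_id`, so a single spec resolves to either the Int4 or Int8 repository at download time, and the GGUF spec does the same for file names via `model_file_name_template`. The substitution is plain string formatting:

```python
# Illustration of how the templated IDs above resolve; the substitution
# itself happens inside xinference's model-family loading code.
model_id = "swift/Meta-Llama-3-8B-Instruct-GPTQ-{quantization}"
file_template = "Meta-Llama-3.1-8B-Instruct-{quantization}.gguf"

print(model_id.format(quantization="Int4"))
# swift/Meta-Llama-3-8B-Instruct-GPTQ-Int4
print(file_template.format(quantization="Q4_K_M"))
# Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf
```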
xinference/model/llm/llm_family_modelscope.json (continued):

```diff
@@ -2078,6 +2250,119 @@
       ]
     }
   },
+  {
+    "version": 1,
+    "context_length": 1024000,
+    "model_name": "mistral-nemo-instruct",
+    "model_lang": [
+      "en",
+      "fr",
+      "de",
+      "es",
+      "it",
+      "pt",
+      "zh",
+      "ru",
+      "ja"
+    ],
+    "model_ability": [
+      "chat"
+    ],
+    "model_description": "The Mistral-Nemo-Instruct-2407 Large Language Model (LLM) is an instruct fine-tuned version of the Mistral-Nemo-Base-2407",
+    "model_specs": [
+      {
+        "model_format": "pytorch",
+        "model_size_in_billions": 12,
+        "quantizations": [
+          "none"
+        ],
+        "model_id": "AI-ModelScope/Mistral-Nemo-Instruct-2407",
+        "model_hub": "modelscope"
+      },
+      {
+        "model_format": "gptq",
+        "model_size_in_billions": 12,
+        "quantizations": [
+          "Int4"
+        ],
+        "model_id": "LLM-Research/Mistral-Nemo-Instruct-2407-gptq-4bit",
+        "model_hub": "modelscope"
+      }
+    ],
+    "prompt_style": {
+      "style_name": "mistral-nemo",
+      "system_prompt": "",
+      "roles": [
+        "[INST]",
+        "[/INST]"
+      ],
+      "intra_message_sep": "",
+      "inter_message_sep": "</s>",
+      "stop_token_ids": [
+        2
+      ],
+      "stop": [
+        "</s>"
+      ]
+    }
+  },
+  {
+    "version": 1,
+    "context_length": 131072,
+    "model_name": "mistral-large-instruct",
+    "model_lang": [
+      "en",
+      "fr",
+      "de",
+      "es",
+      "it",
+      "pt",
+      "zh",
+      "ru",
+      "ja",
+      "ko"
+    ],
+    "model_ability": [
+      "chat"
+    ],
+    "model_description": "Mistral-Large-Instruct-2407 is an advanced dense Large Language Model (LLM) of 123B parameters with state-of-the-art reasoning, knowledge and coding capabilities.",
+    "model_specs": [
+      {
+        "model_format": "pytorch",
+        "model_size_in_billions": 123,
+        "quantizations": [
+          "none"
+        ],
+        "model_id": "LLM-Research/Mistral-Large-Instruct-2407",
+        "model_hub": "modelscope"
+      },
+      {
+        "model_format": "pytorch",
+        "model_size_in_billions": 123,
+        "quantizations": [
+          "4-bit"
+        ],
+        "model_id": "LLM-Research/Mistral-Large-Instruct-2407-bnb-4bit",
+        "model_hub": "modelscope"
+      }
+    ],
+    "prompt_style": {
+      "style_name": "mistral-nemo",
+      "system_prompt": "",
+      "roles": [
+        "[INST]",
+        "[/INST]"
+      ],
+      "intra_message_sep": "",
+      "inter_message_sep": "</s>",
+      "stop_token_ids": [
+        2
+      ],
+      "stop": [
+        "</s>"
+      ]
+    }
+  },
   {
     "version": 1,
     "context_length": 2048,
```
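
Both new Mistral entries share the `mistral-nemo` prompt style (`[INST]`/`[/INST]` turns terminated by `</s>`). `mistral-large-instruct` registers two pytorch specs for the same 123B model that differ only in quantization, so the quantization argument is what selects the bitsandbytes 4-bit checkpoint. A hedged example, assuming the standard client API:

```python
# Sketch: the quantization argument picks between the two 123B pytorch
# specs added above ("none" -> Mistral-Large-Instruct-2407,
# "4-bit" -> Mistral-Large-Instruct-2407-bnb-4bit).
from xinference.client import Client

client = Client("http://localhost:9997")
model_uid = client.launch_model(
    model_name="mistral-large-instruct",
    model_format="pytorch",
    model_size_in_billions=123,
    quantization="4-bit",
)
```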
xinference/model/llm/llm_family_modelscope.json (continued):

```diff
@@ -4433,6 +4718,16 @@
         "model_id": "AI-ModelScope/c4ai-command-r-v01",
         "model_revision": "master"
       },
+      {
+        "model_format": "pytorch",
+        "model_size_in_billions": 35,
+        "quantizations": [
+          "4-bit"
+        ],
+        "model_hub": "modelscope",
+        "model_id": "mirror013/c4ai-command-r-v01-4bit",
+        "model_revision": "master"
+      },
       {
         "model_format": "ggufv2",
         "model_size_in_billions": 35,
```
xinference/model/llm/llm_family_modelscope.json (continued):

```diff
@@ -4473,53 +4768,6 @@
       ]
     }
   },
-  {
-    "version": 1,
-    "context_length": 131072,
-    "model_name": "c4ai-command-r-v01-4bit",
-    "model_lang": [
-      "en",
-      "fr",
-      "de",
-      "es",
-      "it",
-      "pt",
-      "ja",
-      "ko",
-      "zh",
-      "ar"
-    ],
-    "model_ability": [
-      "chat"
-    ],
-    "model_description": "This model is 4bit quantized version of C4AI Command-R using bitsandbytes.",
-    "model_specs": [
-      {
-        "model_format": "pytorch",
-        "model_size_in_billions": 35,
-        "quantizations": [
-          "none"
-        ],
-        "model_hub": "modelscope",
-        "model_id": "mirror013/c4ai-command-r-v01-4bit",
-        "model_revision": "master"
-      }
-    ],
-    "prompt_style": {
-      "style_name": "c4ai-command-r",
-      "system_prompt": "You are Command-R, a brilliant, sophisticated, AI-assistant trained to assist human users by providing thorough responses. You are trained by Cohere.",
-      "roles": [
-        "<|USER_TOKEN|>",
-        "<|CHATBOT_TOKEN|>"
-      ],
-      "intra_message_sep": "",
-      "inter_message_sep": "<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|>",
-      "stop_token_ids": [
-        6,
-        255001
-      ]
-    }
-  },
   {
     "version": 1,
     "context_length": 128000,
```