xinference 1.9.0__py3-none-any.whl → 1.9.1__py3-none-any.whl

This diff compares the published contents of two package versions as they appear in their public registry. It is provided for informational purposes only.


Files changed (74)
  1. xinference/_version.py +3 -3
  2. xinference/core/model.py +3 -4
  3. xinference/core/worker.py +4 -1
  4. xinference/deploy/cmdline.py +2 -0
  5. xinference/deploy/test/test_cmdline.py +1 -1
  6. xinference/model/audio/cosyvoice.py +0 -1
  7. xinference/model/audio/model_spec.json +44 -20
  8. xinference/model/embedding/flag/core.py +5 -0
  9. xinference/model/embedding/llama_cpp/core.py +22 -19
  10. xinference/model/embedding/sentence_transformers/core.py +15 -0
  11. xinference/model/embedding/vllm/core.py +33 -7
  12. xinference/model/image/cache_manager.py +56 -0
  13. xinference/model/image/core.py +9 -0
  14. xinference/model/image/model_spec.json +114 -6
  15. xinference/model/image/stable_diffusion/core.py +141 -31
  16. xinference/model/llm/llama_cpp/core.py +41 -40
  17. xinference/model/llm/llm_family.json +395 -3
  18. xinference/model/llm/transformers/core.py +5 -11
  19. xinference/model/llm/utils.py +1 -1
  20. xinference/model/llm/vllm/core.py +6 -0
  21. xinference/model/rerank/core.py +3 -0
  22. xinference/model/rerank/sentence_transformers/core.py +1 -1
  23. xinference/model/rerank/vllm/core.py +56 -6
  24. xinference/model/utils.py +1 -2
  25. xinference/model/video/model_spec.json +95 -1
  26. xinference/thirdparty/cosyvoice/bin/export_jit.py +3 -4
  27. xinference/thirdparty/cosyvoice/bin/export_onnx.py +49 -126
  28. xinference/thirdparty/cosyvoice/bin/{inference.py → inference_deprecated.py} +1 -0
  29. xinference/thirdparty/cosyvoice/bin/train.py +23 -3
  30. xinference/thirdparty/cosyvoice/cli/cosyvoice.py +8 -4
  31. xinference/thirdparty/cosyvoice/cli/frontend.py +4 -4
  32. xinference/thirdparty/cosyvoice/cli/model.py +53 -75
  33. xinference/thirdparty/cosyvoice/dataset/dataset.py +5 -18
  34. xinference/thirdparty/cosyvoice/dataset/processor.py +24 -25
  35. xinference/thirdparty/cosyvoice/flow/decoder.py +24 -433
  36. xinference/thirdparty/cosyvoice/flow/flow.py +6 -14
  37. xinference/thirdparty/cosyvoice/flow/flow_matching.py +33 -145
  38. xinference/thirdparty/cosyvoice/hifigan/generator.py +169 -1
  39. xinference/thirdparty/cosyvoice/llm/llm.py +108 -17
  40. xinference/thirdparty/cosyvoice/transformer/upsample_encoder.py +14 -115
  41. xinference/thirdparty/cosyvoice/utils/common.py +20 -0
  42. xinference/thirdparty/cosyvoice/utils/executor.py +8 -4
  43. xinference/thirdparty/cosyvoice/utils/file_utils.py +45 -1
  44. xinference/thirdparty/cosyvoice/utils/losses.py +37 -0
  45. xinference/thirdparty/cosyvoice/utils/mask.py +35 -1
  46. xinference/thirdparty/cosyvoice/utils/train_utils.py +24 -6
  47. xinference/thirdparty/cosyvoice/vllm/cosyvoice2.py +103 -0
  48. xinference/ui/gradio/chat_interface.py +2 -0
  49. xinference/ui/gradio/media_interface.py +353 -7
  50. xinference/ui/web/ui/build/asset-manifest.json +3 -3
  51. xinference/ui/web/ui/build/index.html +1 -1
  52. xinference/ui/web/ui/build/static/js/main.1086c759.js +3 -0
  53. xinference/ui/web/ui/build/static/js/main.1086c759.js.map +1 -0
  54. xinference/ui/web/ui/node_modules/.cache/babel-loader/3c5758bd12fa334294b1de0ff6b1a4bac8d963c45472eab9dc3e530d82aa6b3f.json +1 -0
  55. xinference/ui/web/ui/node_modules/.cache/babel-loader/a3eb18af328280b139693c9092dff2a0ef8c9a967e6c8956ceee0996611f1984.json +1 -0
  56. xinference/ui/web/ui/node_modules/.cache/babel-loader/d5c224be7081f18cba1678b7874a9782eba895df004874ff8f243f94ba79942a.json +1 -0
  57. xinference/ui/web/ui/node_modules/.cache/babel-loader/f7f18bfb539b036a6a342176dd98a85df5057a884a8da978d679f2a0264883d0.json +1 -0
  58. xinference/ui/web/ui/src/locales/en.json +2 -0
  59. xinference/ui/web/ui/src/locales/ja.json +2 -0
  60. xinference/ui/web/ui/src/locales/ko.json +2 -0
  61. xinference/ui/web/ui/src/locales/zh.json +2 -0
  62. {xinference-1.9.0.dist-info → xinference-1.9.1.dist-info}/METADATA +10 -10
  63. {xinference-1.9.0.dist-info → xinference-1.9.1.dist-info}/RECORD +68 -67
  64. xinference/ui/web/ui/build/static/js/main.4918643a.js +0 -3
  65. xinference/ui/web/ui/build/static/js/main.4918643a.js.map +0 -1
  66. xinference/ui/web/ui/node_modules/.cache/babel-loader/3d2a89f0eccc1f90fc5036c9a1d587c2120e6a6b128aae31d1db7d6bad52722b.json +0 -1
  67. xinference/ui/web/ui/node_modules/.cache/babel-loader/89179f8f51887b9167721860a12412549ff04f78162e921a7b6aa6532646deb2.json +0 -1
  68. xinference/ui/web/ui/node_modules/.cache/babel-loader/8e5cb82c2ff3299c6a44563fe6b1c5515c9750613c51bb63abee0b1d70fc5019.json +0 -1
  69. xinference/ui/web/ui/node_modules/.cache/babel-loader/9dc5cfc67dd0617b0272aeef8651f1589b2155a4ff1fd72ad3166b217089b619.json +0 -1
  70. /xinference/ui/web/ui/build/static/js/{main.4918643a.js.LICENSE.txt → main.1086c759.js.LICENSE.txt} +0 -0
  71. {xinference-1.9.0.dist-info → xinference-1.9.1.dist-info}/WHEEL +0 -0
  72. {xinference-1.9.0.dist-info → xinference-1.9.1.dist-info}/entry_points.txt +0 -0
  73. {xinference-1.9.0.dist-info → xinference-1.9.1.dist-info}/licenses/LICENSE +0 -0
  74. {xinference-1.9.0.dist-info → xinference-1.9.1.dist-info}/top_level.txt +0 -0
xinference/model/llm/llm_family.json:

@@ -4767,6 +4767,7 @@
  {
  "model_format": "pytorch",
  "model_size_in_billions": 671,
+ "activated_size_in_billions": 37,
  "model_src": {
  "huggingface": {
  "quantizations": [
@@ -4846,6 +4847,7 @@
  {
  "model_format": "pytorch",
  "model_size_in_billions": 671,
+ "activated_size_in_billions": 37,
  "model_src": {
  "huggingface": {
  "quantizations": [
@@ -4866,6 +4868,7 @@
  {
  "model_format": "awq",
  "model_size_in_billions": 671,
+ "activated_size_in_billions": 37,
  "model_src": {
  "huggingface": {
  "quantizations": [
@@ -4885,6 +4888,7 @@
  {
  "model_format": "ggufv2",
  "model_size_in_billions": 671,
+ "activated_size_in_billions": 37,
  "model_src": {
  "huggingface": {
  "quantizations": [
@@ -5215,6 +5219,7 @@
  {
  "model_format": "mlx",
  "model_size_in_billions": 671,
+ "activated_size_in_billions": 37,
  "model_src": {
  "huggingface": {
  "quantizations": [
@@ -5263,6 +5268,7 @@
  {
  "model_format": "pytorch",
  "model_size_in_billions": 671,
+ "activated_size_in_billions": 37,
  "model_src": {
  "huggingface": {
  "quantizations": [
@@ -5281,6 +5287,7 @@
  {
  "model_format": "gptq",
  "model_size_in_billions": 671,
+ "activated_size_in_billions": 37,
  "model_src": {
  "huggingface": {
  "quantizations": [
@@ -5311,6 +5318,116 @@
  "reasoning_start_tag": "<think>",
  "reasoning_end_tag": "</think>"
  },
+ {
+ "version": 2,
+ "context_length": 131072,
+ "model_name": "Deepseek-V3.1",
+ "model_lang": [
+ "en",
+ "zh"
+ ],
+ "model_ability": [
+ "chat",
+ "reasoning",
+ "hybrid",
+ "tools"
+ ],
+ "model_description": "DeepSeek-V3.1 is a hybrid model that supports both thinking mode and non-thinking mode.",
+ "model_specs": [
+ {
+ "model_format": "pytorch",
+ "model_size_in_billions": 671,
+ "activated_size_in_billions": 37,
+ "model_src": {
+ "huggingface": {
+ "quantizations": [
+ "none"
+ ],
+ "model_id": "deepseek-ai/DeepSeek-V3.1"
+ },
+ "modelscope": {
+ "quantizations": [
+ "none"
+ ],
+ "model_id": "deepseek-ai/DeepSeek-V3.1"
+ }
+ }
+ },
+ {
+ "model_format": "gptq",
+ "model_size_in_billions": 671,
+ "activated_size_in_billions": 37,
+ "model_src": {
+ "huggingface": {
+ "quantizations": [
+ "Int4"
+ ],
+ "model_id": "cpatonn/DeepSeek-V3.1-GPTQ-4bit"
+ },
+ "modelscope": {
+ "quantizations": [
+ "Int4"
+ ],
+ "model_id": "cpatonn/DeepSeek-V3.1-GPTQ-4bit"
+ }
+ }
+ },
+ {
+ "model_format": "awq",
+ "model_size_in_billions": 671,
+ "activated_size_in_billions": 37,
+ "model_src": {
+ "huggingface": {
+ "quantizations": [
+ "Int4"
+ ],
+ "model_id": "QuantTrio/DeepSeek-V3.1-AWQ"
+ },
+ "modelscope": {
+ "quantizations": [
+ "Int4"
+ ],
+ "model_id": "tclf90/DeepSeek-V3.1-AWQ"
+ }
+ }
+ },
+ {
+ "model_format": "mlx",
+ "model_size_in_billions": 671,
+ "activated_size_in_billions": 37,
+ "model_src": {
+ "huggingface": {
+ "quantizations": [
+ "8bit",
+ "4bit"
+ ],
+ "model_id": "mlx-community/DeepSeek-V3.1-{quantization}"
+ },
+ "modelscope": {
+ "quantizations": [
+ "8bit",
+ "4bit"
+ ],
+ "model_id": "mlx-community/DeepSeek-V3.1-{quantization}"
+ }
+ }
+ }
+ ],
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% if not thinking is defined %}{% set thinking = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, system_prompt='', is_first_sp=true, is_last_user=false) %}{%- for message in messages %}{%- if message['role'] == 'system' %}{%- if ns.is_first_sp %}{% set ns.system_prompt = ns.system_prompt + message['content'] %}{% set ns.is_first_sp = false %}{%- else %}{% set ns.system_prompt = ns.system_prompt + '\n\n' + message['content'] %}{%- endif %}{%- endif %}{%- endfor %}{{ bos_token }}{{ ns.system_prompt }}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{%- set ns.is_first = false -%}{%- set ns.is_last_user = true -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['tool_calls'] is defined and message['tool_calls'] is not none %}{%- if ns.is_last_user %}{{'<|Assistant|></think>'}}{%- endif %}{%- set ns.is_last_user = false -%}{%- set ns.is_first = false %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls'] %}{%- if not ns.is_first %}{%- if message['content'] is none %}{{'<|tool▁calls▁begin|><|tool▁call▁begin|>'+ tool['function']['name'] + '<|tool▁sep|>' + tool['function']['arguments'] + '<|tool▁call▁end|>'}}{%- else %}{{message['content'] + '<|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['function']['name'] + '<|tool▁sep|>' + tool['function']['arguments'] + '<|tool▁call▁end|>'}}{%- endif %}{%- set ns.is_first = true -%}{%- else %}{{'<|tool▁call▁begin|>'+ tool['function']['name'] + '<|tool▁sep|>' + tool['function']['arguments'] + '<|tool▁call▁end|>'}}{%- endif %}{%- endfor %}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- if message['role'] == 'assistant' and (message['tool_calls'] is not defined or message['tool_calls'] is none) %}{%- if ns.is_last_user %}{{'<|Assistant|>'}}{%- if message['prefix'] is defined and message['prefix'] and thinking %}{{'<think>'}} {%- else %}{{'</think>'}}{%- endif %}{%- endif %}{%- set ns.is_last_user = false -%}{%- if ns.is_tool %}{{message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{%- set content = message['content'] -%}{%- if '</think>' in content %}{%- set content = content.split('</think>', 1)[1] -%}{%- endif %}{{content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_last_user = false -%}{%- set ns.is_tool = true -%}{{'<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endfor -%}{%- if add_generation_prompt and ns.is_last_user and not ns.is_tool %}{{'<|Assistant|>'}}{%- if not thinking %}{{'</think>'}}{%- else %}{{'<think>'}}{%- endif %}{% endif %}",
+ "stop_token_ids": [
+ 1
+ ],
+ "stop": [
+ "<|end▁of▁sentence|>"
+ ],
+ "reasoning_start_tag": "<think>",
+ "reasoning_end_tag": "</think>",
+ "virtualenv": {
+ "packages": [
+ "transformers==4.53.0"
+ ]
+ }
+ },
  {
  "version": 2,
  "context_length": 131072,
@@ -6242,6 +6359,7 @@
  {
  "model_format": "pytorch",
  "model_size_in_billions": 671,
+ "activated_size_in_billions": 37,
  "model_src": {
  "huggingface": {
  "quantizations": [
@@ -6262,6 +6380,7 @@
  {
  "model_format": "awq",
  "model_size_in_billions": 671,
+ "activated_size_in_billions": 37,
  "model_src": {
  "huggingface": {
  "quantizations": [
@@ -6281,6 +6400,7 @@
  {
  "model_format": "ggufv2",
  "model_size_in_billions": 671,
+ "activated_size_in_billions": 37,
  "model_src": {
  "huggingface": {
  "quantizations": [
@@ -6475,6 +6595,7 @@
  {
  "model_format": "mlx",
  "model_size_in_billions": 671,
+ "activated_size_in_billions": 37,
  "model_src": {
  "huggingface": {
  "quantizations": [
@@ -6517,6 +6638,7 @@
  {
  "model_format": "pytorch",
  "model_size_in_billions": 671,
+ "activated_size_in_billions": 37,
  "model_src": {
  "huggingface": {
  "quantizations": [
@@ -6535,6 +6657,7 @@
  {
  "model_format": "awq",
  "model_size_in_billions": 671,
+ "activated_size_in_billions": 37,
  "model_src": {
  "huggingface": {
  "quantizations": [
@@ -6553,6 +6676,7 @@
  {
  "model_format": "mlx",
  "model_size_in_billions": 671,
+ "activated_size_in_billions": 37,
  "model_src": {
  "huggingface": {
  "quantizations": [
@@ -7687,7 +7811,7 @@
  "packages": [
  "transformers>=4.51.3",
  "mlx-lm>=0.23.1 ; sys_platform=='darwin'",
- "numpy==1.26.4"
+ "#system_numpy#"
  ]
  }
  },
@@ -15521,7 +15645,7 @@
  "virtualenv": {
  "packages": [
  "git+https://github.com/huggingface/transformers@v4.51.3-Qwen2.5-Omni-preview",
- "numpy==1.26.4",
+ "#system_numpy#",
  "qwen_omni_utils",
  "soundfile"
  ]
@@ -17302,7 +17426,7 @@
  "packages": [
  "transformers>=4.51.0",
  "mlx-lm>=0.24.0 ; sys_platform=='darwin'",
- "numpy==1.26.4"
+ "#system_numpy#"
  ]
  }
  },
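
In all three hunks above, the hard pin numpy==1.26.4 is replaced by the #system_numpy# placeholder. The resolver below is an illustrative guess at the intent, not xinference's actual implementation: expand the placeholder into a requirement matching the numpy already installed in the host environment instead of forcing one version.

    import numpy

    def resolve_package(requirement: str) -> str:
        # Illustrative placeholder expansion; the real substitution logic
        # lives inside xinference's virtualenv machinery.
        if requirement == "#system_numpy#":
            return f"numpy=={numpy.__version__}"
        return requirement

    print(resolve_package("#system_numpy#"))  # e.g. "numpy==1.26.4"
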
@@ -21137,5 +21261,273 @@
  "#system_numpy#"
  ]
  }
+ },
+ {
+ "version": 2,
+ "context_length": 131072,
+ "model_name": "KAT-V1",
+ "model_lang": [
+ "en",
+ "zh"
+ ],
+ "model_ability": [
+ "chat"
+ ],
+ "model_description": "Kwaipilot-AutoThink ranks first among all open-source models on LiveCodeBench Pro, a challenging benchmark explicitly designed to prevent data leakage, and even surpasses strong proprietary systems such as Seed and o3-mini.",
+ "model_specs": [
+ {
+ "model_format": "pytorch",
+ "model_size_in_billions": 40,
+ "model_src": {
+ "huggingface": {
+ "quantizations": [
+ "none"
+ ],
+ "model_id": "Kwaipilot/KAT-V1-40B"
+ },
+ "modelscope": {
+ "quantizations": [
+ "none"
+ ],
+ "model_id": "Kwaipilot/KAT-V1-40B"
+ }
+ }
+ },
+ {
+ "model_format": "gptq",
+ "model_size_in_billions": 40,
+ "model_src": {
+ "huggingface": {
+ "quantizations": [
+ "Int4-Int8Mix"
+ ],
+ "model_id": "QuantTrio/KAT-V1-40B-GPTQ-Int4-Int8Mix"
+ },
+ "modelscope": {
+ "quantizations": [
+ "Int4-Int8Mix"
+ ],
+ "model_id": "tclf90/KAT-V1-40B-GPTQ-Int4-Int8Mix"
+ }
+ }
+ },
+ {
+ "model_format": "awq",
+ "model_size_in_billions": 40,
+ "model_src": {
+ "huggingface": {
+ "quantizations": [
+ "Int4"
+ ],
+ "model_id": "QuantTrio/KAT-V1-40B-AWQ"
+ },
+ "modelscope": {
+ "quantizations": [
+ "Int4"
+ ],
+ "model_id": "tclf90/KAT-V1-40B-AWQ"
+ }
+ }
+ }
+ ],
+ "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- '' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" and not message.tool_calls %}\n {%- set content = message.content %}\n {%- if not loop.last %}\n {%- set answer_blocks = message.content.split('<answer>\\n') %}\n {%- if answer_blocks|length > 1 %}\n {%- set last_answer_block = answer_blocks[-1] %}\n {%- if '\\n</answer>' in last_answer_block %}\n {%- set content = last_answer_block.split('\\n</answer>')[0] %}\n {%- else %}\n {%- set content = message.content.split('<think_off>')[-1].lstrip('\\n') %}\n {%- set content = content.split('</think>')[-1].lstrip('\\n') %}\n {%- endif %}\n {%- else %}\n {%- set content = message.content.split('<think_off>')[-1].lstrip('\\n') %}\n {%- set content = content.split('</think>')[-1].lstrip('\\n') %}\n {%- endif %}\n {%- endif %}\n {{- '<|im_start|>' + message.role + '\\n' + content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {%- set content = message.content %}\n {%- if not loop.last %}\n {%- set answer_blocks = message.content.split('<answer>\\n') %}\n {%- if answer_blocks|length > 1 %}\n {%- set last_answer_block = answer_blocks[-1] %}\n {%- if '\\n</answer>' in last_answer_block %}\n {%- set content = last_answer_block.split('\\n</answer>')[0] %}\n {%- else %}\n {%- set content = message.content.split('<think_off>')[-1].lstrip('\\n') %}\n {%- set content = content.split('</think>')[-1].lstrip('\\n') %}\n {%- endif %}\n {%- else %}\n {%- set content = message.content.split('<think_off>')[-1].lstrip('\\n') %}\n {%- set content = content.split('</think>')[-1].lstrip('\\n') %}\n {%- endif %}\n {%- endif %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\\\"name\\\": \\\"' }}\n {{- tool_call.name }}\n {{- '\\\", \\\"arguments\\\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n<judge>\\n' }}\n{%- endif %}",
+ "stop_token_ids": [
+ 151643,
+ 151645
+ ],
+ "stop": [
+ "<|endoftext|>",
+ "<|im_end|>"
+ ]
+ },
+ {
+ "version": 2,
+ "context_length": 524288,
+ "model_name": "seed-oss",
+ "model_lang": [
+ "en",
+ "zh"
+ ],
+ "model_ability": [
+ "chat",
+ "reasoning",
+ "tools"
+ ],
+ "model_description": "Seed-OSS is a series of open-source large language models developed by ByteDance's Seed Team, designed for powerful long-context, reasoning, agent and general capabilities, and versatile developer-friendly features. Although trained with only 12T tokens, Seed-OSS achieves excellent performance on several popular open benchmarks.",
+ "model_specs": [
+ {
+ "model_format": "pytorch",
+ "model_size_in_billions": 36,
+ "model_src": {
+ "huggingface": {
+ "quantizations": [
+ "none"
+ ],
+ "model_id": "ByteDance-Seed/Seed-OSS-36B-Instruct"
+ },
+ "modelscope": {
+ "quantizations": [
+ "none"
+ ],
+ "model_id": "ByteDance-Seed/Seed-OSS-36B-Instruct"
+ }
+ }
+ },
+ {
+ "model_format": "gptq",
+ "model_size_in_billions": 36,
+ "model_src": {
+ "huggingface": {
+ "quantizations": [
+ "Int8",
+ "Int4",
+ "Int3"
+ ],
+ "model_id": "QuantTrio/Seed-OSS-36B-Instruct-GPTQ-{quantization}"
+ },
+ "modelscope": {
+ "quantizations": [
+ "Int8",
+ "Int4",
+ "Int3"
+ ],
+ "model_id": "tclf90/Seed-OSS-36B-Instruct-GPTQ-{quantization}"
+ }
+ }
+ },
+ {
+ "model_format": "awq",
+ "model_size_in_billions": 36,
+ "model_src": {
+ "huggingface": {
+ "quantizations": [
+ "Int4"
+ ],
+ "model_id": "QuantTrio/Seed-OSS-36B-Instruct-AWQ"
+ },
+ "modelscope": {
+ "quantizations": [
+ "Int4"
+ ],
+ "model_id": "tclf90/Seed-OSS-36B-Instruct-AWQ"
+ }
+ }
+ },
+ {
+ "model_format": "mlx",
+ "model_size_in_billions": 36,
+ "model_src": {
+ "huggingface": {
+ "quantizations": [
+ "4bit"
+ ],
+ "model_id": "mlx-community/Seed-OSS-36B-Instruct-4bit"
+ },
+ "modelscope": {
+ "quantizations": [
+ "4bit"
+ ],
+ "model_id": "mlx-community/Seed-OSS-36B-Instruct-4bit"
+ }
+ }
+ },
+ {
+ "model_format": "ggufv2",
+ "model_size_in_billions": 36,
+ "model_src": {
+ "huggingface": {
+ "quantizations": [
+ "BF16",
+ "IQ4_NL",
+ "IQ4_XS",
+ "Q2_K",
+ "Q2_K_L",
+ "Q3_K_M",
+ "Q3_K_S",
+ "Q4_0",
+ "Q4_1",
+ "Q4_K_M",
+ "Q4_K_S",
+ "Q5_K_M",
+ "Q5_K_S",
+ "Q6_K",
+ "Q8_0",
+ "UD-IQ1_M",
+ "UD-IQ1_S",
+ "UD-IQ2_M",
+ "UD-IQ2_XXS",
+ "UD-IQ3_XXS",
+ "UD-Q2_K_XL",
+ "UD-Q3_K_XL",
+ "UD-Q4_K_XL",
+ "UD-Q5_K_XL",
+ "UD-Q6_K_XL",
+ "UD-Q8_K_XL"
+ ],
+ "quantization_parts": {
+ "BF16": [
+ "00001-of-00002",
+ "00002-of-00002"
+ ]
+ },
+ "model_id": "unsloth/Seed-OSS-36B-Instruct-GGUF",
+ "model_file_name_template": "Seed-OSS-36B-Instruct-{quantization}.gguf",
+ "model_file_name_split_template": "{quantization}/Seed-OSS-36B-Instruct-{quantization}-{part}.gguf"
+ },
+ "modelscope": {
+ "quantizations": [
+ "BF16",
+ "IQ4_NL",
+ "IQ4_XS",
+ "Q2_K",
+ "Q2_K_L",
+ "Q3_K_M",
+ "Q3_K_S",
+ "Q4_0",
+ "Q4_1",
+ "Q4_K_M",
+ "Q4_K_S",
+ "Q5_K_M",
+ "Q5_K_S",
+ "Q6_K",
+ "Q8_0",
+ "UD-IQ1_M",
+ "UD-IQ1_S",
+ "UD-IQ2_M",
+ "UD-IQ2_XXS",
+ "UD-IQ3_XXS",
+ "UD-Q2_K_XL",
+ "UD-Q3_K_XL",
+ "UD-Q4_K_XL",
+ "UD-Q5_K_XL",
+ "UD-Q6_K_XL",
+ "UD-Q8_K_XL"
+ ],
+ "quantization_parts": {
+ "BF16": [
+ "00001-of-00002",
+ "00002-of-00002"
+ ]
+ },
+ "model_id": "unsloth/Seed-OSS-36B-Instruct-GGUF",
+ "model_file_name_template": "Seed-OSS-36B-Instruct-{quantization}.gguf",
+ "model_file_name_split_template": "{quantization}/Seed-OSS-36B-Instruct-{quantization}-{part}.gguf"
+ }
+ }
+ }
+ ],
+ "chat_template": "{# ------------- special token variables ------------- #}{%- set bos_token = '<seed:bos>' -%}{%- set eos_token = '<seed:eos>' -%}{%- set pad_token = '<seed:pad>' -%}{%- set toolcall_begin_token = '<seed:tool_call>' -%}{%- set toolcall_end_token = '</seed:tool_call>' -%}{%- set think_begin_token = '<seed:think>' -%}{%- set think_end_token = '</seed:think>' -%}{%- set budget_begin_token = '<seed:cot_budget_reflect>'-%}{%- set budget_end_token = '</seed:cot_budget_reflect>'-%}{# -------------- reflection-interval lookup -------------- #}{%- if not thinking_budget is defined %}{%- set thinking_budget = -1 -%}{%- endif -%}{%- set budget_reflections_v05 = { 0: 0, 512: 128, 1024: 256, 2048: 512, 4096: 512, 8192: 1024, 16384: 1024} -%}{%- set ns = namespace(interval = None) -%}{%- for k, v in budget_reflections_v05 | dictsort -%} {%- if ns.interval is none and thinking_budget <= k -%} {%- set ns.interval = v -%} {%- endif -%}{%- endfor -%}{%- if ns.interval is none -%} {%- set ns.interval = budget_reflections_v05[16384] -%}{%- endif -%}{%- if messages[0][\"role\"] == \"system\" %}{%- set system_message = messages[0][\"content\"] %}{%- set loop_messages = messages[1:] %}{%- else %}{%- set loop_messages = messages %}{%- endif %}{%- if not tools is defined or tools is none %}{%- set tools = [] %}{%- endif %}{%- macro py_type(t) -%} {%- if t == \"string\" -%}str {%- elif t in (\"number\", \"integer\") -%}int {%- elif t == \"boolean\" -%}bool {%- elif t == \"array\" -%}list {%- else -%}Any{%- endif -%}{%- endmacro -%}{%- if system_message is defined %}{{ bos_token + \"system\\n\" + system_message }}{%- else %}{%- if tools is iterable and tools | length > 0 %}{{ bos_token + \"system\\nYou are Doubao, a helpful AI assistant. You may call one or more functions to assist with the user query.\" }}{%- endif %}{%- endif %}{%- if use_json_tooldef is defined and use_json_tooldef %}{{\"Tool List:\\nYou are authorized to use the following tools (described in JSON Schema format). Before performing any task, you must decide how to call them based on the descriptions and parameters of these tools.\"}}{{ tools | tojson(ensure_ascii=False) }}{%- else %}{%- for item in tools if item.type == \"function\" %}Function:def {{ item.function.name }}({%- for name, spec in item.function.parameters.properties.items() %} {{- name }}: {{ py_type(spec.type) }}{% if not loop.last %},{% endif %}{%- endfor %}): \"\"\" {{ item.function.description | trim }} {%- if item.function.parameters.properties %} Args: {%- for name, spec in item.function.parameters.properties.items() %} - {{ name }} ({{ py_type(spec.type) }}) {%- if name in item.function.parameters.required %} [必填]{% else %} [选填]{% endif %}: {{- \" \" ~ (spec.description or \"\") }} {%- endfor %} {%- endif %} {%- if item.function.returns is defined and item.function.returns.properties is defined and item.function.returns.properties %} Returns: {%- for name, spec in item.function.returns.properties.items() %} - {{ name }} ({{ py_type(spec.type) }}): {{- \" \" ~ (spec.description or \"\") }} {%- endfor %} {%- endif %} \"\"\"{%- endfor %}{%- endif %}{%- if tools is iterable and tools | length > 0 %}{{\"工具调用请遵循如下格式:\\n<seed:tool_call>\\n<function=example_function_name>\\n<parameter=example_parameter_1>value_1</parameter>\\n<parameter=example_parameter_2>This is the value for the second parameter\\nthat can span\\nmultiple lines</parameter>\\n</function>\\n</seed:tool_call>\\n\"}}{%- endif %}{%- if system_message is defined or tools is iterable and tools | length > 0 %}{{ eos_token }}{%- endif %}{%- if thinking_budget is defined %}{%- if thinking_budget == 0 %}{{ bos_token+\"system\" }}{{ \"You are an intelligent assistant that can answer questions in one step without the need for reasoning and thinking, that is, your thinking budget is 0. Next, please skip the thinking process and directly start answering the user's questions.\" }}{{ eos_token }}{%- elif not thinking_budget == -1 %}{{ bos_token+\"system\" }}{{ \"You are an intelligent assistant with reflective ability. In the process of thinking and reasoning, you need to strictly follow the thinking budget, which is \"}}{{thinking_budget}}{{\". That is, you need to complete your thinking within \"}}{{thinking_budget}}{{\" tokens and start answering the user's questions. You will reflect on your thinking process every \"}}{{ns.interval}}{{\" tokens, stating how many tokens have been used and how many are left.\"}}{{ eos_token }}{%- endif %}{%- endif %}{%- for message in loop_messages %}{%- if message.role == \"assistant\" and message.tool_calls is defined and message.tool_calls is iterable and message.tool_calls | length > 0 %}{{ bos_token + message.role }}{%- if message.reasoning_content is defined and message.reasoning_content is string and message.reasoning_content | trim | length > 0 %}{{ \"\\n\" + think_begin_token + message.reasoning_content | trim + think_end_token }}{%- endif %}{%- if message.content is defined and message.content is string and message.content | trim | length > 0 %}{{ \"\\n\" + message.content | trim + \"\\n\" }}{%- endif %}{%- for tool_call in message.tool_calls %}{%- if tool_call.function is defined %}{% set tool_call = tool_call.function %}{% endif %}{{ \"\\n\" + toolcall_begin_token + \"\\n<function=\" + tool_call.name + \">\\n\" }}{%- if tool_call.arguments is defined %}{%- for arg_name, arg_value in tool_call.arguments | items %}{{ \"<parameter=\" + arg_name + \">\" }}{%- set arg_value = arg_value if arg_value is string else arg_value | string %}{{ arg_value+\"</parameter>\\n\" }}{%- endfor %}{%- endif %}{{ \"</function>\\n\" + toolcall_end_token }}{%- endfor %}{{ eos_token }}{%- elif message.role in [\"user\", \"system\"] %}{{ bos_token + message.role + \"\\n\" + message.content + eos_token }}{%- elif message.role == \"assistant\" %}{{ bos_token + message.role }}{%- if message.reasoning_content is defined and message.reasoning_content is string and message.reasoning_content | trim | length > 0 %}{{ \"\\n\" + think_begin_token + message.reasoning_content | trim + think_end_token }}{%- endif %}{%- if message.content is defined and message.content is string and message.content | trim | length > 0 %}{{ \"\\n\" + message.content | trim + eos_token }}{%- endif %}{%- else %}{{ bos_token + message.role + \"\\n\" + message.content + eos_token }}{%- endif %}{%- endfor %}{%- if add_generation_prompt %}{{ bos_token+\"assistant\\n\" }}{%- if thinking_budget == 0 %}{{ think_begin_token + \"\\n\" + budget_begin_token + \"The current thinking budget is 0, so I will directly start answering the question.\" + budget_end_token + \"\\n\" + think_end_token }}{%- endif %}{%- endif %}",
+ "stop_token_ids": [
+ 0,
+ 1,
+ 2
+ ],
+ "stop": [
+ "<seed:bos>",
+ "<seed:pad>",
+ "<seed:eos>"
+ ],
+ "reasoning_start_tag": "<think>",
+ "reasoning_end_tag": "</think>"
  }
  ]
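
The seed-oss chat template added above embeds a thinking-budget lookup: it walks the budget buckets in sorted order and picks the reflection interval of the first bucket that is >= the requested thinking_budget, falling back to the largest bucket's interval. The same logic re-implemented standalone in Python, for illustration only:

    # Re-implementation of the template's reflection-interval lookup.
    BUDGET_REFLECTIONS = {0: 0, 512: 128, 1024: 256, 2048: 512,
                          4096: 512, 8192: 1024, 16384: 1024}

    def reflection_interval(thinking_budget: int) -> int:
        # First bucket (in sorted order) that covers the budget wins.
        for k, v in sorted(BUDGET_REFLECTIONS.items()):
            if thinking_budget <= k:
                return v
        return BUDGET_REFLECTIONS[16384]  # budgets above the largest bucket

    print(reflection_interval(3000))   # 512
    print(reflection_interval(99999))  # 1024
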
xinference/model/llm/transformers/core.py:

@@ -547,15 +547,13 @@ class PytorchModel(LLM):
  So we need pad `0` on the left again.
  """
  data = []
+ max_len = max(r.extra_kwargs["attention_mask_seq_len"] for r in reqs) + 1
  for r in reqs:
  r.extra_kwargs["attention_mask_seq_len"] += 1
+ real_len = r.extra_kwargs["attention_mask_seq_len"]
+ pad_len = max_len - real_len
+
  if self._tokenizer.padding_side == "left":
- attention_mask_seq_len = r.extra_kwargs["attention_mask_seq_len"]
- pad_len = seq_length - attention_mask_seq_len
- assert pad_len >= 0, (
- f"pad_len must be greater equal 0, got {pad_len} = "
- f"seq_length({seq_length}) - attention_mask_seq_len({attention_mask_seq_len})"
- )
  x = torch.cat(
  [
  (
@@ -563,14 +561,10 @@ class PytorchModel(LLM):
  if pad_len > 0
  else torch.tensor([], dtype=torch.long)
  ),
- torch.ones((attention_mask_seq_len,), dtype=torch.long),
+ torch.ones((real_len,), dtype=torch.long),
  ]
  )
  else:
- max_len = max(r.extra_kwargs["attention_mask_seq_len"] for r in reqs)
- real_len = r.extra_kwargs["attention_mask_seq_len"]
- pad_len = max_len - real_len
-
  x = torch.cat(
  [
  torch.ones((real_len,), dtype=torch.long),
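
This refactor computes a single max_len across the whole batch (plus one slot for the token generated this step) and derives each request's pad length from it on both padding sides, replacing the old per-branch bookkeeping. A self-contained sketch of the resulting attention-mask construction, assuming only torch:

    import torch

    def build_masks(seq_lens, padding_side="left"):
        # One extra slot for the token generated in this decode step.
        max_len = max(seq_lens) + 1
        masks = []
        for n in seq_lens:
            real_len = n + 1
            pad_len = max_len - real_len
            ones = torch.ones((real_len,), dtype=torch.long)
            zeros = torch.zeros((pad_len,), dtype=torch.long)
            masks.append(torch.cat([zeros, ones] if padding_side == "left"
                                   else [ones, zeros]))
        return torch.stack(masks)

    print(build_masks([3, 5]))  # shorter request left-padded with zeros
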
xinference/model/llm/utils.py:

@@ -82,7 +82,7 @@ LLAMA3_TOOL_CALL_FAMILY = [
  "HuatuoGPT-o1-LLaMA-3.1",
  ]

- DEEPSEEK_TOOL_CALL_FAMILY = ["deepseek-v3", "deepseek-r1-0528"]
+ DEEPSEEK_TOOL_CALL_FAMILY = ["deepseek-v3", "deepseek-r1-0528", "Deepseek-V3.1"]

  TOOL_CALL_FAMILY = (
  QWEN_TOOL_CALL_FAMILY
xinference/model/llm/vllm/core.py:

@@ -273,13 +273,19 @@ if VLLM_INSTALLED and VLLM_VERSION >= version.parse("0.9.2"):
  VLLM_SUPPORTED_CHAT_MODELS.append("Qwen3-Instruct")
  VLLM_SUPPORTED_CHAT_MODELS.append("Qwen3-Thinking")
  VLLM_SUPPORTED_CHAT_MODELS.append("Qwen3-Coder")
+ VLLM_SUPPORTED_CHAT_MODELS.append("Deepseek-V3.1")

  if VLLM_INSTALLED and VLLM_VERSION >= version.parse("0.10.0"):
  VLLM_SUPPORTED_CHAT_MODELS.append("glm-4.5")
  VLLM_SUPPORTED_VISION_MODEL_LIST.append("glm-4.5v")
+ VLLM_SUPPORTED_CHAT_MODELS.append("KAT-V1")

  if VLLM_INSTALLED and VLLM_VERSION > version.parse("0.10.0"):
  VLLM_SUPPORTED_CHAT_MODELS.append("gpt-oss")
+ VLLM_SUPPORTED_CHAT_MODELS.append("seed-oss")
+
+ if VLLM_INSTALLED and VLLM_VERSION > version.parse("0.10.1.1"):
+ VLLM_SUPPORTED_CHAT_MODELS.append("seed-oss")


  class VLLMModel(LLM):
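
New model names are registered behind vLLM version gates, extending the file's existing pattern (note the hunk appends "seed-oss" under both the >0.10.0 and >0.10.1.1 gates). A minimal standalone illustration of the gating; the installed version below is assumed for the example:

    from packaging import version

    VLLM_VERSION = version.parse("0.10.2")  # assumed installed version
    VLLM_SUPPORTED_CHAT_MODELS: list[str] = []

    # Each model becomes available only once the installed vLLM is new enough.
    if VLLM_VERSION > version.parse("0.10.0"):
        VLLM_SUPPORTED_CHAT_MODELS.append("gpt-oss")
    if VLLM_VERSION > version.parse("0.10.1.1"):
        VLLM_SUPPORTED_CHAT_MODELS.append("seed-oss")

    print(VLLM_SUPPORTED_CHAT_MODELS)  # ['gpt-oss', 'seed-oss']
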
xinference/model/rerank/core.py:

@@ -97,6 +97,8 @@ class RerankModel:
  model_uid: str,
  model_path: str,
  model_family: RerankModelFamilyV2,
+ quantization: Optional[str],
+ *,
  device: Optional[str] = None,
  use_fp16: bool = False,
  **kwargs,
@@ -105,6 +107,7 @@ class RerankModel:
  self._model_spec = model_family.model_specs[0]
  self._model_uid = model_uid
  self._model_path = model_path
+ self._quantization = quantization
  self._device = device
  self._use_fp16 = use_fp16
  self._model = None
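
The bare * added to RerankModel.__init__ makes every parameter after the new quantization argument keyword-only, so callers must now pass device, use_fp16, and the rest by name. A toy demonstration of the effect (the function below is illustrative, not the real signature):

    def init(model_uid, model_path, quantization, *, device=None, use_fp16=False):
        return (model_uid, quantization, device)

    init("uid", "/path", "Int4", device="cuda")   # OK: keyword form
    try:
        init("uid", "/path", "Int4", "cuda")      # positional -> TypeError
    except TypeError as exc:
        print(exc)
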
xinference/model/rerank/sentence_transformers/core.py:

@@ -72,7 +72,7 @@ class SentenceTransformerRerankModel(RerankModel):
  enable_flash_attn = self._kwargs.pop(
  "enable_flash_attn", is_flash_attn_available()
  )
- if self._auto_detect_type(self._model_path) != "normal" and enable_flash_attn:
+ if enable_flash_attn:
  logger.warning(
  "flash_attn can only support fp16 and bf16, will force set `use_fp16` to True"
  )