xinference 1.0.0__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff compares the content of publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of xinference might be problematic.

Files changed (94)
  1. xinference/_compat.py +22 -2
  2. xinference/_version.py +3 -3
  3. xinference/api/restful_api.py +91 -6
  4. xinference/client/restful/restful_client.py +39 -0
  5. xinference/core/model.py +41 -13
  6. xinference/deploy/cmdline.py +3 -1
  7. xinference/deploy/test/test_cmdline.py +56 -0
  8. xinference/isolation.py +24 -0
  9. xinference/model/audio/__init__.py +12 -0
  10. xinference/model/audio/core.py +26 -4
  11. xinference/model/audio/f5tts.py +195 -0
  12. xinference/model/audio/fish_speech.py +71 -35
  13. xinference/model/audio/model_spec.json +88 -0
  14. xinference/model/audio/model_spec_modelscope.json +9 -0
  15. xinference/model/audio/whisper_mlx.py +208 -0
  16. xinference/model/embedding/core.py +322 -6
  17. xinference/model/embedding/model_spec.json +8 -1
  18. xinference/model/embedding/model_spec_modelscope.json +9 -1
  19. xinference/model/llm/__init__.py +4 -2
  20. xinference/model/llm/llm_family.json +479 -53
  21. xinference/model/llm/llm_family_modelscope.json +423 -17
  22. xinference/model/llm/mlx/core.py +230 -50
  23. xinference/model/llm/sglang/core.py +2 -0
  24. xinference/model/llm/transformers/chatglm.py +9 -5
  25. xinference/model/llm/transformers/core.py +1 -0
  26. xinference/model/llm/transformers/glm_edge_v.py +230 -0
  27. xinference/model/llm/transformers/utils.py +16 -8
  28. xinference/model/llm/utils.py +23 -1
  29. xinference/model/llm/vllm/core.py +89 -2
  30. xinference/thirdparty/f5_tts/__init__.py +0 -0
  31. xinference/thirdparty/f5_tts/api.py +166 -0
  32. xinference/thirdparty/f5_tts/configs/E2TTS_Base_train.yaml +44 -0
  33. xinference/thirdparty/f5_tts/configs/E2TTS_Small_train.yaml +44 -0
  34. xinference/thirdparty/f5_tts/configs/F5TTS_Base_train.yaml +46 -0
  35. xinference/thirdparty/f5_tts/configs/F5TTS_Small_train.yaml +46 -0
  36. xinference/thirdparty/f5_tts/eval/README.md +49 -0
  37. xinference/thirdparty/f5_tts/eval/ecapa_tdnn.py +330 -0
  38. xinference/thirdparty/f5_tts/eval/eval_infer_batch.py +207 -0
  39. xinference/thirdparty/f5_tts/eval/eval_infer_batch.sh +13 -0
  40. xinference/thirdparty/f5_tts/eval/eval_librispeech_test_clean.py +84 -0
  41. xinference/thirdparty/f5_tts/eval/eval_seedtts_testset.py +84 -0
  42. xinference/thirdparty/f5_tts/eval/utils_eval.py +405 -0
  43. xinference/thirdparty/f5_tts/infer/README.md +191 -0
  44. xinference/thirdparty/f5_tts/infer/SHARED.md +74 -0
  45. xinference/thirdparty/f5_tts/infer/examples/basic/basic.toml +11 -0
  46. xinference/thirdparty/f5_tts/infer/examples/basic/basic_ref_en.wav +0 -0
  47. xinference/thirdparty/f5_tts/infer/examples/basic/basic_ref_zh.wav +0 -0
  48. xinference/thirdparty/f5_tts/infer/examples/multi/country.flac +0 -0
  49. xinference/thirdparty/f5_tts/infer/examples/multi/main.flac +0 -0
  50. xinference/thirdparty/f5_tts/infer/examples/multi/story.toml +19 -0
  51. xinference/thirdparty/f5_tts/infer/examples/multi/story.txt +1 -0
  52. xinference/thirdparty/f5_tts/infer/examples/multi/town.flac +0 -0
  53. xinference/thirdparty/f5_tts/infer/examples/vocab.txt +2545 -0
  54. xinference/thirdparty/f5_tts/infer/infer_cli.py +226 -0
  55. xinference/thirdparty/f5_tts/infer/infer_gradio.py +851 -0
  56. xinference/thirdparty/f5_tts/infer/speech_edit.py +193 -0
  57. xinference/thirdparty/f5_tts/infer/utils_infer.py +538 -0
  58. xinference/thirdparty/f5_tts/model/__init__.py +10 -0
  59. xinference/thirdparty/f5_tts/model/backbones/README.md +20 -0
  60. xinference/thirdparty/f5_tts/model/backbones/dit.py +163 -0
  61. xinference/thirdparty/f5_tts/model/backbones/mmdit.py +146 -0
  62. xinference/thirdparty/f5_tts/model/backbones/unett.py +219 -0
  63. xinference/thirdparty/f5_tts/model/cfm.py +285 -0
  64. xinference/thirdparty/f5_tts/model/dataset.py +319 -0
  65. xinference/thirdparty/f5_tts/model/modules.py +658 -0
  66. xinference/thirdparty/f5_tts/model/trainer.py +366 -0
  67. xinference/thirdparty/f5_tts/model/utils.py +185 -0
  68. xinference/thirdparty/f5_tts/scripts/count_max_epoch.py +33 -0
  69. xinference/thirdparty/f5_tts/scripts/count_params_gflops.py +39 -0
  70. xinference/thirdparty/f5_tts/socket_server.py +159 -0
  71. xinference/thirdparty/f5_tts/train/README.md +77 -0
  72. xinference/thirdparty/f5_tts/train/datasets/prepare_csv_wavs.py +139 -0
  73. xinference/thirdparty/f5_tts/train/datasets/prepare_emilia.py +230 -0
  74. xinference/thirdparty/f5_tts/train/datasets/prepare_libritts.py +92 -0
  75. xinference/thirdparty/f5_tts/train/datasets/prepare_ljspeech.py +65 -0
  76. xinference/thirdparty/f5_tts/train/datasets/prepare_wenetspeech4tts.py +125 -0
  77. xinference/thirdparty/f5_tts/train/finetune_cli.py +174 -0
  78. xinference/thirdparty/f5_tts/train/finetune_gradio.py +1846 -0
  79. xinference/thirdparty/f5_tts/train/train.py +75 -0
  80. xinference/types.py +2 -1
  81. xinference/web/ui/build/asset-manifest.json +3 -3
  82. xinference/web/ui/build/index.html +1 -1
  83. xinference/web/ui/build/static/js/{main.2f269bb3.js → main.4eb4ee80.js} +3 -3
  84. xinference/web/ui/build/static/js/main.4eb4ee80.js.map +1 -0
  85. xinference/web/ui/node_modules/.cache/babel-loader/8c5eeb02f772d02cbe8b89c05428d0dd41a97866f75f7dc1c2164a67f5a1cf98.json +1 -0
  86. {xinference-1.0.0.dist-info → xinference-1.1.0.dist-info}/METADATA +39 -18
  87. {xinference-1.0.0.dist-info → xinference-1.1.0.dist-info}/RECORD +92 -39
  88. {xinference-1.0.0.dist-info → xinference-1.1.0.dist-info}/WHEEL +1 -1
  89. xinference/web/ui/build/static/js/main.2f269bb3.js.map +0 -1
  90. xinference/web/ui/node_modules/.cache/babel-loader/bd6ad8159341315a1764c397621a560809f7eb7219ab5174c801fca7e969d943.json +0 -1
  91. /xinference/web/ui/build/static/js/{main.2f269bb3.js.LICENSE.txt → main.4eb4ee80.js.LICENSE.txt} +0 -0
  92. {xinference-1.0.0.dist-info → xinference-1.1.0.dist-info}/LICENSE +0 -0
  93. {xinference-1.0.0.dist-info → xinference-1.1.0.dist-info}/entry_points.txt +0 -0
  94. {xinference-1.0.0.dist-info → xinference-1.1.0.dist-info}/top_level.txt +0 -0
xinference/model/llm/llm_family_modelscope.json
@@ -454,6 +454,72 @@
  }
  ]
  },
+ {
+ "version": 1,
+ "context_length": 131072,
+ "model_name": "llama-3.3-instruct",
+ "model_lang": [
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th"
+ ],
+ "model_ability": [
+ "chat",
+ "tools"
+ ],
+ "model_description": "The Llama 3.3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks..",
+ "model_specs": [
+ {
+ "model_format": "pytorch",
+ "model_size_in_billions": 70,
+ "quantizations": [
+ "none"
+ ],
+ "model_id": "LLM-Research/Llama-3.3-70B-Instruct",
+ "model_hub": "modelscope"
+ },
+ {
+ "model_format": "ggufv2",
+ "model_size_in_billions": 70,
+ "quantizations": [
+ "Q3_K_L",
+ "Q4_K_M",
+ "Q6_K",
+ "Q8_0"
+ ],
+ "quantization_parts": {
+ "Q6_K": [
+ "00001-of-00002",
+ "00002-of-00002"
+ ],
+ "Q8_0": [
+ "00001-of-00002",
+ "00002-of-00002"
+ ]
+ },
+ "model_id": "lmstudio-community/Llama-3.3-70B-Instruct-GGUF",
+ "model_file_name_template": "Llama-3.3-70B-Instruct-{quantization}.gguf",
+ "model_file_name_split_template": "Llama-3.3-70B-Instruct-{quantization}-{part}.gguf",
+ "model_hub": "modelscope"
+ }
+ ],
+ "chat_template": "{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- set date_string = \"26 Jul 2024\" %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message + builtin tools #}\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\n{%- if builtin_tools is defined or tools is not none %}\n {{- \"Environment: ipython\\n\" }}\n{%- endif %}\n{%- if builtin_tools is defined %}\n {{- \"Tools: \" + builtin_tools | reject('equalto', 'code_interpreter') | join(\", \") + \"\\n\\n\"}}\n{%- endif %}\n{{- \"Cutting Knowledge Date: December 2023\\n\" }}\n{{- \"Today Date: \" + date_string + \"\\n\\n\" }}\n{%- if tools is not none and not tools_in_user_message %}\n {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \"<|eot_id|>\" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_user_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n {{- '<|start_header_id|>user<|end_header_id|>\\n\\n' -}}\n {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n {{- first_user_message + \"<|eot_id|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n {%- elif 'tool_calls' in message %}\n {%- if not message.tool_calls|length == 1 %}\n {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n {%- endif %}\n {%- set tool_call = message.tool_calls[0].function %}\n {%- if builtin_tools is defined and tool_call.name in builtin_tools %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- \"<|python_tag|>\" + tool_call.name + \".call(\" }}\n {%- for arg_name, arg_val in tool_call.arguments | items %}\n {{- arg_name + '=\"' + arg_val + '\"' }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- endif %}\n {%- endfor %}\n {{- \")\" }}\n {%- else %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n {{- '\"parameters\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- \"}\" }}\n {%- endif %}\n {%- if builtin_tools is defined %}\n {#- This means we're in ipython mode #}\n {{- \"<|eom_id|>\" }}\n {%- else %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n {{- \"<|start_header_id|>ipython<|end_header_id|>\\n\\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}\n",
+ "stop_token_ids": [
+ 128001,
+ 128008,
+ 128009
+ ],
+ "stop": [
+ "<|end_of_text|>",
+ "<|eot_id|>",
+ "<|eom_id|>"
+ ]
+ },
  {
  "version": 1,
  "context_length": 2048,
@@ -586,7 +652,7 @@
  "none"
  ],
  "model_hub": "modelscope",
- "model_id": "ZhipuAI/glm-4-9b-chat",
+ "model_id": "ZhipuAI/glm-4-9b-chat-hf",
  "model_revision": "master"
  },
  {
@@ -652,7 +718,7 @@
  "none"
  ],
  "model_hub": "modelscope",
- "model_id": "ZhipuAI/glm-4-9b-chat-1m",
+ "model_id": "ZhipuAI/glm-4-9b-chat-1m-hf",
  "model_revision": "master"
  },
  {
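
Both glm-4 pytorch specs now point at ZhipuAI's transformers-native "-hf" repositories on ModelScope. A hedged sketch of resolving the renamed id with the modelscope SDK (the printed cache path is an assumption about your local setup):

```python
# Hedged sketch: fetch the renamed glm-4 repo directly with the modelscope SDK.
from modelscope import snapshot_download

# "-hf" suffix and revision "master" as in the updated spec above
local_dir = snapshot_download("ZhipuAI/glm-4-9b-chat-hf", revision="master")
print(local_dir)  # local cache directory holding the downloaded weights
```
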
@@ -2837,7 +2903,7 @@
  "model_format": "mlx",
  "model_size_in_billions": "0_5",
  "quantizations": [
- "4-bit"
+ "4bit"
  ],
  "model_id": "qwen/Qwen2-0.5B-Instruct-MLX",
  "model_hub": "modelscope"
@@ -2846,7 +2912,7 @@
  "model_format": "mlx",
  "model_size_in_billions": "1_5",
  "quantizations": [
- "4-bit"
+ "4bit"
  ],
  "model_id": "qwen/Qwen2-1.5B-Instruct-MLX",
  "model_hub": "modelscope"
@@ -2855,7 +2921,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 7,
  "quantizations": [
- "4-bit"
+ "4bit"
  ],
  "model_id": "qwen/Qwen2-7B-Instruct-MLX",
  "model_hub": "modelscope"
@@ -4649,6 +4715,16 @@
  "model_id":"qwen/Qwen2-VL-7B-Instruct-AWQ",
  "model_revision":"master"
  },
+ {
+ "model_format":"mlx",
+ "model_size_in_billions":7,
+ "quantizations":[
+ "8bit"
+ ],
+ "model_hub": "modelscope",
+ "model_id":"okwinds/Qwen2-VL-7B-Instruct-MLX-8bit",
+ "model_revision":"master"
+ },
  {
  "model_format":"pytorch",
  "model_size_in_billions":2,
@@ -4689,6 +4765,16 @@
  "model_id":"qwen/Qwen2-VL-2B-Instruct-AWQ",
  "model_revision":"master"
  },
+ {
+ "model_format":"mlx",
+ "model_size_in_billions":2,
+ "quantizations":[
+ "8bit"
+ ],
+ "model_hub": "modelscope",
+ "model_id":"okwinds/Qwen2-VL-2B-Instruct-MLX-8bit",
+ "model_revision":"master"
+ },
  {
  "model_format":"pytorch",
  "model_size_in_billions":72,
@@ -4716,6 +4802,17 @@
  ],
  "model_id":"qwen/Qwen2-VL-72B-Instruct-GPTQ-{quantization}",
  "model_hub": "modelscope"
+ },
+ {
+ "model_format":"mlx",
+ "model_size_in_billions":72,
+ "quantizations":[
+ "4bit",
+ "8bit"
+ ],
+ "model_hub": "modelscope",
+ "model_id":"okwinds/Qwen2-VL-72B-Instruct-MLX-{quantization}",
+ "model_revision":"master"
  }
  ],
  "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}",
@@ -5777,7 +5874,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 3,
  "quantizations": [
- "4-bit"
+ "4bit"
  ],
  "model_id": "okwinds/Qwen2.5-3B-Instruct-MLX-4bit",
  "model_hub": "modelscope"
@@ -5786,7 +5883,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 3,
  "quantizations": [
- "8-bit"
+ "8bit"
  ],
  "model_id": "okwinds/Qwen2.5-3B-Instruct-MLX-8bit",
  "model_hub": "modelscope"
@@ -5795,7 +5892,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 7,
  "quantizations": [
- "4-bit"
+ "4bit"
  ],
  "model_id": "okwinds/Qwen2.5-7B-Instruct-MLX-4bit",
  "model_hub": "modelscope"
@@ -5804,7 +5901,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 7,
  "quantizations": [
- "8-bit"
+ "8bit"
  ],
  "model_id": "okwinds/Qwen2.5-7B-Instruct-MLX-8bit",
  "model_hub": "modelscope"
@@ -5813,7 +5910,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 14,
  "quantizations": [
- "4-bit"
+ "4bit"
  ],
  "model_id": "okwinds/Qwen2.5-14B-Instruct-MLX-4bit",
  "model_hub": "modelscope"
@@ -5822,7 +5919,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 14,
  "quantizations": [
- "8-bit"
+ "8bit"
  ],
  "model_id": "okwinds/Qwen2.5-14B-Instruct-MLX-8bit",
  "model_hub": "modelscope"
@@ -5831,7 +5928,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 32,
  "quantizations": [
- "2-bit"
+ "2bit"
  ],
  "model_id": "okwinds/Qwen2.5-32B-Instruct-MLX-2bit",
  "model_hub": "modelscope"
@@ -5840,7 +5937,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 32,
  "quantizations": [
- "4-bit"
+ "4bit"
  ],
  "model_id": "okwinds/Qwen2.5-32B-Instruct-MLX-4bit",
  "model_hub": "modelscope"
@@ -5849,7 +5946,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 32,
  "quantizations": [
- "8-bit"
+ "8bit"
  ],
  "model_id": "okwinds/Qwen2.5-32B-Instruct-MLX-8bit",
  "model_hub": "modelscope"
@@ -5858,7 +5955,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 72,
  "quantizations": [
- "2-bit"
+ "2bit"
  ],
  "model_id": "okwinds/Qwen2.5-32B-Instruct-MLX-2bit",
  "model_hub": "modelscope"
@@ -5867,7 +5964,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 72,
  "quantizations": [
- "4-bit"
+ "4bit"
  ],
  "model_id": "okwinds/Qwen2.5-72B-Instruct-MLX-4bit",
  "model_hub": "modelscope"
@@ -5876,7 +5973,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 72,
  "quantizations": [
- "8-bit"
+ "8bit"
  ],
  "model_id": "okwinds/Qwen2.5-72B-Instruct-MLX-8bit",
  "model_hub": "modelscope"
@@ -6267,5 +6364,314 @@
  "<|im_start|>",
  "<|im_end|>"
  ]
+ },
+ {
+ "version": 1,
+ "context_length": 32768,
+ "model_name": "QwQ-32B-Preview",
+ "model_lang": [
+ "en",
+ "zh"
+ ],
+ "model_ability": [
+ "chat"
+ ],
+ "model_description": "QwQ-32B-Preview is an experimental research model developed by the Qwen Team, focused on advancing AI reasoning capabilities.",
+ "model_specs": [
+ {
+ "model_format": "pytorch",
+ "model_size_in_billions": 32,
+ "quantizations": [
+ "4-bit",
+ "8-bit",
+ "none"
+ ],
+ "model_id": "Qwen/QwQ-32B-Preview",
+ "model_hub": "modelscope"
+ },
+ {
+ "model_format": "mlx",
+ "model_size_in_billions": 32,
+ "quantizations": [
+ "4bit"
+ ],
+ "model_id": "okwinds/QwQ-32B-Preview-MLX-4bit",
+ "model_hub": "modelscope"
+ },
+ {
+ "model_format": "mlx",
+ "model_size_in_billions": 32,
+ "quantizations": [
+ "8bit"
+ ],
+ "model_id": "okwinds/QwQ-32B-Preview-MLX-8bit",
+ "model_hub": "modelscope"
+ },
+ {
+ "model_format": "ggufv2",
+ "model_size_in_billions": 32,
+ "quantizations": [
+ "Q3_K_L",
+ "Q4_K_M",
+ "Q6_K",
+ "Q8_0"
+ ],
+ "model_id": "AI-ModelScope/QwQ-32B-Preview-GGUF",
+ "model_file_name_template": "QwQ-32B-Preview-{quantization}.gguf",
+ "model_hub": "modelscope"
+ }
+ ],
+ "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+ "stop_token_ids": [
+ 151643,
+ 151644,
+ 151645
+ ],
+ "stop": [
+ "<|endoftext|>",
+ "<|im_start|>",
+ "<|im_end|>"
+ ]
+ },
+ {
+ "version": 1,
+ "context_length": 8192,
+ "model_name": "glm-edge-chat",
+ "model_lang": [
+ "en",
+ "zh"
+ ],
+ "model_ability": [
+ "chat"
+ ],
+ "model_description": "The GLM-Edge series is our attempt to face the end-side real-life scenarios, which consists of two sizes of large-language dialogue models and multimodal comprehension models (GLM-Edge-1.5B-Chat, GLM-Edge-4B-Chat, GLM-Edge-V-2B, GLM-Edge-V-5B). Among them, the 1.5B / 2B model is mainly for platforms such as mobile phones and cars, and the 4B / 5B model is mainly for platforms such as PCs.",
+ "model_specs": [
+ {
+ "model_format": "pytorch",
+ "model_size_in_billions": "1_5",
+ "quantizations": [
+ "4-bit",
+ "8-bit",
+ "none"
+ ],
+ "model_id": "ZhipuAI/glm-edge-1.5b-chat",
+ "model_hub": "modelscope"
+ },
+ {
+ "model_format": "pytorch",
+ "model_size_in_billions": "4",
+ "quantizations": [
+ "4-bit",
+ "8-bit",
+ "none"
+ ],
+ "model_id": "ZhipuAI/glm-edge-4b-chat",
+ "model_hub": "modelscope"
+ },
+ {
+ "model_format": "ggufv2",
+ "model_size_in_billions": "1_5",
+ "quantizations": [
+ "Q4_0",
+ "Q4_1",
+ "Q4_K",
+ "Q4_K_M",
+ "Q4_K_S",
+ "Q5_0",
+ "Q5_1",
+ "Q5_K",
+ "Q5_K_M",
+ "Q5_K_S",
+ "Q6_K",
+ "Q8_0"
+ ],
+ "model_file_name_template": "ggml-model-{quantization}.gguf",
+ "model_hub": "modelscope",
+ "model_id": "ZhipuAI/glm-edge-1.5b-chat-gguf"
+ },
+ {
+ "model_format": "ggufv2",
+ "model_size_in_billions": "1_5",
+ "quantizations": [
+ "F16"
+ ],
+ "model_file_name_template": "glm-edge-1.5B-chat-{quantization}.gguf",
+ "model_hub": "modelscope",
+ "model_id": "ZhipuAI/glm-edge-1.5b-chat-gguf"
+ },
+ {
+ "model_format": "ggufv2",
+ "model_size_in_billions": "4",
+ "quantizations": [
+ "Q4_0",
+ "Q4_1",
+ "Q4_K",
+ "Q4_K_M",
+ "Q4_K_S",
+ "Q5_0",
+ "Q5_1",
+ "Q5_K",
+ "Q5_K_M",
+ "Q5_K_S",
+ "Q6_K",
+ "Q8_0"
+ ],
+ "model_file_name_template": "ggml-model-{quantization}.gguf",
+ "model_hub": "modelscope",
+ "model_id": "ZhipuAI/glm-edge-4b-chat-gguf"
+ },
+ {
+ "model_format": "ggufv2",
+ "model_size_in_billions": "4",
+ "quantizations": [
+ "F16"
+ ],
+ "model_file_name_template": "glm-edge-4B-chat-{quantization}.gguf",
+ "model_hub": "modelscope",
+ "model_id": "ZhipuAI/glm-edge-4b-chat-gguf"
+ }
+ ],
+ "chat_template": "{% for item in messages %}{% if item['role'] == 'system' %}<|system|>\n{{ item['content'] }}{% elif item['role'] == 'user' %}<|user|>\n{{ item['content'] }}{% elif item['role'] == 'assistant' %}<|assistant|>\n{{ item['content'] }}{% endif %}{% endfor %}{% if add_generation_prompt %}<|assistant|>\n{% endif %}",
+ "stop_token_ids": [
+ 59246,
+ 59253,
+ 59255
+ ],
+ "stop": [
+ "<|endoftext|>",
+ "<|user|>",
+ "<|observation|>"
+ ]
+ },
+ {
+ "version": 1,
+ "context_length": 8192,
+ "model_name": "glm-edge-v",
+ "model_lang": [
+ "en",
+ "zh"
+ ],
+ "model_ability": [
+ "chat",
+ "vision"
+ ],
+ "model_description": "The GLM-Edge series is our attempt to face the end-side real-life scenarios, which consists of two sizes of large-language dialogue models and multimodal comprehension models (GLM-Edge-1.5B-Chat, GLM-Edge-4B-Chat, GLM-Edge-V-2B, GLM-Edge-V-5B). Among them, the 1.5B / 2B model is mainly for platforms such as mobile phones and cars, and the 4B / 5B model is mainly for platforms such as PCs.",
+ "model_specs": [
+ {
+ "model_format": "pytorch",
+ "model_size_in_billions": "2",
+ "quantizations": [
+ "4-bit",
+ "8-bit",
+ "none"
+ ],
+ "model_id": "ZhipuAI/glm-edge-v-2b",
+ "model_hub": "modelscope"
+ },
+ {
+ "model_format": "pytorch",
+ "model_size_in_billions": "5",
+ "quantizations": [
+ "4-bit",
+ "8-bit",
+ "none"
+ ],
+ "model_id": "ZhipuAI/glm-edge-v-5b",
+ "model_hub": "modelscope"
+ },
+ {
+ "model_format": "ggufv2",
+ "model_size_in_billions": "2",
+ "quantizations": [
+ "Q4_0",
+ "Q4_1",
+ "Q4_K",
+ "Q4_K_M",
+ "Q4_K_S",
+ "Q5_0",
+ "Q5_1",
+ "Q5_K",
+ "Q5_K_M",
+ "Q5_K_S",
+ "Q6_K",
+ "Q8_0"
+ ],
+ "model_file_name_template": "ggml-model-{quantization}.gguf",
+ "model_hub": "modelscope",
+ "model_id": "ZhipuAI/glm-edge-v-2b-gguf"
+ },
+ {
+ "model_format": "ggufv2",
+ "model_size_in_billions": "2",
+ "quantizations": [
+ "F16"
+ ],
+ "model_file_name_template": "glm-edge-v-2B-{quantization}.gguf",
+ "model_hub": "modelscope",
+ "model_id": "ZhipuAI/glm-edge-v-2b-gguf"
+ },
+ {
+ "model_format": "ggufv2",
+ "model_size_in_billions": "2",
+ "quantizations": [
+ "f16"
+ ],
+ "model_file_name_template": "mmproj-model-{quantization}.gguf",
+ "model_hub": "modelscope",
+ "model_id": "ZhipuAI/glm-edge-v-2b-gguf"
+ },
+ {
+ "model_format": "ggufv2",
+ "model_size_in_billions": "5",
+ "quantizations": [
+ "Q4_0",
+ "Q4_1",
+ "Q4_K",
+ "Q4_K_M",
+ "Q4_K_S",
+ "Q5_0",
+ "Q5_1",
+ "Q5_K",
+ "Q5_K_M",
+ "Q5_K_S",
+ "Q6_K",
+ "Q8_0"
+ ],
+ "model_file_name_template": "ggml-model-{quantization}.gguf",
+ "model_hub": "modelscope",
+ "model_id": "ZhipuAI/glm-edge-v-5b-gguf"
+ },
+ {
+ "model_format": "ggufv2",
+ "model_size_in_billions": "5",
+ "quantizations": [
+ "F16"
+ ],
+ "model_file_name_template": "glm-edge-v-5B-{quantization}.gguf",
+ "model_hub": "modelscope",
+ "model_id": "ZhipuAI/glm-edge-v-5b-gguf"
+ },
+ {
+ "model_format": "ggufv2",
+ "model_size_in_billions": "5",
+ "quantizations": [
+ "f16"
+ ],
+ "model_file_name_template": "mmproj-model-{quantization}.gguf",
+ "model_hub": "modelscope",
+ "model_id": "ZhipuAI/glm-edge-v-5b-gguf"
+ }
+ ],
+ "chat_template": "{% for item in messages %}{% if item['role'] != 'system' %}<|{{ item['role'] }}|>\n{% for content in item['content'] %}{% if content['type'] == 'image' %}{% for _ in range(578) %}<|begin_of_image|>{% endfor %}{% elif content['type'] == 'text' %}{{ content['text'] }}{% endif %}{% endfor %}\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|assistant|>\n{% endif %}",
+ "stop_token_ids": [
+ 59246,
+ 59253,
+ 59255
+ ],
+ "stop": [
+ "<|endoftext|>",
+ "<|user|>",
+ "<|observation|>"
+ ]
  }
  ]
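
The glm-edge entries ship a plain Jinja2 chat_template with no custom callables, so the prompt layout can be previewed outside the server. A hedged sketch rendering the glm-edge-chat template directly with the jinja2 package (the sample messages are illustrative):

```python
# Hedged sketch: render the glm-edge-chat template added above to inspect
# the exact prompt string it produces.
from jinja2 import Template

chat_template = (
    "{% for item in messages %}{% if item['role'] == 'system' %}<|system|>\n"
    "{{ item['content'] }}{% elif item['role'] == 'user' %}<|user|>\n"
    "{{ item['content'] }}{% elif item['role'] == 'assistant' %}<|assistant|>\n"
    "{{ item['content'] }}{% endif %}{% endfor %}"
    "{% if add_generation_prompt %}<|assistant|>\n{% endif %}"
)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What runs on-device?"},
]

prompt = Template(chat_template).render(
    messages=messages, add_generation_prompt=True
)
print(prompt)
# <|system|>
# You are a helpful assistant.<|user|>
# What runs on-device?<|assistant|>
```
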