xinference 1.0.1__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (87)
  1. xinference/_version.py +3 -3
  2. xinference/api/restful_api.py +5 -5
  3. xinference/core/model.py +6 -1
  4. xinference/deploy/cmdline.py +3 -1
  5. xinference/deploy/test/test_cmdline.py +56 -0
  6. xinference/isolation.py +24 -0
  7. xinference/model/audio/core.py +5 -0
  8. xinference/model/audio/f5tts.py +195 -0
  9. xinference/model/audio/fish_speech.py +2 -1
  10. xinference/model/audio/model_spec.json +8 -0
  11. xinference/model/audio/model_spec_modelscope.json +9 -0
  12. xinference/model/embedding/core.py +203 -142
  13. xinference/model/embedding/model_spec.json +7 -0
  14. xinference/model/embedding/model_spec_modelscope.json +8 -0
  15. xinference/model/llm/__init__.py +2 -2
  16. xinference/model/llm/llm_family.json +172 -53
  17. xinference/model/llm/llm_family_modelscope.json +118 -20
  18. xinference/model/llm/mlx/core.py +230 -49
  19. xinference/model/llm/sglang/core.py +1 -0
  20. xinference/model/llm/transformers/chatglm.py +9 -5
  21. xinference/model/llm/transformers/utils.py +16 -8
  22. xinference/model/llm/utils.py +4 -1
  23. xinference/model/llm/vllm/core.py +5 -0
  24. xinference/thirdparty/f5_tts/__init__.py +0 -0
  25. xinference/thirdparty/f5_tts/api.py +166 -0
  26. xinference/thirdparty/f5_tts/configs/E2TTS_Base_train.yaml +44 -0
  27. xinference/thirdparty/f5_tts/configs/E2TTS_Small_train.yaml +44 -0
  28. xinference/thirdparty/f5_tts/configs/F5TTS_Base_train.yaml +46 -0
  29. xinference/thirdparty/f5_tts/configs/F5TTS_Small_train.yaml +46 -0
  30. xinference/thirdparty/f5_tts/eval/README.md +49 -0
  31. xinference/thirdparty/f5_tts/eval/ecapa_tdnn.py +330 -0
  32. xinference/thirdparty/f5_tts/eval/eval_infer_batch.py +207 -0
  33. xinference/thirdparty/f5_tts/eval/eval_infer_batch.sh +13 -0
  34. xinference/thirdparty/f5_tts/eval/eval_librispeech_test_clean.py +84 -0
  35. xinference/thirdparty/f5_tts/eval/eval_seedtts_testset.py +84 -0
  36. xinference/thirdparty/f5_tts/eval/utils_eval.py +405 -0
  37. xinference/thirdparty/f5_tts/infer/README.md +191 -0
  38. xinference/thirdparty/f5_tts/infer/SHARED.md +74 -0
  39. xinference/thirdparty/f5_tts/infer/examples/basic/basic.toml +11 -0
  40. xinference/thirdparty/f5_tts/infer/examples/basic/basic_ref_en.wav +0 -0
  41. xinference/thirdparty/f5_tts/infer/examples/basic/basic_ref_zh.wav +0 -0
  42. xinference/thirdparty/f5_tts/infer/examples/multi/country.flac +0 -0
  43. xinference/thirdparty/f5_tts/infer/examples/multi/main.flac +0 -0
  44. xinference/thirdparty/f5_tts/infer/examples/multi/story.toml +19 -0
  45. xinference/thirdparty/f5_tts/infer/examples/multi/story.txt +1 -0
  46. xinference/thirdparty/f5_tts/infer/examples/multi/town.flac +0 -0
  47. xinference/thirdparty/f5_tts/infer/examples/vocab.txt +2545 -0
  48. xinference/thirdparty/f5_tts/infer/infer_cli.py +226 -0
  49. xinference/thirdparty/f5_tts/infer/infer_gradio.py +851 -0
  50. xinference/thirdparty/f5_tts/infer/speech_edit.py +193 -0
  51. xinference/thirdparty/f5_tts/infer/utils_infer.py +538 -0
  52. xinference/thirdparty/f5_tts/model/__init__.py +10 -0
  53. xinference/thirdparty/f5_tts/model/backbones/README.md +20 -0
  54. xinference/thirdparty/f5_tts/model/backbones/dit.py +163 -0
  55. xinference/thirdparty/f5_tts/model/backbones/mmdit.py +146 -0
  56. xinference/thirdparty/f5_tts/model/backbones/unett.py +219 -0
  57. xinference/thirdparty/f5_tts/model/cfm.py +285 -0
  58. xinference/thirdparty/f5_tts/model/dataset.py +319 -0
  59. xinference/thirdparty/f5_tts/model/modules.py +658 -0
  60. xinference/thirdparty/f5_tts/model/trainer.py +366 -0
  61. xinference/thirdparty/f5_tts/model/utils.py +185 -0
  62. xinference/thirdparty/f5_tts/scripts/count_max_epoch.py +33 -0
  63. xinference/thirdparty/f5_tts/scripts/count_params_gflops.py +39 -0
  64. xinference/thirdparty/f5_tts/socket_server.py +159 -0
  65. xinference/thirdparty/f5_tts/train/README.md +77 -0
  66. xinference/thirdparty/f5_tts/train/datasets/prepare_csv_wavs.py +139 -0
  67. xinference/thirdparty/f5_tts/train/datasets/prepare_emilia.py +230 -0
  68. xinference/thirdparty/f5_tts/train/datasets/prepare_libritts.py +92 -0
  69. xinference/thirdparty/f5_tts/train/datasets/prepare_ljspeech.py +65 -0
  70. xinference/thirdparty/f5_tts/train/datasets/prepare_wenetspeech4tts.py +125 -0
  71. xinference/thirdparty/f5_tts/train/finetune_cli.py +174 -0
  72. xinference/thirdparty/f5_tts/train/finetune_gradio.py +1846 -0
  73. xinference/thirdparty/f5_tts/train/train.py +75 -0
  74. xinference/web/ui/build/asset-manifest.json +3 -3
  75. xinference/web/ui/build/index.html +1 -1
  76. xinference/web/ui/build/static/js/{main.2f269bb3.js → main.4eb4ee80.js} +3 -3
  77. xinference/web/ui/build/static/js/main.4eb4ee80.js.map +1 -0
  78. xinference/web/ui/node_modules/.cache/babel-loader/8c5eeb02f772d02cbe8b89c05428d0dd41a97866f75f7dc1c2164a67f5a1cf98.json +1 -0
  79. {xinference-1.0.1.dist-info → xinference-1.1.0.dist-info}/METADATA +33 -14
  80. {xinference-1.0.1.dist-info → xinference-1.1.0.dist-info}/RECORD +85 -34
  81. xinference/web/ui/build/static/js/main.2f269bb3.js.map +0 -1
  82. xinference/web/ui/node_modules/.cache/babel-loader/bd6ad8159341315a1764c397621a560809f7eb7219ab5174c801fca7e969d943.json +0 -1
  83. /xinference/web/ui/build/static/js/{main.2f269bb3.js.LICENSE.txt → main.4eb4ee80.js.LICENSE.txt} +0 -0
  84. {xinference-1.0.1.dist-info → xinference-1.1.0.dist-info}/LICENSE +0 -0
  85. {xinference-1.0.1.dist-info → xinference-1.1.0.dist-info}/WHEEL +0 -0
  86. {xinference-1.0.1.dist-info → xinference-1.1.0.dist-info}/entry_points.txt +0 -0
  87. {xinference-1.0.1.dist-info → xinference-1.1.0.dist-info}/top_level.txt +0 -0
@@ -454,6 +454,72 @@
  }
  ]
  },
+ {
+ "version": 1,
+ "context_length": 131072,
+ "model_name": "llama-3.3-instruct",
+ "model_lang": [
+ "en",
+ "de",
+ "fr",
+ "it",
+ "pt",
+ "hi",
+ "es",
+ "th"
+ ],
+ "model_ability": [
+ "chat",
+ "tools"
+ ],
+ "model_description": "The Llama 3.3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks..",
+ "model_specs": [
+ {
+ "model_format": "pytorch",
+ "model_size_in_billions": 70,
+ "quantizations": [
+ "none"
+ ],
+ "model_id": "LLM-Research/Llama-3.3-70B-Instruct",
+ "model_hub": "modelscope"
+ },
+ {
+ "model_format": "ggufv2",
+ "model_size_in_billions": 70,
+ "quantizations": [
+ "Q3_K_L",
+ "Q4_K_M",
+ "Q6_K",
+ "Q8_0"
+ ],
+ "quantization_parts": {
+ "Q6_K": [
+ "00001-of-00002",
+ "00002-of-00002"
+ ],
+ "Q8_0": [
+ "00001-of-00002",
+ "00002-of-00002"
+ ]
+ },
+ "model_id": "lmstudio-community/Llama-3.3-70B-Instruct-GGUF",
+ "model_file_name_template": "Llama-3.3-70B-Instruct-{quantization}.gguf",
+ "model_file_name_split_template": "Llama-3.3-70B-Instruct-{quantization}-{part}.gguf",
+ "model_hub": "modelscope"
+ }
+ ],
+ "chat_template": "{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- set date_string = \"26 Jul 2024\" %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message + builtin tools #}\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\n{%- if builtin_tools is defined or tools is not none %}\n {{- \"Environment: ipython\\n\" }}\n{%- endif %}\n{%- if builtin_tools is defined %}\n {{- \"Tools: \" + builtin_tools | reject('equalto', 'code_interpreter') | join(\", \") + \"\\n\\n\"}}\n{%- endif %}\n{{- \"Cutting Knowledge Date: December 2023\\n\" }}\n{{- \"Today Date: \" + date_string + \"\\n\\n\" }}\n{%- if tools is not none and not tools_in_user_message %}\n {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \"<|eot_id|>\" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_user_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n {{- '<|start_header_id|>user<|end_header_id|>\\n\\n' -}}\n {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n {{- first_user_message + \"<|eot_id|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n {%- elif 'tool_calls' in message %}\n {%- if not message.tool_calls|length == 1 %}\n {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n {%- endif %}\n {%- set tool_call = message.tool_calls[0].function %}\n {%- if builtin_tools is defined and tool_call.name in builtin_tools %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- \"<|python_tag|>\" + tool_call.name + \".call(\" }}\n {%- for arg_name, arg_val in tool_call.arguments | items %}\n {{- arg_name + '=\"' + arg_val + '\"' }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- endif %}\n {%- endfor %}\n {{- \")\" }}\n {%- else %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n {{- '\"parameters\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- \"}\" }}\n {%- endif %}\n {%- if builtin_tools is defined %}\n {#- This means we're in ipython mode #}\n {{- \"<|eom_id|>\" }}\n {%- else %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n {{- \"<|start_header_id|>ipython<|end_header_id|>\\n\\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}\n",
+ "stop_token_ids": [
+ 128001,
+ 128008,
+ 128009
+ ],
+ "stop": [
+ "<|end_of_text|>",
+ "<|eot_id|>",
+ "<|eom_id|>"
+ ]
+ },
  {
  "version": 1,
  "context_length": 2048,
@@ -586,7 +652,7 @@
  "none"
  ],
  "model_hub": "modelscope",
- "model_id": "ZhipuAI/glm-4-9b-chat",
+ "model_id": "ZhipuAI/glm-4-9b-chat-hf",
  "model_revision": "master"
  },
  {
@@ -652,7 +718,7 @@
  "none"
  ],
  "model_hub": "modelscope",
- "model_id": "ZhipuAI/glm-4-9b-chat-1m",
+ "model_id": "ZhipuAI/glm-4-9b-chat-1m-hf",
  "model_revision": "master"
  },
  {
@@ -2837,7 +2903,7 @@
  "model_format": "mlx",
  "model_size_in_billions": "0_5",
  "quantizations": [
- "4-bit"
+ "4bit"
  ],
  "model_id": "qwen/Qwen2-0.5B-Instruct-MLX",
  "model_hub": "modelscope"
@@ -2846,7 +2912,7 @@
  "model_format": "mlx",
  "model_size_in_billions": "1_5",
  "quantizations": [
- "4-bit"
+ "4bit"
  ],
  "model_id": "qwen/Qwen2-1.5B-Instruct-MLX",
  "model_hub": "modelscope"
@@ -2855,7 +2921,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 7,
  "quantizations": [
- "4-bit"
+ "4bit"
  ],
  "model_id": "qwen/Qwen2-7B-Instruct-MLX",
  "model_hub": "modelscope"
@@ -4649,6 +4715,16 @@
  "model_id":"qwen/Qwen2-VL-7B-Instruct-AWQ",
  "model_revision":"master"
  },
+ {
+ "model_format":"mlx",
+ "model_size_in_billions":7,
+ "quantizations":[
+ "8bit"
+ ],
+ "model_hub": "modelscope",
+ "model_id":"okwinds/Qwen2-VL-7B-Instruct-MLX-8bit",
+ "model_revision":"master"
+ },
  {
  "model_format":"pytorch",
  "model_size_in_billions":2,
@@ -4689,6 +4765,16 @@
  "model_id":"qwen/Qwen2-VL-2B-Instruct-AWQ",
  "model_revision":"master"
  },
+ {
+ "model_format":"mlx",
+ "model_size_in_billions":2,
+ "quantizations":[
+ "8bit"
+ ],
+ "model_hub": "modelscope",
+ "model_id":"okwinds/Qwen2-VL-2B-Instruct-MLX-8bit",
+ "model_revision":"master"
+ },
  {
  "model_format":"pytorch",
  "model_size_in_billions":72,
@@ -4716,6 +4802,17 @@
  ],
  "model_id":"qwen/Qwen2-VL-72B-Instruct-GPTQ-{quantization}",
  "model_hub": "modelscope"
+ },
+ {
+ "model_format":"mlx",
+ "model_size_in_billions":72,
+ "quantizations":[
+ "4bit",
+ "8bit"
+ ],
+ "model_hub": "modelscope",
+ "model_id":"okwinds/Qwen2-VL-72B-Instruct-MLX-{quantization}",
+ "model_revision":"master"
  }
  ],
  "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}",
@@ -5777,7 +5874,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 3,
  "quantizations": [
- "4-bit"
+ "4bit"
  ],
  "model_id": "okwinds/Qwen2.5-3B-Instruct-MLX-4bit",
  "model_hub": "modelscope"
@@ -5786,7 +5883,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 3,
  "quantizations": [
- "8-bit"
+ "8bit"
  ],
  "model_id": "okwinds/Qwen2.5-3B-Instruct-MLX-8bit",
  "model_hub": "modelscope"
@@ -5795,7 +5892,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 7,
  "quantizations": [
- "4-bit"
+ "4bit"
  ],
  "model_id": "okwinds/Qwen2.5-7B-Instruct-MLX-4bit",
  "model_hub": "modelscope"
@@ -5804,7 +5901,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 7,
  "quantizations": [
- "8-bit"
+ "8bit"
  ],
  "model_id": "okwinds/Qwen2.5-7B-Instruct-MLX-8bit",
  "model_hub": "modelscope"
@@ -5813,7 +5910,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 14,
  "quantizations": [
- "4-bit"
+ "4bit"
  ],
  "model_id": "okwinds/Qwen2.5-14B-Instruct-MLX-4bit",
  "model_hub": "modelscope"
@@ -5822,7 +5919,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 14,
  "quantizations": [
- "8-bit"
+ "8bit"
  ],
  "model_id": "okwinds/Qwen2.5-14B-Instruct-MLX-8bit",
  "model_hub": "modelscope"
@@ -5831,7 +5928,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 32,
  "quantizations": [
- "2-bit"
+ "2bit"
  ],
  "model_id": "okwinds/Qwen2.5-32B-Instruct-MLX-2bit",
  "model_hub": "modelscope"
@@ -5840,7 +5937,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 32,
  "quantizations": [
- "4-bit"
+ "4bit"
  ],
  "model_id": "okwinds/Qwen2.5-32B-Instruct-MLX-4bit",
  "model_hub": "modelscope"
@@ -5849,7 +5946,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 32,
  "quantizations": [
- "8-bit"
+ "8bit"
  ],
  "model_id": "okwinds/Qwen2.5-32B-Instruct-MLX-8bit",
  "model_hub": "modelscope"
@@ -5858,7 +5955,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 72,
  "quantizations": [
- "2-bit"
+ "2bit"
  ],
  "model_id": "okwinds/Qwen2.5-32B-Instruct-MLX-2bit",
  "model_hub": "modelscope"
@@ -5867,7 +5964,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 72,
  "quantizations": [
- "4-bit"
+ "4bit"
  ],
  "model_id": "okwinds/Qwen2.5-72B-Instruct-MLX-4bit",
  "model_hub": "modelscope"
@@ -5876,7 +5973,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 72,
  "quantizations": [
- "8-bit"
+ "8bit"
  ],
  "model_id": "okwinds/Qwen2.5-72B-Instruct-MLX-8bit",
  "model_hub": "modelscope"
@@ -6296,7 +6393,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 32,
  "quantizations": [
- "4-bit"
+ "4bit"
  ],
  "model_id": "okwinds/QwQ-32B-Preview-MLX-4bit",
  "model_hub": "modelscope"
@@ -6305,7 +6402,7 @@
  "model_format": "mlx",
  "model_size_in_billions": 32,
  "quantizations": [
- "8-bit"
+ "8bit"
  ],
  "model_id": "okwinds/QwQ-32B-Preview-MLX-8bit",
  "model_hub": "modelscope"
@@ -6320,7 +6417,8 @@
  "Q8_0"
  ],
  "model_id": "AI-ModelScope/QwQ-32B-Preview-GGUF",
- "model_file_name_template": "QwQ-32B-Preview-{quantization}.gguf"
+ "model_file_name_template": "QwQ-32B-Preview-{quantization}.gguf",
+ "model_hub": "modelscope"
  }
  ],
  "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
@@ -168,6 +168,9 @@ class MLXModel(LLM):
  return False
  if "generate" not in llm_family.model_ability:
  return False
+ if "chat" in llm_family.model_ability or "vision" in llm_family.model_ability:
+ # do not process chat or vision
+ return False
  return True

  def _get_prompt_cache(self, prompt, lora_name: Optional[str] = None):
@@ -191,18 +194,35 @@ class MLXModel(LLM):
  self._prompt_cache.tokens.extend(prompt)
  return prompt

- def _generate_stream(self, prompt: str, kwargs: MLXGenerateConfig):
- import mlx.core as mx
- from mlx_lm.utils import generate_step
+ def _generate_stream_inner(self, **kwargs):
+ from mlx_lm.utils import make_sampler, stream_generate
+
+ sampler = make_sampler(
+ temp=kwargs.pop("temperature"), top_p=kwargs.pop("top_p")
+ )
+ prompt_token_ids = kwargs.pop("prompt_token_ids")
+ yield from stream_generate(
+ self._model, self._tokenizer, prompt_token_ids, sampler=sampler, **kwargs
+ )
+
+ def _prepare_inputs(
+ self, prompt: Union[str, Dict[str, Any]], kwargs
+ ) -> Tuple[Any, int]:
+ prompt_token_ids = self._tokenizer.encode(prompt)
+ prompt_token_ids = self._get_prompt_cache(
+ prompt_token_ids, kwargs.get("lora_name")
+ )
+ return prompt_token_ids, len(prompt_token_ids)

- model = self._model
+ def _generate_stream(
+ self, prompt: Union[str, Dict[str, Any]], kwargs: MLXGenerateConfig
+ ):
  model_uid = self.model_uid
  tokenizer = self._tokenizer
  max_tokens = kwargs["max_tokens"]
  chunk_id = str(uuid.uuid4())
  stop_token_ids = kwargs.get("stop_token_ids", [])
  stream = kwargs.get("stream", False)
- lora_name = kwargs.get("lora_name")
  stream_options = kwargs.pop("stream_options", None)
  include_usage = (
  stream_options["include_usage"]
@@ -210,39 +230,28 @@ class MLXModel(LLM):
  else False
  )

- prompt_token_ids = tokenizer.encode(prompt)
- prompt_token_ids = self._get_prompt_cache(prompt_token_ids, lora_name)
- prompt_tokens = mx.array(prompt_token_ids)
- input_echo_len = len(prompt_tokens)
+ prompt_token_ids, input_echo_len = self._prepare_inputs(prompt, kwargs)

  i = 0
  start = time.time()
  output = ""
  tokens = []
- for (token, _), i in zip(
- generate_step(
- prompt_tokens,
- model,
- temp=kwargs["temperature"],
+ for chunk_resp, i in zip(
+ self._generate_stream_inner(
+ prompt_token_ids=prompt_token_ids,
+ max_tokens=max_tokens,
+ temperature=kwargs["temperature"],
+ top_p=kwargs["top_p"],
  repetition_penalty=kwargs["repetition_penalty"],
  repetition_context_size=kwargs["repetition_context_size"],
- top_p=kwargs["top_p"],
- prompt_cache=self._prompt_cache.cache, # type: ignore
+ prompt_cache=self._prompt_cache.cache if self._prompt_cache else None, # type: ignore
  ),
  range(max_tokens),
  ):
+ token = chunk_resp.token
  tokens.append(token)
- if token == tokenizer.eos_token_id or token in stop_token_ids: # type: ignore
- break
-
- # Yield the last segment if streaming
- out = tokenizer.decode(
- token,
- skip_special_tokens=True,
- spaces_between_special_tokens=False,
- clean_up_tokenization_spaces=True,
- )

+ out = chunk_resp.text
  if stream:
  # this special character is mainly for qwen
  out = out.strip("�")
@@ -266,11 +275,15 @@ class MLXModel(LLM):
  total_tokens=(input_echo_len + i),
  ), completion_usage

+ if token == tokenizer.eos_token_id or token in stop_token_ids: # type: ignore
+ break
+
  logger.info(
  f"Average generation speed: {i / (time.time() - start):.2f} tokens/s."
  )

- self._prompt_cache.tokens.extend(tokens) # type: ignore
+ if self._prompt_cache:
+ self._prompt_cache.tokens.extend(tokens) # type: ignore

  if i == max_tokens - 1:
  finish_reason = "length"
@@ -314,10 +327,12 @@ class MLXModel(LLM):
  yield completion_chunk, completion_usage

  def generate(
- self, prompt: str, generate_config: Optional[MLXGenerateConfig] = None
+ self,
+ prompt: Union[str, Dict[str, Any]],
+ generate_config: Optional[MLXGenerateConfig] = None,
  ) -> Union[Completion, Iterator[CompletionChunk]]:
  def generator_wrapper(
- prompt: str, generate_config: MLXGenerateConfig
+ prompt: Union[str, Dict[str, Any]], generate_config: MLXGenerateConfig
  ) -> Iterator[CompletionChunk]:
  for completion_chunk, completion_usage in self._generate_stream(
  prompt,
@@ -356,26 +371,6 @@ class MLXModel(LLM):


  class MLXChatModel(MLXModel, ChatModelMixin):
- def __init__(
- self,
- model_uid: str,
- model_family: "LLMFamilyV1",
- model_spec: "LLMSpecV1",
- quantization: str,
- model_path: str,
- model_config: Optional[MLXModelConfig] = None,
- peft_model: Optional[List[LoRA]] = None,
- ):
- super().__init__(
- model_uid,
- model_family,
- model_spec,
- quantization,
- model_path,
- model_config,
- peft_model,
- )
-
  def _sanitize_generate_config(
  self,
  generate_config: Optional[MLXGenerateConfig],
@@ -402,6 +397,9 @@ class MLXChatModel(MLXModel, ChatModelMixin):
  return False
  if "chat" not in llm_family.model_ability:
  return False
+ if "vision" in llm_family.model_ability:
+ # do not process vision
+ return False
  return True

  def chat(
@@ -432,3 +430,186 @@ class MLXChatModel(MLXModel, ChatModelMixin):
  if tools:
  return self._tool_calls_completion(self.model_family, self.model_uid, c)
  return self._to_chat_completion(c)
+
+
+ class MLXVisionModel(MLXModel, ChatModelMixin):
+ @classmethod
+ def match(
+ cls, llm_family: "LLMFamilyV1", llm_spec: "LLMSpecV1", quantization: str
+ ) -> bool:
+ if llm_spec.model_format not in ["mlx"]:
+ return False
+ if sys.platform != "darwin" or platform.processor() != "arm":
+ # only work for Mac M chips
+ return False
+ if "vision" not in llm_family.model_ability:
+ return False
+ return True
+
+ def _load_model(self, **kwargs):
+ try:
+ from mlx_vlm import load
+ except ImportError:
+ error_message = "Failed to import module 'mlx_vlm'"
+ installation_guide = [
+ "Please make sure 'mlx_vlm' is installed. ",
+ "You can install it by `pip install mlx_vlm`\n",
+ ]
+
+ raise ImportError(f"{error_message}\n\n{''.join(installation_guide)}")
+
+ return load(self.model_path)
+
+ def load(self):
+ kwargs = {}
+ kwargs["revision"] = self._model_config.get(
+ "revision", self.model_spec.model_revision
+ )
+ kwargs["trust_remote_code"] = self._model_config.get("trust_remote_code")
+ kwargs["cache_limit_gb"] = self._model_config.pop("cache_limit_gb", None)
+
+ self._model, self._processor = self._load_model(**kwargs)
+ self._tokenizer = self._processor.tokenizer
+
+ def _generate_stream_inner(self, **kwargs):
+ import mlx.core as mx
+ from mlx_lm.utils import GenerationResponse
+ from mlx_vlm.utils import generate_step
+
+ max_tokens = kwargs.pop("max_tokens")
+ inputs = kwargs["prompt_token_ids"]
+ input_ids, pixel_values, mask = inputs[:3]
+
+ kwargs = {
+ k: v
+ for k, v in zip(
+ [
+ "image_grid_thw",
+ "image_sizes",
+ "aspect_ratio_ids",
+ "aspect_ratio_mask",
+ "cross_attention_mask",
+ ],
+ inputs[3:],
+ )
+ }
+
+ tokenizer = self._processor.tokenizer
+ detokenizer = self._processor.detokenizer
+
+ detokenizer.reset()
+ tic = time.perf_counter()
+ for (token, logprobs), n in zip(
+ generate_step(input_ids, self._model, pixel_values, mask, **kwargs),
+ range(max_tokens),
+ ):
+ if n == 0:
+ prompt_time = time.perf_counter() - tic
+ prompt_tps = len(input_ids) / prompt_time
+ tic = time.perf_counter()
+ if token == tokenizer.eos_token_id:
+ break
+ detokenizer.add_token(token)
+
+ # Yield the last segment if streaming
+ yield GenerationResponse(
+ text=detokenizer.last_segment,
+ token=token,
+ logprobs=logprobs,
+ prompt_tokens=len(input_ids),
+ prompt_tps=prompt_tps,
+ generation_tokens=n + 1,
+ generation_tps=(n + 1) / (time.perf_counter() - tic),
+ peak_memory=mx.metal.get_peak_memory() / 1e9,
+ )
+
+ detokenizer.finalize()
+ yield GenerationResponse(
+ text=detokenizer.last_segment,
+ token=token,
+ logprobs=logprobs,
+ prompt_tokens=len(input_ids),
+ prompt_tps=prompt_tps,
+ generation_tokens=n + 1,
+ generation_tps=(n + 1) / (time.perf_counter() - tic),
+ peak_memory=mx.metal.get_peak_memory() / 1e9,
+ )
+
+ def _prepare_inputs(
+ self, prompt: Union[str, Dict[str, Any]], kwargs
+ ) -> Tuple[Any, int]:
+ from mlx_vlm import prepare_inputs
+
+ prompt_str = prompt.get("prompt") # type: ignore
+ images = prompt.get("multi_modal_data", {}).get("image") # type: ignore
+ if images and not isinstance(images, list):
+ images = [images]
+ if hasattr(self._model.config, "image_token_index"):
+ image_token_index = self._model.config.image_token_index
+ else:
+ image_token_index = None
+
+ inputs = prepare_inputs(
+ None,
+ self._processor,
+ images,
+ prompt_str,
+ image_token_index,
+ kwargs.get("resize_shape"),
+ )
+ input_ids = inputs[0]
+ return inputs, len(input_ids)
+
+ def chat(
+ self,
+ messages: List[Dict],
+ generate_config: Optional[MLXGenerateConfig] = None,
+ ) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]:
+ messages = self._transform_messages(messages) # type: ignore
+ tools = generate_config.pop("tools", []) if generate_config else None
+
+ model_family = self.model_family.model_family or self.model_family.model_name
+
+ if "internvl2" not in model_family.lower():
+ from qwen_vl_utils import process_vision_info
+
+ full_context_kwargs = {}
+ if tools and model_family in QWEN_TOOL_CALL_FAMILY:
+ full_context_kwargs["tools"] = tools
+ assert self.model_family.chat_template is not None
+ prompt = self.get_full_context(
+ messages, self.model_family.chat_template, **full_context_kwargs
+ )
+ images, video_inputs = process_vision_info(messages)
+ if video_inputs:
+ raise ValueError("Not support video input now.")
+ else:
+ prompt, images = self.get_specific_prompt(model_family, messages) # type: ignore
+
+ if not images:
+ inputs = {
+ "prompt": prompt,
+ }
+ elif len(images) == 1:
+ inputs = {
+ "prompt": prompt,
+ "multi_modal_data": {"image": images[-1]}, # type: ignore
+ }
+ else:
+ inputs = {
+ "prompt": prompt,
+ "multi_modal_data": {"image": images}, # type: ignore
+ }
+ generate_config = self._sanitize_generate_config(generate_config)
+
+ stream = generate_config.get("stream", False)
+ if stream:
+ it = self.generate(inputs, generate_config)
+ assert isinstance(it, Iterator)
+ return self._to_chat_completion_chunks(it)
+ else:
+ c = self.generate(inputs, generate_config)
+ assert not isinstance(c, Iterator)
+ if tools:
+ return self._tool_calls_completion(self.model_family, self.model_uid, c)
+ return self._to_chat_completion(c)
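
The mlx/core.py changes above swap the hand-rolled generate_step loop for mlx_lm's stream_generate driven by a sampler object, and add MLXVisionModel on top of mlx_vlm. A small standalone sketch of the text-generation flow the refactor relies on is shown below; it mirrors the imports used in the patch (in other mlx-lm releases make_sampler lives in mlx_lm.sample_utils), and the model repo id is only an example.

from mlx_lm import load
from mlx_lm.utils import make_sampler, stream_generate  # same imports as _generate_stream_inner

# Load an MLX-format model (example repo id; a local MLX model path works too).
model, tokenizer = load("mlx-community/Qwen2.5-7B-Instruct-4bit")

# The sampler bundles temperature/top_p, as _generate_stream_inner does.
sampler = make_sampler(temp=0.7, top_p=0.9)

prompt_token_ids = tokenizer.encode("Explain what a prompt cache is in one sentence.")

# stream_generate yields GenerationResponse objects; _generate_stream consumes
# their .token and .text fields as chunk_resp above.
for resp in stream_generate(
    model, tokenizer, prompt_token_ids, sampler=sampler, max_tokens=64
):
    print(resp.text, end="", flush=True)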
@@ -75,6 +75,7 @@ SGLANG_SUPPORTED_CHAT_MODELS = [
  "llama-2-chat",
  "llama-3-instruct",
  "llama-3.1-instruct",
+ "llama-3.3-instruct",
  "qwen-chat",
  "qwen1.5-chat",
  "qwen2-instruct",