xinference 0.15.3__py3-none-any.whl → 0.16.0__py3-none-any.whl
This diff compares the contents of two publicly released package versions as they appear in their public registry, and is provided for informational purposes only.
- xinference/__init__.py +0 -4
- xinference/_version.py +3 -3
- xinference/api/restful_api.py +29 -2
- xinference/client/restful/restful_client.py +10 -0
- xinference/constants.py +7 -3
- xinference/core/image_interface.py +76 -23
- xinference/core/model.py +158 -46
- xinference/core/progress_tracker.py +187 -0
- xinference/core/scheduler.py +10 -7
- xinference/core/supervisor.py +11 -0
- xinference/core/utils.py +9 -0
- xinference/core/worker.py +1 -0
- xinference/deploy/supervisor.py +4 -0
- xinference/model/__init__.py +4 -0
- xinference/model/audio/chattts.py +2 -1
- xinference/model/audio/core.py +0 -2
- xinference/model/audio/model_spec.json +8 -0
- xinference/model/audio/model_spec_modelscope.json +9 -0
- xinference/model/image/core.py +6 -7
- xinference/model/image/scheduler/__init__.py +13 -0
- xinference/model/image/scheduler/flux.py +533 -0
- xinference/model/image/sdapi.py +35 -4
- xinference/model/image/stable_diffusion/core.py +215 -110
- xinference/model/image/utils.py +39 -3
- xinference/model/llm/__init__.py +2 -0
- xinference/model/llm/llm_family.json +185 -17
- xinference/model/llm/llm_family_modelscope.json +124 -12
- xinference/model/llm/transformers/chatglm.py +104 -0
- xinference/model/llm/transformers/cogvlm2.py +2 -1
- xinference/model/llm/transformers/cogvlm2_video.py +2 -0
- xinference/model/llm/transformers/core.py +43 -113
- xinference/model/llm/transformers/deepseek_v2.py +0 -226
- xinference/model/llm/transformers/deepseek_vl.py +2 -0
- xinference/model/llm/transformers/glm4v.py +2 -1
- xinference/model/llm/transformers/intern_vl.py +2 -0
- xinference/model/llm/transformers/internlm2.py +3 -95
- xinference/model/llm/transformers/minicpmv25.py +2 -0
- xinference/model/llm/transformers/minicpmv26.py +2 -0
- xinference/model/llm/transformers/omnilmm.py +2 -0
- xinference/model/llm/transformers/opt.py +68 -0
- xinference/model/llm/transformers/qwen2_audio.py +11 -4
- xinference/model/llm/transformers/qwen2_vl.py +2 -28
- xinference/model/llm/transformers/qwen_vl.py +2 -1
- xinference/model/llm/transformers/utils.py +36 -283
- xinference/model/llm/transformers/yi_vl.py +2 -0
- xinference/model/llm/utils.py +60 -16
- xinference/model/llm/vllm/core.py +68 -9
- xinference/model/llm/vllm/utils.py +0 -1
- xinference/model/utils.py +7 -4
- xinference/model/video/core.py +0 -2
- xinference/utils.py +2 -3
- xinference/web/ui/build/asset-manifest.json +3 -3
- xinference/web/ui/build/index.html +1 -1
- xinference/web/ui/build/static/js/{main.e51a356d.js → main.f7da0140.js} +3 -3
- xinference/web/ui/build/static/js/main.f7da0140.js.map +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/331312668fa8bd3d7401818f4a25fa98135d7f61371cd6bfff78b18cf4fbdd92.json +1 -0
- {xinference-0.15.3.dist-info → xinference-0.16.0.dist-info}/METADATA +38 -6
- {xinference-0.15.3.dist-info → xinference-0.16.0.dist-info}/RECORD +63 -59
- xinference/web/ui/build/static/js/main.e51a356d.js.map +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/4385c1095eefbff0a8ec3b2964ba6e5a66a05ab31be721483ca2f43e2a91f6ff.json +0 -1
- /xinference/web/ui/build/static/js/{main.e51a356d.js.LICENSE.txt → main.f7da0140.js.LICENSE.txt} +0 -0
- {xinference-0.15.3.dist-info → xinference-0.16.0.dist-info}/LICENSE +0 -0
- {xinference-0.15.3.dist-info → xinference-0.16.0.dist-info}/WHEEL +0 -0
- {xinference-0.15.3.dist-info → xinference-0.16.0.dist-info}/entry_points.txt +0 -0
- {xinference-0.15.3.dist-info → xinference-0.16.0.dist-info}/top_level.txt +0 -0
xinference/model/llm/llm_family.json

@@ -206,7 +206,7 @@
                     "none"
                 ],
                 "model_id": "THUDM/glm-4-9b-chat",
-                "model_revision": "
+                "model_revision": "eb55a443d66541f30869f6caac5ad0d2e95bcbaa"
             },
             {
                 "model_format": "ggufv2",
@@ -1111,7 +1111,8 @@
             "th"
         ],
         "model_ability": [
-            "chat"
+            "chat",
+            "tools"
         ],
         "model_description": "The Llama 3.1 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks..",
         "model_specs": [
@@ -1299,14 +1300,16 @@
                 "model_id": "hugging-quants/Meta-Llama-3.1-405B-Instruct-AWQ-INT4"
             }
         ],
-        "chat_template": "{{-
+        "chat_template": "{{- bos_token }}\n{%- if custom_tools is defined %}\n    {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n    {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n    {%- set date_string = \"26 Jul 2024\" %}\n{%- endif %}\n{%- if not tools is defined %}\n    {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n    {%- set system_message = messages[0]['content']|trim %}\n    {%- set messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message + builtin tools #}\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\n{%- if builtin_tools is defined or tools is not none %}\n    {{- \"Environment: ipython\\n\" }}\n{%- endif %}\n{%- if builtin_tools is defined %}\n    {{- \"Tools: \" + builtin_tools | reject('equalto', 'code_interpreter') | join(\", \") + \"\\n\\n\"}}\n{%- endif %}\n{{- \"Cutting Knowledge Date: December 2023\\n\" }}\n{{- \"Today Date: \" + date_string + \"\\n\\n\" }}\n{%- if tools is not none and not tools_in_user_message %}\n    {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n    {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n    {{- \"Do not use variables.\\n\\n\" }}\n    {%- for t in tools %}\n        {{- t | tojson(indent=4) }}\n        {{- \"\\n\\n\" }}\n    {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \"<|eot_id|>\" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n    {#- Extract the first user message so we can plug it in here #}\n    {%- if messages | length != 0 %}\n        {%- set first_user_message = messages[0]['content']|trim %}\n        {%- set messages = messages[1:] %}\n    {%- else %}\n        {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n    {{- '<|start_header_id|>user<|end_header_id|>\\n\\n' -}}\n    {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n    {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n    {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n    {{- \"Do not use variables.\\n\\n\" }}\n    {%- for t in tools %}\n        {{- t | tojson(indent=4) }}\n        {{- \"\\n\\n\" }}\n    {%- endfor %}\n    {{- first_user_message + \"<|eot_id|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n    {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n        {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n    {%- elif 'tool_calls' in message %}\n        {%- if not message.tool_calls|length == 1 %}\n            {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n        {%- endif %}\n        {%- set tool_call = message.tool_calls[0].function %}\n        {%- if builtin_tools is defined and tool_call.name in builtin_tools %}\n            {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n            {{- \"<|python_tag|>\" + tool_call.name + \".call(\" }}\n            {%- for arg_name, arg_val in tool_call.arguments | items %}\n                {{- arg_name + '=\"' + arg_val + '\"' }}\n                {%- if not loop.last %}\n                    {{- \", \" }}\n                {%- endif %}\n            {%- endfor %}\n            {{- \")\" }}\n        {%- else %}\n            {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n            {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n            {{- '\"parameters\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- \"}\" }}\n        {%- endif %}\n        {%- if builtin_tools is defined %}\n            {#- This means we're in ipython mode #}\n            {{- \"<|eom_id|>\" }}\n        {%- else %}\n            {{- \"<|eot_id|>\" }}\n        {%- endif %}\n    {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n        {{- \"<|start_header_id|>ipython<|end_header_id|>\\n\\n\" }}\n        {%- if message.content is mapping or message.content is iterable %}\n            {{- message.content | tojson }}\n        {%- else %}\n            {{- message.content }}\n        {%- endif %}\n        {{- \"<|eot_id|>\" }}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}\n",
         "stop_token_ids": [
             128001,
+            128008,
             128009
         ],
         "stop": [
             "<|end_of_text|>",
-            "<|eot_id|>"
+            "<|eot_id|>",
+            "<|eom_id|>"
         ]
     },
     {
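The Llama 3.1 entries now advertise the "tools" ability, carry the upstream tool-calling chat template, and add token 128008 / "<|eom_id|>" to the stop lists; Llama 3.1 ends a built-in tool-call turn with <|eom_id|> rather than <|eot_id|>, so without it generation would run past the call. Below is a minimal sketch of exercising this through the REST client; the endpoint, model uid, and tool schema are placeholders, and the exact client signature may differ across versions:

    # Hedged sketch: calling a launched Llama 3.1 model with a tool definition.
    # The uid, endpoint and schema are illustrative, not values from this diff.
    from xinference.client import RESTfulClient

    client = RESTfulClient("http://localhost:9997")
    model = client.get_model("llama-3.1-instruct")  # uid of a launched model

    tools = [{
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Look up the current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }]

    completion = model.chat(
        messages=[{"role": "user", "content": "What's the weather in Paris?"}],
        # the transformers code path below reads tools out of generate_config
        generate_config={"tools": tools},
    )
    print(completion["choices"][0]["message"].get("tool_calls"))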
@@ -6906,18 +6909,15 @@
                 "model_id":"Qwen/Qwen2-VL-72B-Instruct-GPTQ-{quantization}"
             }
         ],
-        "
-
-
-
-
-
-        "
-
-        "<|endoftext|>"
-        ]
-    }
+        "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}",
+        "stop_token_ids": [
+            151645,
+            151643
+        ],
+        "stop": [
+            "<|im_end|>",
+            "<|endoftext|>"
+        ]
     },
     {
         "version": 1,
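The Qwen2-VL entry replaces its old (viewer-truncated) block with an explicit chat template that numbers image and video parts via namespace counters and wraps each in <|vision_start|>…<|vision_end|>, plus <|im_end|>/<|endoftext|> stop sequences. A sketch of the message shape this template consumes (the URL is a placeholder):

    # A content part reaches the template's image branch via content['type'] == 'image'
    # or an 'image_url' key; text parts are emitted verbatim inside the turn.
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
                {"type": "text", "text": "Describe this picture."},
            ],
        }
    ]
    # Rendered, the image part becomes "<|vision_start|><|image_pad|><|vision_end|>"
    # inside an "<|im_start|>user ... <|im_end|>" turn.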
@@ -7923,9 +7923,177 @@
                         "00021-of-00021"
                     ]
                 }
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": "0_5",
+                "quantizations": [
+                    "4-bit"
+                ],
+                "model_id": "mlx-community/Qwen2.5-0.5B-Instruct-4bit"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": "0_5",
+                "quantizations": [
+                    "8-bit"
+                ],
+                "model_id": "mlx-community/Qwen2.5-0.5B-Instruct-8bit"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": "0_5",
+                "quantizations": [
+                    "none"
+                ],
+                "model_id": "mlx-community/Qwen2.5-0.5B-Instruct-bf16"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": "1_5",
+                "quantizations": [
+                    "4-bit"
+                ],
+                "model_id": "mlx-community/Qwen2.5-1.5B-Instruct-4bit"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": "1_5",
+                "quantizations": [
+                    "8-bit"
+                ],
+                "model_id": "mlx-community/Qwen2.5-1.5B-Instruct-8bit"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": "1_5",
+                "quantizations": [
+                    "none"
+                ],
+                "model_id": "mlx-community/Qwen2.5-1.5B-Instruct-bf16"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 3,
+                "quantizations": [
+                    "4-bit"
+                ],
+                "model_id": "mlx-community/Qwen2.5-3B-Instruct-4bit"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 3,
+                "quantizations": [
+                    "8-bit"
+                ],
+                "model_id": "mlx-community/Qwen2.5-3B-Instruct-8bit"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 3,
+                "quantizations": [
+                    "none"
+                ],
+                "model_id": "mlx-community/Qwen2.5-3B-Instruct-bf16"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 7,
+                "quantizations": [
+                    "4-bit"
+                ],
+                "model_id": "mlx-community/Qwen2.5-7B-Instruct-4bit"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 7,
+                "quantizations": [
+                    "8-bit"
+                ],
+                "model_id": "mlx-community/Qwen2.5-7B-Instruct-8bit"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 7,
+                "quantizations": [
+                    "none"
+                ],
+                "model_id": "mlx-community/Qwen2.5-7B-Instruct-bf16"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 14,
+                "quantizations": [
+                    "4-bit"
+                ],
+                "model_id": "mlx-community/Qwen2.5-14B-Instruct-4bit"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 14,
+                "quantizations": [
+                    "8-bit"
+                ],
+                "model_id": "mlx-community/Qwen2.5-14B-Instruct-8bit"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 14,
+                "quantizations": [
+                    "none"
+                ],
+                "model_id": "mlx-community/Qwen2.5-14B-Instruct-bf16"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 32,
+                "quantizations": [
+                    "4-bit"
+                ],
+                "model_id": "mlx-community/Qwen2.5-32B-Instruct-4bit"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 32,
+                "quantizations": [
+                    "8-bit"
+                ],
+                "model_id": "mlx-community/Qwen2.5-32B-Instruct-8bit"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 32,
+                "quantizations": [
+                    "none"
+                ],
+                "model_id": "mlx-community/Qwen2.5-32B-Instruct-bf16"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 72,
+                "quantizations": [
+                    "4-bit"
+                ],
+                "model_id": "mlx-community/Qwen2.5-72B-Instruct-4bit"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 72,
+                "quantizations": [
+                    "8-bit"
+                ],
+                "model_id": "mlx-community/Qwen2.5-72B-Instruct-8bit"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 72,
+                "quantizations": [
+                    "none"
+                ],
+                "model_id": "mlx-community/Qwen2.5-72B-Instruct-bf16"
             }
         ],
-        "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{
+        "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\"name\": \"' }}\n            {{- tool_call.name }}\n            {{- '\", \"arguments\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
         "stop_token_ids": [
             151643,
             151644,
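The Qwen2.5-Instruct family gains 21 MLX specs (0.5B through 72B, each in 4-bit, 8-bit and bf16 from mlx-community) and an updated tool-calling chat template. A hedged sketch of launching one of the new MLX builds; the kwargs mirror the spec fields above, but the exact client signature may differ by version:

    from xinference.client import RESTfulClient

    client = RESTfulClient("http://localhost:9997")
    uid = client.launch_model(
        model_name="qwen2.5-instruct",
        model_engine="mlx",            # assumption: engine selected explicitly
        model_format="mlx",
        model_size_in_billions="0_5",  # matches the "0_5" spec key above
        quantization="4-bit",
    )
    model = client.get_model(uid)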
xinference/model/llm/llm_family_modelscope.json

@@ -246,7 +246,8 @@
             "th"
         ],
         "model_ability": [
-            "chat"
+            "chat",
+            "tools"
         ],
         "model_description": "The Llama 3.1 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks..",
         "model_specs": [
@@ -350,14 +351,16 @@
                 "model_hub": "modelscope"
             }
         ],
-        "chat_template": "{{-
+        "chat_template": "{{- bos_token }}\n{%- if custom_tools is defined %}\n    {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n    {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n    {%- set date_string = \"26 Jul 2024\" %}\n{%- endif %}\n{%- if not tools is defined %}\n    {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n    {%- set system_message = messages[0]['content']|trim %}\n    {%- set messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message + builtin tools #}\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\n{%- if builtin_tools is defined or tools is not none %}\n    {{- \"Environment: ipython\\n\" }}\n{%- endif %}\n{%- if builtin_tools is defined %}\n    {{- \"Tools: \" + builtin_tools | reject('equalto', 'code_interpreter') | join(\", \") + \"\\n\\n\"}}\n{%- endif %}\n{{- \"Cutting Knowledge Date: December 2023\\n\" }}\n{{- \"Today Date: \" + date_string + \"\\n\\n\" }}\n{%- if tools is not none and not tools_in_user_message %}\n    {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n    {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n    {{- \"Do not use variables.\\n\\n\" }}\n    {%- for t in tools %}\n        {{- t | tojson(indent=4) }}\n        {{- \"\\n\\n\" }}\n    {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \"<|eot_id|>\" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n    {#- Extract the first user message so we can plug it in here #}\n    {%- if messages | length != 0 %}\n        {%- set first_user_message = messages[0]['content']|trim %}\n        {%- set messages = messages[1:] %}\n    {%- else %}\n        {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n    {{- '<|start_header_id|>user<|end_header_id|>\\n\\n' -}}\n    {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n    {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n    {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n    {{- \"Do not use variables.\\n\\n\" }}\n    {%- for t in tools %}\n        {{- t | tojson(indent=4) }}\n        {{- \"\\n\\n\" }}\n    {%- endfor %}\n    {{- first_user_message + \"<|eot_id|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n    {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n        {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n    {%- elif 'tool_calls' in message %}\n        {%- if not message.tool_calls|length == 1 %}\n            {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n        {%- endif %}\n        {%- set tool_call = message.tool_calls[0].function %}\n        {%- if builtin_tools is defined and tool_call.name in builtin_tools %}\n            {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n            {{- \"<|python_tag|>\" + tool_call.name + \".call(\" }}\n            {%- for arg_name, arg_val in tool_call.arguments | items %}\n                {{- arg_name + '=\"' + arg_val + '\"' }}\n                {%- if not loop.last %}\n                    {{- \", \" }}\n                {%- endif %}\n            {%- endfor %}\n            {{- \")\" }}\n        {%- else %}\n            {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n            {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n            {{- '\"parameters\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- \"}\" }}\n        {%- endif %}\n        {%- if builtin_tools is defined %}\n            {#- This means we're in ipython mode #}\n            {{- \"<|eom_id|>\" }}\n        {%- else %}\n            {{- \"<|eot_id|>\" }}\n        {%- endif %}\n    {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n        {{- \"<|start_header_id|>ipython<|end_header_id|>\\n\\n\" }}\n        {%- if message.content is mapping or message.content is iterable %}\n            {{- message.content | tojson }}\n        {%- else %}\n            {{- message.content }}\n        {%- endif %}\n        {{- \"<|eot_id|>\" }}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}\n",
         "stop_token_ids": [
             128001,
+            128008,
             128009
         ],
         "stop": [
             "<|end_of_text|>",
-            "<|eot_id|>"
+            "<|eot_id|>",
+            "<|eom_id|>"
         ]
     },
     {
@@ -4624,14 +4627,15 @@
                 "model_hub": "modelscope"
             }
         ],
-        "
-
-
-
-
-
-
-
+        "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}",
+        "stop_token_ids": [
+            151645,
+            151643
+        ],
+        "stop": [
+            "<|im_end|>",
+            "<|endoftext|>"
+        ]
     },
     {
         "version": 1,
@@ -5677,9 +5681,117 @@
                         "00021-of-00021"
                     ]
                 }
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 3,
+                "quantizations": [
+                    "4-bit"
+                ],
+                "model_id": "okwinds/Qwen2.5-3B-Instruct-MLX-4bit",
+                "model_hub": "modelscope"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 3,
+                "quantizations": [
+                    "8-bit"
+                ],
+                "model_id": "okwinds/Qwen2.5-3B-Instruct-MLX-8bit",
+                "model_hub": "modelscope"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 7,
+                "quantizations": [
+                    "4-bit"
+                ],
+                "model_id": "okwinds/Qwen2.5-7B-Instruct-MLX-4bit",
+                "model_hub": "modelscope"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 7,
+                "quantizations": [
+                    "8-bit"
+                ],
+                "model_id": "okwinds/Qwen2.5-7B-Instruct-MLX-8bit",
+                "model_hub": "modelscope"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 14,
+                "quantizations": [
+                    "4-bit"
+                ],
+                "model_id": "okwinds/Qwen2.5-14B-Instruct-MLX-4bit",
+                "model_hub": "modelscope"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 14,
+                "quantizations": [
+                    "8-bit"
+                ],
+                "model_id": "okwinds/Qwen2.5-14B-Instruct-MLX-8bit",
+                "model_hub": "modelscope"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 32,
+                "quantizations": [
+                    "2-bit"
+                ],
+                "model_id": "okwinds/Qwen2.5-32B-Instruct-MLX-2bit",
+                "model_hub": "modelscope"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 32,
+                "quantizations": [
+                    "4-bit"
+                ],
+                "model_id": "okwinds/Qwen2.5-32B-Instruct-MLX-4bit",
+                "model_hub": "modelscope"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 32,
+                "quantizations": [
+                    "8-bit"
+                ],
+                "model_id": "okwinds/Qwen2.5-32B-Instruct-MLX-8bit",
+                "model_hub": "modelscope"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 72,
+                "quantizations": [
+                    "2-bit"
+                ],
+                "model_id": "okwinds/Qwen2.5-32B-Instruct-MLX-2bit",
+                "model_hub": "modelscope"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 72,
+                "quantizations": [
+                    "4-bit"
+                ],
+                "model_id": "okwinds/Qwen2.5-72B-Instruct-MLX-4bit",
+                "model_hub": "modelscope"
+            },
+            {
+                "model_format": "mlx",
+                "model_size_in_billions": 72,
+                "quantizations": [
+                    "8-bit"
+                ],
+                "model_id": "okwinds/Qwen2.5-72B-Instruct-MLX-8bit",
+                "model_hub": "modelscope"
             }
         ],
-        "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{
+        "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\"name\": \"' }}\n            {{- tool_call.name }}\n            {{- '\", \"arguments\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
         "stop_token_ids": [
             151643,
             151644,
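The same Qwen2.5 MLX matrix is mirrored for ModelScope through the okwinds uploads, each spec carrying "model_hub": "modelscope". A short sketch of steering downloads to that hub; XINFERENCE_MODEL_SRC is the documented hub selector, and reading it at process start is the assumption here:

    import os

    # Must be set before the xinference supervisor/worker starts, so model
    # downloads resolve against ModelScope instead of Hugging Face.
    os.environ["XINFERENCE_MODEL_SRC"] = "modelscope"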
xinference/model/llm/transformers/chatglm.py

@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import json
+import logging
 import typing
 import uuid
 from threading import Thread
@@ -29,6 +30,8 @@ from ..utils import (
 )
 from .core import PytorchChatModel, PytorchModelConfig
 
+logger = logging.getLogger(__name__)
+
 
 class ChatglmPytorchChatModel(PytorchChatModel):
     def __init__(
@@ -445,3 +448,104 @@ class ChatglmPytorchChatModel(PytorchChatModel):
             raw_config["top_p"] = 0.8
 
         return raw_config
+
+    def prepare_batch_inference(self, req_list: List[InferenceRequest]):
+        super(PytorchChatModel, self).prepare_batch_inference(req_list)
+        for r in req_list:
+            try:
+                if not r.stopped and r.is_prefill:
+                    tools = r.generate_config.get("tools", None)
+                    tools = list(tools) if tools is not None else None
+                    tool_choice = r.generate_config.get("tool_choice", "none")
+
+                    r.prompt = self._process_messages(
+                        r.prompt, tools=tools, tool_choice=tool_choice
+                    )
+                    r.full_prompt = self.get_full_context(
+                        r.prompt,
+                        self.model_family.chat_template,  # type: ignore
+                        tokenizer=self._tokenizer,
+                    )
+                    if tools:
+                        r.tools = tools
+            except Exception as e:
+                logger.exception(f"prepare inference error with {e}")
+                r.stopped = True
+                r.error_msg = str(e)
+
+    def handle_chat_result_non_streaming(self, req: InferenceRequest):
+        if req.tools:
+            response = req.completion[0]["choices"][0]["text"]
+            usage = req.completion[0]["usage"]
+            function_call = self._process_response_non_streaming(
+                response, req.tools, use_tool=True
+            )
+            req.completion[0] = self._tool_calls_completion(
+                self.model_family, self.model_uid, function_call
+            )
+            req.completion[0]["usage"] = usage
+        else:
+            req.completion[0] = self._to_chat_completion(req.completion[0])
+
+    def handle_chat_result_streaming(self, req: InferenceRequest):
+        results = []
+        tools = {tool["function"]["name"] for tool in req.tools} if req.tools else {}
+        response = "".join(req.outputs)
+        eos_pos = response.find("<eos_stream>")
+        if eos_pos != -1:
+            response = response[:eos_pos]
+
+        if "<bos_stream>" in req.completion:
+            bos_pos = req.completion.index("<bos_stream>")
+            results.append(
+                self._get_first_chat_completion_chunk(req.completion[bos_pos + 1])
+            )
+
+        if req.stopped:
+            if tools:
+                new_response = self._process_response_streaming(
+                    response, tools, end=True
+                )
+                if new_response:
+                    if isinstance(new_response, dict):  # tool call case
+                        chunk_id = [
+                            c for c in req.completion if not isinstance(c, str)
+                        ][0]["id"]
+                        results.append(
+                            self._tool_calls_completion_chunk(
+                                self.model_family,
+                                self.model_uid,
+                                new_response,
+                                chunk_id=chunk_id,
+                            )
+                        )
+                    else:  # normal case
+                        for c in req.completion:
+                            if c == "<bos_stream>":
+                                continue
+                            elif c == "<eos_stream>":
+                                break
+                            else:
+                                results.append(self._to_chat_completion_chunk(c))
+            else:
+                for c in req.completion:
+                    if c == "<bos_stream>":
+                        continue
+                    elif c == "<eos_stream>":
+                        break
+                    else:
+                        results.append(self._to_chat_completion_chunk(c))
+        else:
+            if response and response[-1] != "�":
+                new_response = self._process_response_streaming(
+                    response, tools, end=False
+                )
+                if new_response is not None:  # normal case
+                    for c in req.completion:
+                        if c == "<bos_stream>":
+                            continue
+                        results.append(self._to_chat_completion_chunk(c))
+
+        if req.stopped and req.include_usage:
+            results.append(self._get_final_chat_completion_chunk(req.completion[-1]))
+        req.completion = results
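These three methods move GLM-4 tool calling onto the batched scheduler path: prepare_batch_inference renders the tools into the prompt at prefill time, and the two result handlers rebuild chat completions (or tool-call completions) from raw scheduler output. The streaming handler keys off sentinel strings interleaved with chunk dicts in req.completion; a sketch of that layout, with placeholder chunk contents:

    # Illustrative layout of req.completion as consumed by
    # handle_chat_result_streaming; chunk dicts here are placeholders.
    completion = [
        "<bos_stream>",                       # start marker: the next element
                                              # feeds _get_first_chat_completion_chunk
        {"id": "chunk-0", "choices": [...]},  # ordinary completion chunks
        {"id": "chunk-1", "choices": [...]},
        "<eos_stream>",                       # end marker: stop emitting chunks
    ]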
xinference/model/llm/transformers/cogvlm2.py

@@ -29,7 +29,7 @@ from ..utils import (
     parse_messages,
 )
 from .core import PytorchChatModel, PytorchGenerateConfig
-from .utils import get_max_src_len
+from .utils import cache_clean, get_max_src_len
 
 logger = logging.getLogger(__name__)
 
@@ -176,6 +176,7 @@ class CogVLM2Model(PytorchChatModel):
             query = content
         return query, image, history
 
+    @cache_clean
     def chat(
         self,
         messages: List[Dict],
xinference/model/llm/transformers/cogvlm2_video.py

@@ -28,6 +28,7 @@ from ..utils import (
     parse_messages,
 )
 from .core import PytorchChatModel, PytorchGenerateConfig
+from .utils import cache_clean
 
 logger = logging.getLogger(__name__)
 
@@ -227,6 +228,7 @@ class CogVLM2VideoModel(PytorchChatModel):
 
         return query, image, video, history
 
+    @cache_clean
     def chat(
         self,
         messages: List[Dict],
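Both CogVLM2 entry points now wrap chat() in the cache_clean decorator imported from transformers/utils.py. The diff shows only the import and the decoration, so what follows is purely a hypothetical sketch of the shape such a cache-clearing decorator usually takes; the real implementation in xinference/model/llm/transformers/utils.py may differ:

    import functools
    import gc

    import torch


    def cache_clean(fn):
        @functools.wraps(fn)
        def wrapper(self, *args, **kwargs):
            # Hypothetical: drop Python-level garbage and release cached CUDA
            # blocks before serving a fresh multimodal chat() call.
            gc.collect()
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            return fn(self, *args, **kwargs)

        return wrapper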