xinference 0.14.4.post1__py3-none-any.whl → 0.15.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of xinference might be problematic. Click here for more details.

Files changed (149) hide show
  1. xinference/_compat.py +51 -0
  2. xinference/_version.py +3 -3
  3. xinference/api/restful_api.py +5 -39
  4. xinference/client/restful/restful_client.py +3 -24
  5. xinference/conftest.py +1 -1
  6. xinference/constants.py +5 -0
  7. xinference/core/cache_tracker.py +1 -1
  8. xinference/core/chat_interface.py +8 -14
  9. xinference/core/event.py +1 -1
  10. xinference/core/model.py +82 -31
  11. xinference/core/scheduler.py +37 -37
  12. xinference/core/status_guard.py +1 -1
  13. xinference/core/supervisor.py +11 -10
  14. xinference/core/utils.py +80 -22
  15. xinference/core/worker.py +17 -16
  16. xinference/deploy/cmdline.py +8 -16
  17. xinference/deploy/local.py +1 -1
  18. xinference/deploy/supervisor.py +1 -1
  19. xinference/deploy/utils.py +1 -1
  20. xinference/deploy/worker.py +1 -1
  21. xinference/model/audio/cosyvoice.py +86 -41
  22. xinference/model/embedding/core.py +52 -31
  23. xinference/model/image/stable_diffusion/core.py +18 -1
  24. xinference/model/llm/__init__.py +21 -11
  25. xinference/model/llm/llama_cpp/core.py +16 -33
  26. xinference/model/llm/llm_family.json +619 -1297
  27. xinference/model/llm/llm_family.py +31 -52
  28. xinference/model/llm/llm_family_csghub.json +18 -35
  29. xinference/model/llm/llm_family_modelscope.json +573 -1119
  30. xinference/model/llm/lmdeploy/core.py +56 -88
  31. xinference/model/llm/mlx/core.py +46 -69
  32. xinference/model/llm/sglang/core.py +33 -18
  33. xinference/model/llm/transformers/chatglm.py +167 -305
  34. xinference/model/llm/transformers/cogvlm2.py +36 -63
  35. xinference/model/llm/transformers/cogvlm2_video.py +33 -223
  36. xinference/model/llm/transformers/core.py +49 -50
  37. xinference/model/llm/transformers/deepseek_vl.py +53 -96
  38. xinference/model/llm/transformers/glm4v.py +55 -111
  39. xinference/model/llm/transformers/intern_vl.py +39 -70
  40. xinference/model/llm/transformers/internlm2.py +32 -54
  41. xinference/model/llm/transformers/minicpmv25.py +22 -55
  42. xinference/model/llm/transformers/minicpmv26.py +158 -68
  43. xinference/model/llm/transformers/omnilmm.py +5 -28
  44. xinference/model/llm/transformers/qwen2_vl.py +208 -0
  45. xinference/model/llm/transformers/qwen_vl.py +34 -86
  46. xinference/model/llm/transformers/utils.py +32 -38
  47. xinference/model/llm/transformers/yi_vl.py +32 -72
  48. xinference/model/llm/utils.py +195 -489
  49. xinference/model/llm/vllm/core.py +153 -100
  50. xinference/model/rerank/core.py +41 -8
  51. xinference/model/rerank/model_spec.json +7 -0
  52. xinference/model/rerank/model_spec_modelscope.json +7 -1
  53. xinference/model/utils.py +1 -31
  54. xinference/thirdparty/cosyvoice/bin/export_jit.py +64 -0
  55. xinference/thirdparty/cosyvoice/bin/export_trt.py +8 -0
  56. xinference/thirdparty/cosyvoice/bin/inference.py +5 -2
  57. xinference/thirdparty/cosyvoice/cli/cosyvoice.py +38 -22
  58. xinference/thirdparty/cosyvoice/cli/model.py +139 -26
  59. xinference/thirdparty/cosyvoice/flow/flow.py +15 -9
  60. xinference/thirdparty/cosyvoice/flow/length_regulator.py +20 -1
  61. xinference/thirdparty/cosyvoice/hifigan/generator.py +8 -4
  62. xinference/thirdparty/cosyvoice/llm/llm.py +14 -13
  63. xinference/thirdparty/cosyvoice/transformer/attention.py +7 -3
  64. xinference/thirdparty/cosyvoice/transformer/decoder.py +1 -1
  65. xinference/thirdparty/cosyvoice/transformer/embedding.py +4 -3
  66. xinference/thirdparty/cosyvoice/transformer/encoder.py +4 -2
  67. xinference/thirdparty/cosyvoice/utils/common.py +36 -0
  68. xinference/thirdparty/cosyvoice/utils/file_utils.py +16 -0
  69. xinference/thirdparty/deepseek_vl/serve/assets/Kelpy-Codos.js +100 -0
  70. xinference/thirdparty/deepseek_vl/serve/assets/avatar.png +0 -0
  71. xinference/thirdparty/deepseek_vl/serve/assets/custom.css +355 -0
  72. xinference/thirdparty/deepseek_vl/serve/assets/custom.js +22 -0
  73. xinference/thirdparty/deepseek_vl/serve/assets/favicon.ico +0 -0
  74. xinference/thirdparty/deepseek_vl/serve/examples/app.png +0 -0
  75. xinference/thirdparty/deepseek_vl/serve/examples/chart.png +0 -0
  76. xinference/thirdparty/deepseek_vl/serve/examples/mirror.png +0 -0
  77. xinference/thirdparty/deepseek_vl/serve/examples/pipeline.png +0 -0
  78. xinference/thirdparty/deepseek_vl/serve/examples/puzzle.png +0 -0
  79. xinference/thirdparty/deepseek_vl/serve/examples/rap.jpeg +0 -0
  80. xinference/thirdparty/fish_speech/fish_speech/configs/base.yaml +87 -0
  81. xinference/thirdparty/fish_speech/fish_speech/configs/firefly_gan_vq.yaml +34 -0
  82. xinference/thirdparty/fish_speech/fish_speech/configs/lora/r_8_alpha_16.yaml +4 -0
  83. xinference/thirdparty/fish_speech/fish_speech/configs/text2semantic_finetune.yaml +83 -0
  84. xinference/thirdparty/fish_speech/fish_speech/datasets/protos/text-data.proto +24 -0
  85. xinference/thirdparty/fish_speech/fish_speech/i18n/README.md +27 -0
  86. xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/.gitignore +114 -0
  87. xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/README.md +36 -0
  88. xinference/thirdparty/fish_speech/fish_speech/webui/css/style.css +161 -0
  89. xinference/thirdparty/fish_speech/fish_speech/webui/html/footer.html +11 -0
  90. xinference/thirdparty/fish_speech/fish_speech/webui/js/animate.js +69 -0
  91. xinference/thirdparty/fish_speech/tools/sensevoice/README.md +59 -0
  92. xinference/thirdparty/matcha/VERSION +1 -0
  93. xinference/thirdparty/matcha/hifigan/LICENSE +21 -0
  94. xinference/thirdparty/matcha/hifigan/README.md +101 -0
  95. xinference/thirdparty/omnilmm/LICENSE +201 -0
  96. xinference/thirdparty/whisper/__init__.py +156 -0
  97. xinference/thirdparty/whisper/__main__.py +3 -0
  98. xinference/thirdparty/whisper/assets/gpt2.tiktoken +50256 -0
  99. xinference/thirdparty/whisper/assets/mel_filters.npz +0 -0
  100. xinference/thirdparty/whisper/assets/multilingual.tiktoken +50257 -0
  101. xinference/thirdparty/whisper/audio.py +157 -0
  102. xinference/thirdparty/whisper/decoding.py +826 -0
  103. xinference/thirdparty/whisper/model.py +314 -0
  104. xinference/thirdparty/whisper/normalizers/__init__.py +2 -0
  105. xinference/thirdparty/whisper/normalizers/basic.py +76 -0
  106. xinference/thirdparty/whisper/normalizers/english.json +1741 -0
  107. xinference/thirdparty/whisper/normalizers/english.py +550 -0
  108. xinference/thirdparty/whisper/timing.py +386 -0
  109. xinference/thirdparty/whisper/tokenizer.py +395 -0
  110. xinference/thirdparty/whisper/transcribe.py +605 -0
  111. xinference/thirdparty/whisper/triton_ops.py +109 -0
  112. xinference/thirdparty/whisper/utils.py +316 -0
  113. xinference/thirdparty/whisper/version.py +1 -0
  114. xinference/types.py +7 -49
  115. xinference/web/ui/build/asset-manifest.json +6 -6
  116. xinference/web/ui/build/index.html +1 -1
  117. xinference/web/ui/build/static/css/{main.4bafd904.css → main.632e9148.css} +2 -2
  118. xinference/web/ui/build/static/css/main.632e9148.css.map +1 -0
  119. xinference/web/ui/build/static/js/main.9cfafbd6.js +3 -0
  120. xinference/web/ui/build/static/js/{main.eb13fe95.js.LICENSE.txt → main.9cfafbd6.js.LICENSE.txt} +2 -0
  121. xinference/web/ui/build/static/js/main.9cfafbd6.js.map +1 -0
  122. xinference/web/ui/node_modules/.cache/babel-loader/01d6d198156bacbd436c51435edbd4b2cacd47a79db929105eba30f74b67d48d.json +1 -0
  123. xinference/web/ui/node_modules/.cache/babel-loader/10c69dc7a296779fcffedeff9393d832dfcb0013c36824adf623d3c518b801ff.json +1 -0
  124. xinference/web/ui/node_modules/.cache/babel-loader/59eb25f514afcc4fefd1b309d192b2455f1e0aec68a9de598ca4b2333fe2c774.json +1 -0
  125. xinference/web/ui/node_modules/.cache/babel-loader/68bede6d95bb5ef0b35bbb3ec5b8c937eaf6862c6cdbddb5ef222a7776aaf336.json +1 -0
  126. xinference/web/ui/node_modules/.cache/babel-loader/77d50223f3e734d4485cca538cb098a8c3a7a0a1a9f01f58cdda3af42fe1adf5.json +1 -0
  127. xinference/web/ui/node_modules/.cache/babel-loader/a56d5a642409a84988891089c98ca28ad0546432dfbae8aaa51bc5a280e1cdd2.json +1 -0
  128. xinference/web/ui/node_modules/.cache/babel-loader/d9ff696a3e3471f01b46c63d18af32e491eb5dc0e43cb30202c96871466df57f.json +1 -0
  129. xinference/web/ui/node_modules/.cache/babel-loader/f5039ddbeb815c51491a1989532006b96fc3ae49c6c60e3c097f875b4ae915ae.json +1 -0
  130. xinference/web/ui/node_modules/.package-lock.json +37 -0
  131. xinference/web/ui/node_modules/a-sync-waterfall/package.json +21 -0
  132. xinference/web/ui/node_modules/nunjucks/node_modules/commander/package.json +48 -0
  133. xinference/web/ui/node_modules/nunjucks/package.json +112 -0
  134. xinference/web/ui/package-lock.json +38 -0
  135. xinference/web/ui/package.json +1 -0
  136. {xinference-0.14.4.post1.dist-info → xinference-0.15.0.dist-info}/METADATA +8 -8
  137. {xinference-0.14.4.post1.dist-info → xinference-0.15.0.dist-info}/RECORD +141 -87
  138. xinference/model/llm/transformers/llama_2.py +0 -108
  139. xinference/web/ui/build/static/css/main.4bafd904.css.map +0 -1
  140. xinference/web/ui/build/static/js/main.eb13fe95.js +0 -3
  141. xinference/web/ui/build/static/js/main.eb13fe95.js.map +0 -1
  142. xinference/web/ui/node_modules/.cache/babel-loader/0b11a5339468c13b2d31ac085e7effe4303259b2071abd46a0a8eb8529233a5e.json +0 -1
  143. xinference/web/ui/node_modules/.cache/babel-loader/213b5913e164773c2b0567455377765715f5f07225fbac77ad8e1e9dc9648a47.json +0 -1
  144. xinference/web/ui/node_modules/.cache/babel-loader/5c26a23b5eacf5b752a08531577ae3840bb247745ef9a39583dc2d05ba93a82a.json +0 -1
  145. xinference/web/ui/node_modules/.cache/babel-loader/978b57d1a04a701bc3fcfebc511f5f274eed6ed7eade67f6fb76c27d5fd9ecc8.json +0 -1
  146. {xinference-0.14.4.post1.dist-info → xinference-0.15.0.dist-info}/LICENSE +0 -0
  147. {xinference-0.14.4.post1.dist-info → xinference-0.15.0.dist-info}/WHEEL +0 -0
  148. {xinference-0.14.4.post1.dist-info → xinference-0.15.0.dist-info}/entry_points.txt +0 -0
  149. {xinference-0.14.4.post1.dist-info → xinference-0.15.0.dist-info}/top_level.txt +0 -0
@@ -70,19 +70,11 @@
70
70
  "model_revision": "v1.0.1"
71
71
  }
72
72
  ],
73
- "prompt_style": {
74
- "style_name": "LLAMA2",
75
- "system_prompt": "<s>[INST] <<SYS>>\nYou are a helpful AI assistant.\n<</SYS>>\n\n",
76
- "roles": [
77
- "[INST]",
78
- "[/INST]"
79
- ],
80
- "intra_message_sep": " ",
81
- "inter_message_sep": " </s><s>",
82
- "stop_token_ids": [
73
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = '<<SYS>>\n' + messages[0]['content'] | trim + '\n<</SYS>>\n\n' %}{% set messages = messages[1:] %}{% else %}{% set system_message = '' %}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 %}{% set content = system_message + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ '<s>' + '[INST] ' + content | trim + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content | trim + ' ' + '</s>' }}{% endif %}{% endfor %}",
74
+ "stop_token_ids": [
83
75
  2
84
- ]
85
- }
76
+ ],
77
+ "stop": []
86
78
  },
87
79
  {
88
80
  "version": 1,
@@ -175,24 +167,15 @@
175
167
  "model_hub": "modelscope"
176
168
  }
177
169
  ],
178
- "prompt_style": {
179
- "style_name": "LLAMA3",
180
- "system_prompt": "You are a helpful assistant.",
181
- "roles": [
182
- "user",
183
- "assistant"
184
- ],
185
- "intra_message_sep": "\n\n",
186
- "inter_message_sep": "<|eot_id|>",
187
- "stop_token_ids": [
188
- 128001,
189
- 128009
190
- ],
191
- "stop": [
192
- "<|end_of_text|>",
193
- "<|eot_id|>"
194
- ]
195
- }
170
+ "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = '<|begin_of_text|>' + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}",
171
+ "stop_token_ids": [
172
+ 128001,
173
+ 128009
174
+ ],
175
+ "stop": [
176
+ "<|end_of_text|>",
177
+ "<|eot_id|>"
178
+ ]
196
179
  },
197
180
  {
198
181
  "version": 1,
@@ -367,24 +350,15 @@
367
350
  "model_hub": "modelscope"
368
351
  }
369
352
  ],
370
- "prompt_style": {
371
- "style_name": "LLAMA3",
372
- "system_prompt": "You are a helpful assistant.",
373
- "roles": [
374
- "user",
375
- "assistant"
376
- ],
377
- "intra_message_sep": "\n\n",
378
- "inter_message_sep": "<|eot_id|>",
379
- "stop_token_ids": [
380
- 128001,
381
- 128009
382
- ],
383
- "stop": [
384
- "<|end_of_text|>",
385
- "<|eot_id|>"
386
- ]
387
- }
353
+ "chat_template": "{{- '<|begin_of_text|>' }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- set date_string = \"26 Jul 2024\" %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message + builtin tools #}\n{{- \"<|start_header_id|>system<|end_header_id|>\n\n\" }}\n{%- if builtin_tools is defined or tools is not none %}\n {{- \"Environment: ipython\n\" }}\n{%- endif %}\n{%- if builtin_tools is defined %}\n {{- \"Tools: \" + builtin_tools | reject('equalto', 'code_interpreter') | join(\", \") + \"\n\n\"}}\n{%- endif %}\n{{- \"Cutting Knowledge Date: December 2023\n\" }}\n{{- \"Today Date: \" + date_string + \"\n\n\" }}\n{%- if tools is not none and not tools_in_user_message %}\n {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\n\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\n\n\" }}\n {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \"<|eot_id|>\" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_user_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n {{- '<|start_header_id|>user<|end_header_id|>\n\n' -}}\n {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n {{- \"with its proper arguments that best answers the given prompt.\n\n\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\n\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\n\n\" }}\n {%- endfor %}\n {{- first_user_message + \"<|eot_id|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }}\n {%- elif 'tool_calls' in message %}\n {%- if not message.tool_calls|length == 1 %}\n {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n {%- endif %}\n {%- set tool_call = message.tool_calls[0].function %}\n {%- if builtin_tools is defined and tool_call.name in builtin_tools %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}\n {{- \"<|python_tag|>\" + tool_call.name + \".call(\" }}\n {%- for arg_name, arg_val in tool_call.arguments | items %}\n {{- arg_name + '=\"' + arg_val + '\"' }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- endif %}\n {%- endfor %}\n {{- \")\" }}\n {%- else %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}\n {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n {{- '\"parameters\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- \"}\" }}\n {%- endif %}\n {%- if builtin_tools is defined %}\n {#- This means we're in ipython mode #}\n {{- \"<|eom_id|>\" }}\n {%- else %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n {{- \"<|start_header_id|>ipython<|end_header_id|>\n\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}\n{%- endif %}\n",
354
+ "stop_token_ids": [
355
+ 128001,
356
+ 128009
357
+ ],
358
+ "stop": [
359
+ "<|end_of_text|>",
360
+ "<|eot_id|>"
361
+ ]
388
362
  },
389
363
  {
390
364
  "version": 1,
@@ -449,20 +423,12 @@
449
423
  "model_revision": "v1.0.3"
450
424
  }
451
425
  ],
452
- "prompt_style": {
453
- "style_name": "NO_COLON_TWO",
454
- "system_prompt": "",
455
- "roles": [
456
- "<reserved_106>",
457
- "<reserved_107>"
458
- ],
459
- "intra_message_sep": "",
460
- "inter_message_sep": "</s>",
461
- "stop_token_ids": [
462
- 2,
463
- 195
464
- ]
465
- }
426
+ "chat_template": "{{ (messages|selectattr('role', 'equalto', 'system')|list|last).content|trim if (messages|selectattr('role', 'equalto', 'system')|list) else '' }}\n\n{% for message in messages %}\n{% if message['role'] == 'user' %}\n<reserved_106>\n{{ message['content']|trim -}}\n{% if not loop.last %}\n\n\n{% endif %}\n{% elif message['role'] == 'assistant' %}\n<reserved_107>\n{{ message['content']|trim -}}\n{% if not loop.last %}\n\n\n{% endif %}\n{% endif %}\n{% endfor %}\n{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}\n<reserved_107>\n{% endif %}",
427
+ "stop_token_ids": [
428
+ 2,
429
+ 195
430
+ ],
431
+ "stop": []
466
432
  },
467
433
  {
468
434
  "version": 1,
@@ -503,139 +469,6 @@
503
469
  }
504
470
  ]
505
471
  },
506
- {
507
- "version": 1,
508
- "context_length": 8192,
509
- "model_name": "chatglm3",
510
- "model_lang": [
511
- "en",
512
- "zh"
513
- ],
514
- "model_ability": [
515
- "chat",
516
- "tools"
517
- ],
518
- "model_description": "ChatGLM3 is the third generation of ChatGLM, still open-source and trained on Chinese and English data.",
519
- "model_specs": [
520
- {
521
- "model_format": "pytorch",
522
- "model_size_in_billions": 6,
523
- "quantizations": [
524
- "4-bit",
525
- "8-bit",
526
- "none"
527
- ],
528
- "model_hub": "modelscope",
529
- "model_id": "ZhipuAI/chatglm3-6b",
530
- "model_revision": "v1.0.2"
531
- }
532
- ],
533
- "prompt_style": {
534
- "style_name": "CHATGLM3",
535
- "system_prompt": "",
536
- "roles": [
537
- "user",
538
- "assistant"
539
- ],
540
- "stop_token_ids": [
541
- 64795,
542
- 64797,
543
- 2
544
- ],
545
- "stop": [
546
- "<|user|>",
547
- "<|observation|>"
548
- ]
549
- }
550
- },
551
- {
552
- "version": 1,
553
- "context_length": 32768,
554
- "model_name": "chatglm3-32k",
555
- "model_lang": [
556
- "en",
557
- "zh"
558
- ],
559
- "model_ability": [
560
- "chat"
561
- ],
562
- "model_description": "ChatGLM3 is the third generation of ChatGLM, still open-source and trained on Chinese and English data.",
563
- "model_specs": [
564
- {
565
- "model_format": "pytorch",
566
- "model_size_in_billions": 6,
567
- "quantizations": [
568
- "4-bit",
569
- "8-bit",
570
- "none"
571
- ],
572
- "model_hub": "modelscope",
573
- "model_id": "ZhipuAI/chatglm3-6b-32k",
574
- "model_revision": "master"
575
- }
576
- ],
577
- "prompt_style": {
578
- "style_name": "CHATGLM3",
579
- "system_prompt": "",
580
- "roles": [
581
- "user",
582
- "assistant"
583
- ],
584
- "stop_token_ids": [
585
- 64795,
586
- 64797,
587
- 2
588
- ],
589
- "stop": [
590
- "<|user|>",
591
- "<|observation|>"
592
- ]
593
- }
594
- },
595
- {
596
- "version": 1,
597
- "context_length": 131072,
598
- "model_name": "chatglm3-128k",
599
- "model_lang": [
600
- "en",
601
- "zh"
602
- ],
603
- "model_ability": [
604
- "chat"
605
- ],
606
- "model_description": "ChatGLM3 is the third generation of ChatGLM, still open-source and trained on Chinese and English data.",
607
- "model_specs": [
608
- {
609
- "model_format": "pytorch",
610
- "model_size_in_billions": 6,
611
- "quantizations": [
612
- "4-bit",
613
- "8-bit",
614
- "none"
615
- ],
616
- "model_hub": "modelscope",
617
- "model_id": "ZhipuAI/chatglm3-6b-128k",
618
- "model_revision": "master"
619
- }
620
- ],
621
- "prompt_style": {
622
- "style_name": "CHATGLM3",
623
- "system_prompt": "",
624
- "roles": [
625
- "user",
626
- "assistant"
627
- ],
628
- "stop_token_ids": [
629
- 64795,
630
- 64797,
631
- 2
632
- ],
633
- "stop": [
634
- "<|user|>",
635
- "<|observation|>"
636
- ]
637
- }
638
- },
639
472
  {
640
473
  "version": 1,
641
474
  "context_length": 131072,
@@ -690,24 +523,17 @@
690
523
  "model_revision": "master"
691
524
  }
692
525
  ],
693
- "prompt_style": {
694
- "style_name": "CHATGLM3",
695
- "system_prompt": "",
696
- "roles": [
697
- "user",
698
- "assistant"
699
- ],
700
- "stop_token_ids": [
701
- 151329,
702
- 151336,
703
- 151338
704
- ],
705
- "stop": [
706
- "<|endoftext|>",
707
- "<|user|>",
708
- "<|observation|>"
709
- ]
710
- }
526
+ "chat_template": "[gMASK]<sop>{% for item in messages %}{% if item['tools'] is defined %}<|system|>\n你是一个名为 ChatGLM 的人工智能助手。你是基于智谱AI训练的语言模型 GLM-4 模型开发的,你的任务是针对用户的问题和要求提供适当的答复和支持。\n\n# 可用工具{% set tools = item['tools'] %}{% for tool in tools %}{% if tool['type'] == 'function' %}\n\n## {{ tool['function']['name'] }}\n\n{{ tool['function'] | tojson(indent=4) }}\n在调用上述函数时,请使用 Json 格式表示调用的参数。{% elif tool['type'] == 'python' %}\n\n## python\n\n当你向 `python` 发送包含 Python 代码的消息时,该代码将会在一个有状态的 Jupyter notebook 环境中执行。\n`python` 返回代码执行的输出,或在执行 60 秒后返回超时。\n`/mnt/data` 将会持久化存储你的文件。在此会话中,`python` 无法访问互联网。不要使用 `python` 进行任何网络请求或者在线 API 调用,这些在线内容的访问将不会成功。{% elif tool['type'] == 'simple_browser' %}\n\n## simple_browser\n\n你可以使用 `simple_browser` 工具。该工具支持以下函数:\n`search(query: str, recency_days: int)`:使用搜索引擎进行查询并显示结果,可以使用 `recency_days` 参数控制搜索内容的时效性。\n`mclick(ids: list[int])`:获取一系列指定 id 的页面内容。每次调用时,须选择3-10个页面。选择多个角度的页面,同时尽可能选择可信任的信息来源。考虑到部分页面是无法加载的,你也可以多打开一些可能有用的页面而不用担心内容过多。\n`open_url(url: str)`:打开指定的 URL。\n\n使用 `【{引用 id}†{引用文本}】` 来引用内容。\n\n操作步骤:1. 使用 `search` 来获得信息列表; 2. 使用 `mclick` 来获取指定 ID 页面的内容; 3. 根据获得的内容进行回复。在回复中应当引用信息来源。\n 如果用户提供了 URL,也可以用 `open_url` 直接打开页面。\n如果初次搜索结果没有找到合适的信息,也可以再次使用 `search` 进行搜索。{% elif tool['type'] == 'cogview' %}\n\n## cogview\n\n如果用户的请求中包含了对图像的描述,你可以使用 `cogview` 来生成图像并展示给用户。你需要向 `cogview` 发送图像描述,规则:\n- 发送给 `cogview` 的消息必须使用英语。用户的中文描述必须完全翻译为英语。\n- 应当尽可能详细地描述图像生成的需求,需求描述约 100 英文单词。\n- 保持用户原始描述的意图。不要虚构内容或者没见过的人物。\n- 如无特殊说明,所在地为中国,持有中国立场并遵循中国社会主义价值观。{% endif %}{% endfor %}{% endif %}{% if item['content'] %}<|{{ item['role'] }}|>{{ item['metadata'] }}\n{{ item['content'] }}{% endif %}{% endfor %}{% if add_generation_prompt %}<|assistant|>{% endif %}",
527
+ "stop_token_ids": [
528
+ 151329,
529
+ 151336,
530
+ 151338
531
+ ],
532
+ "stop": [
533
+ "<|endoftext|>",
534
+ "<|user|>",
535
+ "<|observation|>"
536
+ ]
711
537
  },
712
538
  {
713
539
  "version": 1,
@@ -763,24 +589,17 @@
763
589
  "model_revision": "master"
764
590
  }
765
591
  ],
766
- "prompt_style": {
767
- "style_name": "CHATGLM3",
768
- "system_prompt": "",
769
- "roles": [
770
- "user",
771
- "assistant"
772
- ],
773
- "stop_token_ids": [
774
- 151329,
775
- 151336,
776
- 151338
777
- ],
778
- "stop": [
779
- "<|endoftext|>",
780
- "<|user|>",
781
- "<|observation|>"
782
- ]
783
- }
592
+ "chat_template": "[gMASK]<sop>{% for item in messages %}{% if item['tools'] is defined %}<|system|>\n你是一个名为 GLM-4 的人工智能助手。你是基于智谱AI训练的语言模型 GLM-4 模型开发的,你的任务是针对用户的问题和要求提供适当的答复和支持。\n\n# 可用工具{% set tools = item['tools'] %}{% for tool in tools %}{% if tool['type'] == 'function' %}\n\n## {{ tool['function']['name'] }}\n\n{{ tool['function'] | tojson(indent=4) }}\n在调用上述函数时,请使用 Json 格式表示调用的参数。{% elif tool['type'] == 'python' %}\n\n## python\n\n当你向 `python` 发送包含 Python 代码的消息时,该代码将会在一个有状态的 Jupyter notebook 环境中执行。\n`python` 返回代码执行的输出,或在执行 60 秒后返回超时。\n`/mnt/data` 将会持久化存储你的文件。在此会话中,`python` 无法访问互联网。不要使用 `python` 进行任何网络请求或者在线 API 调用,这些在线内容的访问将不会成功。{% elif tool['type'] == 'simple_browser' %}\n\n## simple_browser\n\n你可以使用 `simple_browser` 工具。该工具支持以下函数:\n`search(query: str, recency_days: int)`:使用搜索引擎进行查询并显示结果,可以使用 `recency_days` 参数控制搜索内容的时效性。\n`mclick(ids: list[int])`:获取一系列指定 id 的页面内容。每次调用时,须选择3-10个页面。选择多个角度的页面,同时尽可能选择可信任的信息来源。考虑到部分页面是无法加载的,你也可以多打开一些可能有用的页面而不用担心内容过多。\n`open_url(url: str)`:打开指定的 URL。\n\n使用 `【{引用 id}†{引用文本}】` 来引用内容。\n\n操作步骤:1. 使用 `search` 来获得信息列表; 2. 使用 `mclick` 来获取指定 ID 页面的内容; 3. 根据获得的内容进行回复。在回复中应当引用信息来源。\n 如果用户提供了 URL,也可以用 `open_url` 直接打开页面。\n如果初次搜索结果没有找到合适的信息,也可以再次使用 `search` 进行搜索。{% elif tool['type'] == 'cogview' %}\n\n## cogview\n\n如果用户的请求中包含了对图像的描述,你可以使用 `cogview` 来生成图像并展示给用户。你需要向 `cogview` 发送图像描述,规则:\n- 发送给 `cogview` 的消息必须使用英语。用户的中文描述必须完全翻译为英语。\n- 应当尽可能详细地描述图像生成的需求,需求描述约 100 英文单词。\n- 保持用户原始描述的意图。不要虚构内容或者没见过的人物。\n- 如无特殊说明,所在地为中国,持有中国立场并遵循中国社会主义价值观。{% endif %}{% endfor %}{% endif %}{% if item['content'] %}<|{{ item['role'] }}|>{{ item['metadata'] }}\n{{ item['content'] }}{% endif %}{% endfor %}{% if add_generation_prompt %}<|assistant|>{% endif %}",
593
+ "stop_token_ids": [
594
+ 151329,
595
+ 151336,
596
+ 151338
597
+ ],
598
+ "stop": [
599
+ "<|endoftext|>",
600
+ "<|user|>",
601
+ "<|observation|>"
602
+ ]
784
603
  },
785
604
  {
786
605
  "version": 1,
@@ -809,24 +628,17 @@
809
628
  "model_revision": "master"
810
629
  }
811
630
  ],
812
- "prompt_style": {
813
- "style_name": "CHATGLM3",
814
- "system_prompt": "",
815
- "roles": [
816
- "user",
817
- "assistant"
818
- ],
819
- "stop_token_ids": [
820
- 151329,
821
- 151336,
822
- 151338
823
- ],
824
- "stop": [
825
- "<|endoftext|>",
826
- "<|user|>",
827
- "<|observation|>"
828
- ]
829
- }
631
+ "chat_template": "",
632
+ "stop_token_ids": [
633
+ 151329,
634
+ 151336,
635
+ 151338
636
+ ],
637
+ "stop": [
638
+ "<|endoftext|>",
639
+ "<|user|>",
640
+ "<|observation|>"
641
+ ]
830
642
  },
831
643
  {
832
644
  "version": 1,
@@ -869,24 +681,17 @@
869
681
  "model_hub": "modelscope"
870
682
  }
871
683
  ],
872
- "prompt_style": {
873
- "style_name": "CHATGLM3",
874
- "system_prompt": "",
875
- "roles": [
876
- "user",
877
- "assistant"
878
- ],
879
- "stop_token_ids": [
880
- 151329,
881
- 151336,
882
- 151338
883
- ],
884
- "stop": [
885
- "<|endoftext|>",
886
- "<|user|>",
887
- "<|observation|>"
888
- ]
889
- }
684
+ "chat_template": "{% for item in messages %}{% if loop.first and item['role'] == 'system' %}{{ '<|system|>\n' + item['content'] }}{% elif loop.first %}{{ '<|system|>\n你是一位智能编程助手,你叫CodeGeeX。你会为用户回答关于编程、代码、计算机方面的任何问题,并提供格式规范、可以执行、准确安全的代码,并在必要时提供详细的解释。' }}{% endif %}{% if item['role'] == 'user' %}{{ '<|user|>\n' + item['content'] }}{% elif item['role'] == 'assistant' %}{{ '<|assistant|>\n' + item['content'] }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% endif %}",
685
+ "stop_token_ids": [
686
+ 151329,
687
+ 151336,
688
+ 151338
689
+ ],
690
+ "stop": [
691
+ "<|endoftext|>",
692
+ "<|user|>",
693
+ "<|observation|>"
694
+ ]
890
695
  },
891
696
  {
892
697
  "version": 1,
@@ -926,14 +731,13 @@
926
731
  "model_revision": "master"
927
732
  }
928
733
  ],
929
- "prompt_style": {
930
- "style_name": "XVERSE",
931
- "system_prompt": "",
932
- "roles": [
933
- "user",
934
- "assistant"
935
- ]
936
- }
734
+ "chat_template": "{% for item in messages %}{% if loop.first and item['role'] == 'system' %}{{ '<|system|> \n' + item['content'] }}{% endif %}{% if item['role'] == 'user' %}{{ '<|user|> \n' + item['content'] }}{% elif item['role'] == 'assistant' %}{{ '<|assistant|> \n' + item['content'] }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>' }}{% endif %}",
735
+ "stop_token_ids": [
736
+ 3
737
+ ],
738
+ "stop": [
739
+ "<|endoftext|>"
740
+ ]
937
741
  },
938
742
  {
939
743
  "version": 1,
@@ -1045,23 +849,15 @@
1045
849
  "model_hub": "modelscope"
1046
850
  }
1047
851
  ],
1048
- "prompt_style": {
1049
- "style_name": "INTERNLM2",
1050
- "system_prompt": "You are InternLM (书生·浦语), a helpful, honest, and harmless AI assistant developed by Shanghai AI Laboratory (上海人工智能实验室).",
1051
- "roles": [
1052
- "<|im_start|>user",
1053
- "<|im_start|>assistant"
1054
- ],
1055
- "intra_message_sep": "<|im_end|>",
1056
- "stop_token_ids": [
1057
- 2,
1058
- 92542
1059
- ],
1060
- "stop": [
1061
- "</s>",
1062
- "<|im_end|>"
1063
- ]
1064
- }
852
+ "chat_template": "{{ '<s>' }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
853
+ "stop_token_ids": [
854
+ 2,
855
+ 92542
856
+ ],
857
+ "stop": [
858
+ "</s>",
859
+ "<|im_end|>"
860
+ ]
1065
861
  },
1066
862
  {
1067
863
  "version": 1,
@@ -1086,23 +882,15 @@
1086
882
  "model_hub": "modelscope"
1087
883
  }
1088
884
  ],
1089
- "prompt_style": {
1090
- "style_name": "INTERNLM2",
1091
- "system_prompt": "You are InternLM (书生·浦语), a helpful, honest, and harmless AI assistant developed by Shanghai AI Laboratory (上海人工智能实验室).",
1092
- "roles": [
1093
- "<|im_start|>user",
1094
- "<|im_start|>assistant"
1095
- ],
1096
- "intra_message_sep": "<|im_end|>",
1097
- "stop_token_ids": [
1098
- 2,
1099
- 92542
1100
- ],
1101
- "stop": [
1102
- "</s>",
1103
- "<|im_end|>"
1104
- ]
1105
- }
885
+ "chat_template": "{{ '<s>' }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
886
+ "stop_token_ids": [
887
+ 2,
888
+ 92542
889
+ ],
890
+ "stop": [
891
+ "</s>",
892
+ "<|im_end|>"
893
+ ]
1106
894
  },
1107
895
  {
1108
896
  "version": 1,
@@ -1140,18 +928,13 @@
1140
928
  "model_revision": "v1.0.0"
1141
929
  }
1142
930
  ],
1143
- "prompt_style": {
1144
- "style_name": "ADD_COLON_SINGLE",
1145
- "system_prompt": "Below is an instruction that describes a task. Write a response that appropriately completes the request.",
1146
- "roles": [
1147
- "Instruction",
1148
- "Response"
1149
- ],
1150
- "intra_message_sep": "\n\n### ",
1151
- "stop": [
1152
- "</s>"
1153
- ]
1154
- }
931
+ "chat_template": "{% for item in messages %}{% if loop.first and item['role'] == 'system' %}{{ item['content'] + '\n\n### ' }}{% elif loop.first %}{{ 'Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### ' }}{% endif %}{% if item['role'] == 'user' %}{{ 'Instruction: ' + item['content'] + '\n\n### ' }}{% elif item['role'] == 'assistant' %}{{ 'Response: ' + item['content'] + '\n\n### ' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Response: Let\\'s think step by step.' }}{% endif %}",
932
+ "stop_token_ids": [
933
+ 2
934
+ ],
935
+ "stop": [
936
+ "</s>"
937
+ ]
1155
938
  },
1156
939
  {
1157
940
  "version": 1,
@@ -1252,24 +1035,15 @@
1252
1035
  "model_hub": "modelscope"
1253
1036
  }
1254
1037
  ],
1255
- "prompt_style": {
1256
- "style_name": "CodeShell",
1257
- "system_prompt": "",
1258
- "roles": [
1259
- "## human:",
1260
- "## assistant: "
1261
- ],
1262
- "intra_message_sep": "",
1263
- "inter_message_sep": "",
1264
- "stop_token_ids": [
1265
- 70000
1266
- ],
1267
- "stop": [
1268
- "<|endoftext|>",
1269
- "|||",
1270
- "|<end>|"
1271
- ]
1272
- }
1038
+ "chat_template": "{% for item in messages %}{% if item['role'] == 'user' %}{{ '## human: ' + item['content'] + '|<end>|' }}{% elif item['role'] == 'assistant' %}{{ '## assistant: ' + item['content'] + '|<end>|' }}{% endif %}{% endfor %}{{ '## assistant: ' }}",
1039
+ "stop_token_ids": [
1040
+ 70000
1041
+ ],
1042
+ "stop": [
1043
+ "<|endoftext|>",
1044
+ "|||",
1045
+ "|<end>|"
1046
+ ]
1273
1047
  },
1274
1048
  {
1275
1049
  "version": 1,
@@ -1353,19 +1127,13 @@
1353
1127
  "model_revision": "v0.1.0"
1354
1128
  }
1355
1129
  ],
1356
- "prompt_style": {
1357
- "style_name": "LLAMA2",
1358
- "system_prompt": "<s>[INST] <<SYS>>\nWrite code to solve the following coding problem that obeys the constraints and passes the example test cases. Please wrap your code answer using ```:\n<</SYS>>\n\n",
1359
- "roles": [
1360
- "[INST]",
1361
- "[/INST]"
1362
- ],
1363
- "intra_message_sep": " ",
1364
- "inter_message_sep": " </s><s>",
1365
- "stop_token_ids": [
1130
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = '<<SYS>>\n' + messages[0]['content'] | trim + '\n<</SYS>>\n\n' %}{% set messages = messages[1:] %}{% else %}{% set system_message = '' %}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 %}{% set content = system_message + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ '<s>' + '[INST] ' + content | trim + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content | trim + ' ' + '</s>' }}{% endif %}{% endfor %}",
1131
+ "stop_token_ids": [
1366
1132
  2
1367
- ]
1368
- }
1133
+ ],
1134
+ "stop": [
1135
+ "</s>"
1136
+ ]
1369
1137
  },
1370
1138
  {
1371
1139
  "version": 1,
@@ -1567,16 +1335,13 @@
1567
1335
  "model_revision": "master"
1568
1336
  }
1569
1337
  ],
1570
- "prompt_style": {
1571
- "style_name": "MIXTRAL_V01",
1572
- "system_prompt": "",
1573
- "roles": [
1574
- "user",
1575
- "assistant"
1576
- ],
1577
- "intra_message_sep": "",
1578
- "inter_message_sep": ""
1579
- }
1338
+ "chat_template": "{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content'] %}\n {%- set loop_messages = messages[1:] %}\n{%- else %}\n {%- set loop_messages = messages %}\n{%- endif %}\n\n{{- '<s>' }}\n{%- for message in loop_messages %}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}\n {{- raise_exception('After the optional system message, conversation roles must alternate user/assistant/user/assistant/...') }}\n {%- endif %}\n {%- if message['role'] == 'user' %}\n {%- if loop.first and system_message is defined %}\n {{- ' [INST] ' + system_message + '\n\n' + message['content'] + ' [/INST]' }}\n {%- else %}\n {{- ' [INST] ' + message['content'] + ' [/INST]' }}\n {%- endif %}\n {%- elif message['role'] == 'assistant' %}\n {{- ' ' + message['content'] + '</s>'}}\n {%- else %}\n {{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}\n {%- endif %}\n{%- endfor %}\n",
1339
+ "stop_token_ids": [
1340
+ 2
1341
+ ],
1342
+ "stop": [
1343
+ "</s>"
1344
+ ]
1580
1345
  },
1581
1346
  {
1582
1347
  "version": 1,
@@ -1716,28 +1481,19 @@
1716
1481
  "model_revision": "master"
1717
1482
  }
1718
1483
  ],
1719
- "prompt_style": {
1720
- "style_name": "CHATML",
1721
- "system_prompt": "",
1722
- "roles": [
1723
- "<|im_start|>user",
1724
- "<|im_start|>assistant"
1725
- ],
1726
- "intra_message_sep": "<|im_end|>",
1727
- "inter_message_sep": "",
1728
- "stop_token_ids": [
1729
- 2,
1730
- 6,
1731
- 7,
1732
- 8
1733
- ],
1734
- "stop": [
1735
- "<|endoftext|>",
1736
- "<|im_start|>",
1737
- "<|im_end|>",
1738
- "<|im_sep|>"
1739
- ]
1740
- }
1484
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
1485
+ "stop_token_ids": [
1486
+ 2,
1487
+ 6,
1488
+ 7,
1489
+ 8
1490
+ ],
1491
+ "stop": [
1492
+ "<|endoftext|>",
1493
+ "<|im_start|>",
1494
+ "<|im_end|>",
1495
+ "<|im_sep|>"
1496
+ ]
1741
1497
  },
1742
1498
  {
1743
1499
  "version": 1,
@@ -1900,28 +1656,19 @@
1900
1656
  "model_revision": "master"
1901
1657
  }
1902
1658
  ],
1903
- "prompt_style": {
1904
- "style_name": "CHATML",
1905
- "system_prompt": "",
1906
- "roles": [
1907
- "<|im_start|>user",
1908
- "<|im_start|>assistant"
1909
- ],
1910
- "intra_message_sep": "<|im_end|>",
1911
- "inter_message_sep": "",
1912
- "stop_token_ids": [
1913
- 2,
1914
- 6,
1915
- 7,
1916
- 8
1917
- ],
1918
- "stop": [
1919
- "<|endoftext|>",
1920
- "<|im_start|>",
1921
- "<|im_end|>",
1922
- "<|im_sep|>"
1923
- ]
1924
- }
1659
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\n' + content + '<|im_end|>\n<|im_start|>assistant\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\n' }}{% endif %}{% endfor %}",
1660
+ "stop_token_ids": [
1661
+ 2,
1662
+ 6,
1663
+ 7,
1664
+ 8
1665
+ ],
1666
+ "stop": [
1667
+ "<|endoftext|>",
1668
+ "<|im_start|>",
1669
+ "<|im_end|>",
1670
+ "<|im_sep|>"
1671
+ ]
1925
1672
  },
1926
1673
  {
1927
1674
  "version": 1,
@@ -1961,28 +1708,19 @@
1961
1708
  "model_revision": "master"
1962
1709
  }
1963
1710
  ],
1964
- "prompt_style": {
1965
- "style_name": "CHATML",
1966
- "system_prompt": "",
1967
- "roles": [
1968
- "<|im_start|>user",
1969
- "<|im_start|>assistant"
1970
- ],
1971
- "intra_message_sep": "<|im_end|>",
1972
- "inter_message_sep": "",
1973
- "stop_token_ids": [
1974
- 2,
1975
- 6,
1976
- 7,
1977
- 8
1978
- ],
1979
- "stop": [
1980
- "<|endoftext|>",
1981
- "<|im_start|>",
1982
- "<|im_end|>",
1983
- "<|im_sep|>"
1984
- ]
1985
- }
1711
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\n' + content + '<|im_end|>\n<|im_start|>assistant\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\n' }}{% endif %}{% endfor %}",
1712
+ "stop_token_ids": [
1713
+ 2,
1714
+ 6,
1715
+ 7,
1716
+ 8
1717
+ ],
1718
+ "stop": [
1719
+ "<|endoftext|>",
1720
+ "<|im_start|>",
1721
+ "<|im_end|>",
1722
+ "<|im_sep|>"
1723
+ ]
1986
1724
  },
1987
1725
  {
1988
1726
  "version": 1,
@@ -2009,15 +1747,13 @@
2009
1747
  "model_revision": "v1.0.0"
2010
1748
  }
2011
1749
  ],
2012
- "prompt_style": {
2013
- "style_name": "ADD_COLON_SINGLE_COT",
2014
- "system_prompt": "Below is an instruction that describes a task. Write a response that appropriately completes the request.",
2015
- "roles": [
2016
- "Instruction",
2017
- "Response"
2018
- ],
2019
- "intra_message_sep": "\n\n### "
2020
- }
1750
+ "chat_template": "{% for item in messages %}{% if loop.first and item['role'] == 'system' %}{{ item['content'] + '\n\n### ' }}{% elif loop.first %}{{ 'Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### ' }}{% endif %}{% if item['role'] == 'user' %}{{ 'Instruction: ' + item['content'] + '\n\n### ' }}{% elif item['role'] == 'assistant' %}{{ 'Response: ' + item['content'] + '\n\n### ' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Response: Let\\'s think step by step.' }}{% endif %}",
1751
+ "stop_token_ids": [
1752
+ 2
1753
+ ],
1754
+ "stop": [
1755
+ "</s>"
1756
+ ]
2021
1757
  },
2022
1758
  {
2023
1759
  "version": 1,
@@ -2044,22 +1780,13 @@
2044
1780
  "model_revision": "v1.0.0"
2045
1781
  }
2046
1782
  ],
2047
- "prompt_style": {
2048
- "style_name": "LLAMA2",
2049
- "system_prompt": "[INST] ",
2050
- "roles": [
2051
- "[INST]",
2052
- "[/INST]"
2053
- ],
2054
- "intra_message_sep": " ",
2055
- "inter_message_sep": "<s>",
2056
- "stop_token_ids": [
2057
- 2
2058
- ],
2059
- "stop": [
2060
- "</s>"
2061
- ]
2062
- }
1783
+ "chat_template": "{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content'] %}\n {%- set loop_messages = messages[1:] %}\n{%- else %}\n {%- set loop_messages = messages %}\n{%- endif %}\n\n{{- '<s>' }}\n{%- for message in loop_messages %}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}\n {{- raise_exception('After the optional system message, conversation roles must alternate user/assistant/user/assistant/...') }}\n {%- endif %}\n {%- if message['role'] == 'user' %}\n {%- if loop.first and system_message is defined %}\n {{- ' [INST] ' + system_message + '\n\n' + message['content'] + ' [/INST]' }}\n {%- else %}\n {{- ' [INST] ' + message['content'] + ' [/INST]' }}\n {%- endif %}\n {%- elif message['role'] == 'assistant' %}\n {{- ' ' + message['content'] + '</s>'}}\n {%- else %}\n {{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}\n {%- endif %}\n{%- endfor %}\n",
1784
+ "stop_token_ids": [
1785
+ 2
1786
+ ],
1787
+ "stop": [
1788
+ "</s>"
1789
+ ]
2063
1790
  },
2064
1791
  {
2065
1792
  "version": 1,
@@ -2095,22 +1822,13 @@
2095
1822
  "model_file_name_template": "mistral-7b-instruct-v0.2.{quantization}.gguf"
2096
1823
  }
2097
1824
  ],
2098
- "prompt_style": {
2099
- "style_name": "LLAMA2",
2100
- "system_prompt": "[INST] ",
2101
- "roles": [
2102
- "[INST]",
2103
- "[/INST]"
2104
- ],
2105
- "intra_message_sep": " ",
2106
- "inter_message_sep": "<s>",
2107
- "stop_token_ids": [
2108
- 2
2109
- ],
2110
- "stop": [
2111
- "</s>"
2112
- ]
2113
- }
1825
+ "chat_template": "{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content'] %}\n {%- set loop_messages = messages[1:] %}\n{%- else %}\n {%- set loop_messages = messages %}\n{%- endif %}\n\n{{- '<s>' }}\n{%- for message in loop_messages %}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}\n {{- raise_exception('After the optional system message, conversation roles must alternate user/assistant/user/assistant/...') }}\n {%- endif %}\n {%- if message['role'] == 'user' %}\n {%- if loop.first and system_message is defined %}\n {{- ' [INST] ' + system_message + '\n\n' + message['content'] + ' [/INST]' }}\n {%- else %}\n {{- ' [INST] ' + message['content'] + ' [/INST]' }}\n {%- endif %}\n {%- elif message['role'] == 'assistant' %}\n {{- ' ' + message['content'] + '</s>'}}\n {%- else %}\n {{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}\n {%- endif %}\n{%- endfor %}\n",
1826
+ "stop_token_ids": [
1827
+ 2
1828
+ ],
1829
+ "stop": [
1830
+ "</s>"
1831
+ ]
2114
1832
  },
2115
1833
  {
2116
1834
  "version": 1,
@@ -2151,22 +1869,13 @@
2151
1869
  "model_hub": "modelscope"
2152
1870
  }
2153
1871
  ],
2154
- "prompt_style": {
2155
- "style_name": "mistral-nemo",
2156
- "system_prompt": "",
2157
- "roles": [
2158
- "[INST]",
2159
- "[/INST]"
2160
- ],
2161
- "intra_message_sep": "",
2162
- "inter_message_sep": "</s>",
2163
- "stop_token_ids": [
2164
- 2
2165
- ],
2166
- "stop": [
2167
- "</s>"
2168
- ]
2169
- }
1872
+ "chat_template": "{%- if messages[0][\"role\"] == \"system\" %}\n {%- set system_message = messages[0][\"content\"] %}\n {%- set loop_messages = messages[1:] %}\n{%- else %}\n {%- set loop_messages = messages %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n{%- set user_messages = loop_messages | selectattr(\"role\", \"equalto\", \"user\") | list %}\n\n{#- This block checks for alternating user/assistant messages, skipping tool calling messages #}\n{%- set ns = namespace() %}\n{%- set ns.index = 0 %}\n{%- for message in loop_messages %}\n {%- if not (message.role == \"tool\" or message.role == \"tool_results\" or (message.tool_calls is defined and message.tool_calls is not none)) %}\n {%- if (message[\"role\"] == \"user\") != (ns.index % 2 == 0) %}\n {{- raise_exception(\"After the optional system message, conversation roles must alternate user/assistant/user/assistant/...\") }}\n {%- endif %}\n {%- set ns.index = ns.index + 1 %}\n {%- endif %}\n{%- endfor %}\n\n{{- '<s>' }}\n{%- for message in loop_messages %}\n {%- if message[\"role\"] == \"user\" %}\n {%- if tools is not none and (message == user_messages[-1]) %}\n {{- \"[AVAILABLE_TOOLS][\" }}\n {%- for tool in tools %}\n {%- set tool = tool.function %}\n {{- '{\"type\": \"function\", \"function\": {' }}\n {%- for key, val in tool.items() if key != \"return\" %}\n {%- if val is string %}\n {{- '\"' + key + '\": \"' + val + '\"' }}\n {%- else %}\n {{- '\"' + key + '\": ' + val|tojson }}\n {%- endif %}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- endif %}\n {%- endfor %}\n {{- \"}}\" }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- else %}\n {{- \"]\" }}\n {%- endif %}\n {%- endfor %}\n {{- \"[/AVAILABLE_TOOLS]\" }}\n {%- endif %}\n {%- if loop.last and system_message is defined %}\n {{- \"[INST]\" + system_message + \"\n\n\" + message[\"content\"] + \"[/INST]\" }}\n {%- else %}\n {{- \"[INST]\" + message[\"content\"] + \"[/INST]\" }}\n {%- endif %}\n {%- elif 
(message.tool_calls is defined and message.tool_calls is not none) %}\n {{- \"[TOOL_CALLS][\" }}\n {%- for tool_call in message.tool_calls %}\n {%- set out = tool_call.function|tojson %}\n {{- out[:-1] }}\n {%- if not tool_call.id is defined or tool_call.id|length != 9 %}\n {{- raise_exception(\"Tool call IDs should be alphanumeric strings with length 9!\") }}\n {%- endif %}\n {{- ', \"id\": \"' + tool_call.id + '\"}' }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- else %}\n {{- \"]\" + '</s>' }}\n {%- endif %}\n {%- endfor %}\n {%- elif message[\"role\"] == \"assistant\" %}\n {{- message[\"content\"] + '</s>'}}\n {%- elif message[\"role\"] == \"tool_results\" or message[\"role\"] == \"tool\" %}\n {%- if message.content is defined and message.content.content is defined %}\n {%- set content = message.content.content %}\n {%- else %}\n {%- set content = message.content %}\n {%- endif %}\n {{- '[TOOL_RESULTS]{\"content\": ' + content|string + \", \" }}\n {%- if not message.tool_call_id is defined or message.tool_call_id|length != 9 %}\n {{- raise_exception(\"Tool call IDs should be alphanumeric strings with length 9!\") }}\n {%- endif %}\n {{- '\"call_id\": \"' + message.tool_call_id + '\"}[/TOOL_RESULTS]' }}\n {%- else %}\n {{- raise_exception(\"Only user and assistant roles are supported, with the exception of an initial optional system message!\") }}\n {%- endif %}\n{%- endfor %}\n",
1873
+ "stop_token_ids": [
1874
+ 2
1875
+ ],
1876
+ "stop": [
1877
+ "</s>"
1878
+ ]
2170
1879
  },
2171
1880
  {
2172
1881
  "version": 1,
@@ -2208,106 +1917,13 @@
2208
1917
  "model_hub": "modelscope"
2209
1918
  }
2210
1919
  ],
2211
- "prompt_style": {
2212
- "style_name": "mistral-nemo",
2213
- "system_prompt": "",
2214
- "roles": [
2215
- "[INST]",
2216
- "[/INST]"
2217
- ],
2218
- "intra_message_sep": "",
2219
- "inter_message_sep": "</s>",
2220
- "stop_token_ids": [
2221
- 2
2222
- ],
2223
- "stop": [
2224
- "</s>"
2225
- ]
2226
- }
2227
- },
2228
- {
2229
- "version": 1,
2230
- "context_length": 8192,
2231
- "model_name": "zephyr-7b-alpha",
2232
- "model_lang": [
2233
- "en"
2234
- ],
2235
- "model_ability": [
2236
- "chat"
2237
- ],
2238
- "model_description": "Zephyr-7B-α is the first model in the series, and is a fine-tuned version of mistralai/Mistral-7B-v0.1.",
2239
- "model_specs": [
2240
- {
2241
- "model_format": "pytorch",
2242
- "model_size_in_billions": 7,
2243
- "quantizations": [
2244
- "4-bit",
2245
- "8-bit",
2246
- "none"
2247
- ],
2248
- "model_hub": "modelscope",
2249
- "model_id": "keepitsimple/zephyr-7b-alpha",
2250
- "model_revision": "v1.0-1"
2251
- }
2252
- ],
2253
- "prompt_style": {
2254
- "style_name": "NO_COLON_TWO",
2255
- "system_prompt": "<|system|>\nYou are a friendly chatbot.</s>\n",
2256
- "roles": [
2257
- "<|user|>\n",
2258
- "<|assistant|>\n"
2259
- ],
2260
- "intra_message_sep": "</s>\n",
2261
- "inter_message_sep": "</s>\n",
2262
- "stop_token_ids": [
2263
- 2
2264
- ],
2265
- "stop": [
2266
- "</s>"
2267
- ]
2268
- }
2269
- },
2270
- {
2271
- "version": 1,
2272
- "context_length": 8192,
2273
- "model_name": "zephyr-7b-beta",
2274
- "model_lang": [
2275
- "en"
2276
- ],
2277
- "model_ability": [
2278
- "chat"
2279
- ],
2280
- "model_description": "Zephyr-7B-β is the second model in the series, and is a fine-tuned version of mistralai/Mistral-7B-v0.1",
2281
- "model_specs": [
2282
- {
2283
- "model_format": "pytorch",
2284
- "model_size_in_billions": 7,
2285
- "quantizations": [
2286
- "4-bit",
2287
- "8-bit",
2288
- "none"
2289
- ],
2290
- "model_hub": "modelscope",
2291
- "model_id": "modelscope/zephyr-7b-beta",
2292
- "model_revision": "master"
2293
- }
1920
+ "chat_template": "{%- if messages[0][\"role\"] == \"system\" %}\n {%- set system_message = messages[0][\"content\"] %}\n {%- set loop_messages = messages[1:] %}\n{%- else %}\n {%- set loop_messages = messages %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n{%- set user_messages = loop_messages | selectattr(\"role\", \"equalto\", \"user\") | list %}\n\n{#- This block checks for alternating user/assistant messages, skipping tool calling messages #}\n{%- set ns = namespace() %}\n{%- set ns.index = 0 %}\n{%- for message in loop_messages %}\n {%- if not (message.role == \"tool\" or message.role == \"tool_results\" or (message.tool_calls is defined and message.tool_calls is not none)) %}\n {%- if (message[\"role\"] == \"user\") != (ns.index % 2 == 0) %}\n {{- raise_exception(\"After the optional system message, conversation roles must alternate user/assistant/user/assistant/...\") }}\n {%- endif %}\n {%- set ns.index = ns.index + 1 %}\n {%- endif %}\n{%- endfor %}\n\n{{- '<s>' }}\n{%- for message in loop_messages %}\n {%- if message[\"role\"] == \"user\" %}\n {%- if tools is not none and (message == user_messages[-1]) %}\n {{- \"[AVAILABLE_TOOLS][\" }}\n {%- for tool in tools %}\n {%- set tool = tool.function %}\n {{- '{\"type\": \"function\", \"function\": {' }}\n {%- for key, val in tool.items() if key != \"return\" %}\n {%- if val is string %}\n {{- '\"' + key + '\": \"' + val + '\"' }}\n {%- else %}\n {{- '\"' + key + '\": ' + val|tojson }}\n {%- endif %}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- endif %}\n {%- endfor %}\n {{- \"}}\" }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- else %}\n {{- \"]\" }}\n {%- endif %}\n {%- endfor %}\n {{- \"[/AVAILABLE_TOOLS]\" }}\n {%- endif %}\n {%- if loop.last and system_message is defined %}\n {{- \"[INST]\" + system_message + \"\n\n\" + message[\"content\"] + \"[/INST]\" }}\n {%- else %}\n {{- \"[INST]\" + message[\"content\"] + \"[/INST]\" }}\n {%- endif %}\n {%- elif 
(message.tool_calls is defined and message.tool_calls is not none) %}\n {{- \"[TOOL_CALLS][\" }}\n {%- for tool_call in message.tool_calls %}\n {%- set out = tool_call.function|tojson %}\n {{- out[:-1] }}\n {%- if not tool_call.id is defined or tool_call.id|length != 9 %}\n {{- raise_exception(\"Tool call IDs should be alphanumeric strings with length 9!\") }}\n {%- endif %}\n {{- ', \"id\": \"' + tool_call.id + '\"}' }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- else %}\n {{- \"]\" + '</s>' }}\n {%- endif %}\n {%- endfor %}\n {%- elif message[\"role\"] == \"assistant\" %}\n {{- message[\"content\"] + '</s>'}}\n {%- elif message[\"role\"] == \"tool_results\" or message[\"role\"] == \"tool\" %}\n {%- if message.content is defined and message.content.content is defined %}\n {%- set content = message.content.content %}\n {%- else %}\n {%- set content = message.content %}\n {%- endif %}\n {{- '[TOOL_RESULTS]{\"content\": ' + content|string + \", \" }}\n {%- if not message.tool_call_id is defined or message.tool_call_id|length != 9 %}\n {{- raise_exception(\"Tool call IDs should be alphanumeric strings with length 9!\") }}\n {%- endif %}\n {{- '\"call_id\": \"' + message.tool_call_id + '\"}[/TOOL_RESULTS]' }}\n {%- else %}\n {{- raise_exception(\"Only user and assistant roles are supported, with the exception of an initial optional system message!\") }}\n {%- endif %}\n{%- endfor %}\n",
1921
+ "stop_token_ids": [
1922
+ 2
2294
1923
  ],
2295
- "prompt_style": {
2296
- "style_name": "NO_COLON_TWO",
2297
- "system_prompt": "<|system|>\nYou are a friendly chatbot.</s>\n",
2298
- "roles": [
2299
- "<|user|>\n",
2300
- "<|assistant|>\n"
2301
- ],
2302
- "intra_message_sep": "</s>\n",
2303
- "inter_message_sep": "</s>\n",
2304
- "stop_token_ids": [
2305
- 2
2306
- ],
2307
- "stop": [
2308
- "</s>"
2309
- ]
2310
- }
1924
+ "stop": [
1925
+ "</s>"
1926
+ ]
2311
1927
  },
2312
1928
  {
2313
1929
  "version": 1,
@@ -2318,8 +1934,7 @@
2318
1934
  "zh"
2319
1935
  ],
2320
1936
  "model_ability": [
2321
- "chat",
2322
- "tools"
1937
+ "chat"
2323
1938
  ],
2324
1939
  "model_description": "Qwen-chat is a fine-tuned version of the Qwen LLM trained with alignment techniques, specializing in chatting.",
2325
1940
  "model_specs": [
@@ -2438,25 +2053,17 @@
2438
2053
  "model_revision": "master"
2439
2054
  }
2440
2055
  ],
2441
- "prompt_style": {
2442
- "style_name": "QWEN",
2443
- "system_prompt": "You are a helpful assistant.",
2444
- "roles": [
2445
- "user",
2446
- "assistant"
2447
- ],
2448
- "intra_message_sep": "\n",
2449
- "stop_token_ids": [
2450
- 151643,
2451
- 151644,
2452
- 151645
2453
- ],
2454
- "stop": [
2455
- "<|endoftext|>",
2456
- "<|im_start|>",
2457
- "<|im_end|>"
2458
- ]
2459
- }
2056
+ "chat_template": "{% for item in messages %}{% if loop.first and item['role'] == 'system' %}{{ '<|im_start|>system\n' + item['content'] + '<|im_end|>\n' }}{% elif loop.first %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{% if item['role'] == 'user' %}{{ '<|im_start|>user\n' + item['content'] + '<|im_end|>' }}{% elif item['role'] == 'assistant' %}{{ '<|im_start|>assistant\n' + item['content'] + '<|im_end|>' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
2057
+ "stop_token_ids": [
2058
+ 151643,
2059
+ 151644,
2060
+ 151645
2061
+ ],
2062
+ "stop": [
2063
+ "<|endoftext|>",
2064
+ "<|im_start|>",
2065
+ "<|im_end|>"
2066
+ ]
2460
2067
  },
2461
2068
  {
2462
2069
  "version": 1,
@@ -2832,25 +2439,17 @@
2832
2439
  }
2833
2440
  }
2834
2441
  ],
2835
- "prompt_style": {
2836
- "style_name": "QWEN",
2837
- "system_prompt": "You are a helpful assistant.",
2838
- "roles": [
2839
- "user",
2840
- "assistant"
2841
- ],
2842
- "intra_message_sep": "\n",
2843
- "stop_token_ids": [
2844
- 151643,
2845
- 151644,
2846
- 151645
2847
- ],
2848
- "stop": [
2849
- "<|endoftext|>",
2850
- "<|im_start|>",
2851
- "<|im_end|>"
2852
- ]
2853
- }
2442
+ "chat_template": "{%- macro json_to_python_type(json_spec) %}\n {%- set basic_type_map = {\n \"string\": \"str\",\n \"number\": \"float\",\n \"integer\": \"int\",\n \"boolean\": \"bool\"\n} %}\n {%- if basic_type_map[json_spec.type] is defined %}\n {{- basic_type_map[json_spec.type] }}\n {%- elif json_spec.type == \"array\" %}\n {{- \"list[\" + json_to_python_type(json_spec|items) + \"]\" }}\n {%- elif json_spec.type == \"object\" %}\n {%- if json_spec.additionalProperties is defined %}\n {{- \"dict[str, \" + json_to_python_type(json_spec.additionalProperties) + ']' }}\n {%- else %}\n {{- \"dict\" }}\n {%- endif %}\n {%- elif json_spec.type is iterable %}\n {{- \"Union[\" }}\n {%- for t in json_spec.type %}\n {{- json_to_python_type({\"type\": t}) }}\n {%- if not loop.last %}\n {{- \",\" }}\n {%- endif %}\n {%- endfor %}\n {{- \"]\" }}\n {%- else %}\n {{- \"Any\" }}\n {%- endif %}\n{%- endmacro %}\n\n{%- if tools %}\n {{- '<|im_start|>system\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] + '\n\n' }}\n {%- endif %}\n {{- '# Tools\n\n' }}\n {{- \"You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. 
Here are the available tools: <tools> \" }}\n {%- for tool in tools %}\n {%- if tool.function is defined %}\n {%- set tool = tool.function %}\n {%- endif %}\n {{- '{\"type\": \"function\", \"function\": ' }}\n {{- '{\"name\": ' + tool.name + '\", ' }}\n {{- '\"description\": \"' + tool.name + '(' }}\n {%- for param_name, param_fields in tool.parameters.properties|items %}\n {{- param_name + \": \" + json_to_python_type(param_fields) }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- endif %}\n {%- endfor %}\n {{- \")\" }}\n {%- if tool.return is defined %}\n {{- \" -> \" + json_to_python_type(tool.return) }}\n {%- endif %}\n {{- \" - \" + tool.description + \"\n\n\" }}\n {%- for param_name, param_fields in tool.parameters.properties|items %}\n {%- if loop.first %}\n {{- \" Args:\n\" }}\n {%- endif %}\n {{- \" \" + param_name + \"(\" + json_to_python_type(param_fields) + \"): \" + param_fields.description|trim }}\n {%- endfor %}\n {%- if tool.return is defined and tool.return.description is defined %}\n {{- \"\n Returns:\n \" + tool.return.description }}\n {%- endif %}\n {{- '\"' }}\n {{- ', \"parameters\": ' }}\n {%- if tool.parameters.properties | length == 0 %}\n {{- \"{}\" }}\n {%- else %}\n {{- tool.parameters|tojson }}\n {%- endif %}\n {{- \"}\" }}\n {%- if not loop.last %}\n {{- \"\n\" }}\n {%- endif %}\n {%- endfor %}\n {{- \" </tools>\" }}\n {{- 'Use the following pydantic model json schema for each tool call you will make: {\"properties\": {\"arguments\": {\"title\": \"Arguments\", \"type\": \"object\"}, \"name\": {\"title\": \"Name\", \"type\": \"string\"}}, \"required\": [\"arguments\", \"name\"], \"title\": \"FunctionCall\", \"type\": \"object\"}\n' }}\n {{- \"For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:\n\" }}\n {{- \"<tool_call>\n\" }}\n {{- '{\"name\": <function-name>, \"arguments\": <args-json-object>}\n' }}\n {{- '</tool_call><|im_end|>\n' }}\n{%- else %}\n {%- if 
messages[0]['role'] != 'system' %}\n {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}\n {%- else %}\n {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if message.role == \"user\" or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and message.tool_calls is not defined) %}\n {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role + '\n<tool_call>\n' }}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '{' }}\n {{- '\"name\": \"' }}\n {{- tool_call.name }}\n {%- if tool_call.arguments is defined %}\n {{- ', ' }}\n {{- '\"arguments\": ' }}\n {{- tool_call.arguments|tojson }}\n {%- endif %}\n {{- '\"}' }}\n {{- '\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if not message.name is defined %}\n {{- raise_exception(\"Tool response dicts require a 'name' key indicating the name of the called function!\") }}\n {%- endif %}\n {{- '<|im_start|>user\n<tool_response>\n' }}\n {{- '{\"name\": \"' }}\n {{- message.name }}\n {{- '\", \"content\": ' }}\n {{- message.content|tojson + '}' }}\n {{- '\n</tool_response><|im_end|>\n' }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\n' }}\n{%- endif %}",
2443
+ "stop_token_ids": [
2444
+ 151643,
2445
+ 151644,
2446
+ 151645
2447
+ ],
2448
+ "stop": [
2449
+ "<|endoftext|>",
2450
+ "<|im_start|>",
2451
+ "<|im_end|>"
2452
+ ]
2854
2453
  },
2855
2454
  {
2856
2455
  "version": 1,
@@ -2884,28 +2483,20 @@
2884
2483
  "Int4"
2885
2484
  ],
2886
2485
  "model_id": "qwen/Qwen1.5-MoE-A2.7B-Chat-GPTQ-Int4",
2887
- "model_hub": "modelscope"
2888
- }
2889
- ],
2890
- "prompt_style": {
2891
- "style_name": "QWEN",
2892
- "system_prompt": "You are a helpful assistant.",
2893
- "roles": [
2894
- "user",
2895
- "assistant"
2896
- ],
2897
- "intra_message_sep": "\n",
2898
- "stop_token_ids": [
2899
- 151643,
2900
- 151644,
2901
- 151645
2902
- ],
2903
- "stop": [
2904
- "<|endoftext|>",
2905
- "<|im_start|>",
2906
- "<|im_end|>"
2907
- ]
2908
- }
2486
+ "model_hub": "modelscope"
2487
+ }
2488
+ ],
2489
+ "chat_template": "{%- macro json_to_python_type(json_spec) %}\n {%- set basic_type_map = {\n \"string\": \"str\",\n \"number\": \"float\",\n \"integer\": \"int\",\n \"boolean\": \"bool\"\n} %}\n {%- if basic_type_map[json_spec.type] is defined %}\n {{- basic_type_map[json_spec.type] }}\n {%- elif json_spec.type == \"array\" %}\n {{- \"list[\" + json_to_python_type(json_spec|items) + \"]\" }}\n {%- elif json_spec.type == \"object\" %}\n {%- if json_spec.additionalProperties is defined %}\n {{- \"dict[str, \" + json_to_python_type(json_spec.additionalProperties) + ']' }}\n {%- else %}\n {{- \"dict\" }}\n {%- endif %}\n {%- elif json_spec.type is iterable %}\n {{- \"Union[\" }}\n {%- for t in json_spec.type %}\n {{- json_to_python_type({\"type\": t}) }}\n {%- if not loop.last %}\n {{- \",\" }}\n {%- endif %}\n {%- endfor %}\n {{- \"]\" }}\n {%- else %}\n {{- \"Any\" }}\n {%- endif %}\n{%- endmacro %}\n\n{%- if tools %}\n {{- '<|im_start|>system\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] + '\n\n' }}\n {%- endif %}\n {{- '# Tools\n\n' }}\n {{- \"You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. 
Here are the available tools: <tools> \" }}\n {%- for tool in tools %}\n {%- if tool.function is defined %}\n {%- set tool = tool.function %}\n {%- endif %}\n {{- '{\"type\": \"function\", \"function\": ' }}\n {{- '{\"name\": ' + tool.name + '\", ' }}\n {{- '\"description\": \"' + tool.name + '(' }}\n {%- for param_name, param_fields in tool.parameters.properties|items %}\n {{- param_name + \": \" + json_to_python_type(param_fields) }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- endif %}\n {%- endfor %}\n {{- \")\" }}\n {%- if tool.return is defined %}\n {{- \" -> \" + json_to_python_type(tool.return) }}\n {%- endif %}\n {{- \" - \" + tool.description + \"\n\n\" }}\n {%- for param_name, param_fields in tool.parameters.properties|items %}\n {%- if loop.first %}\n {{- \" Args:\n\" }}\n {%- endif %}\n {{- \" \" + param_name + \"(\" + json_to_python_type(param_fields) + \"): \" + param_fields.description|trim }}\n {%- endfor %}\n {%- if tool.return is defined and tool.return.description is defined %}\n {{- \"\n Returns:\n \" + tool.return.description }}\n {%- endif %}\n {{- '\"' }}\n {{- ', \"parameters\": ' }}\n {%- if tool.parameters.properties | length == 0 %}\n {{- \"{}\" }}\n {%- else %}\n {{- tool.parameters|tojson }}\n {%- endif %}\n {{- \"}\" }}\n {%- if not loop.last %}\n {{- \"\n\" }}\n {%- endif %}\n {%- endfor %}\n {{- \" </tools>\" }}\n {{- 'Use the following pydantic model json schema for each tool call you will make: {\"properties\": {\"arguments\": {\"title\": \"Arguments\", \"type\": \"object\"}, \"name\": {\"title\": \"Name\", \"type\": \"string\"}}, \"required\": [\"arguments\", \"name\"], \"title\": \"FunctionCall\", \"type\": \"object\"}\n' }}\n {{- \"For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:\n\" }}\n {{- \"<tool_call>\n\" }}\n {{- '{\"name\": <function-name>, \"arguments\": <args-json-object>}\n' }}\n {{- '</tool_call><|im_end|>\n' }}\n{%- else %}\n {%- if 
messages[0]['role'] != 'system' %}\n {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}\n {%- else %}\n {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if message.role == \"user\" or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and message.tool_calls is not defined) %}\n {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role + '\n<tool_call>\n' }}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '{' }}\n {{- '\"name\": \"' }}\n {{- tool_call.name }}\n {%- if tool_call.arguments is defined %}\n {{- ', ' }}\n {{- '\"arguments\": ' }}\n {{- tool_call.arguments|tojson }}\n {%- endif %}\n {{- '\"}' }}\n {{- '\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if not message.name is defined %}\n {{- raise_exception(\"Tool response dicts require a 'name' key indicating the name of the called function!\") }}\n {%- endif %}\n {{- '<|im_start|>user\n<tool_response>\n' }}\n {{- '{\"name\": \"' }}\n {{- message.name }}\n {{- '\", \"content\": ' }}\n {{- message.content|tojson + '}' }}\n {{- '\n</tool_response><|im_end|>\n' }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\n' }}\n{%- endif %}",
2490
+ "stop_token_ids": [
2491
+ 151643,
2492
+ 151644,
2493
+ 151645
2494
+ ],
2495
+ "stop": [
2496
+ "<|endoftext|>",
2497
+ "<|im_start|>",
2498
+ "<|im_end|>"
2499
+ ]
2909
2500
  },
2910
2501
  {
2911
2502
  "version": 1,
@@ -2984,25 +2575,17 @@
2984
2575
  "model_hub": "modelscope"
2985
2576
  }
2986
2577
  ],
2987
- "prompt_style": {
2988
- "style_name": "QWEN",
2989
- "system_prompt": "You are a helpful assistant.",
2990
- "roles": [
2991
- "user",
2992
- "assistant"
2993
- ],
2994
- "intra_message_sep": "\n",
2995
- "stop_token_ids": [
2996
- 151643,
2997
- 151644,
2998
- 151645
2999
- ],
3000
- "stop": [
3001
- "<|endoftext|>",
3002
- "<|im_start|>",
3003
- "<|im_end|>"
3004
- ]
3005
- }
2578
+ "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
2579
+ "stop_token_ids": [
2580
+ 151643,
2581
+ 151644,
2582
+ 151645
2583
+ ],
2584
+ "stop": [
2585
+ "<|endoftext|>",
2586
+ "<|im_start|>",
2587
+ "<|im_end|>"
2588
+ ]
3006
2589
  },
3007
2590
  {
3008
2591
  "version": 1,
@@ -3281,25 +2864,17 @@
3281
2864
  }
3282
2865
  }
3283
2866
  ],
3284
- "prompt_style": {
3285
- "style_name": "QWEN",
3286
- "system_prompt": "You are a helpful assistant.",
3287
- "roles": [
3288
- "user",
3289
- "assistant"
3290
- ],
3291
- "intra_message_sep": "\n",
3292
- "stop_token_ids": [
3293
- 151643,
3294
- 151644,
3295
- 151645
3296
- ],
3297
- "stop": [
3298
- "<|endoftext|>",
3299
- "<|im_start|>",
3300
- "<|im_end|>"
3301
- ]
3302
- }
2867
+ "chat_template": "{%- macro json_to_python_type(json_spec) %}\n {%- set basic_type_map = {\n \"string\": \"str\",\n \"number\": \"float\",\n \"integer\": \"int\",\n \"boolean\": \"bool\"\n} %}\n {%- if basic_type_map[json_spec.type] is defined %}\n {{- basic_type_map[json_spec.type] }}\n {%- elif json_spec.type == \"array\" %}\n {{- \"list[\" + json_to_python_type(json_spec|items) + \"]\" }}\n {%- elif json_spec.type == \"object\" %}\n {%- if json_spec.additionalProperties is defined %}\n {{- \"dict[str, \" + json_to_python_type(json_spec.additionalProperties) + ']' }}\n {%- else %}\n {{- \"dict\" }}\n {%- endif %}\n {%- elif json_spec.type is iterable %}\n {{- \"Union[\" }}\n {%- for t in json_spec.type %}\n {{- json_to_python_type({\"type\": t}) }}\n {%- if not loop.last %}\n {{- \",\" }}\n {%- endif %}\n {%- endfor %}\n {{- \"]\" }}\n {%- else %}\n {{- \"Any\" }}\n {%- endif %}\n{%- endmacro %}\n\n{%- if tools %}\n {{- '<|im_start|>system\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] + '\n\n' }}\n {%- endif %}\n {{- '# Tools\n\n' }}\n {{- \"You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. 
Here are the available tools: <tools> \" }}\n {%- for tool in tools %}\n {%- if tool.function is defined %}\n {%- set tool = tool.function %}\n {%- endif %}\n {{- '{\"type\": \"function\", \"function\": ' }}\n {{- '{\"name\": ' + tool.name + '\", ' }}\n {{- '\"description\": \"' + tool.name + '(' }}\n {%- for param_name, param_fields in tool.parameters.properties|items %}\n {{- param_name + \": \" + json_to_python_type(param_fields) }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- endif %}\n {%- endfor %}\n {{- \")\" }}\n {%- if tool.return is defined %}\n {{- \" -> \" + json_to_python_type(tool.return) }}\n {%- endif %}\n {{- \" - \" + tool.description + \"\n\n\" }}\n {%- for param_name, param_fields in tool.parameters.properties|items %}\n {%- if loop.first %}\n {{- \" Args:\n\" }}\n {%- endif %}\n {{- \" \" + param_name + \"(\" + json_to_python_type(param_fields) + \"): \" + param_fields.description|trim }}\n {%- endfor %}\n {%- if tool.return is defined and tool.return.description is defined %}\n {{- \"\n Returns:\n \" + tool.return.description }}\n {%- endif %}\n {{- '\"' }}\n {{- ', \"parameters\": ' }}\n {%- if tool.parameters.properties | length == 0 %}\n {{- \"{}\" }}\n {%- else %}\n {{- tool.parameters|tojson }}\n {%- endif %}\n {{- \"}\" }}\n {%- if not loop.last %}\n {{- \"\n\" }}\n {%- endif %}\n {%- endfor %}\n {{- \" </tools>\" }}\n {{- 'Use the following pydantic model json schema for each tool call you will make: {\"properties\": {\"arguments\": {\"title\": \"Arguments\", \"type\": \"object\"}, \"name\": {\"title\": \"Name\", \"type\": \"string\"}}, \"required\": [\"arguments\", \"name\"], \"title\": \"FunctionCall\", \"type\": \"object\"}\n' }}\n {{- \"For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:\n\" }}\n {{- \"<tool_call>\n\" }}\n {{- '{\"name\": <function-name>, \"arguments\": <args-json-object>}\n' }}\n {{- '</tool_call><|im_end|>\n' }}\n{%- else %}\n {%- if 
messages[0]['role'] != 'system' %}\n {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}\n {%- else %}\n {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if message.role == \"user\" or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and message.tool_calls is not defined) %}\n {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role + '\n<tool_call>\n' }}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '{' }}\n {{- '\"name\": \"' }}\n {{- tool_call.name }}\n {%- if tool_call.arguments is defined %}\n {{- ', ' }}\n {{- '\"arguments\": ' }}\n {{- tool_call.arguments|tojson }}\n {%- endif %}\n {{- '\"}' }}\n {{- '\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if not message.name is defined %}\n {{- raise_exception(\"Tool response dicts require a 'name' key indicating the name of the called function!\") }}\n {%- endif %}\n {{- '<|im_start|>user\n<tool_response>\n' }}\n {{- '{\"name\": \"' }}\n {{- message.name }}\n {{- '\", \"content\": ' }}\n {{- message.content|tojson + '}' }}\n {{- '\n</tool_response><|im_end|>\n' }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\n' }}\n{%- endif %}",
2868
+ "stop_token_ids": [
2869
+ 151643,
2870
+ 151644,
2871
+ 151645
2872
+ ],
2873
+ "stop": [
2874
+ "<|endoftext|>",
2875
+ "<|im_start|>",
2876
+ "<|im_end|>"
2877
+ ]
3303
2878
  },
3304
2879
  {
3305
2880
  "version": 1,
@@ -3365,25 +2940,17 @@
3365
2940
  }
3366
2941
  }
3367
2942
  ],
3368
- "prompt_style": {
3369
- "style_name": "QWEN",
3370
- "system_prompt": "You are a helpful assistant.",
3371
- "roles": [
3372
- "user",
3373
- "assistant"
3374
- ],
3375
- "intra_message_sep": "\n",
3376
- "stop_token_ids": [
3377
- 151643,
3378
- 151644,
3379
- 151645
3380
- ],
3381
- "stop": [
3382
- "<|endoftext|>",
3383
- "<|im_start|>",
3384
- "<|im_end|>"
3385
- ]
3386
- }
2943
+ "chat_template": "{%- macro json_to_python_type(json_spec) %}\n {%- set basic_type_map = {\n \"string\": \"str\",\n \"number\": \"float\",\n \"integer\": \"int\",\n \"boolean\": \"bool\"\n} %}\n {%- if basic_type_map[json_spec.type] is defined %}\n {{- basic_type_map[json_spec.type] }}\n {%- elif json_spec.type == \"array\" %}\n {{- \"list[\" + json_to_python_type(json_spec|items) + \"]\" }}\n {%- elif json_spec.type == \"object\" %}\n {%- if json_spec.additionalProperties is defined %}\n {{- \"dict[str, \" + json_to_python_type(json_spec.additionalProperties) + ']' }}\n {%- else %}\n {{- \"dict\" }}\n {%- endif %}\n {%- elif json_spec.type is iterable %}\n {{- \"Union[\" }}\n {%- for t in json_spec.type %}\n {{- json_to_python_type({\"type\": t}) }}\n {%- if not loop.last %}\n {{- \",\" }}\n {%- endif %}\n {%- endfor %}\n {{- \"]\" }}\n {%- else %}\n {{- \"Any\" }}\n {%- endif %}\n{%- endmacro %}\n\n{%- if tools %}\n {{- '<|im_start|>system\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] + '\n\n' }}\n {%- endif %}\n {{- '# Tools\n\n' }}\n {{- \"You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. 
Here are the available tools: <tools> \" }}\n {%- for tool in tools %}\n {%- if tool.function is defined %}\n {%- set tool = tool.function %}\n {%- endif %}\n {{- '{\"type\": \"function\", \"function\": ' }}\n {{- '{\"name\": ' + tool.name + '\", ' }}\n {{- '\"description\": \"' + tool.name + '(' }}\n {%- for param_name, param_fields in tool.parameters.properties|items %}\n {{- param_name + \": \" + json_to_python_type(param_fields) }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- endif %}\n {%- endfor %}\n {{- \")\" }}\n {%- if tool.return is defined %}\n {{- \" -> \" + json_to_python_type(tool.return) }}\n {%- endif %}\n {{- \" - \" + tool.description + \"\n\n\" }}\n {%- for param_name, param_fields in tool.parameters.properties|items %}\n {%- if loop.first %}\n {{- \" Args:\n\" }}\n {%- endif %}\n {{- \" \" + param_name + \"(\" + json_to_python_type(param_fields) + \"): \" + param_fields.description|trim }}\n {%- endfor %}\n {%- if tool.return is defined and tool.return.description is defined %}\n {{- \"\n Returns:\n \" + tool.return.description }}\n {%- endif %}\n {{- '\"' }}\n {{- ', \"parameters\": ' }}\n {%- if tool.parameters.properties | length == 0 %}\n {{- \"{}\" }}\n {%- else %}\n {{- tool.parameters|tojson }}\n {%- endif %}\n {{- \"}\" }}\n {%- if not loop.last %}\n {{- \"\n\" }}\n {%- endif %}\n {%- endfor %}\n {{- \" </tools>\" }}\n {{- 'Use the following pydantic model json schema for each tool call you will make: {\"properties\": {\"arguments\": {\"title\": \"Arguments\", \"type\": \"object\"}, \"name\": {\"title\": \"Name\", \"type\": \"string\"}}, \"required\": [\"arguments\", \"name\"], \"title\": \"FunctionCall\", \"type\": \"object\"}\n' }}\n {{- \"For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:\n\" }}\n {{- \"<tool_call>\n\" }}\n {{- '{\"name\": <function-name>, \"arguments\": <args-json-object>}\n' }}\n {{- '</tool_call><|im_end|>\n' }}\n{%- else %}\n {%- if 
messages[0]['role'] != 'system' %}\n {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}\n {%- else %}\n {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if message.role == \"user\" or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and message.tool_calls is not defined) %}\n {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role + '\n<tool_call>\n' }}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '{' }}\n {{- '\"name\": \"' }}\n {{- tool_call.name }}\n {%- if tool_call.arguments is defined %}\n {{- ', ' }}\n {{- '\"arguments\": ' }}\n {{- tool_call.arguments|tojson }}\n {%- endif %}\n {{- '\"}' }}\n {{- '\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if not message.name is defined %}\n {{- raise_exception(\"Tool response dicts require a 'name' key indicating the name of the called function!\") }}\n {%- endif %}\n {{- '<|im_start|>user\n<tool_response>\n' }}\n {{- '{\"name\": \"' }}\n {{- message.name }}\n {{- '\", \"content\": ' }}\n {{- message.content|tojson + '}' }}\n {{- '\n</tool_response><|im_end|>\n' }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\n' }}\n{%- endif %}",
2944
+ "stop_token_ids": [
2945
+ 151643,
2946
+ 151644,
2947
+ 151645
2948
+ ],
2949
+ "stop": [
2950
+ "<|endoftext|>",
2951
+ "<|im_start|>",
2952
+ "<|im_end|>"
2953
+ ]
3387
2954
  },
3388
2955
  {
3389
2956
  "version": 1,
@@ -3418,19 +2985,13 @@
3418
2985
  "model_hub": "modelscope"
3419
2986
  }
3420
2987
  ],
3421
- "prompt_style": {
3422
- "style_name": "DEEPSEEK_CHAT",
3423
- "system_prompt": "<|begin▁of▁sentence|>",
3424
- "roles": [
3425
- "User",
3426
- "Assistant"
3427
- ],
3428
- "intra_message_sep": "\n\n",
3429
- "inter_message_sep": "<|end▁of▁sentence|>",
3430
- "stop": [
3431
- "<|end▁of▁sentence|>"
3432
- ]
3433
- }
2988
+ "chat_template": "",
2989
+ "stop_token_ids": [
2990
+ 100001
2991
+ ],
2992
+ "stop": [
2993
+ "<|end▁of▁sentence|>"
2994
+ ]
3434
2995
  },
3435
2996
  {
3436
2997
  "version": 1,
@@ -3505,19 +3066,13 @@
3505
3066
  "model_hub": "modelscope"
3506
3067
  }
3507
3068
  ],
3508
- "prompt_style": {
3509
- "style_name": "DEEPSEEK_CHAT",
3510
- "system_prompt": "<|begin▁of▁sentence|>",
3511
- "roles": [
3512
- "User",
3513
- "Assistant"
3514
- ],
3515
- "intra_message_sep": "\n\n",
3516
- "inter_message_sep": "<|end▁of▁sentence|>",
3517
- "stop": [
3518
- "<|end▁of▁sentence|>"
3519
- ]
3520
- }
3069
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ '<|begin▁of▁sentence|>' }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + '<|end▁of▁sentence|>' }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}",
3070
+ "stop_token_ids": [
3071
+ 100001
3072
+ ],
3073
+ "stop": [
3074
+ "<|end▁of▁sentence|>"
3075
+ ]
3521
3076
  },
3522
3077
  {
3523
3078
  "version": 1,
@@ -3614,18 +3169,13 @@
3614
3169
  "model_hub": "modelscope"
3615
3170
  }
3616
3171
  ],
3617
- "prompt_style": {
3618
- "style_name": "DEEPSEEK_CODER",
3619
- "system_prompt": "You are an AI programming assistant, utilizing the DeepSeek Coder model, developed by DeepSeek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer.",
3620
- "roles": [
3621
- "### Instruction:",
3622
- "### Response:"
3623
- ],
3624
- "inter_message_sep": "\n",
3625
- "stop": [
3626
- "<|EOT|>"
3627
- ]
3628
- }
3172
+ "chat_template": "{% if not add_generation_prompt is defined %}\n{% set add_generation_prompt = false %}\n{% endif %}\n{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set ns.found = true -%}\n {%- endif -%}\n{%- endfor -%}\n{{'<|begin▁of▁sentence|>'}}{%- if not ns.found -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\n' + message['content'] + '\n'}}\n {%- else %}\n{{'### Response:\n' + message['content'] + '\n<|EOT|>\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{% if add_generation_prompt %}\n{{'### Response:'}}\n{% endif %}",
3173
+ "stop_token_ids": [
3174
+ 32021
3175
+ ],
3176
+ "stop": [
3177
+ "<|EOT|>"
3178
+ ]
3629
3179
  },
3630
3180
  {
3631
3181
  "version": 1,
@@ -3713,23 +3263,15 @@
3713
3263
  "model_revision": "master"
3714
3264
  }
3715
3265
  ],
3716
- "prompt_style": {
3717
- "style_name": "INTERNLM2",
3718
- "system_prompt": "You are InternLM (书生·浦语), a helpful, honest, and harmless AI assistant developed by Shanghai AI Laboratory (上海人工智能实验室).",
3719
- "roles": [
3720
- "<|im_start|>user",
3721
- "<|im_start|>assistant"
3722
- ],
3723
- "intra_message_sep": "<|im_end|>",
3724
- "stop_token_ids": [
3725
- 2,
3726
- 92542
3727
- ],
3728
- "stop": [
3729
- "</s>",
3730
- "<|im_end|>"
3731
- ]
3732
- }
3266
+ "chat_template": "{{ '<s>' }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
3267
+ "stop_token_ids": [
3268
+ 2,
3269
+ 92542
3270
+ ],
3271
+ "stop": [
3272
+ "</s>",
3273
+ "<|im_end|>"
3274
+ ]
3733
3275
  },
3734
3276
  {
3735
3277
  "version": 1,
@@ -3766,24 +3308,17 @@
3766
3308
  "model_revision": "master"
3767
3309
  }
3768
3310
  ],
3769
- "prompt_style": {
3770
- "style_name": "QWEN",
3771
- "system_prompt": "You are a helpful assistant.",
3772
- "roles": [
3773
- "user",
3774
- "assistant"
3775
- ],
3776
- "stop_token_ids": [
3777
- 151643,
3778
- 151644,
3779
- 151645
3780
- ],
3781
- "stop": [
3782
- "<|endoftext|>",
3783
- "<|im_start|>",
3784
- "<|im_end|>"
3785
- ]
3786
- }
3311
+ "chat_template": "",
3312
+ "stop_token_ids": [
3313
+ 151643,
3314
+ 151644,
3315
+ 151645
3316
+ ],
3317
+ "stop": [
3318
+ "<|endoftext|>",
3319
+ "<|im_start|>",
3320
+ "<|im_end|>"
3321
+ ]
3787
3322
  },
3788
3323
  {
3789
3324
  "version": 1,
@@ -3819,18 +3354,17 @@
3819
3354
  "model_id": "OrionStarAI/Orion-14B-Chat-{quantization}"
3820
3355
  }
3821
3356
  ],
3822
- "prompt_style": {
3823
- "style_name": "orion",
3824
- "roles": [
3825
- "Human",
3826
- "assistant"
3827
- ],
3828
- "stop": [
3829
- "<s>",
3830
- "</s>",
3831
- "<unk>"
3832
- ]
3833
- }
3357
+ "chat_template": "{% for message in messages %}{% if loop.first %}{{ '<s>' }}{% endif %}{% if message['role'] == 'user' %}{{ 'Human: ' + message['content'] + '\n\nAssistant: ' + '</s>' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + '</s>' }}{% endif %}{% endfor %}",
3358
+ "stop_token_ids": [
3359
+ 1,
3360
+ 2,
3361
+ 0
3362
+ ],
3363
+ "stop": [
3364
+ "<s>",
3365
+ "</s>",
3366
+ "<unk>"
3367
+ ]
3834
3368
  },
3835
3369
  {
3836
3370
  "version": 1,
@@ -3857,18 +3391,17 @@
3857
3391
  "model_id": "OrionStarAI/Orion-14B-Chat-RAG"
3858
3392
  }
3859
3393
  ],
3860
- "prompt_style": {
3861
- "style_name": "orion",
3862
- "roles": [
3863
- "Human",
3864
- "assistant"
3865
- ],
3866
- "stop": [
3867
- "<s>",
3868
- "</s>",
3869
- "<unk>"
3870
- ]
3871
- }
3394
+ "chat_template": "{% for message in messages %}{% if loop.first %}{{ '<s>' }}{% endif %}{% if message['role'] == 'user' %}{{ 'Human: ' + message['content'] + '\n\nAssistant: ' + '</s>' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + '</s>' }}{% endif %}{% endfor %}",
3395
+ "stop_token_ids": [
3396
+ 1,
3397
+ 2,
3398
+ 0
3399
+ ],
3400
+ "stop": [
3401
+ "<s>",
3402
+ "</s>",
3403
+ "<unk>"
3404
+ ]
3872
3405
  },
3873
3406
  {
3874
3407
  "version": 1,
@@ -3903,28 +3436,19 @@
3903
3436
  "model_id": "01ai/Yi-VL-34B"
3904
3437
  }
3905
3438
  ],
3906
- "prompt_style": {
3907
- "style_name": "CHATML",
3908
- "system_prompt": "",
3909
- "roles": [
3910
- "<|im_start|>user",
3911
- "<|im_start|>assistant"
3912
- ],
3913
- "intra_message_sep": "<|im_end|>",
3914
- "inter_message_sep": "",
3915
- "stop_token_ids": [
3916
- 2,
3917
- 6,
3918
- 7,
3919
- 8
3920
- ],
3921
- "stop": [
3922
- "<|endoftext|>",
3923
- "<|im_start|>",
3924
- "<|im_end|>",
3925
- "<|im_sep|>"
3926
- ]
3927
- }
3439
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
3440
+ "stop_token_ids": [
3441
+ 2,
3442
+ 6,
3443
+ 7,
3444
+ 8
3445
+ ],
3446
+ "stop": [
3447
+ "<|endoftext|>",
3448
+ "<|im_start|>",
3449
+ "<|im_end|>",
3450
+ "<|im_sep|>"
3451
+ ]
3928
3452
  },
3929
3453
  {
3930
3454
  "version": 1,
@@ -3961,17 +3485,17 @@
3961
3485
  "model_id": "AI-ModelScope/gemma-7b-it"
3962
3486
  }
3963
3487
  ],
3964
- "prompt_style": {
3965
- "style_name": "gemma",
3966
- "roles": [
3967
- "user",
3968
- "model"
3969
- ],
3970
- "stop": [
3971
- "<end_of_turn>",
3972
- "<start_of_turn>"
3973
- ]
3974
- }
3488
+ "chat_template": "{{ '<bos>' }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %}",
3489
+ "stop_token_ids": [
3490
+ 1,
3491
+ 106,
3492
+ 107
3493
+ ],
3494
+ "stop": [
3495
+ "<eos>",
3496
+ "<end_of_turn>",
3497
+ "<start_of_turn>"
3498
+ ]
3975
3499
  },
3976
3500
  {
3977
3501
  "version": 1,
@@ -4042,17 +3566,17 @@
4042
3566
  "model_hub": "modelscope"
4043
3567
  }
4044
3568
  ],
4045
- "prompt_style": {
4046
- "style_name": "gemma",
4047
- "roles": [
4048
- "user",
4049
- "model"
4050
- ],
4051
- "stop": [
4052
- "<end_of_turn>",
4053
- "<start_of_turn>"
4054
- ]
4055
- }
3569
+ "chat_template": "{{ '<bos>' }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %}",
3570
+ "stop_token_ids": [
3571
+ 1,
3572
+ 106,
3573
+ 107
3574
+ ],
3575
+ "stop": [
3576
+ "<eos>",
3577
+ "<end_of_turn>",
3578
+ "<start_of_turn>"
3579
+ ]
4056
3580
  },
4057
3581
  {
4058
3582
  "version":1,
@@ -4089,14 +3613,13 @@
4089
3613
  "model_revision":"master"
4090
3614
  }
4091
3615
  ],
4092
- "prompt_style":{
4093
- "style_name":"OmniLMM",
4094
- "system_prompt":"The role of first msg should be user",
4095
- "roles":[
4096
- "user",
4097
- "assistant"
4098
- ]
4099
- }
3616
+ "chat_template": "",
3617
+ "stop_token_ids": [
3618
+ 2
3619
+ ],
3620
+ "stop": [
3621
+ "</s>"
3622
+ ]
4100
3623
  },
4101
3624
  {
4102
3625
  "version": 1,
@@ -4121,22 +3644,15 @@
4121
3644
  "model_revision": "master"
4122
3645
  }
4123
3646
  ],
4124
- "prompt_style": {
4125
- "style_name": "MINICPM-2B",
4126
- "system_prompt": "",
4127
- "roles": [
4128
- "user",
4129
- "assistant"
4130
- ],
4131
- "stop_token_ids": [
4132
- 1,
4133
- 2
4134
- ],
4135
- "stop": [
4136
- "<s>",
4137
- "</s>"
4138
- ]
4139
- }
3647
+ "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{'<用户>' + message['content'].strip() + '<AI>'}}{% else %}{{message['content'].strip()}}{% endif %}{% endfor %}",
3648
+ "stop_token_ids": [
3649
+ 1,
3650
+ 2
3651
+ ],
3652
+ "stop": [
3653
+ "<s>",
3654
+ "</s>"
3655
+ ]
4140
3656
  },
4141
3657
  {
4142
3658
  "version": 1,
@@ -4161,22 +3677,15 @@
4161
3677
  "model_revision": "master"
4162
3678
  }
4163
3679
  ],
4164
- "prompt_style": {
4165
- "style_name": "MINICPM-2B",
4166
- "system_prompt": "",
4167
- "roles": [
4168
- "user",
4169
- "assistant"
4170
- ],
4171
- "stop_token_ids": [
4172
- 1,
4173
- 2
4174
- ],
4175
- "stop": [
4176
- "<s>",
4177
- "</s>"
4178
- ]
4179
- }
3680
+ "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{'<用户>' + message['content'].strip() + '<AI>'}}{% else %}{{message['content'].strip()}}{% endif %}{% endfor %}",
3681
+ "stop_token_ids": [
3682
+ 1,
3683
+ 2
3684
+ ],
3685
+ "stop": [
3686
+ "<s>",
3687
+ "</s>"
3688
+ ]
4180
3689
  },
4181
3690
  {
4182
3691
  "version": 1,
@@ -4201,22 +3710,15 @@
4201
3710
  "model_revision": "master"
4202
3711
  }
4203
3712
  ],
4204
- "prompt_style": {
4205
- "style_name": "MINICPM-2B",
4206
- "system_prompt": "",
4207
- "roles": [
4208
- "user",
4209
- "assistant"
4210
- ],
4211
- "stop_token_ids": [
4212
- 1,
4213
- 2
4214
- ],
4215
- "stop": [
4216
- "<s>",
4217
- "</s>"
4218
- ]
4219
- }
3713
+ "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{'<用户>' + message['content'].strip() + '<AI>'}}{% else %}{{message['content'].strip()}}{% endif %}{% endfor %}",
3714
+ "stop_token_ids": [
3715
+ 1,
3716
+ 2
3717
+ ],
3718
+ "stop": [
3719
+ "<s>",
3720
+ "</s>"
3721
+ ]
4220
3722
  },
4221
3723
  {
4222
3724
  "version": 1,
@@ -4241,22 +3743,15 @@
4241
3743
  "model_revision": "master"
4242
3744
  }
4243
3745
  ],
4244
- "prompt_style": {
4245
- "style_name": "MINICPM-2B",
4246
- "system_prompt": "",
4247
- "roles": [
4248
- "user",
4249
- "assistant"
4250
- ],
4251
- "stop_token_ids": [
4252
- 1,
4253
- 2
4254
- ],
4255
- "stop": [
4256
- "<s>",
4257
- "</s>"
4258
- ]
4259
- }
3746
+ "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{'<用户>' + message['content'].strip() + '<AI>'}}{% else %}{{message['content'].strip()}}{% endif %}{% endfor %}",
3747
+ "stop_token_ids": [
3748
+ 1,
3749
+ 2
3750
+ ],
3751
+ "stop": [
3752
+ "<s>",
3753
+ "</s>"
3754
+ ]
4260
3755
  },
4261
3756
  {
4262
3757
  "version": 1,
@@ -4281,22 +3776,15 @@
4281
3776
  "model_revision": "master"
4282
3777
  }
4283
3778
  ],
4284
- "prompt_style": {
4285
- "style_name": "MINICPM-2B",
4286
- "system_prompt": "",
4287
- "roles": [
4288
- "user",
4289
- "assistant"
4290
- ],
4291
- "stop_token_ids": [
4292
- 1,
4293
- 2
4294
- ],
4295
- "stop": [
4296
- "<s>",
4297
- "</s>"
4298
- ]
4299
- }
3779
+ "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{'<用户>' + message['content'].strip() + '<AI>'}}{% else %}{{message['content'].strip()}}{% endif %}{% endfor %}",
3780
+ "stop_token_ids": [
3781
+ 1,
3782
+ 2
3783
+ ],
3784
+ "stop": [
3785
+ "<s>",
3786
+ "</s>"
3787
+ ]
4300
3788
  },
4301
3789
  {
4302
3790
  "version":1,
@@ -4333,14 +3821,13 @@
4333
3821
  "model_revision":"master"
4334
3822
  }
4335
3823
  ],
4336
- "prompt_style":{
4337
- "style_name":"OmniLMM",
4338
- "system_prompt":"The role of first msg should be user",
4339
- "roles":[
4340
- "user",
4341
- "assistant"
4342
- ]
4343
- }
3824
+ "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = '<|begin_of_text|>' + content %}{% endif %}{{ content }}{% endfor %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}",
3825
+ "stop_token_ids": [
3826
+ 128001
3827
+ ],
3828
+ "stop": [
3829
+ "<|end_of_text|>"
3830
+ ]
4344
3831
  },
4345
3832
  {
4346
3833
  "version":1,
@@ -4377,14 +3864,15 @@
4377
3864
  "model_revision":"master"
4378
3865
  }
4379
3866
  ],
4380
- "prompt_style":{
4381
- "style_name":"QWEN",
4382
- "system_prompt":"You are a helpful assistant",
4383
- "roles":[
4384
- "user",
4385
- "assistant"
4386
- ]
4387
- }
3867
+ "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
3868
+ "stop_token_ids": [
3869
+ 151645,
3870
+ 151643
3871
+ ],
3872
+ "stop": [
3873
+ "<|im_end|>",
3874
+ "<|endoftext|>"
3875
+ ]
4388
3876
  },
4389
3877
  {
4390
3878
  "version": 1,
@@ -4463,23 +3951,15 @@
4463
3951
  "model_revision": "master"
4464
3952
  }
4465
3953
  ],
4466
- "prompt_style": {
4467
- "style_name": "ADD_COLON_SINGLE",
4468
- "intra_message_sep": "\n",
4469
- "system_prompt": "",
4470
- "roles": [
4471
- "USER",
4472
- "ASSISTANT"
4473
- ],
4474
- "stop_token_ids": [
4475
- 100006,
4476
- 100007
4477
- ],
4478
- "stop": [
4479
- "[CLS]",
4480
- "</s>"
4481
- ]
4482
- }
3954
+ "chat_template": "{% for item in messages %}{% if loop.first and item['role'] == 'system' %}{{ item['content'] + '\n' }}{% endif %}{% if item['role'] == 'user' %}{{ 'USER: ' + item['content'] + '\n' }}{% elif item['role'] == 'assistant' %}{{ 'ASSISTANT: ' + item['content'] + '\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'ASSISTANT: ' }}{% endif %}",
3955
+ "stop_token_ids": [
3956
+ 100006,
3957
+ 100007
3958
+ ],
3959
+ "stop": [
3960
+ "[CLS]",
3961
+ "</s>"
3962
+ ]
4483
3963
  },
4484
3964
  {
4485
3965
  "version": 1,
@@ -4504,23 +3984,15 @@
4504
3984
  "model_revision": "master"
4505
3985
  }
4506
3986
  ],
4507
- "prompt_style": {
4508
- "style_name": "ADD_COLON_SINGLE",
4509
- "intra_message_sep": "\n",
4510
- "system_prompt": "",
4511
- "roles": [
4512
- "USER",
4513
- "ASSISTANT"
4514
- ],
4515
- "stop_token_ids": [
4516
- 100006,
4517
- 100007
4518
- ],
4519
- "stop": [
4520
- "[CLS]",
4521
- "</s>"
4522
- ]
4523
- }
3987
+ "chat_template": "{% for item in messages %}{% if loop.first and item['role'] == 'system' %}{{ item['content'] + '\n' }}{% endif %}{% if item['role'] == 'user' %}{{ 'USER: ' + item['content'] + '\n' }}{% elif item['role'] == 'assistant' %}{{ 'ASSISTANT: ' + item['content'] + '\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'ASSISTANT: ' }}{% endif %}",
3988
+ "stop_token_ids": [
3989
+ 100006,
3990
+ 100007
3991
+ ],
3992
+ "stop": [
3993
+ "[CLS]",
3994
+ "</s>"
3995
+ ]
4524
3996
  },
4525
3997
  {
4526
3998
  "version": 1,
@@ -4588,20 +4060,15 @@
4588
4060
  "model_revision": "master"
4589
4061
  }
4590
4062
  ],
4591
- "prompt_style": {
4592
- "style_name": "c4ai-command-r",
4593
- "system_prompt": "You are Command-R, a brilliant, sophisticated, AI-assistant trained to assist human users by providing thorough responses. You are trained by Cohere.",
4594
- "roles": [
4595
- "<|USER_TOKEN|>",
4596
- "<|CHATBOT_TOKEN|>"
4597
- ],
4598
- "intra_message_sep": "",
4599
- "inter_message_sep": "<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|>",
4600
- "stop_token_ids": [
4601
- 6,
4602
- 255001
4603
- ]
4604
- }
4063
+ "chat_template": "{{ '<BOS_TOKEN>' }}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif false == true %}{% set loop_messages = messages %}{% set system_message = 'You are Command-R, a brilliant, sophisticated, AI-assistant trained to assist human users by providing thorough responses. You are trained by Cohere.' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% if system_message != false %}{{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' + system_message + '<|END_OF_TURN_TOKEN|>' }}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|START_OF_TURN_TOKEN|><|USER_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}{% elif message['role'] == 'assistant' %}{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' }}{% endif %}",
4064
+ "stop_token_ids": [
4065
+ 6,
4066
+ 255001
4067
+ ],
4068
+ "stop": [
4069
+ "<EOS_TOKEN>",
4070
+ "<|END_OF_TURN_TOKEN|>"
4071
+ ]
4605
4072
  },
4606
4073
  {
4607
4074
  "version": 1,
@@ -4628,24 +4095,17 @@
4628
4095
  "model_revision": "master"
4629
4096
  }
4630
4097
  ],
4631
- "prompt_style": {
4632
- "style_name": "PHI3",
4633
- "system_prompt": "You are a helpful AI assistant.",
4634
- "roles": [
4635
- "user",
4636
- "assistant"
4637
- ],
4638
- "intra_message_sep": "\n",
4639
- "inter_message_sep": "<|end|>\n",
4640
- "stop_token_ids":[
4641
- 32000,
4642
- 32007
4643
- ],
4644
- "stop": [
4645
- "<|endoftext|>",
4646
- "<|end|>"
4647
- ]
4648
- }
4098
+ "chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}{{'<|system|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'user' %}{{'<|user|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>\n' + message['content'] + '<|end|>\n'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ '<|endoftext|>' }}{% endif %}",
4099
+ "stop_token_ids":[
4100
+ 32000,
4101
+ 32001,
4102
+ 32007
4103
+ ],
4104
+ "stop": [
4105
+ "<|endoftext|>",
4106
+ "<|assistant|>",
4107
+ "<|end|>"
4108
+ ]
4649
4109
  },
4650
4110
  {
4651
4111
  "version": 1,
@@ -4672,24 +4132,17 @@
4672
4132
  "model_revision": "master"
4673
4133
  }
4674
4134
  ],
4675
- "prompt_style": {
4676
- "style_name": "PHI3",
4677
- "system_prompt": "You are a helpful AI assistant.",
4678
- "roles": [
4679
- "user",
4680
- "assistant"
4681
- ],
4682
- "intra_message_sep": "\n",
4683
- "inter_message_sep": "<|end|>\n",
4684
- "stop_token_ids":[
4685
- 32000,
4686
- 32007
4687
- ],
4688
- "stop": [
4689
- "<|endoftext|>",
4690
- "<|end|>"
4691
- ]
4692
- }
4135
+ "chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}{{'<|system|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'user' %}{{'<|user|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>\n' + message['content'] + '<|end|>\n'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ '<|endoftext|>' }}{% endif %}",
4136
+ "stop_token_ids":[
4137
+ 32000,
4138
+ 32001,
4139
+ 32007
4140
+ ],
4141
+ "stop": [
4142
+ "<|endoftext|>",
4143
+ "<|assistant|>",
4144
+ "<|end|>"
4145
+ ]
4693
4146
  },
4694
4147
  {
4695
4148
  "version": 1,
@@ -4718,25 +4171,17 @@
4718
4171
  "model_revision": "master"
4719
4172
  }
4720
4173
  ],
4721
- "prompt_style": {
4722
- "style_name": "INTERNVL",
4723
- "system_prompt": "You are InternLM (书生·浦语), a helpful, honest, and harmless AI assistant developed by Shanghai AI Laboratory (上海人工智能实验室).",
4724
- "roles": [
4725
- "<|im_start|>user",
4726
- "<|im_start|>assistant"
4727
- ],
4728
- "intra_message_sep": "<|im_end|>",
4729
- "stop_token_ids": [
4730
- 2,
4731
- 92543,
4732
- 92542
4733
- ],
4734
- "stop": [
4735
- "</s>",
4736
- "<|im_end|>",
4737
- "<|im_start|>"
4738
- ]
4739
- }
4174
+ "chat_template": "{{ '<s>' }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
4175
+ "stop_token_ids": [
4176
+ 2,
4177
+ 92542,
4178
+ 92543
4179
+ ],
4180
+ "stop": [
4181
+ "</s>",
4182
+ "<|im_end|>",
4183
+ "<|im_start|>"
4184
+ ]
4740
4185
  },
4741
4186
  {
4742
4187
  "version": 1,
@@ -4888,25 +4333,17 @@
4888
4333
  "model_revision": "master"
4889
4334
  }
4890
4335
  ],
4891
- "prompt_style": {
4892
- "style_name": "INTERNVL",
4893
- "system_prompt": "You are InternLM (书生·浦语), a helpful, honest, and harmless AI assistant developed by Shanghai AI Laboratory (上海人工智能实验室).",
4894
- "roles": [
4895
- "<|im_start|>user",
4896
- "<|im_start|>assistant"
4897
- ],
4898
- "intra_message_sep": "<|im_end|>",
4899
- "stop_token_ids": [
4900
- 2,
4901
- 92543,
4902
- 92542
4903
- ],
4904
- "stop": [
4905
- "</s>",
4906
- "<|im_end|>",
4907
- "<|im_start|>"
4908
- ]
4909
- }
4336
+ "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
4337
+ "stop_token_ids": [
4338
+ 151643,
4339
+ 151644,
4340
+ 151645
4341
+ ],
4342
+ "stop": [
4343
+ "<|endoftext|>",
4344
+ "<|im_start|>",
4345
+ "<|im_end|>"
4346
+ ]
4910
4347
  },
4911
4348
  {
4912
4349
  "version": 1,
@@ -4943,24 +4380,15 @@
4943
4380
  "model_revision": "master"
4944
4381
  }
4945
4382
  ],
4946
- "prompt_style": {
4947
- "style_name": "LLAMA3",
4948
- "system_prompt": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.",
4949
- "roles": [
4950
- "user",
4951
- "assistant"
4952
- ],
4953
- "intra_message_sep": "\n\n",
4954
- "inter_message_sep": "<|eot_id|>",
4955
- "stop_token_ids": [
4956
- 128001,
4957
- 128009
4958
- ],
4959
- "stop": [
4960
- "<|end_of_text|>",
4961
- "<|eot_id|>"
4962
- ]
4963
- }
4383
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = '<|begin_of_text|>' + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% else %}{{ '<|end_of_text|>' }}{% endif %}",
4384
+ "stop_token_ids": [
4385
+ 128001,
4386
+ 128009
4387
+ ],
4388
+ "stop": [
4389
+ "<|end_of_text|>",
4390
+ "<|eot_id|>"
4391
+ ]
4964
4392
  },
4965
4393
  {
4966
4394
  "version": 1,
@@ -4989,24 +4417,15 @@
4989
4417
  "model_revision": "master"
4990
4418
  }
4991
4419
  ],
4992
- "prompt_style": {
4993
- "style_name": "LLAMA3",
4994
- "system_prompt": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.",
4995
- "roles": [
4996
- "user",
4997
- "assistant"
4998
- ],
4999
- "intra_message_sep": "\n\n",
5000
- "inter_message_sep": "<|eot_id|>",
5001
- "stop_token_ids": [
5002
- 128001,
5003
- 128009
5004
- ],
5005
- "stop": [
5006
- "<|end_of_text|>",
5007
- "<|eot_id|>"
5008
- ]
5009
- }
4420
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = '<|begin_of_text|>' + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% else %}{{ '<|end_of_text|>' }}{% endif %}",
4421
+ "stop_token_ids": [
4422
+ 128001,
4423
+ 128009
4424
+ ],
4425
+ "stop": [
4426
+ "<|end_of_text|>",
4427
+ "<|eot_id|>"
4428
+ ]
5010
4429
  },
5011
4430
  {
5012
4431
  "version": 1,
@@ -5080,22 +4499,57 @@
5080
4499
  "model_revision": "master"
5081
4500
  }
5082
4501
  ],
4502
+ "chat_template": "{{ (messages|selectattr('role', 'equalto', 'system')|list|last).content|trim if (messages|selectattr('role', 'equalto', 'system')|list) else '' }}{%- for message in messages -%}{%- if message['role'] == 'user' -%}{{- '<_user>' + message['content'] +'<_bot>' -}}{%- elif message['role'] == 'assistant' -%}{{- message['content'] + '<_end>' -}}{%- endif -%}{%- endfor -%}",
4503
+ "stop": [
4504
+ "<_end>",
4505
+ "<_start>"
4506
+ ],
4507
+ "stop_token_ids": [
4508
+ 160133,
4509
+ 160132
4510
+ ]
4511
+ },
4512
+ {
4513
+ "version": 1,
4514
+ "context_length": 32768,
4515
+ "model_name": "qwen2-vl-instruct",
4516
+ "model_lang": [
4517
+ "en",
4518
+ "zh"
4519
+ ],
4520
+ "model_ability": [
4521
+ "chat",
4522
+ "vision"
4523
+ ],
4524
+ "model_description": "Qwen2-VL: To See the World More Clearly. Qwen2-VL is the latest version of the vision language models in the Qwen model families.",
4525
+ "model_specs": [
4526
+ {
4527
+ "model_format": "pytorch",
4528
+ "model_size_in_billions": 2,
4529
+ "quantizations": [
4530
+ "none"
4531
+ ],
4532
+ "model_hub": "modelscope",
4533
+ "model_id": "qwen/Qwen2-VL-2B-Instruct",
4534
+ "model_revision": "master"
4535
+ },
4536
+ {
4537
+ "model_format": "pytorch",
4538
+ "model_size_in_billions": 7,
4539
+ "quantizations": [
4540
+ "none"
4541
+ ],
4542
+ "model_hub": "modelscope",
4543
+ "model_id": "qwen/Qwen2-VL-7B-Instruct",
4544
+ "model_revision": "master"
4545
+ }
4546
+ ],
5083
4547
  "prompt_style": {
5084
- "style_name": "NO_COLON_TWO",
5085
- "system_prompt": "You are a helpful assistant.",
4548
+ "style_name": "QWEN",
4549
+ "system_prompt": "You are a helpful assistant",
5086
4550
  "roles": [
5087
- "<_user>",
5088
- "<_bot>"
5089
- ],
5090
- "intra_message_sep": "",
5091
- "inter_message_sep": "",
5092
- "stop": [
5093
- "<_end>",
5094
- "<_start>"
5095
- ],
5096
- "stop_token_ids": [
5097
- 160133,
5098
- 160132
4551
+ "user",
4552
+ "assistant"
5099
4553
  ]
5100
4554
  }
5101
4555
  }