xinference 1.7.0__py3-none-any.whl → 1.7.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (83)
  1. xinference/_version.py +3 -3
  2. xinference/api/restful_api.py +3 -4
  3. xinference/client/__init__.py +2 -0
  4. xinference/client/common.py +49 -2
  5. xinference/client/handlers.py +18 -0
  6. xinference/client/restful/async_restful_client.py +1760 -0
  7. xinference/client/restful/restful_client.py +74 -78
  8. xinference/core/media_interface.py +3 -1
  9. xinference/core/model.py +5 -4
  10. xinference/core/supervisor.py +10 -5
  11. xinference/core/worker.py +15 -14
  12. xinference/deploy/local.py +51 -9
  13. xinference/deploy/worker.py +5 -3
  14. xinference/device_utils.py +22 -3
  15. xinference/model/audio/fish_speech.py +23 -34
  16. xinference/model/audio/model_spec.json +4 -2
  17. xinference/model/audio/model_spec_modelscope.json +4 -2
  18. xinference/model/audio/utils.py +2 -2
  19. xinference/model/core.py +1 -0
  20. xinference/model/embedding/__init__.py +8 -8
  21. xinference/model/embedding/custom.py +6 -1
  22. xinference/model/embedding/embed_family.py +0 -41
  23. xinference/model/embedding/model_spec.json +10 -1
  24. xinference/model/embedding/model_spec_modelscope.json +10 -1
  25. xinference/model/embedding/sentence_transformers/core.py +30 -15
  26. xinference/model/flexible/core.py +1 -1
  27. xinference/model/flexible/launchers/__init__.py +2 -0
  28. xinference/model/flexible/launchers/image_process_launcher.py +1 -1
  29. xinference/model/flexible/launchers/modelscope_launcher.py +47 -0
  30. xinference/model/flexible/launchers/transformers_launcher.py +5 -5
  31. xinference/model/flexible/launchers/yolo_launcher.py +62 -0
  32. xinference/model/llm/__init__.py +7 -0
  33. xinference/model/llm/core.py +18 -1
  34. xinference/model/llm/llama_cpp/core.py +1 -1
  35. xinference/model/llm/llm_family.json +43 -3
  36. xinference/model/llm/llm_family.py +6 -0
  37. xinference/model/llm/llm_family_modelscope.json +45 -3
  38. xinference/model/llm/mlx/core.py +271 -18
  39. xinference/model/llm/mlx/distributed_models/__init__.py +13 -0
  40. xinference/model/llm/mlx/distributed_models/core.py +164 -0
  41. xinference/model/llm/mlx/distributed_models/deepseek_v3.py +75 -0
  42. xinference/model/llm/mlx/distributed_models/qwen2.py +82 -0
  43. xinference/model/llm/mlx/distributed_models/qwen3.py +82 -0
  44. xinference/model/llm/mlx/distributed_models/qwen3_moe.py +76 -0
  45. xinference/model/llm/reasoning_parser.py +12 -6
  46. xinference/model/llm/sglang/core.py +8 -4
  47. xinference/model/llm/transformers/chatglm.py +4 -1
  48. xinference/model/llm/transformers/core.py +4 -2
  49. xinference/model/llm/transformers/multimodal/cogagent.py +10 -4
  50. xinference/model/llm/transformers/multimodal/intern_vl.py +1 -1
  51. xinference/model/llm/utils.py +36 -17
  52. xinference/model/llm/vllm/core.py +142 -34
  53. xinference/model/llm/vllm/distributed_executor.py +96 -21
  54. xinference/model/llm/vllm/xavier/transfer.py +2 -2
  55. xinference/model/rerank/core.py +26 -9
  56. xinference/model/rerank/model_spec.json +3 -3
  57. xinference/model/rerank/model_spec_modelscope.json +3 -3
  58. xinference/web/ui/build/asset-manifest.json +3 -3
  59. xinference/web/ui/build/index.html +1 -1
  60. xinference/web/ui/build/static/js/main.9b12b7f9.js +3 -0
  61. xinference/web/ui/build/static/js/main.9b12b7f9.js.map +1 -0
  62. xinference/web/ui/node_modules/.cache/babel-loader/0fd4820d93f99509e80d8702dc3f6f8272424acab5608fa7c0e82cb1d3250a87.json +1 -0
  63. xinference/web/ui/node_modules/.cache/babel-loader/1460361af6975e63576708039f1cb732faf9c672d97c494d4055fc6331460be0.json +1 -0
  64. xinference/web/ui/node_modules/.cache/babel-loader/4efd8dda58fda83ed9546bf2f587df67f8d98e639117bee2d9326a9a1d9bebb2.json +1 -0
  65. xinference/web/ui/node_modules/.cache/babel-loader/5b2dafe5aa9e1105e0244a2b6751807342fa86aa0144b4e84d947a1686102715.json +1 -0
  66. xinference/web/ui/node_modules/.cache/babel-loader/f75545479c17fdfe2a00235fa4a0e9da1ae95e6b3caafba87ded92de6b0240e4.json +1 -0
  67. xinference/web/ui/src/locales/en.json +3 -0
  68. xinference/web/ui/src/locales/ja.json +3 -0
  69. xinference/web/ui/src/locales/ko.json +3 -0
  70. xinference/web/ui/src/locales/zh.json +3 -0
  71. {xinference-1.7.0.dist-info → xinference-1.7.1.dist-info}/METADATA +4 -3
  72. {xinference-1.7.0.dist-info → xinference-1.7.1.dist-info}/RECORD +77 -67
  73. xinference/web/ui/build/static/js/main.8a9e3ba0.js +0 -3
  74. xinference/web/ui/build/static/js/main.8a9e3ba0.js.map +0 -1
  75. xinference/web/ui/node_modules/.cache/babel-loader/26b8c9f34b0bed789b3a833767672e39302d1e0c09b4276f4d58d1df7b6bd93b.json +0 -1
  76. xinference/web/ui/node_modules/.cache/babel-loader/34cfbfb7836e136ba3261cfd411cc554bf99ba24b35dcceebeaa4f008cb3c9dc.json +0 -1
  77. xinference/web/ui/node_modules/.cache/babel-loader/c5c7c2cd1b863ce41adff2c4737bba06eef3a1acf28288cb83d992060f6b8923.json +0 -1
  78. xinference/web/ui/node_modules/.cache/babel-loader/cc97b49285d7717c63374766c789141a4329a04582ab32756d7e0e614d4c5c7f.json +0 -1
  79. /xinference/web/ui/build/static/js/{main.8a9e3ba0.js.LICENSE.txt → main.9b12b7f9.js.LICENSE.txt} +0 -0
  80. {xinference-1.7.0.dist-info → xinference-1.7.1.dist-info}/WHEEL +0 -0
  81. {xinference-1.7.0.dist-info → xinference-1.7.1.dist-info}/entry_points.txt +0 -0
  82. {xinference-1.7.0.dist-info → xinference-1.7.1.dist-info}/licenses/LICENSE +0 -0
  83. {xinference-1.7.0.dist-info → xinference-1.7.1.dist-info}/top_level.txt +0 -0
xinference/model/flexible/launchers/transformers_launcher.py
@@ -18,7 +18,7 @@ from ..core import FlexibleModel, FlexibleModelSpec
 
 
 class MockModel(FlexibleModel):
-    def infer(self, **kwargs):
+    def infer(self, *args, **kwargs):
         return kwargs
 
 
@@ -27,8 +27,8 @@ class AutoModel(FlexibleModel):
         config = self.config or {}
         self._pipeline = pipeline(model=self.model_path, device=self.device, **config)
 
-    def infer(self, **kwargs):
-        return self._pipeline(**kwargs)
+    def infer(self, *args, **kwargs):
+        return self._pipeline(*args, **kwargs)
 
 
 class TransformersTextClassificationModel(FlexibleModel):
@@ -37,8 +37,8 @@ class TransformersTextClassificationModel(FlexibleModel):
 
         self._pipeline = pipeline(model=self._model_path, device=self._device, **config)
 
-    def infer(self, **kwargs):
-        return self._pipeline(**kwargs)
+    def infer(self, *args, **kwargs):
+        return self._pipeline(*args, **kwargs)
 
 
 def launcher(model_uid: str, model_spec: FlexibleModelSpec, **kwargs) -> FlexibleModel:
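The `*args` widening matters because Hugging Face pipelines are conventionally called with positional input. A minimal sketch of the effect, with illustrative call sites that are not part of this diff:

    # `model` stands for a launched AutoModel-style flexible model.
    model.infer(inputs="I love this movie!")   # worked before: kwargs only
    model.infer("I love this movie!")          # works after: positional args
                                               # are forwarded to the pipeline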
xinference/model/flexible/launchers/yolo_launcher.py
@@ -0,0 +1,62 @@
+# Copyright 2022-2025 XProbe Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import inspect
+import io
+import json
+
+import PIL.Image
+
+from ..core import FlexibleModel, FlexibleModelSpec
+
+
+class UltralyticsModel(FlexibleModel):
+    def load(self):
+        from ultralytics import YOLO
+
+        config = dict(self.config or {})
+        if self._device:
+            config["device"] = self._device
+
+        self._model = YOLO(model=self._model_path, **config)
+
+    def infer(self, *args, **kwargs):
+        predict_func = self._model.predict
+
+        sig = inspect.signature(predict_func)
+        bound_args = sig.bind_partial(*args, **kwargs)  # or bind(), depending on the use case
+        bound_args.apply_defaults()
+
+        if "source" in bound_args.arguments:
+            source = bound_args.arguments["source"]
+            decoded = base64.b64decode(source)
+            img = PIL.Image.open(io.BytesIO(decoded))
+
+            bound_args.arguments["source"] = img
+
+        results = predict_func(*bound_args.args, **bound_args.kwargs)
+        return [json.loads(r.to_json()) for r in results]
+
+
+def launcher(model_uid: str, model_spec: FlexibleModelSpec, **kwargs) -> FlexibleModel:
+    device = kwargs.get("device")
+
+    model_path = model_spec.model_uri
+    if model_path is None:
+        raise ValueError("model_path required")
+
+    return UltralyticsModel(
+        model_uid=model_uid, model_path=model_path, device=device, config=kwargs
+    )
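Because `infer` base64-decodes the `source` argument before handing it to `YOLO.predict`, callers are expected to submit images as base64 strings. A hedged usage sketch (the file path and the launched-model handle are placeholders, not from this diff):

    import base64

    # Encode an image the way UltralyticsModel.infer expects it.
    with open("example.jpg", "rb") as f:       # placeholder path
        encoded = base64.b64encode(f.read()).decode()

    # `model` stands for a launched UltralyticsModel; infer() decodes the
    # source, runs YOLO.predict, and returns each result parsed from JSON.
    detections = model.infer(source=encoded)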
xinference/model/llm/__init__.py
@@ -152,6 +152,13 @@ def load_model_family_from_json(json_filename, target_families):
             "stop_token_ids": model_spec.stop_token_ids,
             "stop": model_spec.stop,
         }
+        if model_spec.reasoning_start_tag and model_spec.reasoning_end_tag:
+            BUILTIN_LLM_PROMPT_STYLE[model_spec.model_name][
+                "reasoning_start_tag"
+            ] = model_spec.reasoning_start_tag
+            BUILTIN_LLM_PROMPT_STYLE[model_spec.model_name][
+                "reasoning_end_tag"
+            ] = model_spec.reasoning_end_tag
 
         # register model family
         if "chat" in model_spec.model_ability:
xinference/model/llm/core.py
@@ -20,6 +20,7 @@ import platform
 import warnings
 from abc import abstractmethod
 from collections import defaultdict
+from contextvars import ContextVar
 from functools import lru_cache
 from typing import TYPE_CHECKING, Dict, List, Literal, Optional, Tuple, Union
 
@@ -105,9 +106,14 @@ class LLM(abc.ABC):
     @staticmethod
     @lru_cache
     def _get_cuda_count():
+        from ...device_utils import get_available_device_env_name
         from ...utils import cuda_count
 
-        cuda_visible_devices = os.getenv("CUDA_VISIBLE_DEVICES", None)
+        env_name = get_available_device_env_name()
+        if env_name is None:
+            return cuda_count()
+
+        cuda_visible_devices = os.getenv(env_name, None)
         if cuda_visible_devices is None:
             return cuda_count()
 
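Routing the lookup through `get_available_device_env_name` lets the count respect whichever visibility variable the active accelerator uses, instead of hard-coding CUDA_VISIBLE_DEVICES. `device_utils.py` changes in this same release; a plausible sketch of such a helper, where the mapping and the `detect_device_type` call are assumptions for illustration only:

    # Hypothetical sketch: map the detected accelerator to the environment
    # variable that restricts its visible devices; None means no such variable.
    def get_available_device_env_name():
        mapping = {
            "cuda": "CUDA_VISIBLE_DEVICES",       # assumed entry
            "npu": "ASCEND_RT_VISIBLE_DEVICES",   # assumed entry
        }
        return mapping.get(detect_device_type())  # hypothetical helper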
@@ -152,6 +158,17 @@ class LLM(abc.ABC):
         )
 
 
+# Context variable for passing per-request chat context (e.g., chat_template_kwargs).
+# This variable should be set at the beginning of each chat or stream_chat call.
+# It allows downstream components (e.g., reasoning_parser) to access request-specific
+# settings like 'enable_thinking', without requiring those values to be passed explicitly
+# through every function layer.
+#
+# The context is automatically isolated per thread or coroutine, so concurrent requests
+# will not interfere with each other.
+chat_context_var: ContextVar[dict] = ContextVar("chat_context_var", default={})
+
+
 class LLMDescription(ModelDescription):
     def __init__(
         self,
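The comment above describes standard contextvars semantics; a self-contained sketch of the intended set/read/reset pattern (the handler and its settings are illustrative, not from this diff):

    from contextvars import ContextVar

    chat_context_var: ContextVar[dict] = ContextVar("chat_context_var", default={})

    async def handle_chat():                       # illustrative handler
        token = chat_context_var.set({"enable_thinking": True})
        try:
            # ...deep in the call stack, e.g. a reasoning parser can read:
            enabled = chat_context_var.get().get("enable_thinking", False)
        finally:
            chat_context_var.reset(token)          # restore the previous value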
xinference/model/llm/llama_cpp/core.py
@@ -162,7 +162,7 @@ class XllamaCppModel(LLM, ChatModelMixin):
         if self.model_family.chat_template:
             params.chat_template = self.model_family.chat_template
         # This is the default value, could be overwritten by _llamacpp_model_config
-        params.n_parallel = os.cpu_count()
+        params.n_parallel = min(8, os.cpu_count() or 1)
         for k, v in self._llamacpp_model_config.items():
             try:
                 if "." in k:
xinference/model/llm/llm_family.json
@@ -6160,7 +6160,7 @@
       "quantizations": [
         "none"
       ],
-      "model_id": "openbmb/MiniCPM4-0.5B"
+      "model_id": "JunHowie/MiniCPM4-0.5B"
     },
     {
       "model_format": "pytorch",
@@ -6168,7 +6168,7 @@
       "quantizations": [
         "none"
       ],
-      "model_id": "openbmb/MiniCPM4-8B"
+      "model_id": "JunHowie/MiniCPM4-8B"
     },
     {
       "model_format": "mlx",
@@ -10518,8 +10518,48 @@
       "packages": [
         "transformers>=4.51.0",
         "mlx-lm>=0.24.0 ; sys_platform=='darwin'",
-        "numpy==1.26.4"
+        "#system_numpy#"
       ]
     }
+  },
+  {
+    "version": 1,
+    "context_length": 32768,
+    "model_name": "qwenLong-l1",
+    "model_lang": [
+      "en",
+      "zh"
+    ],
+    "model_ability": [
+      "chat"
+    ],
+    "model_description": "QwenLong-L1: Towards Long-Context Large Reasoning Models with Reinforcement Learning",
+    "model_specs": [
+      {
+        "model_format": "pytorch",
+        "model_size_in_billions": 32,
+        "quantizations": [
+          "none"
+        ],
+        "model_id": "Tongyi-Zhiwen/QwenLong-L1-32B"
+      },
+      {
+        "model_format": "awq",
+        "model_size_in_billions": 32,
+        "quantizations": [
+          "Int4"
+        ],
+        "model_id": "Tongyi-Zhiwen/QwenLong-L1-32B-AWQ"
+      }
+    ],
+    "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\\n'}}{% endif %}",
+    "stop_token_ids": [
+      151643
+    ],
+    "stop": [
+      "<|end▁of▁sentence|>"
+    ],
+    "reasoning_start_tag": "<think>",
+    "reasoning_end_tag": "</think>"
   }
 ]
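With the family registered, the new model can be launched through the ordinary client flow. A minimal sketch against a locally running server (the endpoint URL and the chosen format/quantization are assumptions):

    from xinference.client import Client

    client = Client("http://localhost:9997")       # assumed local endpoint
    model_uid = client.launch_model(
        model_name="qwenLong-l1",                  # name registered in this diff
        model_format="awq",
        quantization="Int4",
    )
    model = client.get_model(model_uid)
    print(model.chat(messages=[{"role": "user", "content": "hello"}]))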
xinference/model/llm/llm_family.py
@@ -232,6 +232,12 @@ class CustomLLMFamilyV1(LLMFamilyV1):
             "stop_token_ids"
         ]
         llm_spec.stop = BUILTIN_LLM_PROMPT_STYLE[llm_spec.chat_template]["stop"]
+        llm_spec.reasoning_start_tag = BUILTIN_LLM_PROMPT_STYLE[
+            llm_spec.chat_template
+        ].get("reasoning_start_tag")
+        llm_spec.reasoning_end_tag = BUILTIN_LLM_PROMPT_STYLE[
+            llm_spec.chat_template
+        ].get("reasoning_end_tag")
         llm_spec.chat_template = BUILTIN_LLM_PROMPT_STYLE[llm_spec.chat_template][
             "chat_template"
         ]
xinference/model/llm/llm_family_modelscope.json
@@ -4295,7 +4295,7 @@
       "quantizations": [
         "none"
       ],
-      "model_id": "OpenBMB/MiniCPM4-0.5B",
+      "model_id": "JunHowie/MiniCPM4-0.5B",
       "model_hub": "modelscope"
     },
     {
@@ -4304,7 +4304,7 @@
       "quantizations": [
         "none"
       ],
-      "model_id": "OpenBMB/MiniCPM4-8B",
+      "model_id": "JunHowie/MiniCPM4-8B",
       "model_hub": "modelscope"
     },
     {
@@ -8651,8 +8651,50 @@
       "packages": [
         "transformers>=4.51.0",
         "mlx-lm>=0.24.0 ; sys_platform=='darwin'",
-        "numpy==1.26.4"
+        "#system_numpy#"
       ]
     }
+  },
+  {
+    "version": 1,
+    "context_length": 32768,
+    "model_name": "qwenLong-l1",
+    "model_lang": [
+      "en",
+      "zh"
+    ],
+    "model_ability": [
+      "chat"
+    ],
+    "model_description": "QwenLong-L1: Towards Long-Context Large Reasoning Models with Reinforcement Learning",
+    "model_specs": [
+      {
+        "model_format": "pytorch",
+        "model_size_in_billions": 32,
+        "quantizations": [
+          "none"
+        ],
+        "model_id": "iic/QwenLong-L1-32B",
+        "model_hub": "modelscope"
+      },
+      {
+        "model_format": "awq",
+        "model_size_in_billions": 32,
+        "quantizations": [
+          "Int4"
+        ],
+        "model_id": "iic/QwenLong-L1-32B-AWQ",
+        "model_hub": "modelscope"
+      }
+    ],
+    "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\\n'}}{% endif %}",
+    "stop_token_ids": [
+      151643
+    ],
+    "stop": [
+      "<|end▁of▁sentence|>"
+    ],
+    "reasoning_start_tag": "<think>",
+    "reasoning_end_tag": "</think>"
   }
 ]