xinference 1.9.0__py3-none-any.whl → 1.10.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of xinference might be problematic.

Files changed (92)
  1. xinference/_version.py +3 -3
  2. xinference/api/restful_api.py +415 -1
  3. xinference/constants.py +2 -0
  4. xinference/core/model.py +3 -4
  5. xinference/core/supervisor.py +29 -1
  6. xinference/core/worker.py +4 -1
  7. xinference/deploy/cmdline.py +2 -0
  8. xinference/deploy/test/test_cmdline.py +1 -1
  9. xinference/model/audio/core.py +5 -0
  10. xinference/model/audio/cosyvoice.py +0 -1
  11. xinference/model/audio/kokoro.py +1 -1
  12. xinference/model/audio/kokoro_zh.py +124 -0
  13. xinference/model/audio/model_spec.json +64 -20
  14. xinference/model/embedding/flag/core.py +5 -0
  15. xinference/model/embedding/llama_cpp/core.py +22 -19
  16. xinference/model/embedding/sentence_transformers/core.py +19 -4
  17. xinference/model/embedding/vllm/core.py +40 -8
  18. xinference/model/image/cache_manager.py +56 -0
  19. xinference/model/image/core.py +9 -0
  20. xinference/model/image/model_spec.json +116 -9
  21. xinference/model/image/stable_diffusion/core.py +141 -31
  22. xinference/model/llm/core.py +10 -0
  23. xinference/model/llm/llama_cpp/core.py +42 -40
  24. xinference/model/llm/llm_family.json +435 -23
  25. xinference/model/llm/llm_family.py +1 -0
  26. xinference/model/llm/mlx/core.py +52 -33
  27. xinference/model/llm/sglang/core.py +2 -44
  28. xinference/model/llm/tool_parsers/__init__.py +58 -0
  29. xinference/model/llm/tool_parsers/abstract_tool_parser.py +33 -0
  30. xinference/model/llm/tool_parsers/deepseek_r1_tool_parser.py +128 -0
  31. xinference/model/llm/tool_parsers/deepseek_v3_tool_parser.py +145 -0
  32. xinference/model/llm/tool_parsers/glm4_tool_parser.py +123 -0
  33. xinference/model/llm/tool_parsers/llama3_tool_parser.py +77 -0
  34. xinference/model/llm/tool_parsers/qwen_tool_parser.py +320 -0
  35. xinference/model/llm/transformers/core.py +6 -12
  36. xinference/model/llm/utils.py +128 -46
  37. xinference/model/llm/vllm/core.py +8 -61
  38. xinference/model/rerank/core.py +3 -0
  39. xinference/model/rerank/sentence_transformers/core.py +1 -1
  40. xinference/model/rerank/vllm/core.py +56 -6
  41. xinference/model/utils.py +1 -2
  42. xinference/model/video/model_spec.json +95 -1
  43. xinference/thirdparty/cosyvoice/bin/export_jit.py +3 -4
  44. xinference/thirdparty/cosyvoice/bin/export_onnx.py +49 -126
  45. xinference/thirdparty/cosyvoice/bin/{inference.py → inference_deprecated.py} +1 -0
  46. xinference/thirdparty/cosyvoice/bin/train.py +23 -3
  47. xinference/thirdparty/cosyvoice/cli/cosyvoice.py +8 -4
  48. xinference/thirdparty/cosyvoice/cli/frontend.py +4 -4
  49. xinference/thirdparty/cosyvoice/cli/model.py +53 -75
  50. xinference/thirdparty/cosyvoice/dataset/dataset.py +5 -18
  51. xinference/thirdparty/cosyvoice/dataset/processor.py +24 -25
  52. xinference/thirdparty/cosyvoice/flow/decoder.py +24 -433
  53. xinference/thirdparty/cosyvoice/flow/flow.py +6 -14
  54. xinference/thirdparty/cosyvoice/flow/flow_matching.py +33 -145
  55. xinference/thirdparty/cosyvoice/hifigan/generator.py +169 -1
  56. xinference/thirdparty/cosyvoice/llm/llm.py +108 -17
  57. xinference/thirdparty/cosyvoice/transformer/upsample_encoder.py +14 -115
  58. xinference/thirdparty/cosyvoice/utils/common.py +20 -0
  59. xinference/thirdparty/cosyvoice/utils/executor.py +8 -4
  60. xinference/thirdparty/cosyvoice/utils/file_utils.py +45 -1
  61. xinference/thirdparty/cosyvoice/utils/losses.py +37 -0
  62. xinference/thirdparty/cosyvoice/utils/mask.py +35 -1
  63. xinference/thirdparty/cosyvoice/utils/train_utils.py +24 -6
  64. xinference/thirdparty/cosyvoice/vllm/cosyvoice2.py +103 -0
  65. xinference/types.py +105 -2
  66. xinference/ui/gradio/chat_interface.py +2 -0
  67. xinference/ui/gradio/media_interface.py +353 -7
  68. xinference/ui/web/ui/build/asset-manifest.json +3 -3
  69. xinference/ui/web/ui/build/index.html +1 -1
  70. xinference/ui/web/ui/build/static/js/main.1086c759.js +3 -0
  71. xinference/ui/web/ui/build/static/js/main.1086c759.js.map +1 -0
  72. xinference/ui/web/ui/node_modules/.cache/babel-loader/3c5758bd12fa334294b1de0ff6b1a4bac8d963c45472eab9dc3e530d82aa6b3f.json +1 -0
  73. xinference/ui/web/ui/node_modules/.cache/babel-loader/a3eb18af328280b139693c9092dff2a0ef8c9a967e6c8956ceee0996611f1984.json +1 -0
  74. xinference/ui/web/ui/node_modules/.cache/babel-loader/d5c224be7081f18cba1678b7874a9782eba895df004874ff8f243f94ba79942a.json +1 -0
  75. xinference/ui/web/ui/node_modules/.cache/babel-loader/f7f18bfb539b036a6a342176dd98a85df5057a884a8da978d679f2a0264883d0.json +1 -0
  76. xinference/ui/web/ui/src/locales/en.json +2 -0
  77. xinference/ui/web/ui/src/locales/ja.json +2 -0
  78. xinference/ui/web/ui/src/locales/ko.json +2 -0
  79. xinference/ui/web/ui/src/locales/zh.json +2 -0
  80. {xinference-1.9.0.dist-info → xinference-1.10.0.dist-info}/METADATA +16 -12
  81. {xinference-1.9.0.dist-info → xinference-1.10.0.dist-info}/RECORD +86 -77
  82. xinference/ui/web/ui/build/static/js/main.4918643a.js +0 -3
  83. xinference/ui/web/ui/build/static/js/main.4918643a.js.map +0 -1
  84. xinference/ui/web/ui/node_modules/.cache/babel-loader/3d2a89f0eccc1f90fc5036c9a1d587c2120e6a6b128aae31d1db7d6bad52722b.json +0 -1
  85. xinference/ui/web/ui/node_modules/.cache/babel-loader/89179f8f51887b9167721860a12412549ff04f78162e921a7b6aa6532646deb2.json +0 -1
  86. xinference/ui/web/ui/node_modules/.cache/babel-loader/8e5cb82c2ff3299c6a44563fe6b1c5515c9750613c51bb63abee0b1d70fc5019.json +0 -1
  87. xinference/ui/web/ui/node_modules/.cache/babel-loader/9dc5cfc67dd0617b0272aeef8651f1589b2155a4ff1fd72ad3166b217089b619.json +0 -1
  88. /xinference/ui/web/ui/build/static/js/{main.4918643a.js.LICENSE.txt → main.1086c759.js.LICENSE.txt} +0 -0
  89. {xinference-1.9.0.dist-info → xinference-1.10.0.dist-info}/WHEEL +0 -0
  90. {xinference-1.9.0.dist-info → xinference-1.10.0.dist-info}/entry_points.txt +0 -0
  91. {xinference-1.9.0.dist-info → xinference-1.10.0.dist-info}/licenses/LICENSE +0 -0
  92. {xinference-1.9.0.dist-info → xinference-1.10.0.dist-info}/top_level.txt +0 -0
xinference/model/llm/tool_parsers/llama3_tool_parser.py (new file)
@@ -0,0 +1,77 @@
+ import logging
+ from typing import Any, Dict, List, Optional, Tuple
+
+ from . import register_tool_parser
+ from .abstract_tool_parser import ToolParser
+
+ logger = logging.getLogger(__name__)
+
+
+ @register_tool_parser("llama3")
+ class Llama3ToolParser(ToolParser):
+     """
+     Tool parser implementation for the Llama3 model.
+
+     This parser handles the specific format used by Llama3 for tool calls,
+     which uses a Python dictionary format that needs to be evaluated safely.
+     """
+
+     def __init__(self):
+         """
+         Initialize the Llama3 tool parser.
+         """
+         super().__init__()
+
+     def extract_tool_calls(
+         self, model_output: str
+     ) -> List[Tuple[Optional[str], Optional[str], Optional[Dict[str, Any]]]]:
+         """
+         Extract tool calls from complete model output.
+
+         Parses the model output using eval() to extract tool call information.
+         This method expects the output to be a valid Python dictionary literal.
+
+         Args:
+             model_output (str): The complete output string from the model.
+
+         Returns:
+             List[Tuple[Optional[str], Optional[str], Optional[Dict[str, Any]]]]:
+                 A list of tuples where each tuple contains:
+                 - content (str or None): raw content if parsing failed, None if successful
+                 - function_name (str or None): name of the function to call
+                 - parameters (dict or None): function parameters
+         """
+         try:
+             data = eval(model_output, {}, {})
+             return [(None, data["name"], data["parameters"])]
+         except Exception:
+             return [(model_output, None, None)]
+
+     def extract_tool_calls_streaming(
+         self, previous_text: List[str], current_text: str, delta_text: str
+     ) -> Optional[Any]:
+         """
+         Extract tool calls from streaming output.
+
+         Currently not supported for the Llama3 model. This method raises
+         NotImplementedError, since streaming tool call extraction is only
+         available for specific model/backend combinations.
+
+         Args:
+             previous_text (List[str]): Previous text chunks from the stream.
+             current_text (str): Current accumulated text.
+             delta_text (str): New text delta in this chunk.
+
+         Raises:
+             NotImplementedError: Always raised, as streaming is not supported.
+
+         Note:
+             The Llama3 model does not currently support streaming tool call
+             extraction. Use extract_tool_calls() with the complete output instead.
+         """
+         raise NotImplementedError(
+             "Streaming support for tool calls is available only when using "
+             "Qwen models with vLLM backend or GLM4-chat models without vLLM backend. "
+             "Llama3 does not support streaming tool call extraction."
+         )
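For reference, a minimal usage sketch of the new parser (illustrative driver code, not part of the diff): a well-formed Llama3 tool call is a Python dict literal, and anything that fails to evaluate falls through as plain content.

```python
# Illustrative sketch of Llama3ToolParser.extract_tool_calls, assuming
# xinference 1.10.0 is installed. The sample model outputs are hypothetical.
from xinference.model.llm.tool_parsers.llama3_tool_parser import Llama3ToolParser

parser = Llama3ToolParser()

# A well-formed Llama3 tool call is a Python dict literal.
print(parser.extract_tool_calls(
    '{"name": "get_weather", "parameters": {"city": "Beijing"}}'
))
# -> [(None, 'get_weather', {'city': 'Beijing'})]

# Output that does not evaluate to a dict with "name"/"parameters"
# is returned unchanged as plain content.
print(parser.extract_tool_calls("It is sunny today."))
# -> [('It is sunny today.', None, None)]
```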
xinference/model/llm/tool_parsers/qwen_tool_parser.py (new file)
@@ -0,0 +1,320 @@
+ import json
+ import logging
+ import re
+ from typing import Any, Dict, List, Optional, Tuple
+
+ from . import register_tool_parser
+ from .abstract_tool_parser import ToolParser
+
+ logger = logging.getLogger(__name__)
+
+
+ @register_tool_parser("qwen")
+ class QwenToolParser(ToolParser):
+     """
+     Tool parser implementation for the Qwen model.
+
+     This parser handles the specific format used by Qwen for tool calls,
+     which uses XML-like tags for both thinking blocks and tool calls.
+     """
+
+     def __init__(self):
+         """
+         Initialize the Qwen tool parser.
+
+         Sets up the XML-like tokens and regex patterns used for parsing
+         Qwen model outputs containing thinking blocks and tool calls.
+         """
+         super().__init__()
+
+         # Sentinel tokens for streaming mode
+         self.think_start_token: str = "<think>"
+         self.think_end_token: str = "</think>"
+         self.tool_call_start_token: str = "<tool_call>"
+         self.tool_call_end_token: str = "</tool_call>"
+
+         # Regex patterns for parsing different content types
+         self.think_regex = re.compile("<think>(.*?)</think>", re.DOTALL)
+         self.content_regex = r"(<(think|tool_call)>.*?</\2>)"
+         self.tool_call_complete_regex = re.compile(
+             r"<tool_call>(.*?)</tool_call>", re.DOTALL
+         )
+         self.tool_call_regex = re.compile(
+             r"<tool_call>.*?</tool_call>|<tool_call>.*?$", re.DOTALL
+         )
+
+     def _parse_json_function_call(
+         self,
+         function_call_str: str,
+     ) -> str:
+         """
+         Parse a JSON function call from a string.
+
+         Extracts the JSON content from tool_call XML tags.
+
+         Args:
+             function_call_str (str): The function call string to parse.
+
+         Returns:
+             str: Extracted JSON string, or the original string if no match is found.
+         """
+         function_calls = self.tool_call_complete_regex.findall(function_call_str)
+         if len(function_calls) == 0:
+             return function_call_str
+         return function_calls[-1]
+
+     def _parse_json_function_call_stream(
+         self,
+         function_call_str: str,
+     ) -> Optional[str]:
+         """
+         Parse a JSON function call from a streaming string.
+
+         Extracts the JSON content from tool_call XML tags in a streaming context.
+
+         Args:
+             function_call_str (str): The function call string to parse.
+
+         Returns:
+             Optional[str]: Extracted JSON string, or None if no complete match is found.
+         """
+         function_calls = self.tool_call_complete_regex.findall(function_call_str)
+         if len(function_calls) == 0:
+             return None
+         return function_calls[-1]
+
+     def is_contain_think_end_token(self, model_output: str) -> bool:
+         """
+         Check if the model output contains the think end token.
+
+         Args:
+             model_output (str): The model output to check.
+
+         Returns:
+             bool: True if the think end token is present.
+         """
+         return self.think_end_token in model_output
+
+     def is_contain_think(self, model_output: str) -> bool:
+         """
+         Check if the model output contains complete thinking blocks.
+
+         Args:
+             model_output (str): The model output to check.
+
+         Returns:
+             bool: True if complete thinking blocks are present.
+         """
+         return self.think_regex.search(model_output) is not None
+
+     def is_contain_tool_call(self, model_output: str) -> bool:
+         """
+         Check if the model output contains complete tool calls.
+
+         Args:
+             model_output (str): The model output to check.
+
+         Returns:
+             bool: True if complete tool calls are present.
+         """
+         return self.tool_call_complete_regex.search(model_output) is not None
+
+     def is_contain_tool_call_start_token(self, model_output: str) -> bool:
+         """
+         Check if the model output contains the tool call start token.
+
+         Args:
+             model_output (str): The model output to check.
+
+         Returns:
+             bool: True if the tool call start token is present.
+         """
+         return self.tool_call_start_token in model_output
+
+     def is_contain_tool_call_end_token(self, model_output: str) -> bool:
+         """
+         Check if the model output contains the tool call end token.
+
+         Args:
+             model_output (str): The model output to check.
+
+         Returns:
+             bool: True if the tool call end token is present.
+         """
+         return self.tool_call_end_token in model_output
+
+     def _get_function_calls(self, model_output: str) -> List[str]:
+         """
+         Extract all function calls and content blocks from model output.
+
+         Parses the model output to separate thinking blocks, tool calls,
+         and regular content into individual components.
+
+         Args:
+             model_output (str): The complete model output to parse.
+
+         Returns:
+             List[str]: List of content blocks (text, thinking blocks, tool calls).
+         """
+         functions_calls = []
+         last_end = 0
+         for m in re.finditer(self.content_regex, model_output, re.DOTALL):
+             # Add any text before the current match
+             if m.start() > last_end:
+                 functions_calls.append(model_output[last_end : m.start()])
+             # Add the matched content (think or tool_call block)
+             functions_calls.append(m.group(0))
+             last_end = m.end()
+         # Add any remaining text after the last match
+         if last_end < len(model_output):
+             functions_calls.append(model_output[last_end:])
+         return functions_calls
+
+     def _get_function_calls_streaming(self, model_output: str) -> List[str]:
+         """
+         Extract function calls from streaming model output.
+
+         Finds both complete and incomplete tool calls in a streaming context.
+
+         Args:
+             model_output (str): The streaming model output to parse.
+
+         Returns:
+             List[str]: List of tool call blocks (complete or incomplete).
+         """
+         matched_ranges = self.tool_call_regex.findall(model_output)
+         return matched_ranges
+
+     def extract_tool_calls(
+         self, model_output: str
+     ) -> List[Tuple[Optional[str], Optional[str], Optional[Dict[str, Any]]]]:
+         """
+         Extract tool calls from complete model output.
+
+         Parses the model output to find tool calls and thinking blocks,
+         extracting function names and arguments from the JSON content within
+         tool_call XML tags.
+
+         Args:
+             model_output (str): The complete output string from the model.
+
+         Returns:
+             List[Tuple[Optional[str], Optional[str], Optional[Dict[str, Any]]]]:
+                 A list of tuples where each tuple contains:
+                 - content (str or None): raw content if parsing failed, None if successful
+                 - function_name (str or None): name of the function to call
+                 - arguments (dict or None): function arguments
+
+         Example:
+             >>> parser = QwenToolParser()
+             >>> output = '<tool_call>\n{"name": "get_weather", "arguments": {"location": "Beijing"}}\n</tool_call>'
+             >>> result = parser.extract_tool_calls(output)
+             >>> print(result)
+             [(None, 'get_weather', {'location': 'Beijing'})]
+         """
+         # If no tool call tokens, return original output as content
+         if self.tool_call_start_token not in model_output:
+             return [(model_output, None, None)]
+
+         try:
+             function_calls = self._get_function_calls(model_output)
+             if len(function_calls) == 0:
+                 return [(model_output, None, None)]
+
+             results: List[
+                 Tuple[Optional[str], Optional[str], Optional[Dict[str, Any]]]
+             ] = []
+             for function_call in function_calls:
+                 try:
+                     parsed_json = self._parse_json_function_call(function_call)
+                     res = json.loads(parsed_json, strict=False)
+                     results.append((None, res["name"], res["arguments"]))
+                 except Exception as e:
+                     logger.error(
+                         "Can't parse single qwen tool call output: %s. Error: %s",
+                         function_call,
+                         e,
+                     )
+                     results.append((function_call, None, None))
+             return results
+
+         except Exception as e:
+             logger.error(
+                 "Can't parse qwen tool call output: %s. Error: %s",
+                 model_output,
+                 e,
+             )
+             return [(model_output, None, None)]
+
+     def _has_unclosed_tool_call(self, text: str) -> bool:
+         """
+         Check if the text has unclosed tool_call tags.
+
+         Counts the number of opening and closing tool_call tags to determine
+         if there are any unclosed tool calls in the text.
+
+         Args:
+             text (str): The text to check for unclosed tags.
+
+         Returns:
+             bool: True if there are unclosed tool_call tags.
+         """
+         if not text:
+             return True
+         start_count = text.count(self.tool_call_start_token)
+         end_count = text.count(self.tool_call_end_token)
+         return start_count > end_count
+
+     def extract_tool_calls_streaming(
+         self, previous_text: List[str], current_text: str, delta_text: str
+     ) -> Optional[Tuple[Optional[str], Optional[str], Optional[Dict[str, Any]]]]:
+         """
+         Extract tool calls from streaming output.
+
+         Processes streaming model output to detect and extract tool calls
+         as they are being generated. Handles incomplete tool calls and
+         determines when a complete tool call is available.
+
+         Args:
+             previous_text (List[str]): Previous text chunks from the stream.
+             current_text (str): Current accumulated text.
+             delta_text (str): New text delta in this chunk.
+
+         Returns:
+             Optional[Tuple[Optional[str], Optional[str], Optional[Dict[str, Any]]]]:
+                 A tuple containing:
+                 - content (str or None): text content, or None for tool calls
+                 - function_name (str or None): name of the function to call
+                 - arguments (dict or None): function arguments
+                 Returns None if no complete tool call is ready.
+
+         Note:
+             This method is designed to work with Qwen's streaming output format
+             and handles partial tool calls during generation.
+         """
+         try:
+             # Check if current output contains the tool_call start token
+             if self.is_contain_tool_call_start_token(current_text):
+                 function_calls = self._get_function_calls_streaming(current_text)
+                 # If the last function call contains thinking, it's not a tool call
+                 if self.is_contain_think(function_calls[-1]):
+                     return None
+                 # If the previous round's tool_call tags are closed, this is a new tool call
+                 if not self._has_unclosed_tool_call(previous_text[-1]):
+                     return None
+                 # Parse and return
+                 function_call = self._parse_json_function_call_stream(
+                     function_calls[-1]
+                 )
+                 if function_call is None:
+                     return None
+                 res = json.loads(function_call, strict=False)
+                 return None, res["name"], res["arguments"]
+             else:
+                 # Return delta text as regular content
+                 return (delta_text, None, None)
+
+         except Exception as e:
+             logger.error("Error in Qwen streaming tool call extraction: %s", e)
+             raise
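A sketch of how the streaming path can be driven (hypothetical caller, not part of the diff; it assumes `previous_text` holds accumulated snapshots of the stream, which is what `_has_unclosed_tool_call` on `previous_text[-1]` implies): plain text deltas pass through immediately, and a parsed call is emitted only once `</tool_call>` closes.

```python
# Hypothetical streaming driver for QwenToolParser. The chunk boundaries
# below are invented for illustration.
from xinference.model.llm.tool_parsers.qwen_tool_parser import QwenToolParser

parser = QwenToolParser()
deltas = [
    '<tool_call>\n{"name": "get_weather", ',
    '"arguments": {"location": "Beijing"}}\n</tool_call>',
]

previous, current = [""], ""
for delta in deltas:
    current += delta
    result = parser.extract_tool_calls_streaming(previous, current, delta)
    previous.append(current)
    if result is not None:
        print(result)
# First delta: returns None (the tool call is still open).
# Second delta: prints (None, 'get_weather', {'location': 'Beijing'}).
```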
xinference/model/llm/transformers/core.py
@@ -332,6 +332,7 @@ class PytorchModel(LLM):
          self.prepare_parse_reasoning_content(
              reasoning_content, enable_thinking=enable_thinking
          )
+         self.prepare_parse_tool_calls()

          logger.debug("Loading Transformers model with kwargs: %s", kwargs)

@@ -547,15 +548,13 @@ class PytorchModel(LLM):
          So we need pad `0` on the left again.
          """
          data = []
+         max_len = max(r.extra_kwargs["attention_mask_seq_len"] for r in reqs) + 1
          for r in reqs:
              r.extra_kwargs["attention_mask_seq_len"] += 1
+             real_len = r.extra_kwargs["attention_mask_seq_len"]
+             pad_len = max_len - real_len
+
              if self._tokenizer.padding_side == "left":
-                 attention_mask_seq_len = r.extra_kwargs["attention_mask_seq_len"]
-                 pad_len = seq_length - attention_mask_seq_len
-                 assert pad_len >= 0, (
-                     f"pad_len must be greater equal 0, got {pad_len} = "
-                     f"seq_length({seq_length}) - attention_mask_seq_len({attention_mask_seq_len})"
-                 )
                  x = torch.cat(
                      [
                          (
@@ -563,14 +562,10 @@ class PytorchModel(LLM):
                              if pad_len > 0
                              else torch.tensor([], dtype=torch.long)
                          ),
-                         torch.ones((attention_mask_seq_len,), dtype=torch.long),
+                         torch.ones((real_len,), dtype=torch.long),
                      ]
                  )
              else:
-                 max_len = max(r.extra_kwargs["attention_mask_seq_len"] for r in reqs)
-                 real_len = r.extra_kwargs["attention_mask_seq_len"]
-                 pad_len = max_len - real_len
-
                  x = torch.cat(
                      [
                          torch.ones((real_len,), dtype=torch.long),
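The net effect of this refactor: `max_len` is computed once for the whole batch (plus one for the newly generated token), and both padding branches share the same `real_len`/`pad_len`. A standalone sketch of the resulting mask construction (hypothetical values, outside the class; it assumes the hidden context lines pad with zeros, as the docstring's "pad `0` on the left" indicates):

```python
# Standalone sketch of the unified attention-mask padding logic.
# seq_lens stands in for each request's attention_mask_seq_len + 1.
import torch

seq_lens = [5, 3, 4]
max_len = max(seq_lens)

def build_mask(real_len: int, padding_side: str) -> torch.Tensor:
    pad_len = max_len - real_len
    pad = (
        torch.zeros((pad_len,), dtype=torch.long)
        if pad_len > 0
        else torch.tensor([], dtype=torch.long)
    )
    ones = torch.ones((real_len,), dtype=torch.long)
    # Left padding puts the zeros before the ones; right padding, after.
    parts = [pad, ones] if padding_side == "left" else [ones, pad]
    return torch.cat(parts)

batch = torch.stack([build_mask(n, "left") for n in seq_lens])
print(batch)  # shape (3, 5); shorter rows get leading zeros
```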
@@ -989,7 +984,6 @@ class PytorchChatModel(PytorchModel, ChatModelMixin):
                      self.model_family,
                      self.model_uid,
                      req.completion[0],
-                     self.reasoning_parser,
                  )
              else:
                  req.completion[0] = self._to_chat_completion(