xinference-1.5.1-py3-none-any.whl → xinference-1.6.0.post1-py3-none-any.whl

This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of xinference might be problematic.

Files changed (96)
  1. xinference/_version.py +3 -3
  2. xinference/api/restful_api.py +97 -8
  3. xinference/client/restful/restful_client.py +51 -11
  4. xinference/core/media_interface.py +758 -0
  5. xinference/core/model.py +49 -9
  6. xinference/core/worker.py +31 -37
  7. xinference/deploy/utils.py +0 -3
  8. xinference/model/audio/__init__.py +16 -27
  9. xinference/model/audio/core.py +1 -0
  10. xinference/model/audio/cosyvoice.py +4 -2
  11. xinference/model/audio/model_spec.json +20 -3
  12. xinference/model/audio/model_spec_modelscope.json +18 -1
  13. xinference/model/embedding/__init__.py +16 -24
  14. xinference/model/image/__init__.py +15 -25
  15. xinference/model/llm/__init__.py +37 -110
  16. xinference/model/llm/core.py +15 -6
  17. xinference/model/llm/llama_cpp/core.py +25 -353
  18. xinference/model/llm/llm_family.json +613 -89
  19. xinference/model/llm/llm_family.py +9 -1
  20. xinference/model/llm/llm_family_modelscope.json +540 -90
  21. xinference/model/llm/mlx/core.py +6 -3
  22. xinference/model/llm/reasoning_parser.py +281 -5
  23. xinference/model/llm/sglang/core.py +16 -3
  24. xinference/model/llm/transformers/chatglm.py +2 -2
  25. xinference/model/llm/transformers/cogagent.py +1 -1
  26. xinference/model/llm/transformers/cogvlm2.py +1 -1
  27. xinference/model/llm/transformers/core.py +9 -3
  28. xinference/model/llm/transformers/glm4v.py +1 -1
  29. xinference/model/llm/transformers/minicpmv26.py +1 -1
  30. xinference/model/llm/transformers/qwen-omni.py +6 -0
  31. xinference/model/llm/transformers/qwen_vl.py +1 -1
  32. xinference/model/llm/utils.py +68 -45
  33. xinference/model/llm/vllm/core.py +38 -18
  34. xinference/model/llm/vllm/xavier/test/test_xavier.py +1 -10
  35. xinference/model/rerank/__init__.py +13 -24
  36. xinference/model/video/__init__.py +15 -25
  37. xinference/model/video/core.py +3 -3
  38. xinference/model/video/diffusers.py +133 -16
  39. xinference/model/video/model_spec.json +54 -0
  40. xinference/model/video/model_spec_modelscope.json +56 -0
  41. xinference/thirdparty/cosyvoice/bin/average_model.py +5 -4
  42. xinference/thirdparty/cosyvoice/bin/export_jit.py +50 -20
  43. xinference/thirdparty/cosyvoice/bin/export_onnx.py +136 -51
  44. xinference/thirdparty/cosyvoice/bin/inference.py +15 -5
  45. xinference/thirdparty/cosyvoice/bin/train.py +7 -2
  46. xinference/thirdparty/cosyvoice/cli/cosyvoice.py +72 -52
  47. xinference/thirdparty/cosyvoice/cli/frontend.py +58 -58
  48. xinference/thirdparty/cosyvoice/cli/model.py +140 -155
  49. xinference/thirdparty/cosyvoice/dataset/processor.py +9 -5
  50. xinference/thirdparty/cosyvoice/flow/decoder.py +656 -54
  51. xinference/thirdparty/cosyvoice/flow/flow.py +69 -11
  52. xinference/thirdparty/cosyvoice/flow/flow_matching.py +167 -63
  53. xinference/thirdparty/cosyvoice/flow/length_regulator.py +1 -0
  54. xinference/thirdparty/cosyvoice/hifigan/discriminator.py +91 -1
  55. xinference/thirdparty/cosyvoice/hifigan/f0_predictor.py +4 -1
  56. xinference/thirdparty/cosyvoice/hifigan/generator.py +4 -1
  57. xinference/thirdparty/cosyvoice/hifigan/hifigan.py +2 -2
  58. xinference/thirdparty/cosyvoice/llm/llm.py +198 -18
  59. xinference/thirdparty/cosyvoice/transformer/embedding.py +12 -4
  60. xinference/thirdparty/cosyvoice/transformer/upsample_encoder.py +124 -21
  61. xinference/thirdparty/cosyvoice/utils/class_utils.py +13 -0
  62. xinference/thirdparty/cosyvoice/utils/common.py +1 -1
  63. xinference/thirdparty/cosyvoice/utils/file_utils.py +40 -2
  64. xinference/thirdparty/cosyvoice/utils/frontend_utils.py +7 -0
  65. xinference/thirdparty/cosyvoice/utils/mask.py +4 -0
  66. xinference/thirdparty/cosyvoice/utils/train_utils.py +5 -1
  67. xinference/thirdparty/matcha/hifigan/xutils.py +3 -3
  68. xinference/types.py +0 -71
  69. xinference/web/ui/build/asset-manifest.json +3 -3
  70. xinference/web/ui/build/index.html +1 -1
  71. xinference/web/ui/build/static/js/main.ae579a97.js +3 -0
  72. xinference/web/ui/build/static/js/main.ae579a97.js.map +1 -0
  73. xinference/web/ui/node_modules/.cache/babel-loader/0196a4b09e3264614e54360d5f832c46b31d964ec58296765ebff191ace6adbf.json +1 -0
  74. xinference/web/ui/node_modules/.cache/babel-loader/12e02ee790dbf57ead09a241a93bb5f893393aa36628ca741d44390e836a103f.json +1 -0
  75. xinference/web/ui/node_modules/.cache/babel-loader/18fa271456b31cded36c05c4c71c6b2b1cf4e4128c1e32f0e45d8b9f21764397.json +1 -0
  76. xinference/web/ui/node_modules/.cache/babel-loader/2fdc61dcb6a9d1fbcb44be592d0e87d8c3f21297a7327559ef5345665f8343f7.json +1 -0
  77. xinference/web/ui/node_modules/.cache/babel-loader/3d596a3e8dd6430d7ce81d164e32c31f8d47cfa5f725c328a298754d78563e14.json +1 -0
  78. xinference/web/ui/node_modules/.cache/babel-loader/8472e58a31720892d534f3febda31f746b25ec4aa60787eef34217b074e67965.json +1 -0
  79. xinference/web/ui/src/locales/en.json +6 -4
  80. xinference/web/ui/src/locales/zh.json +6 -4
  81. {xinference-1.5.1.dist-info → xinference-1.6.0.post1.dist-info}/METADATA +59 -39
  82. {xinference-1.5.1.dist-info → xinference-1.6.0.post1.dist-info}/RECORD +87 -87
  83. {xinference-1.5.1.dist-info → xinference-1.6.0.post1.dist-info}/WHEEL +1 -1
  84. xinference/core/image_interface.py +0 -377
  85. xinference/thirdparty/cosyvoice/bin/export_trt.sh +0 -9
  86. xinference/web/ui/build/static/js/main.91e77b5c.js +0 -3
  87. xinference/web/ui/build/static/js/main.91e77b5c.js.map +0 -1
  88. xinference/web/ui/node_modules/.cache/babel-loader/0f0adb2283a8f469d097a7a0ebb754624fa52414c83b83696c41f2e6a737ceda.json +0 -1
  89. xinference/web/ui/node_modules/.cache/babel-loader/5e6edb0fb87e3798f142e9abf8dd2dc46bab33a60d31dff525797c0c99887097.json +0 -1
  90. xinference/web/ui/node_modules/.cache/babel-loader/6087820be1bd5c02c42dff797e7df365448ef35ab26dd5d6bd33e967e05cbfd4.json +0 -1
  91. xinference/web/ui/node_modules/.cache/babel-loader/8157db83995c671eb57abc316c337f867d1dc63fb83520bb4ff351fee57dcce2.json +0 -1
  92. xinference/web/ui/node_modules/.cache/babel-loader/f04f666b77b44d7be3e16034d6b0074de2ba9c254f1fae15222b3148608fa8b3.json +0 -1
  93. /xinference/web/ui/build/static/js/{main.91e77b5c.js.LICENSE.txt → main.ae579a97.js.LICENSE.txt} +0 -0
  94. {xinference-1.5.1.dist-info → xinference-1.6.0.post1.dist-info}/entry_points.txt +0 -0
  95. {xinference-1.5.1.dist-info → xinference-1.6.0.post1.dist-info}/licenses/LICENSE +0 -0
  96. {xinference-1.5.1.dist-info → xinference-1.6.0.post1.dist-info}/top_level.txt +0 -0
xinference/core/image_interface.py (deleted)
@@ -1,377 +0,0 @@
- # Copyright 2022-2023 XProbe Inc.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import base64
- import io
- import logging
- import os
- import threading
- import time
- import uuid
- from typing import Dict, List, Optional, Union
-
- import gradio as gr
- import PIL.Image
- from gradio import Markdown
-
- from ..client.restful.restful_client import RESTfulImageModelHandle
-
- logger = logging.getLogger(__name__)
-
-
- class ImageInterface:
-     def __init__(
-         self,
-         endpoint: str,
-         model_uid: str,
-         model_family: str,
-         model_name: str,
-         model_id: str,
-         model_revision: str,
-         model_ability: List[str],
-         controlnet: Union[None, List[Dict[str, Union[str, None]]]],
-         access_token: Optional[str],
-     ):
-         self.endpoint = endpoint
-         self.model_uid = model_uid
-         self.model_family = model_family
-         self.model_name = model_name
-         self.model_id = model_id
-         self.model_revision = model_revision
-         self.model_ability = model_ability
-         self.controlnet = controlnet
-         self.access_token = (
-             access_token.replace("Bearer ", "") if access_token is not None else None
-         )
-
-     def build(self) -> gr.Blocks:
-         assert "stable_diffusion" in self.model_family
-
-         interface = self.build_main_interface()
-         interface.queue()
-         # Gradio initiates the queue during a startup event, but since the app has already been
-         # started, that event will not run, so manually invoke the startup events.
-         # See: https://github.com/gradio-app/gradio/issues/5228
-         try:
-             interface.run_startup_events()
-         except AttributeError:
-             # compatibility
-             interface.startup_events()
-         favicon_path = os.path.join(
-             os.path.dirname(os.path.abspath(__file__)),
-             os.path.pardir,
-             "web",
-             "ui",
-             "public",
-             "favicon.svg",
-         )
-         interface.favicon_path = favicon_path
-         return interface
-
-     def text2image_interface(self) -> "gr.Blocks":
-         from ..model.image.stable_diffusion.core import SAMPLING_METHODS
-
-         def text_generate_image(
-             prompt: str,
-             n: int,
-             size_width: int,
-             size_height: int,
-             guidance_scale: int,
-             num_inference_steps: int,
-             negative_prompt: Optional[str] = None,
-             sampler_name: Optional[str] = None,
-             progress=gr.Progress(),
-         ) -> PIL.Image.Image:
-             from ..client import RESTfulClient
-
-             client = RESTfulClient(self.endpoint)
-             client._set_token(self.access_token)
-             model = client.get_model(self.model_uid)
-             assert isinstance(model, RESTfulImageModelHandle)
-
-             size = f"{int(size_width)}*{int(size_height)}"
-             guidance_scale = None if guidance_scale == -1 else guidance_scale  # type: ignore
-             num_inference_steps = (
-                 None if num_inference_steps == -1 else num_inference_steps  # type: ignore
-             )
-             sampler_name = None if sampler_name == "default" else sampler_name
-
-             response = None
-             exc = None
-             request_id = str(uuid.uuid4())
-
-             def run_in_thread():
-                 nonlocal exc, response
-                 try:
-                     response = model.text_to_image(
-                         request_id=request_id,
-                         prompt=prompt,
-                         n=n,
-                         size=size,
-                         num_inference_steps=num_inference_steps,
-                         guidance_scale=guidance_scale,
-                         negative_prompt=negative_prompt,
-                         sampler_name=sampler_name,
-                         response_format="b64_json",
-                     )
-                 except Exception as e:
-                     exc = e
-
-             t = threading.Thread(target=run_in_thread)
-             t.start()
-             while t.is_alive():
-                 try:
-                     cur_progress = client.get_progress(request_id)["progress"]
-                 except (KeyError, RuntimeError):
-                     cur_progress = 0.0
-
-                 progress(cur_progress, desc="Generating images")
-                 time.sleep(1)
-
-             if exc:
-                 raise exc
-
-             images = []
-             for image_dict in response["data"]:  # type: ignore
-                 assert image_dict["b64_json"] is not None
-                 image_data = base64.b64decode(image_dict["b64_json"])
-                 image = PIL.Image.open(io.BytesIO(image_data))
-                 images.append(image)
-
-             return images
-
-         with gr.Blocks() as text2image_vl_interface:
-             with gr.Column():
-                 with gr.Row():
-                     with gr.Column(scale=10):
-                         prompt = gr.Textbox(
-                             label="Prompt",
-                             show_label=True,
-                             placeholder="Enter prompt here...",
-                         )
-                         negative_prompt = gr.Textbox(
-                             label="Negative prompt",
-                             show_label=True,
-                             placeholder="Enter negative prompt here...",
-                         )
-                     with gr.Column(scale=1):
-                         generate_button = gr.Button("Generate")
-
-                 with gr.Row():
-                     n = gr.Number(label="Number of Images", value=1)
-                     size_width = gr.Number(label="Width", value=1024)
-                     size_height = gr.Number(label="Height", value=1024)
-                 with gr.Row():
-                     guidance_scale = gr.Number(label="Guidance scale", value=-1)
-                     num_inference_steps = gr.Number(
-                         label="Inference Step Number", value=-1
-                     )
-                     sampler_name = gr.Dropdown(
-                         choices=SAMPLING_METHODS,
-                         value="default",
-                         label="Sampling method",
-                     )
-
-             with gr.Column():
-                 image_output = gr.Gallery()
-
-             generate_button.click(
-                 text_generate_image,
-                 inputs=[
-                     prompt,
-                     n,
-                     size_width,
-                     size_height,
-                     guidance_scale,
-                     num_inference_steps,
-                     negative_prompt,
-                     sampler_name,
-                 ],
-                 outputs=image_output,
-             )
-
-         return text2image_vl_interface
-
-     def image2image_interface(self) -> "gr.Blocks":
-         from ..model.image.stable_diffusion.core import SAMPLING_METHODS
-
-         def image_generate_image(
-             prompt: str,
-             negative_prompt: str,
-             image: PIL.Image.Image,
-             n: int,
-             size_width: int,
-             size_height: int,
-             num_inference_steps: int,
-             padding_image_to_multiple: int,
-             sampler_name: Optional[str] = None,
-             progress=gr.Progress(),
-         ) -> PIL.Image.Image:
-             from ..client import RESTfulClient
-
-             client = RESTfulClient(self.endpoint)
-             client._set_token(self.access_token)
-             model = client.get_model(self.model_uid)
-             assert isinstance(model, RESTfulImageModelHandle)
-
-             if size_width > 0 and size_height > 0:
-                 size = f"{int(size_width)}*{int(size_height)}"
-             else:
-                 size = None
-             num_inference_steps = (
-                 None if num_inference_steps == -1 else num_inference_steps  # type: ignore
-             )
-             padding_image_to_multiple = None if padding_image_to_multiple == -1 else padding_image_to_multiple  # type: ignore
-             sampler_name = None if sampler_name == "default" else sampler_name
-
-             bio = io.BytesIO()
-             image.save(bio, format="png")
-
-             response = None
-             exc = None
-             request_id = str(uuid.uuid4())
-
-             def run_in_thread():
-                 nonlocal exc, response
-                 try:
-                     response = model.image_to_image(
-                         request_id=request_id,
-                         prompt=prompt,
-                         negative_prompt=negative_prompt,
-                         n=n,
-                         image=bio.getvalue(),
-                         size=size,
-                         response_format="b64_json",
-                         num_inference_steps=num_inference_steps,
-                         padding_image_to_multiple=padding_image_to_multiple,
-                         sampler_name=sampler_name,
-                     )
-                 except Exception as e:
-                     exc = e
-
-             t = threading.Thread(target=run_in_thread)
-             t.start()
-             while t.is_alive():
-                 try:
-                     cur_progress = client.get_progress(request_id)["progress"]
-                 except (KeyError, RuntimeError):
-                     cur_progress = 0.0
-
-                 progress(cur_progress, desc="Generating images")
-                 time.sleep(1)
-
-             if exc:
-                 raise exc
-
-             images = []
-             for image_dict in response["data"]:  # type: ignore
-                 assert image_dict["b64_json"] is not None
-                 image_data = base64.b64decode(image_dict["b64_json"])
-                 image = PIL.Image.open(io.BytesIO(image_data))
-                 images.append(image)
-
-             return images
-
-         with gr.Blocks() as image2image_inteface:
-             with gr.Column():
-                 with gr.Row():
-                     with gr.Column(scale=10):
-                         prompt = gr.Textbox(
-                             label="Prompt",
-                             show_label=True,
-                             placeholder="Enter prompt here...",
-                         )
-                         negative_prompt = gr.Textbox(
-                             label="Negative Prompt",
-                             show_label=True,
-                             placeholder="Enter negative prompt here...",
-                         )
-                     with gr.Column(scale=1):
-                         generate_button = gr.Button("Generate")
-
-                 with gr.Row():
-                     n = gr.Number(label="Number of image", value=1)
-                     size_width = gr.Number(label="Width", value=-1)
-                     size_height = gr.Number(label="Height", value=-1)
-
-                 with gr.Row():
-                     num_inference_steps = gr.Number(
-                         label="Inference Step Number", value=-1
-                     )
-                     padding_image_to_multiple = gr.Number(
-                         label="Padding image to multiple", value=-1
-                     )
-                     sampler_name = gr.Dropdown(
-                         choices=SAMPLING_METHODS,
-                         value="default",
-                         label="Sampling method",
-                     )
-
-             with gr.Row():
-                 with gr.Column(scale=1):
-                     uploaded_image = gr.Image(type="pil", label="Upload Image")
-                 with gr.Column(scale=1):
-                     output_gallery = gr.Gallery()
-
-             generate_button.click(
-                 image_generate_image,
-                 inputs=[
-                     prompt,
-                     negative_prompt,
-                     uploaded_image,
-                     n,
-                     size_width,
-                     size_height,
-                     num_inference_steps,
-                     padding_image_to_multiple,
-                     sampler_name,
-                 ],
-                 outputs=output_gallery,
-             )
-         return image2image_inteface
-
-     def build_main_interface(self) -> "gr.Blocks":
-         with gr.Blocks(
-             title=f"🎨 Xinference Stable Diffusion: {self.model_name} 🎨",
-             css="""
-             .center{
-                 display: flex;
-                 justify-content: center;
-                 align-items: center;
-                 padding: 0px;
-                 color: #9ea4b0 !important;
-             }
-             """,
-             analytics_enabled=False,
-         ) as app:
-             Markdown(
-                 f"""
-                 <h1 class="center" style='text-align: center; margin-bottom: 1rem'>🎨 Xinference Stable Diffusion: {self.model_name} 🎨</h1>
-                 """
-             )
-             Markdown(
-                 f"""
-                 <div class="center">
-                 Model ID: {self.model_uid}
-                 </div>
-                 """
-             )
-             if "text2image" in self.model_ability:
-                 with gr.Tab("Text to Image"):
-                     self.text2image_interface()
-             if "image2image" in self.model_ability:
-                 with gr.Tab("Image to Image"):
-                     self.image2image_interface()
-
-         return app
xinference/thirdparty/cosyvoice/bin/export_trt.sh (deleted)
@@ -1,9 +0,0 @@
- #!/bin/bash
- # Copyright 2024 Alibaba Inc. All Rights Reserved.
- # download tensorrt from https://developer.nvidia.com/tensorrt/download/10x, check your system and cuda for compatibability
- # for example for linux + cuda12.4, you can download https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.0.1/tars/TensorRT-10.0.1.6.Linux.x86_64-gnu.cuda-12.4.tar.gz
- TRT_DIR=<YOUR_TRT_DIR>
- MODEL_DIR=<COSYVOICE2_MODEL_DIR>
-
- export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$TRT_DIR/lib:/usr/local/cuda/lib64
- $TRT_DIR/bin/trtexec --onnx=$MODEL_DIR/flow.decoder.estimator.fp32.onnx --saveEngine=$MODEL_DIR/flow.decoder.estimator.fp16.mygpu.plan --fp16 --minShapes=x:2x80x4,mask:2x1x4,mu:2x80x4,cond:2x80x4 --optShapes=x:2x80x193,mask:2x1x193,mu:2x80x193,cond:2x80x193 --maxShapes=x:2x80x6800,mask:2x1x6800,mu:2x80x6800,cond:2x80x6800 --inputIOFormats=fp16:chw,fp16:chw,fp16:chw,fp16:chw,fp16:chw,fp16:chw --outputIOFormats=fp16:chw