xinference 0.16.3__py3-none-any.whl → 1.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- xinference/_compat.py +22 -2
- xinference/_version.py +3 -3
- xinference/api/restful_api.py +148 -12
- xinference/client/restful/restful_client.py +47 -2
- xinference/constants.py +1 -0
- xinference/core/model.py +45 -15
- xinference/core/supervisor.py +8 -2
- xinference/core/utils.py +67 -2
- xinference/model/audio/__init__.py +12 -0
- xinference/model/audio/core.py +21 -4
- xinference/model/audio/fish_speech.py +70 -35
- xinference/model/audio/model_spec.json +81 -1
- xinference/model/audio/whisper_mlx.py +208 -0
- xinference/model/embedding/core.py +259 -4
- xinference/model/embedding/model_spec.json +1 -1
- xinference/model/embedding/model_spec_modelscope.json +1 -1
- xinference/model/image/stable_diffusion/core.py +5 -2
- xinference/model/llm/__init__.py +2 -0
- xinference/model/llm/llm_family.json +485 -6
- xinference/model/llm/llm_family_modelscope.json +519 -0
- xinference/model/llm/mlx/core.py +45 -3
- xinference/model/llm/sglang/core.py +1 -0
- xinference/model/llm/transformers/core.py +1 -0
- xinference/model/llm/transformers/glm_edge_v.py +230 -0
- xinference/model/llm/utils.py +19 -0
- xinference/model/llm/vllm/core.py +84 -2
- xinference/model/rerank/core.py +11 -4
- xinference/thirdparty/fish_speech/fish_speech/conversation.py +254 -0
- xinference/thirdparty/fish_speech/fish_speech/i18n/locale/en_US.json +2 -1
- xinference/thirdparty/fish_speech/fish_speech/i18n/locale/es_ES.json +2 -1
- xinference/thirdparty/fish_speech/fish_speech/i18n/locale/ja_JP.json +2 -2
- xinference/thirdparty/fish_speech/fish_speech/i18n/locale/ko_KR.json +123 -0
- xinference/thirdparty/fish_speech/fish_speech/i18n/locale/zh_CN.json +2 -1
- xinference/thirdparty/fish_speech/fish_speech/models/text2semantic/llama.py +76 -11
- xinference/thirdparty/fish_speech/fish_speech/models/vqgan/modules/firefly.py +9 -9
- xinference/thirdparty/fish_speech/fish_speech/models/vqgan/modules/fsq.py +1 -1
- xinference/thirdparty/fish_speech/fish_speech/text/clean.py +32 -1
- xinference/thirdparty/fish_speech/fish_speech/utils/__init__.py +2 -1
- xinference/thirdparty/fish_speech/fish_speech/utils/utils.py +22 -0
- xinference/thirdparty/fish_speech/fish_speech/webui/launch_utils.py +1 -1
- xinference/thirdparty/fish_speech/fish_speech/webui/manage.py +1 -1
- xinference/thirdparty/fish_speech/tools/api.py +578 -75
- xinference/thirdparty/fish_speech/tools/e2e_webui.py +232 -0
- xinference/thirdparty/fish_speech/tools/fish_e2e.py +298 -0
- xinference/thirdparty/fish_speech/tools/llama/generate.py +393 -9
- xinference/thirdparty/fish_speech/tools/msgpack_api.py +90 -29
- xinference/thirdparty/fish_speech/tools/post_api.py +37 -15
- xinference/thirdparty/fish_speech/tools/schema.py +187 -0
- xinference/thirdparty/fish_speech/tools/vqgan/extract_vq.py +7 -1
- xinference/thirdparty/fish_speech/tools/vqgan/inference.py +2 -3
- xinference/thirdparty/fish_speech/tools/webui.py +138 -75
- xinference/types.py +2 -1
- {xinference-0.16.3.dist-info → xinference-1.0.1.dist-info}/METADATA +30 -6
- {xinference-0.16.3.dist-info → xinference-1.0.1.dist-info}/RECORD +58 -63
- {xinference-0.16.3.dist-info → xinference-1.0.1.dist-info}/WHEEL +1 -1
- xinference/thirdparty/fish_speech/fish_speech/configs/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/configs/lora/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/datasets/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/datasets/protos/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/i18n/locale/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/models/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/models/vqgan/modules/__init__.py +0 -0
- xinference/thirdparty/fish_speech/fish_speech/webui/__init__.py +0 -0
- xinference/thirdparty/fish_speech/tools/commons.py +0 -35
- xinference/thirdparty/fish_speech/tools/llama/__init__.py +0 -0
- xinference/thirdparty/fish_speech/tools/vqgan/__init__.py +0 -0
- {xinference-0.16.3.dist-info → xinference-1.0.1.dist-info}/LICENSE +0 -0
- {xinference-0.16.3.dist-info → xinference-1.0.1.dist-info}/entry_points.txt +0 -0
- {xinference-0.16.3.dist-info → xinference-1.0.1.dist-info}/top_level.txt +0 -0
xinference/model/embedding/core.py
CHANGED
@@ -21,6 +21,7 @@ from typing import Dict, List, Literal, Optional, Tuple, Union, no_type_check
 import numpy as np
 import torch
 
+from ..._compat import ROOT_KEY, ErrorWrapper, ValidationError
 from ...device_utils import empty_cache
 from ...types import Embedding, EmbeddingData, EmbeddingUsage
 from ..core import CacheableModelSpec, ModelDescription
@@ -193,6 +194,27 @@ class EmbeddingModel:
                 device=self._device,
                 model_kwargs=model_kwargs,
             )
+        elif (
+            self._kwargs.get("hybrid_mode")
+            and "m3" in self._model_spec.model_name.lower()
+        ):
+            try:
+                from FlagEmbedding import BGEM3FlagModel
+            except ImportError:
+                error_message = "Failed to import module 'BGEM3FlagModel'"
+                installation_guide = [
+                    "Please make sure 'FlagEmbedding' is installed. ",
+                    "You can install it by `pip install FlagEmbedding`\n",
+                ]
+                raise ImportError(f"{error_message}\n\n{''.join(installation_guide)}")
+
+            model_kwargs = {"torch_dtype": torch_dtype} if torch_dtype else None
+            self._model = BGEM3FlagModel(
+                self._model_path,
+                device=self._device,
+                model_kwargs=model_kwargs,
+                trust_remote_code=True,
+            )
         else:
             model_kwargs = {"torch_dtype": torch_dtype} if torch_dtype else None
             self._model = SentenceTransformer(
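Note: the new `hybrid_mode` branch loads bge-m3 style models through FlagEmbedding's BGEM3FlagModel instead of SentenceTransformer, which is what enables the sparse (lexical-weight) output added further down. A minimal usage sketch, assuming a locally running server; the `hybrid_mode` and `return_sparse` keyword names come from this diff, everything else is illustrative:

from xinference.client import Client

client = Client("http://localhost:9997")  # assumed default endpoint
# Extra launch kwargs are forwarded into the embedding model's _kwargs,
# so hybrid_mode=True reaches the branch above.
uid = client.launch_model(
    model_name="bge-m3", model_type="embedding", hybrid_mode=True
)
model = client.get_model(uid)
# return_sparse is consumed by create_embedding (see the hunks below).
sparse = model.create_embedding("What is BGE M3?", return_sparse=True)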
@@ -202,11 +224,192 @@ class EmbeddingModel:
                 trust_remote_code=True,
             )
 
+    def _fix_langchain_openai_inputs(self, sentences: Union[str, List[str]]):
+        # Check if sentences is a two-dimensional list of integers
+        if (
+            isinstance(sentences, list)
+            and len(sentences) > 0
+            and isinstance(sentences[0], list)
+            and len(sentences[0]) > 0
+            and isinstance(sentences[0][0], int)
+        ):
+            # List[List[int]] stands for encoded inputs
+            import tiktoken
+
+            enc = tiktoken.get_encoding("cl100k_base")
+            lines_decoded = []
+
+            for line in sentences:
+                try:
+                    # Decode each token into bytes, then join them into a complete string
+                    output = b"".join(
+                        enc.decode_single_token_bytes(token) for token in line
+                    )
+                    # Convert the byte sequence into a UTF-8 encoded string
+                    decoded_line = output.decode("utf-8")
+                    lines_decoded.append(decoded_line)
+                except (ValueError, TypeError, UnicodeDecodeError) as e:
+                    raise ValidationError([ErrorWrapper(e, loc=ROOT_KEY)], self)
+
+            # Update sentences to be the list of decoded strings
+            if len(lines_decoded) == 1:
+                sentences = lines_decoded[0]
+            else:
+                sentences = lines_decoded
+        return sentences
+
     def create_embedding(self, sentences: Union[str, List[str]], **kwargs):
+        sentences = self._fix_langchain_openai_inputs(sentences)
+
+        from FlagEmbedding import BGEM3FlagModel
         from sentence_transformers import SentenceTransformer
 
         kwargs.setdefault("normalize_embeddings", True)
 
+        @no_type_check
+        def _encode_bgem3(
+            model: Union[SentenceTransformer, BGEM3FlagModel],
+            sentences: Union[str, List[str]],
+            batch_size: int = 32,
+            show_progress_bar: bool = None,
+            output_value: str = "sparse_embedding",
+            convert_to_numpy: bool = True,
+            convert_to_tensor: bool = False,
+            device: str = None,
+            normalize_embeddings: bool = False,
+            **kwargs,
+        ):
+            """
+            Computes sentence embeddings with bge-m3 model
+            Nothing special here, just replace sentence-transformer with FlagEmbedding
+            TODO: think about how to solve the redundant code of encode method in the future
+
+            :param sentences: the sentences to embed
+            :param batch_size: the batch size used for the computation
+            :param show_progress_bar: Output a progress bar when encode sentences
+            :param output_value: Default sentence_embedding, to get sentence embeddings. Can be set to token_embeddings to get wordpiece token embeddings. Set to None, to get all output values
+            :param convert_to_numpy: If true, the output is a list of numpy vectors. Else, it is a list of pytorch tensors.
+            :param convert_to_tensor: If true, you get one large tensor as return. Overwrites any setting from convert_to_numpy
+            :param device: Which torch.device to use for the computation
+            :param normalize_embeddings: If set to true, returned vectors will have length 1. In that case, the faster dot-product (util.dot_score) instead of cosine similarity can be used.
+
+            :return:
+                By default, a list of tensors is returned. If convert_to_tensor, a stacked tensor is returned. If convert_to_numpy, a numpy matrix is returned.
+            """
+            import torch
+            from tqdm.autonotebook import trange
+
+            if show_progress_bar is None:
+                show_progress_bar = (
+                    logger.getEffectiveLevel() == logging.INFO
+                    or logger.getEffectiveLevel() == logging.DEBUG
+                )
+
+            if convert_to_tensor:
+                convert_to_numpy = False
+
+            if output_value != "sparse_embedding":
+                convert_to_tensor = False
+                convert_to_numpy = False
+
+            input_was_string = False
+            if isinstance(sentences, str) or not hasattr(
+                sentences, "__len__"
+            ):  # Cast an individual sentence to a list with length 1
+                sentences = [sentences]
+                input_was_string = True
+
+            if device is None:
+                # Same as SentenceTransformer.py
+                from sentence_transformers.util import get_device_name
+
+                device = get_device_name()
+                logger.info(f"Use pytorch device_name: {device}")
+
+            all_embeddings = []
+            all_token_nums = 0
+
+            # The original code does not support other inference engines
+            def _text_length(text):
+                if isinstance(text, dict):  # {key: value} case
+                    return len(next(iter(text.values())))
+                elif not hasattr(text, "__len__"):  # Object has no len() method
+                    return 1
+                elif len(text) == 0 or isinstance(
+                    text[0], int
+                ):  # Empty string or list of ints
+                    return len(text)
+                else:
+                    return sum(
+                        [len(t) for t in text]
+                    )  # Sum of length of individual strings
+
+            length_sorted_idx = np.argsort([-_text_length(sen) for sen in sentences])
+            sentences_sorted = [sentences[idx] for idx in length_sorted_idx]
+
+            for start_index in trange(
+                0,
+                len(sentences),
+                batch_size,
+                desc="Batches",
+                disable=not show_progress_bar,
+            ):
+                sentences_batch = sentences_sorted[
+                    start_index : start_index + batch_size
+                ]
+
+                with torch.no_grad():
+                    out_features = model.encode(sentences_batch, **kwargs)
+
+                    if output_value == "token_embeddings":
+                        embeddings = []
+                        for token_emb, attention in zip(
+                            out_features[output_value], out_features["attention_mask"]
+                        ):
+                            last_mask_id = len(attention) - 1
+                            while (
+                                last_mask_id > 0 and attention[last_mask_id].item() == 0
+                            ):
+                                last_mask_id -= 1
+
+                            embeddings.append(token_emb[0 : last_mask_id + 1])
+                    elif output_value is None:  # Return all outputs
+                        embeddings = []
+                        for sent_idx in range(len(out_features["sentence_embedding"])):
+                            row = {
+                                name: out_features[name][sent_idx]
+                                for name in out_features
+                            }
+                            embeddings.append(row)
+                    # for sparse embedding
+                    else:
+                        if kwargs.get("return_sparse"):
+                            embeddings = out_features["lexical_weights"]
+                        else:
+                            embeddings = out_features["dense_vecs"]
+
+                        if convert_to_numpy:
+                            embeddings = embeddings.cpu()
+
+                    all_embeddings.extend(embeddings)
+
+            all_embeddings = [
+                all_embeddings[idx] for idx in np.argsort(length_sorted_idx)
+            ]
+
+            if convert_to_tensor:
+                if len(all_embeddings):
+                    all_embeddings = torch.stack(all_embeddings)
+                else:
+                    all_embeddings = torch.Tensor()
+            elif convert_to_numpy:
+                all_embeddings = np.asarray([emb.numpy() for emb in all_embeddings])
+
+            if input_was_string:
+                all_embeddings = all_embeddings[0]
+
+            return all_embeddings, all_token_nums
+
     # copied from sentence-transformers, and modify it to return tokens num
     @no_type_check
     def encode(
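Note: `_fix_langchain_openai_inputs` exists because OpenAI-compatible clients (notably langchain's embeddings wrapper) may send inputs pre-tokenized as List[List[int]] in the cl100k_base encoding, and the server now decodes them back to text before embedding. The byte-level join matters because a single multi-byte character can span several tokens. A self-contained sketch of the round trip (the sentences are illustrative):

import tiktoken

enc = tiktoken.get_encoding("cl100k_base")
# What an OpenAI-style client may actually send: token ids, not text.
encoded = [enc.encode("What is BGE M3?"), enc.encode("多语言嵌入")]

decoded = []
for line in encoded:
    # Join per-token byte sequences first, then decode as UTF-8,
    # since one character's bytes can be split across tokens.
    raw = b"".join(enc.decode_single_token_bytes(tok) for tok in line)
    decoded.append(raw.decode("utf-8"))

print(decoded)  # ['What is BGE M3?', '多语言嵌入']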
@@ -390,6 +593,10 @@ class EmbeddingModel:
                 convert_to_numpy=False,
                 **kwargs,
             )
+        elif isinstance(self._model, BGEM3FlagModel):
+            all_embeddings, all_token_nums = _encode_bgem3(
+                self._model, sentences, convert_to_numpy=False, **kwargs
+            )
         else:
             all_embeddings, all_token_nums = encode(
                 self._model,
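Note: `_encode_bgem3` above is essentially the sentence-transformers batching loop with the inner call delegated to FlagEmbedding, whose encode returns a dict rather than a tensor. A sketch of the FlagEmbedding API this branch relies on (model id and inputs are illustrative):

from FlagEmbedding import BGEM3FlagModel

model = BGEM3FlagModel("BAAI/bge-m3")  # downloads weights on first use
out = model.encode(
    ["What is BGE M3?", "BM25 is a sparse ranking function."],
    return_dense=True,
    return_sparse=True,   # fills "lexical_weights"
)
out["dense_vecs"]       # one dense vector per sentence
out["lexical_weights"]  # one {token_id: weight} mapping per sentence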
@@ -401,14 +608,30 @@ class EmbeddingModel:
             all_embeddings = [all_embeddings]
         embedding_list = []
         for index, data in enumerate(all_embeddings):
-            embedding_list.append(
-                EmbeddingData(index=index, object="embedding", embedding=data.tolist())
-            )
+            if kwargs.get("return_sparse") and isinstance(self._model, BGEM3FlagModel):
+                embedding_list.append(
+                    EmbeddingData(
+                        index=index,
+                        object="embedding",
+                        embedding={k: float(v) for k, v in data.items()},
+                    )
+                )
+            else:
+                embedding_list.append(
+                    EmbeddingData(
+                        index=index, object="embedding", embedding=data.tolist()
+                    )
+                )
         usage = EmbeddingUsage(
             prompt_tokens=all_token_nums, total_tokens=all_token_nums
         )
         result = Embedding(
-            object="list",  # type: ignore
+            object=(
+                "list"  # type: ignore
+                if not isinstance(self._model, BGEM3FlagModel)
+                and not kwargs.get("return_sparse")
+                else "dict"
+            ),
             model=self._model_uid,
             data=embedding_list,
             usage=usage,
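Note: the net effect on the response shape is that, with a bge-m3 model and `return_sparse=True`, the top-level `object` switches from "list" to "dict" and each embedding becomes a token-to-weight map. An illustrative (hand-written, not captured from a real run) comparison of the two payloads:

# Dense, the default: object="list", embedding is a float vector.
{"object": "list",
 "data": [{"index": 0, "object": "embedding", "embedding": [0.012, -0.034]}]}

# Sparse via return_sparse=True: object="dict",
# embedding maps token ids to lexical weights.
{"object": "dict",
 "data": [{"index": 0, "object": "embedding",
           "embedding": {"1274": 0.21, "9032": 0.08}}]}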
@@ -430,6 +653,38 @@ class EmbeddingModel:
 
         return result
 
+    def convert_ids_to_tokens(
+        self,
+        batch_token_ids: Union[List[Union[int, str]], List[List[Union[int, str]]]],
+        **kwargs,
+    ) -> Union[List[str]]:
+        batch_decoded_texts: List[str] = []
+
+        assert self._model is not None
+
+        if isinstance(batch_token_ids, (int, str)):
+            return self._model.tokenizer.convert_ids_to_tokens(
+                [int(str(batch_token_ids))]
+            )[0]
+
+        # check if it's a nested list
+        if (
+            isinstance(batch_token_ids, list)
+            and batch_token_ids
+            and isinstance(batch_token_ids[0], list)
+        ):
+            for token_ids in batch_token_ids:
+                token_ids = [int(token_id) for token_id in token_ids]
+                batch_decoded_texts.append(
+                    self._model.tokenizer.convert_ids_to_tokens(token_ids)
+                )
+        else:
+            batch_token_ids = [int(token_id) for token_id in batch_token_ids]
+            batch_decoded_texts = self._model.tokenizer.convert_ids_to_tokens(
+                batch_token_ids
+            )
+        return batch_decoded_texts
+
 
 def match_embedding(
     model_name: str,
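Note: the new `convert_ids_to_tokens` helper just normalizes its input shape and forwards to the underlying Hugging Face tokenizer (a SentenceTransformer exposes one as `.tokenizer`). The equivalent standalone call, assuming an illustrative model id:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("BAAI/bge-m3")  # illustrative model
ids = tok.encode("What is BGE M3?", add_special_tokens=False)
print(tok.convert_ids_to_tokens(ids))
# Prints token strings (e.g. sentencepiece pieces); exact output depends on the tokenizer.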
xinference/model/image/stable_diffusion/core.py
CHANGED
@@ -17,9 +17,11 @@ import gc
 import inspect
 import itertools
 import logging
+import os
 import re
 import sys
 import warnings
+from glob import glob
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
 
 import PIL.Image
@@ -194,8 +196,9 @@ class DiffusionModel(SDAPIDiffusionModelMixin):
         if sys.platform != "darwin" and torch_dtype is None:
             # The following params crashes on Mac M2
             self._torch_dtype = self._kwargs["torch_dtype"] = torch.float16
-            self._kwargs["use_safetensors"] = True
-
+            self._kwargs["use_safetensors"] = any(
+                glob(os.path.join(self._model_path, "*/*.safetensors"))
+            )
         if isinstance(torch_dtype, str):
             self._kwargs["torch_dtype"] = getattr(torch, torch_dtype)
 
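Note: with `use_safetensors` hard-coded to True, diffusers refuses to load checkpoints that ship only `.bin` weights; the new code enables safetensors only when at least one `*.safetensors` file actually exists in a component subdirectory (`unet/`, `vae/`, ...). The detection idiom in isolation, with an illustrative path:

import os
from glob import glob

model_path = "/path/to/stable-diffusion"  # illustrative
# diffusers checkpoints keep weights one directory level down, per component.
use_safetensors = any(glob(os.path.join(model_path, "*/*.safetensors")))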
xinference/model/llm/__init__.py
CHANGED
@@ -143,6 +143,7 @@ def _install():
     )
     from .transformers.deepseek_vl import DeepSeekVLChatModel
     from .transformers.glm4v import Glm4VModel
+    from .transformers.glm_edge_v import GlmEdgeVModel
     from .transformers.intern_vl import InternVLChatModel
     from .transformers.internlm2 import Internlm2PytorchChatModel
     from .transformers.minicpmv25 import MiniCPMV25Model
@@ -193,6 +194,7 @@ def _install():
             DeepSeekV2PytorchModel,
             DeepSeekV2PytorchChatModel,
             OptPytorchModel,
+            GlmEdgeVModel,
         ]
     )
     if OmniLMMModel:  # type: ignore