xinference 1.4.0__py3-none-any.whl → 1.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of xinference might be problematic.
- xinference/_compat.py +1 -0
- xinference/_version.py +3 -3
- xinference/api/restful_api.py +54 -1
- xinference/client/restful/restful_client.py +82 -2
- xinference/constants.py +3 -0
- xinference/core/chat_interface.py +297 -83
- xinference/core/model.py +24 -3
- xinference/core/progress_tracker.py +16 -8
- xinference/core/supervisor.py +51 -1
- xinference/core/worker.py +315 -47
- xinference/deploy/cmdline.py +33 -1
- xinference/model/audio/core.py +11 -1
- xinference/model/audio/megatts.py +105 -0
- xinference/model/audio/model_spec.json +24 -1
- xinference/model/audio/model_spec_modelscope.json +26 -1
- xinference/model/core.py +14 -0
- xinference/model/embedding/core.py +6 -1
- xinference/model/flexible/core.py +6 -1
- xinference/model/image/core.py +6 -1
- xinference/model/image/model_spec.json +17 -1
- xinference/model/image/model_spec_modelscope.json +17 -1
- xinference/model/llm/__init__.py +4 -6
- xinference/model/llm/core.py +5 -0
- xinference/model/llm/llama_cpp/core.py +46 -17
- xinference/model/llm/llm_family.json +530 -85
- xinference/model/llm/llm_family.py +24 -1
- xinference/model/llm/llm_family_modelscope.json +572 -1
- xinference/model/llm/mlx/core.py +16 -2
- xinference/model/llm/reasoning_parser.py +3 -3
- xinference/model/llm/sglang/core.py +111 -13
- xinference/model/llm/transformers/__init__.py +14 -0
- xinference/model/llm/transformers/core.py +31 -6
- xinference/model/llm/transformers/deepseek_vl.py +1 -1
- xinference/model/llm/transformers/deepseek_vl2.py +287 -0
- xinference/model/llm/transformers/gemma3.py +17 -2
- xinference/model/llm/transformers/intern_vl.py +28 -18
- xinference/model/llm/transformers/minicpmv26.py +21 -2
- xinference/model/llm/transformers/qwen-omni.py +308 -0
- xinference/model/llm/transformers/qwen2_audio.py +1 -1
- xinference/model/llm/transformers/qwen2_vl.py +20 -4
- xinference/model/llm/utils.py +37 -15
- xinference/model/llm/vllm/core.py +184 -8
- xinference/model/llm/vllm/distributed_executor.py +320 -0
- xinference/model/rerank/core.py +22 -12
- xinference/model/utils.py +118 -1
- xinference/model/video/core.py +6 -1
- xinference/thirdparty/deepseek_vl2/__init__.py +31 -0
- xinference/thirdparty/deepseek_vl2/models/__init__.py +26 -0
- xinference/thirdparty/deepseek_vl2/models/configuration_deepseek.py +210 -0
- xinference/thirdparty/deepseek_vl2/models/conversation.py +310 -0
- xinference/thirdparty/deepseek_vl2/models/modeling_deepseek.py +1975 -0
- xinference/thirdparty/deepseek_vl2/models/modeling_deepseek_vl_v2.py +697 -0
- xinference/thirdparty/deepseek_vl2/models/processing_deepseek_vl_v2.py +675 -0
- xinference/thirdparty/deepseek_vl2/models/siglip_vit.py +661 -0
- xinference/thirdparty/deepseek_vl2/serve/__init__.py +0 -0
- xinference/thirdparty/deepseek_vl2/serve/app_modules/__init__.py +0 -0
- xinference/thirdparty/deepseek_vl2/serve/app_modules/gradio_utils.py +83 -0
- xinference/thirdparty/deepseek_vl2/serve/app_modules/overwrites.py +81 -0
- xinference/thirdparty/deepseek_vl2/serve/app_modules/presets.py +115 -0
- xinference/thirdparty/deepseek_vl2/serve/app_modules/utils.py +333 -0
- xinference/thirdparty/deepseek_vl2/serve/assets/Kelpy-Codos.js +100 -0
- xinference/thirdparty/deepseek_vl2/serve/assets/avatar.png +0 -0
- xinference/thirdparty/deepseek_vl2/serve/assets/custom.css +355 -0
- xinference/thirdparty/deepseek_vl2/serve/assets/custom.js +22 -0
- xinference/thirdparty/deepseek_vl2/serve/assets/favicon.ico +0 -0
- xinference/thirdparty/deepseek_vl2/serve/assets/simsun.ttc +0 -0
- xinference/thirdparty/deepseek_vl2/serve/inference.py +197 -0
- xinference/thirdparty/deepseek_vl2/utils/__init__.py +18 -0
- xinference/thirdparty/deepseek_vl2/utils/io.py +80 -0
- xinference/thirdparty/megatts3/__init__.py +0 -0
- xinference/thirdparty/megatts3/tts/frontend_function.py +175 -0
- xinference/thirdparty/megatts3/tts/gradio_api.py +93 -0
- xinference/thirdparty/megatts3/tts/infer_cli.py +277 -0
- xinference/thirdparty/megatts3/tts/modules/aligner/whisper_small.py +318 -0
- xinference/thirdparty/megatts3/tts/modules/ar_dur/ar_dur_predictor.py +362 -0
- xinference/thirdparty/megatts3/tts/modules/ar_dur/commons/layers.py +64 -0
- xinference/thirdparty/megatts3/tts/modules/ar_dur/commons/nar_tts_modules.py +73 -0
- xinference/thirdparty/megatts3/tts/modules/ar_dur/commons/rel_transformer.py +403 -0
- xinference/thirdparty/megatts3/tts/modules/ar_dur/commons/rot_transformer.py +649 -0
- xinference/thirdparty/megatts3/tts/modules/ar_dur/commons/seq_utils.py +342 -0
- xinference/thirdparty/megatts3/tts/modules/ar_dur/commons/transformer.py +767 -0
- xinference/thirdparty/megatts3/tts/modules/llm_dit/cfm.py +309 -0
- xinference/thirdparty/megatts3/tts/modules/llm_dit/dit.py +180 -0
- xinference/thirdparty/megatts3/tts/modules/llm_dit/time_embedding.py +44 -0
- xinference/thirdparty/megatts3/tts/modules/llm_dit/transformer.py +230 -0
- xinference/thirdparty/megatts3/tts/modules/wavvae/decoder/diag_gaussian.py +67 -0
- xinference/thirdparty/megatts3/tts/modules/wavvae/decoder/hifigan_modules.py +283 -0
- xinference/thirdparty/megatts3/tts/modules/wavvae/decoder/seanet_encoder.py +38 -0
- xinference/thirdparty/megatts3/tts/modules/wavvae/decoder/wavvae_v3.py +60 -0
- xinference/thirdparty/megatts3/tts/modules/wavvae/encoder/common_modules/conv.py +154 -0
- xinference/thirdparty/megatts3/tts/modules/wavvae/encoder/common_modules/lstm.py +51 -0
- xinference/thirdparty/megatts3/tts/modules/wavvae/encoder/common_modules/seanet.py +126 -0
- xinference/thirdparty/megatts3/tts/utils/audio_utils/align.py +36 -0
- xinference/thirdparty/megatts3/tts/utils/audio_utils/io.py +95 -0
- xinference/thirdparty/megatts3/tts/utils/audio_utils/plot.py +90 -0
- xinference/thirdparty/megatts3/tts/utils/commons/ckpt_utils.py +171 -0
- xinference/thirdparty/megatts3/tts/utils/commons/hparams.py +215 -0
- xinference/thirdparty/megatts3/tts/utils/text_utils/dict.json +1 -0
- xinference/thirdparty/megatts3/tts/utils/text_utils/ph_tone_convert.py +94 -0
- xinference/thirdparty/megatts3/tts/utils/text_utils/split_text.py +90 -0
- xinference/thirdparty/megatts3/tts/utils/text_utils/text_encoder.py +280 -0
- xinference/types.py +10 -0
- xinference/utils.py +54 -0
- xinference/web/ui/build/asset-manifest.json +6 -6
- xinference/web/ui/build/index.html +1 -1
- xinference/web/ui/build/static/css/main.0f6523be.css +2 -0
- xinference/web/ui/build/static/css/main.0f6523be.css.map +1 -0
- xinference/web/ui/build/static/js/main.58bd483c.js +3 -0
- xinference/web/ui/build/static/js/main.58bd483c.js.map +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/3bff8cbe9141f937f4d98879a9771b0f48e0e4e0dbee8e647adbfe23859e7048.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/4500b1a622a031011f0a291701e306b87e08cbc749c50e285103536b85b6a914.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/51709f5d3e53bcf19e613662ef9b91fb9174942c5518987a248348dd4e1e0e02.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/69081049f0c7447544b7cfd73dd13d8846c02fe5febe4d81587e95c89a412d5b.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/b8551e9775a01b28ae674125c688febe763732ea969ae344512e64ea01bf632e.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/bf2b211b0d1b6465eff512d64c869d748f803c5651a7c24e48de6ea3484a7bfe.json +1 -0
- xinference/web/ui/src/locales/en.json +2 -1
- xinference/web/ui/src/locales/zh.json +2 -1
- {xinference-1.4.0.dist-info → xinference-1.5.0.dist-info}/METADATA +128 -115
- {xinference-1.4.0.dist-info → xinference-1.5.0.dist-info}/RECORD +124 -63
- {xinference-1.4.0.dist-info → xinference-1.5.0.dist-info}/WHEEL +1 -1
- xinference/web/ui/build/static/css/main.b494ae7e.css +0 -2
- xinference/web/ui/build/static/css/main.b494ae7e.css.map +0 -1
- xinference/web/ui/build/static/js/main.3cea968e.js +0 -3
- xinference/web/ui/build/static/js/main.3cea968e.js.map +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/27bcada3ee8f89d21184b359f022fc965f350ffaca52c9814c29f1fc37121173.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/7f59e45e3f268ab8a4788b6fb024cf8dab088736dff22f5a3a39c122a83ab930.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/dcd60488509450bfff37bfff56de2c096d51de17dd00ec60d4db49c8b483ada1.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/e547bbb18abb4a474b675a8d5782d25617566bea0af8caa9b836ce5649e2250a.json +0 -1
- /xinference/web/ui/build/static/js/{main.3cea968e.js.LICENSE.txt → main.58bd483c.js.LICENSE.txt} +0 -0
- {xinference-1.4.0.dist-info → xinference-1.5.0.dist-info}/entry_points.txt +0 -0
- {xinference-1.4.0.dist-info → xinference-1.5.0.dist-info/licenses}/LICENSE +0 -0
- {xinference-1.4.0.dist-info → xinference-1.5.0.dist-info}/top_level.txt +0 -0
xinference/model/utils.py
CHANGED
@@ -11,17 +11,21 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
+import asyncio
 import json
 import logging
 import os
 import random
+import threading
 from json import JSONDecodeError
 from pathlib import Path
-from typing import Any, Callable, Dict, Optional, Tuple, Union
+from typing import Any, Callable, Dict, Optional, Set, Tuple, Type, Union

 import huggingface_hub
 import numpy as np
 import torch
+from tqdm.auto import tqdm

 from ..constants import (
     XINFERENCE_CACHE_DIR,
@@ -343,3 +347,116 @@ def set_all_random_seed(seed: int):
     np.random.seed(seed)
     torch.manual_seed(seed)
     torch.cuda.manual_seed_all(seed)
+
+
+class CancellableDownloader:
+    def __init__(
+        self,
+        cancel_error_cls: Type[BaseException] = asyncio.CancelledError,
+        cancelled_event: Optional[threading.Event] = None,
+    ):
+        self._cancelled = cancelled_event
+        if self._cancelled is None:
+            self._cancelled = threading.Event()
+        self._done_event = threading.Event()
+        self._cancel_error_cls = cancel_error_cls
+        self._original_update = None
+        # progress for tqdm that is main
+        self._main_progresses: Set[tqdm] = set()
+        # progress for file downloader
+        # mainly when tqdm unit is set
+        self._download_progresses: Set[tqdm] = set()
+        # tqdm original update
+        self._original_tqdm_update = None
+
+    def reset(self):
+        self._main_progresses.clear()
+        self._download_progresses.clear()
+
+    def get_progress(self) -> float:
+        if self.cancelled or self.done:
+            # directly return 1.0 when cancelled or finished
+            return 1.0
+
+        tasks = finished_tasks = 0
+        for main_progress in self._main_progresses:
+            tasks += main_progress.total or 0
+            finished_tasks += main_progress.n
+
+        if tasks == 0:
+            # we assumed at least 1 task
+            tasks = 1
+
+        finished_ratio = finished_tasks / tasks
+
+        all_download_progress = finished_download_progress = 0
+        for download_progress in self._download_progresses:
+            # we skip finished download
+            if download_progress.n == download_progress.total:
+                continue
+            all_download_progress += download_progress.total or (
+                download_progress.n * 10
+            )
+            finished_download_progress += download_progress.n
+
+        if all_download_progress > 0:
+            rest_ratio = (
+                (tasks - finished_tasks)
+                / tasks
+                * (finished_download_progress / all_download_progress)
+            )
+            return finished_ratio + rest_ratio
+        else:
+            return finished_ratio
+
+    def cancel(self):
+        self._cancelled.set()
+
+    @property
+    def cancelled(self):
+        return self._cancelled.is_set()
+
+    @property
+    def done(self):
+        return self._done_event.is_set()
+
+    def wait(self, timeout: float):
+        self._done_event.wait(timeout)
+
+    def raise_error(self, error_msg: str = "Download cancelled"):
+        raise self._cancel_error_cls(error_msg)
+
+    def patch_tqdm(self):
+        # patch tqdm
+        # raise error if cancelled
+        self._original_update = original_update = tqdm.update
+        downloader = self
+
+        def patched_update(self, n):
+            if downloader.cancelled:
+                downloader.raise_error()
+            if not self.disable:
+                progresses = (
+                    downloader._main_progresses
+                    if getattr(self, "unit", "it") == "it"
+                    else downloader._download_progresses
+                )
+                progresses.add(self)
+            return original_update(self, n)
+
+        tqdm.update = patched_update
+
+    def unpatch_tqdm(self):
+        from tqdm.auto import tqdm
+
+        if self._original_update:
+            tqdm.update = self._original_update
+
+    def __enter__(self):
+        self.patch_tqdm()
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.unpatch_tqdm()
+        self._done_event.set()
+        self.reset()
xinference/model/video/core.py
CHANGED
@@ -17,7 +17,7 @@ from collections import defaultdict
 from typing import Any, Dict, List, Literal, Optional, Tuple

 from ...constants import XINFERENCE_CACHE_DIR
-from ..core import CacheableModelSpec, ModelDescription
+from ..core import CacheableModelSpec, ModelDescription, VirtualEnvSettings
 from ..utils import valid_model_revision
 from .diffusers import DiffUsersVideoModel

@@ -44,6 +44,7 @@ class VideoModelFamilyV1(CacheableModelSpec):
     model_ability: Optional[List[str]]
     default_model_config: Optional[Dict[str, Any]]
     default_generate_config: Optional[Dict[str, Any]]
+    virtualenv: Optional[VirtualEnvSettings]


 class VideoModelDescription(ModelDescription):
@@ -57,6 +58,10 @@ class VideoModelDescription(ModelDescription):
         super().__init__(address, devices, model_path=model_path)
         self._model_spec = model_spec

+    @property
+    def spec(self):
+        return self._model_spec
+
     def to_dict(self):
         return {
             "model_type": "video",
xinference/thirdparty/deepseek_vl2/__init__.py
ADDED
@@ -0,0 +1,31 @@
+# Copyright (c) 2023-2024 DeepSeek.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+# check if python version is above 3.10
+import sys
+
+if sys.version_info >= (3, 10):
+    print("Python version is above 3.10, patching the collections module.")
+    # Monkey patch collections
+    import collections
+    import collections.abc
+
+    for type_name in collections.abc.__all__:
+        setattr(collections, type_name, getattr(collections.abc, type_name))
xinference/thirdparty/deepseek_vl2/models/__init__.py
ADDED
@@ -0,0 +1,26 @@
+# Copyright (c) 2023-2024 DeepSeek.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+from .processing_deepseek_vl_v2 import DeepseekVLV2Processor
+from .modeling_deepseek_vl_v2 import DeepseekVLV2ForCausalLM
+
+__all__ = [
+    "DeepseekVLV2Processor",
+    "DeepseekVLV2ForCausalLM",
+]
xinference/thirdparty/deepseek_vl2/models/configuration_deepseek.py
ADDED
@@ -0,0 +1,210 @@
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
+
+DEEPSEEK_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+class DeepseekV2Config(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`DeepseekV2Model`]. It is used to instantiate an DeepSeek
+    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+    defaults will yield a similar configuration to that of the DeepSeek-V2 with multi-latent attention.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 102400):
+            Vocabulary size of the Deep model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`DeepseekV2Model`]
+        hidden_size (`int`, *optional*, defaults to 4096):
+            Dimension of the hidden representations.
+        intermediate_size (`int`, *optional*, defaults to 11008):
+            Dimension of the MLP representations.
+        moe_intermediate_size (`int`, *optional*, defaults to 1407):
+            Dimension of the MoE representations.
+        num_hidden_layers (`int`, *optional*, defaults to 32):
+            Number of hidden layers in the Transformer decoder.
+        num_attention_heads (`int`, *optional*, defaults to 32):
+            Number of attention heads for each attention layer in the Transformer decoder.
+        n_shared_experts (`int`, *optional*, defaults to None):
+            Number of shared experts, None means dense model.
+        n_routed_experts (`int`, *optional*, defaults to None):
+            Number of routed experts, None means dense model.
+        routed_scaling_factor (`float`, *optional*, defaults to 1.0):
+            Scaling factor or routed experts.
+        topk_method (`str`, *optional*, defaults to `gready`):
+            Topk method used in routed gate.
+        n_group (`int`, *optional*, defaults to None):
+            Number of groups for routed experts.
+        topk_group (`int`, *optional*, defaults to None):
+            Number of selected groups for each token(for each token, ensuring the selected experts is only within `topk_group` groups).
+        num_experts_per_tok (`int`, *optional*, defaults to None):
+            Number of selected experts, None means dense model.
+        moe_layer_freq (`int`, *optional*, defaults to 1):
+            The frequency of the MoE layer: one expert layer for every `moe_layer_freq - 1` dense layers.
+        first_k_dense_replace (`int`, *optional*, defaults to 0):
+            Number of dense layers in shallow layers(embed->dense->dense->...->dense->moe->moe...->lm_head).
+                                                            \--k dense layers--/
+        norm_topk_prob (`bool`, *optional*, defaults to False):
+            Whether to normalize the weights of the routed experts.
+        scoring_func (`str`, *optional*, defaults to 'softmax'):
+            Method of computing expert weights.
+        aux_loss_alpha (`float`, *optional*, defaults to 0.001):
+            Auxiliary loss weight coefficient.
+        seq_aux = (`bool`, *optional*, defaults to True):
+            Whether to compute the auxiliary loss for each individual sample.
+        num_key_value_heads (`int`, *optional*):
+            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+            `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When
+            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+            by meanpooling all the original heads within that group. For more details checkout [this
+            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
+            `num_attention_heads`.
+        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+            The non-linear activation function (function or string) in the decoder.
+        max_position_embeddings (`int`, *optional*, defaults to 2048):
+            The maximum sequence length that this model might ever be used with.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+            The epsilon used by the rms normalization layers.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+        pad_token_id (`int`, *optional*):
+            Padding token id.
+        bos_token_id (`int`, *optional*, defaults to 1):
+            Beginning of stream token id.
+        eos_token_id (`int`, *optional*, defaults to 2):
+            End of stream token id.
+        pretraining_tp (`int`, *optional*, defaults to 1):
+            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
+            document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
+            necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
+            issue](https://github.com/pytorch/pytorch/issues/76232).
+        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether to tie weight embeddings
+        rope_theta (`float`, *optional*, defaults to 10000.0):
+            The base period of the RoPE embeddings.
+        rope_scaling (`Dict`, *optional*):
+            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
+            `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+            `max_position_embeddings` to the expected new maximum.
+        attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
+            Whether to use a bias in the query, key, value and output projection layers during self-attention.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+        use_mla (`bool`, *optional*, defaults to `True`): Use multi-latent attention or multi-head attention. If True,
+            the model will use multi-latent attention, otherwise, it will use multi-head attention.
+
+    ```python
+    >>> from transformers import DeepseekV2Model, DeepseekV2Config
+
+    >>> # Initializing a Deepseek-V2 style configuration
+    >>> configuration = DeepseekV2Config()
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "deepseek_v2"
+    keys_to_ignore_at_inference = ["past_key_values"]
+
+    def __init__(
+        self,
+        vocab_size=102400,
+        hidden_size=4096,
+        intermediate_size=11008,
+        moe_intermediate_size = 1407,
+        num_hidden_layers=30,
+        num_attention_heads=32,
+        num_key_value_heads=32,
+        n_shared_experts = None,
+        n_routed_experts = None,
+        ep_size = 1,
+        routed_scaling_factor = 1.0,
+        kv_lora_rank = 512,
+        q_lora_rank = 1536,
+        qk_rope_head_dim = 64,
+        v_head_dim = 128,
+        qk_nope_head_dim = 128,
+        topk_method = 'gready',
+        n_group = None,
+        topk_group = None,
+        num_experts_per_tok = None,
+        moe_layer_freq = 1,
+        first_k_dense_replace = 0,
+        norm_topk_prob = False,
+        scoring_func = 'softmax',
+        aux_loss_alpha = 0.001,
+        seq_aux = True,
+        hidden_act="silu",
+        max_position_embeddings=2048,
+        initializer_range=0.02,
+        rms_norm_eps=1e-6,
+        use_cache=True,
+        pad_token_id=None,
+        bos_token_id=100000,
+        eos_token_id=100001,
+        pretraining_tp=1,
+        tie_word_embeddings=False,
+        rope_theta=10000.0,
+        rope_scaling=None,
+        attention_bias=False,
+        attention_dropout=0.0,
+        use_mla=True,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.moe_intermediate_size = moe_intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.n_shared_experts = n_shared_experts
+        self.n_routed_experts = n_routed_experts
+        self.ep_size = ep_size
+        self.routed_scaling_factor = routed_scaling_factor
+        self.kv_lora_rank = kv_lora_rank
+        self.q_lora_rank = q_lora_rank
+        self.qk_rope_head_dim = qk_rope_head_dim
+        self.v_head_dim = v_head_dim
+        self.qk_nope_head_dim = qk_nope_head_dim
+        self.topk_method = topk_method
+        self.n_group = n_group
+        self.topk_group = topk_group
+        self.num_experts_per_tok = num_experts_per_tok
+        self.moe_layer_freq = moe_layer_freq
+        self.first_k_dense_replace = first_k_dense_replace
+        self.norm_topk_prob = norm_topk_prob
+        self.scoring_func = scoring_func
+        self.aux_loss_alpha = aux_loss_alpha
+        self.seq_aux = seq_aux
+        # for backward compatibility
+        if num_key_value_heads is None:
+            num_key_value_heads = num_attention_heads
+
+        self.num_key_value_heads = num_key_value_heads
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.rms_norm_eps = float(rms_norm_eps)
+        self.pretraining_tp = pretraining_tp
+        self.use_cache = use_cache
+        self.rope_theta = rope_theta
+        self.rope_scaling = rope_scaling
+        self.attention_bias = attention_bias
+        self.attention_dropout = attention_dropout
+        self.use_mla = use_mla
+
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )