xinference 1.7.1__py3-none-any.whl → 1.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of xinference might be problematic.

Files changed (136)
  1. xinference/_version.py +3 -3
  2. xinference/client/restful/async_restful_client.py +8 -13
  3. xinference/client/restful/restful_client.py +6 -2
  4. xinference/core/chat_interface.py +6 -4
  5. xinference/core/media_interface.py +5 -0
  6. xinference/core/model.py +1 -5
  7. xinference/core/supervisor.py +117 -68
  8. xinference/core/worker.py +49 -37
  9. xinference/deploy/test/test_cmdline.py +2 -6
  10. xinference/model/audio/__init__.py +26 -23
  11. xinference/model/audio/chattts.py +3 -2
  12. xinference/model/audio/core.py +49 -98
  13. xinference/model/audio/cosyvoice.py +3 -2
  14. xinference/model/audio/custom.py +28 -73
  15. xinference/model/audio/f5tts.py +3 -2
  16. xinference/model/audio/f5tts_mlx.py +3 -2
  17. xinference/model/audio/fish_speech.py +3 -2
  18. xinference/model/audio/funasr.py +17 -4
  19. xinference/model/audio/kokoro.py +3 -2
  20. xinference/model/audio/megatts.py +3 -2
  21. xinference/model/audio/melotts.py +3 -2
  22. xinference/model/audio/model_spec.json +572 -171
  23. xinference/model/audio/utils.py +0 -6
  24. xinference/model/audio/whisper.py +3 -2
  25. xinference/model/audio/whisper_mlx.py +3 -2
  26. xinference/model/cache_manager.py +141 -0
  27. xinference/model/core.py +6 -49
  28. xinference/model/custom.py +174 -0
  29. xinference/model/embedding/__init__.py +67 -56
  30. xinference/model/embedding/cache_manager.py +35 -0
  31. xinference/model/embedding/core.py +104 -84
  32. xinference/model/embedding/custom.py +55 -78
  33. xinference/model/embedding/embed_family.py +80 -31
  34. xinference/model/embedding/flag/core.py +21 -5
  35. xinference/model/embedding/llama_cpp/__init__.py +0 -0
  36. xinference/model/embedding/llama_cpp/core.py +234 -0
  37. xinference/model/embedding/model_spec.json +968 -103
  38. xinference/model/embedding/sentence_transformers/core.py +30 -20
  39. xinference/model/embedding/vllm/core.py +11 -5
  40. xinference/model/flexible/__init__.py +8 -2
  41. xinference/model/flexible/core.py +26 -119
  42. xinference/model/flexible/custom.py +69 -0
  43. xinference/model/flexible/launchers/image_process_launcher.py +1 -0
  44. xinference/model/flexible/launchers/modelscope_launcher.py +5 -1
  45. xinference/model/flexible/launchers/transformers_launcher.py +15 -3
  46. xinference/model/flexible/launchers/yolo_launcher.py +5 -1
  47. xinference/model/image/__init__.py +20 -20
  48. xinference/model/image/cache_manager.py +62 -0
  49. xinference/model/image/core.py +70 -182
  50. xinference/model/image/custom.py +28 -72
  51. xinference/model/image/model_spec.json +402 -119
  52. xinference/model/image/ocr/got_ocr2.py +3 -2
  53. xinference/model/image/stable_diffusion/core.py +22 -7
  54. xinference/model/image/stable_diffusion/mlx.py +6 -6
  55. xinference/model/image/utils.py +2 -2
  56. xinference/model/llm/__init__.py +71 -94
  57. xinference/model/llm/cache_manager.py +292 -0
  58. xinference/model/llm/core.py +37 -111
  59. xinference/model/llm/custom.py +88 -0
  60. xinference/model/llm/llama_cpp/core.py +5 -7
  61. xinference/model/llm/llm_family.json +16260 -8151
  62. xinference/model/llm/llm_family.py +138 -839
  63. xinference/model/llm/lmdeploy/core.py +5 -7
  64. xinference/model/llm/memory.py +3 -4
  65. xinference/model/llm/mlx/core.py +6 -8
  66. xinference/model/llm/reasoning_parser.py +3 -1
  67. xinference/model/llm/sglang/core.py +32 -14
  68. xinference/model/llm/transformers/chatglm.py +3 -7
  69. xinference/model/llm/transformers/core.py +49 -27
  70. xinference/model/llm/transformers/deepseek_v2.py +2 -2
  71. xinference/model/llm/transformers/gemma3.py +2 -2
  72. xinference/model/llm/transformers/multimodal/cogagent.py +2 -2
  73. xinference/model/llm/transformers/multimodal/deepseek_vl2.py +2 -2
  74. xinference/model/llm/transformers/multimodal/gemma3.py +2 -2
  75. xinference/model/llm/transformers/multimodal/glm4_1v.py +167 -0
  76. xinference/model/llm/transformers/multimodal/glm4v.py +2 -2
  77. xinference/model/llm/transformers/multimodal/intern_vl.py +2 -2
  78. xinference/model/llm/transformers/multimodal/minicpmv26.py +3 -3
  79. xinference/model/llm/transformers/multimodal/ovis2.py +2 -2
  80. xinference/model/llm/transformers/multimodal/qwen-omni.py +2 -2
  81. xinference/model/llm/transformers/multimodal/qwen2_audio.py +2 -2
  82. xinference/model/llm/transformers/multimodal/qwen2_vl.py +2 -2
  83. xinference/model/llm/transformers/opt.py +3 -7
  84. xinference/model/llm/utils.py +34 -49
  85. xinference/model/llm/vllm/core.py +77 -27
  86. xinference/model/llm/vllm/xavier/engine.py +5 -3
  87. xinference/model/llm/vllm/xavier/scheduler.py +10 -6
  88. xinference/model/llm/vllm/xavier/transfer.py +1 -1
  89. xinference/model/rerank/__init__.py +26 -25
  90. xinference/model/rerank/core.py +47 -87
  91. xinference/model/rerank/custom.py +25 -71
  92. xinference/model/rerank/model_spec.json +158 -33
  93. xinference/model/rerank/utils.py +2 -2
  94. xinference/model/utils.py +115 -54
  95. xinference/model/video/__init__.py +13 -17
  96. xinference/model/video/core.py +44 -102
  97. xinference/model/video/diffusers.py +4 -3
  98. xinference/model/video/model_spec.json +90 -21
  99. xinference/types.py +5 -3
  100. xinference/web/ui/build/asset-manifest.json +3 -3
  101. xinference/web/ui/build/index.html +1 -1
  102. xinference/web/ui/build/static/js/main.7d24df53.js +3 -0
  103. xinference/web/ui/build/static/js/main.7d24df53.js.map +1 -0
  104. xinference/web/ui/node_modules/.cache/babel-loader/2704ff66a5f73ca78b341eb3edec60154369df9d87fbc8c6dd60121abc5e1b0a.json +1 -0
  105. xinference/web/ui/node_modules/.cache/babel-loader/607dfef23d33e6b594518c0c6434567639f24f356b877c80c60575184ec50ed0.json +1 -0
  106. xinference/web/ui/node_modules/.cache/babel-loader/9be3d56173aacc3efd0b497bcb13c4f6365de30069176ee9403b40e717542326.json +1 -0
  107. xinference/web/ui/node_modules/.cache/babel-loader/9f9dd6c32c78a222d07da5987ae902effe16bcf20aac00774acdccc4de3c9ff2.json +1 -0
  108. xinference/web/ui/node_modules/.cache/babel-loader/b2ab5ee972c60d15eb9abf5845705f8ab7e1d125d324d9a9b1bcae5d6fd7ffb2.json +1 -0
  109. xinference/web/ui/src/locales/en.json +0 -1
  110. xinference/web/ui/src/locales/ja.json +0 -1
  111. xinference/web/ui/src/locales/ko.json +0 -1
  112. xinference/web/ui/src/locales/zh.json +0 -1
  113. {xinference-1.7.1.dist-info → xinference-1.8.0.dist-info}/METADATA +9 -11
  114. {xinference-1.7.1.dist-info → xinference-1.8.0.dist-info}/RECORD +119 -119
  115. xinference/model/audio/model_spec_modelscope.json +0 -231
  116. xinference/model/embedding/model_spec_modelscope.json +0 -293
  117. xinference/model/embedding/utils.py +0 -18
  118. xinference/model/image/model_spec_modelscope.json +0 -375
  119. xinference/model/llm/llama_cpp/memory.py +0 -457
  120. xinference/model/llm/llm_family_csghub.json +0 -56
  121. xinference/model/llm/llm_family_modelscope.json +0 -8700
  122. xinference/model/llm/llm_family_openmind_hub.json +0 -1019
  123. xinference/model/rerank/model_spec_modelscope.json +0 -85
  124. xinference/model/video/model_spec_modelscope.json +0 -184
  125. xinference/web/ui/build/static/js/main.9b12b7f9.js +0 -3
  126. xinference/web/ui/build/static/js/main.9b12b7f9.js.map +0 -1
  127. xinference/web/ui/node_modules/.cache/babel-loader/1460361af6975e63576708039f1cb732faf9c672d97c494d4055fc6331460be0.json +0 -1
  128. xinference/web/ui/node_modules/.cache/babel-loader/4efd8dda58fda83ed9546bf2f587df67f8d98e639117bee2d9326a9a1d9bebb2.json +0 -1
  129. xinference/web/ui/node_modules/.cache/babel-loader/55b9fb40b57fa926e8f05f31c2f96467e76e5ad62f033dca97c03f9e8c4eb4fe.json +0 -1
  130. xinference/web/ui/node_modules/.cache/babel-loader/5b2dafe5aa9e1105e0244a2b6751807342fa86aa0144b4e84d947a1686102715.json +0 -1
  131. xinference/web/ui/node_modules/.cache/babel-loader/611fa2c6c53b66039991d06dfb0473b5ab37fc63b4564e0f6e1718523768a045.json +0 -1
  132. /xinference/web/ui/build/static/js/{main.9b12b7f9.js.LICENSE.txt → main.7d24df53.js.LICENSE.txt} +0 -0
  133. {xinference-1.7.1.dist-info → xinference-1.8.0.dist-info}/WHEEL +0 -0
  134. {xinference-1.7.1.dist-info → xinference-1.8.0.dist-info}/entry_points.txt +0 -0
  135. {xinference-1.7.1.dist-info → xinference-1.8.0.dist-info}/licenses/LICENSE +0 -0
  136. {xinference-1.7.1.dist-info → xinference-1.8.0.dist-info}/top_level.txt +0 -0
xinference/model/video/core.py CHANGED
@@ -12,21 +12,17 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  import logging
- import os
  from collections import defaultdict
- from typing import Any, Dict, List, Literal, Optional, Tuple
+ from typing import Any, Dict, List, Literal, Optional

- from ...constants import XINFERENCE_CACHE_DIR
- from ..core import CacheableModelSpec, ModelDescription, VirtualEnvSettings
- from ..utils import valid_model_revision
+ from ..core import CacheableModelSpec, VirtualEnvSettings
+ from ..utils import ModelInstanceInfoMixin
  from .diffusers import DiffusersVideoModel

  logger = logging.getLogger(__name__)

- MODEL_NAME_TO_REVISION: Dict[str, List[str]] = defaultdict(list)
  VIDEO_MODEL_DESCRIPTIONS: Dict[str, List[Dict]] = defaultdict(list)
- BUILTIN_VIDEO_MODELS: Dict[str, "VideoModelFamilyV1"] = {}
- MODELSCOPE_VIDEO_MODELS: Dict[str, "VideoModelFamilyV1"] = {}
+ BUILTIN_VIDEO_MODELS: Dict[str, List["VideoModelFamilyV2"]] = {}


  def get_video_model_descriptions():
@@ -35,7 +31,8 @@ def get_video_model_descriptions():
      return copy.deepcopy(VIDEO_MODEL_DESCRIPTIONS)


- class VideoModelFamilyV1(CacheableModelSpec):
+ class VideoModelFamilyV2(CacheableModelSpec, ModelInstanceInfoMixin):
+     version: Literal[2]
      model_family: str
      model_name: str
      model_id: str
@@ -46,57 +43,37 @@ class VideoModelFamilyV1(CacheableModelSpec):
      default_generate_config: Optional[Dict[str, Any]]
      virtualenv: Optional[VirtualEnvSettings]

+     class Config:
+         extra = "allow"

- class VideoModelDescription(ModelDescription):
-     def __init__(
-         self,
-         address: Optional[str],
-         devices: Optional[List[str]],
-         model_spec: VideoModelFamilyV1,
-         model_path: Optional[str] = None,
-     ):
-         super().__init__(address, devices, model_path=model_path)
-         self._model_spec = model_spec
-
-     @property
-     def spec(self):
-         return self._model_spec
-
-     def to_dict(self):
+     def to_description(self):
          return {
              "model_type": "video",
-             "address": self.address,
-             "accelerators": self.devices,
-             "model_name": self._model_spec.model_name,
-             "model_family": self._model_spec.model_family,
-             "model_revision": self._model_spec.model_revision,
-             "model_ability": self._model_spec.model_ability,
+             "address": getattr(self, "address", None),
+             "accelerators": getattr(self, "accelerators", None),
+             "model_name": self.model_name,
+             "model_family": self.model_family,
+             "model_revision": self.model_revision,
+             "model_ability": self.model_ability,
          }

      def to_version_info(self):
-         if self._model_path is None:
-             is_cached = get_cache_status(self._model_spec)
-             file_location = get_cache_dir(self._model_spec)
-         else:
-             is_cached = True
-             file_location = self._model_path
+         from ..cache_manager import CacheManager

-         return [
-             {
-                 "model_version": self._model_spec.model_name,
-                 "model_file_location": file_location,
-                 "cache_status": is_cached,
-             }
-         ]
+         cache_manager = CacheManager(self)
+
+         return {
+             "model_version": self.model_name,
+             "model_file_location": cache_manager.get_cache_dir(),
+             "cache_status": cache_manager.get_cache_status(),
+         }


  def generate_video_description(
-     video_model: VideoModelFamilyV1,
+     video_model: VideoModelFamilyV2,
  ) -> Dict[str, List[Dict]]:
      res = defaultdict(list)
-     res[video_model.model_name].extend(
-         VideoModelDescription(None, None, video_model).to_version_info()
-     )
+     res[video_model.model_name].append(video_model.to_version_info())
      return res
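
The payload produced here also changes shape: the removed VideoModelDescription.to_version_info() returned a one-element list, while the new method on the spec itself returns a single dict that generate_video_description now appends. A minimal sketch of the consumer-side difference (values are illustrative, not read from a real cache):

# Shape change in to_version_info(), per the hunk above (illustrative values).
old_style = [  # xinference <= 1.7.1: a one-element list per model
    {
        "model_version": "CogVideoX-2b",
        "model_file_location": "/path/to/cache/CogVideoX-2b",
        "cache_status": False,
    }
]
new_style = {  # xinference 1.8.0: a single dict
    "model_version": "CogVideoX-2b",
    "model_file_location": "/path/to/cache/CogVideoX-2b",
    "cache_status": False,
}

descriptions = {"CogVideoX-2b": []}
descriptions["CogVideoX-2b"].append(new_style)  # was: .extend(old_style)
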
@@ -105,22 +82,19 @@ def match_diffusion(
      download_hub: Optional[
          Literal["huggingface", "modelscope", "openmind_hub", "csghub"]
      ] = None,
- ) -> VideoModelFamilyV1:
+ ) -> VideoModelFamilyV2:
      from ..utils import download_from_modelscope
-     from . import BUILTIN_VIDEO_MODELS, MODELSCOPE_VIDEO_MODELS
-
-     if download_hub == "modelscope" and model_name in MODELSCOPE_VIDEO_MODELS:
-         logger.debug(f"Video model {model_name} found in ModelScope.")
-         return MODELSCOPE_VIDEO_MODELS[model_name]
-     elif download_hub == "huggingface" and model_name in BUILTIN_VIDEO_MODELS:
-         logger.debug(f"Video model {model_name} found in Huggingface.")
-         return BUILTIN_VIDEO_MODELS[model_name]
-     elif download_from_modelscope() and model_name in MODELSCOPE_VIDEO_MODELS:
-         logger.debug(f"Video model {model_name} found in ModelScope.")
-         return MODELSCOPE_VIDEO_MODELS[model_name]
-     elif model_name in BUILTIN_VIDEO_MODELS:
-         logger.debug(f"Video model {model_name} found in Huggingface.")
-         return BUILTIN_VIDEO_MODELS[model_name]
+     from . import BUILTIN_VIDEO_MODELS
+
+     if model_name in BUILTIN_VIDEO_MODELS:
+         model_families = BUILTIN_VIDEO_MODELS[model_name]
+         if download_hub == "modelscope" or download_from_modelscope():
+             return (
+                 [x for x in model_families if x.model_hub == "modelscope"]
+                 + [x for x in model_families if x.model_hub == "huggingface"]
+             )[0]
+         else:
+             return [x for x in model_families if x.model_hub == "huggingface"][0]
      else:
          raise ValueError(
              f"Video model {model_name} not found, available"
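
The rewritten lookup keeps one list of family specs per model name and simply orders the candidates by hub: ModelScope entries are preferred when that hub is requested (or download_from_modelscope() is true), with Hugging Face as the fallback; otherwise the Hugging Face entry is taken directly. A standalone sketch of that ordering, using a hypothetical FamilySpec in place of VideoModelFamilyV2:

from dataclasses import dataclass
from typing import List

@dataclass
class FamilySpec:
    # Hypothetical stand-in for VideoModelFamilyV2; only model_hub matters here.
    model_name: str
    model_hub: str  # "huggingface" or "modelscope"

def pick_spec(families: List[FamilySpec], prefer_modelscope: bool) -> FamilySpec:
    # Mirrors match_diffusion above: modelscope first when preferred, then
    # huggingface; the first candidate wins.
    if prefer_modelscope:
        ordered = [f for f in families if f.model_hub == "modelscope"] + [
            f for f in families if f.model_hub == "huggingface"
        ]
    else:
        ordered = [f for f in families if f.model_hub == "huggingface"]
    return ordered[0]

families = [
    FamilySpec("CogVideoX-2b", "huggingface"),
    FamilySpec("CogVideoX-2b", "modelscope"),
]
assert pick_spec(families, prefer_modelscope=True).model_hub == "modelscope"
assert pick_spec(families, prefer_modelscope=False).model_hub == "huggingface"
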
@@ -128,40 +102,7 @@ def match_diffusion(
      )


- def cache(model_spec: VideoModelFamilyV1):
-     from ..utils import cache
-
-     return cache(model_spec, VideoModelDescription)
-
-
- def get_cache_dir(model_spec: VideoModelFamilyV1):
-     return os.path.realpath(os.path.join(XINFERENCE_CACHE_DIR, model_spec.model_name))
-
-
- def get_cache_status(
-     model_spec: VideoModelFamilyV1,
- ) -> bool:
-     cache_dir = get_cache_dir(model_spec)
-     meta_path = os.path.join(cache_dir, "__valid_download")
-
-     model_name = model_spec.model_name
-     if model_name in BUILTIN_VIDEO_MODELS and model_name in MODELSCOPE_VIDEO_MODELS:
-         hf_spec = BUILTIN_VIDEO_MODELS[model_name]
-         ms_spec = MODELSCOPE_VIDEO_MODELS[model_name]
-
-         return any(
-             [
-                 valid_model_revision(meta_path, hf_spec.model_revision),
-                 valid_model_revision(meta_path, ms_spec.model_revision),
-             ]
-         )
-     else:  # Usually for UT
-         return valid_model_revision(meta_path, model_spec.model_revision)
-
-
  def create_video_model_instance(
-     subpool_addr: str,
-     devices: List[str],
      model_uid: str,
      model_name: str,
      download_hub: Optional[
@@ -169,10 +110,14 @@ def create_video_model_instance(
      ] = None,
      model_path: Optional[str] = None,
      **kwargs,
- ) -> Tuple[DiffusersVideoModel, VideoModelDescription]:
+ ) -> DiffusersVideoModel:
+     from ..cache_manager import CacheManager
+
      model_spec = match_diffusion(model_name, download_hub)
+
      if not model_path:
-         model_path = cache(model_spec)
+         cache_manager = CacheManager(model_spec)
+         model_path = cache_manager.cache()
      assert model_path is not None

      model = DiffusersVideoModel(
@@ -181,7 +126,4 @@
          model_spec,
          **kwargs,
      )
-     model_description = VideoModelDescription(
-         subpool_addr, devices, model_spec, model_path=model_path
-     )
-     return model, model_description
+     return model
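
The module-level cache, get_cache_dir and get_cache_status helpers deleted above are replaced by the shared CacheManager introduced in xinference/model/cache_manager.py (+141 lines). Only three methods are visible from these call sites; a rough, hedged outline of that surface, with behavior inferred from the deleted helpers rather than copied from the new implementation:

import os

class CacheManagerSketch:
    # Hypothetical outline of CacheManager as consumed by the hunks above.

    def __init__(self, model_family) -> None:
        # Constructed from a spec such as VideoModelFamilyV2.
        self._model_family = model_family

    def get_cache_dir(self) -> str:
        # The removed helper resolved <XINFERENCE_CACHE_DIR>/<model_name>;
        # assume a comparable per-model layout.
        return os.path.join("/tmp/xinference-cache", self._model_family.model_name)

    def get_cache_status(self) -> bool:
        # The removed helper checked a "__valid_download" marker file.
        return os.path.exists(os.path.join(self.get_cache_dir(), "__valid_download"))

    def cache(self) -> str:
        # Download-or-reuse; this sketch only ensures the directory exists.
        os.makedirs(self.get_cache_dir(), exist_ok=True)
        return self.get_cache_dir()

Note that create_video_model_instance also drops its subpool_addr/devices parameters and returns only the model; callers that previously unpacked a (model, description) tuple now obtain description data elsewhere, presumably via the spec's new to_description().
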
xinference/model/video/diffusers.py CHANGED
@@ -30,8 +30,8 @@ from ...device_utils import gpu_count, move_model_to_available_device
  from ...types import Video, VideoList

  if TYPE_CHECKING:
-     from ....core.progress_tracker import Progressor
-     from .core import VideoModelFamilyV1
+     from ...core.progress_tracker import Progressor
+     from .core import VideoModelFamilyV2


  logger = logging.getLogger(__name__)
@@ -60,9 +60,10 @@ class DiffusersVideoModel:
          self,
          model_uid: str,
          model_path: str,
-         model_spec: "VideoModelFamilyV1",
+         model_spec: "VideoModelFamilyV2",
          **kwargs,
      ):
+         self.model_family = model_spec
          self._model_uid = model_uid
          self._model_path = model_path
          self._model_spec = model_spec
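
Alongside the type rename, the constructor now also stores the spec on a public model_family attribute next to the existing private _model_spec, so downstream code can read spec fields straight off the model instance. A tiny illustrative sketch (class and values are stand-ins, not the real implementation):

class SpecStub:
    # Stand-in for a VideoModelFamilyV2 spec.
    model_name = "CogVideoX-2b"
    model_family = "CogVideoX"

class DiffusersVideoModelSketch:
    def __init__(self, model_uid, model_path, model_spec, **kwargs):
        self.model_family = model_spec  # new public attribute in 1.8.0
        self._model_uid = model_uid
        self._model_path = model_path
        self._model_spec = model_spec

m = DiffusersVideoModelSketch("uid-1", "/tmp/CogVideoX-2b", SpecStub())
print(m.model_family.model_name)  # -> CogVideoX-2b
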
xinference/model/video/model_spec.json CHANGED
@@ -1,9 +1,8 @@
  [
    {
+     "version": 2,
      "model_name": "CogVideoX-2b",
      "model_family": "CogVideoX",
-     "model_id": "THUDM/CogVideoX-2b",
-     "model_revision": "4bbfb1de622b80bc1b77b6e9aced75f816be0e38",
      "model_ability": [
        "text2video"
      ],
@@ -13,13 +12,22 @@
      },
      "default_generate_config": {
        "guidance_scale": 6
+     },
+     "model_src": {
+       "huggingface": {
+         "model_id": "THUDM/CogVideoX-2b",
+         "model_revision": "4bbfb1de622b80bc1b77b6e9aced75f816be0e38"
+       },
+       "modelscope": {
+         "model_id": "ZhipuAI/CogVideoX-2b",
+         "model_revision": "master"
+       }
      }
    },
    {
+     "version": 2,
      "model_name": "CogVideoX-5b",
      "model_family": "CogVideoX",
-     "model_id": "THUDM/CogVideoX-5b",
-     "model_revision": "8d6ea3f817438460b25595a120f109b88d5fdfad",
      "model_ability": [
        "text2video"
      ],
@@ -29,13 +37,22 @@
      },
      "default_generate_config": {
        "guidance_scale": 7
+     },
+     "model_src": {
+       "huggingface": {
+         "model_id": "THUDM/CogVideoX-5b",
+         "model_revision": "8d6ea3f817438460b25595a120f109b88d5fdfad"
+       },
+       "modelscope": {
+         "model_id": "ZhipuAI/CogVideoX-5b",
+         "model_revision": "master"
+       }
      }
    },
    {
+     "version": 2,
      "model_name": "HunyuanVideo",
      "model_family": "HunyuanVideo",
-     "model_id": "hunyuanvideo-community/HunyuanVideo",
-     "model_revision": "e8c2aaa66fe3742a32c11a6766aecbf07c56e773",
      "model_ability": [
        "text2video"
      ],
@@ -43,22 +60,29 @@
        "transformer_torch_dtype": "bfloat16",
        "torch_dtype": "float16"
      },
-     "default_generate_config": {
+     "default_generate_config": {},
+     "model_src": {
+       "huggingface": {
+         "model_id": "hunyuanvideo-community/HunyuanVideo",
+         "model_revision": "e8c2aaa66fe3742a32c11a6766aecbf07c56e773"
+       },
+       "modelscope": {
+         "model_id": "Xorbits/HunyuanVideo",
+         "model_revision": "master"
+       }
      }
    },
    {
+     "version": 2,
      "model_name": "Wan2.1-1.3B",
      "model_family": "Wan",
-     "model_id": "Wan-AI/Wan2.1-T2V-1.3B-Diffusers",
-     "model_revision": "0fad780a534b6463e45facd96134c9f345acfa5b",
      "model_ability": [
        "text2video"
      ],
      "default_model_config": {
        "torch_dtype": "bfloat16"
      },
-     "default_generate_config": {
-     },
+     "default_generate_config": {},
      "virtualenv": {
        "packages": [
          "diffusers>=0.33.0",
@@ -67,21 +91,29 @@
          "imageio",
          "#system_numpy#"
        ]
+     },
+     "model_src": {
+       "huggingface": {
+         "model_id": "Wan-AI/Wan2.1-T2V-1.3B-Diffusers",
+         "model_revision": "0fad780a534b6463e45facd96134c9f345acfa5b"
+       },
+       "modelscope": {
+         "model_id": "Wan-AI/Wan2.1-T2V-1.3B-Diffusers",
+         "model_revision": "master"
+       }
      }
    },
    {
+     "version": 2,
      "model_name": "Wan2.1-14B",
      "model_family": "Wan",
-     "model_id": "Wan-AI/Wan2.1-T2V-14B-Diffusers",
-     "model_revision": "38ec498cb3208fb688890f8cc7e94ede2cbd7f68",
      "model_ability": [
        "text2video"
      ],
      "default_model_config": {
        "torch_dtype": "bfloat16"
      },
-     "default_generate_config": {
-     },
+     "default_generate_config": {},
      "virtualenv": {
        "packages": [
          "diffusers>=0.33.0",
@@ -90,13 +122,22 @@
          "imageio",
          "#system_numpy#"
        ]
+     },
+     "model_src": {
+       "huggingface": {
+         "model_id": "Wan-AI/Wan2.1-T2V-14B-Diffusers",
+         "model_revision": "38ec498cb3208fb688890f8cc7e94ede2cbd7f68"
+       },
+       "modelscope": {
+         "model_id": "Wan-AI/Wan2.1-T2V-14B-Diffusers",
+         "model_revision": "master"
+       }
      }
    },
    {
+     "version": 2,
      "model_name": "Wan2.1-i2v-14B-480p",
      "model_family": "Wan",
-     "model_id": "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers",
-     "model_revision": "b184e23a8a16b20f108f727c902e769e873ffc73",
      "model_ability": [
        "image2video"
      ],
@@ -117,13 +158,22 @@
          "imageio",
          "#system_numpy#"
        ]
+     },
+     "model_src": {
+       "huggingface": {
+         "model_id": "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers",
+         "model_revision": "b184e23a8a16b20f108f727c902e769e873ffc73"
+       },
+       "modelscope": {
+         "model_id": "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers",
+         "model_revision": "master"
+       }
      }
    },
    {
+     "version": 2,
      "model_name": "Wan2.1-i2v-14B-720p",
      "model_family": "Wan",
-     "model_id": "Wan-AI/Wan2.1-I2V-14B-720P-Diffusers",
-     "model_revision": "eb849f76dfa246545b65774a9e25943ee69b3fa3",
      "model_ability": [
        "image2video"
      ],
@@ -144,13 +194,22 @@
          "imageio",
          "#system_numpy#"
        ]
+     },
+     "model_src": {
+       "huggingface": {
+         "model_id": "Wan-AI/Wan2.1-I2V-14B-720P-Diffusers",
+         "model_revision": "eb849f76dfa246545b65774a9e25943ee69b3fa3"
+       },
+       "modelscope": {
+         "model_id": "Wan-AI/Wan2.1-I2V-14B-720P-Diffusers",
+         "model_revision": "master"
+       }
      }
    },
    {
+     "version": 2,
      "model_name": "Wan2.1-flf2v-14B-720p",
      "model_family": "Wan",
-     "model_id": "Wan-AI/Wan2.1-FLF2V-14B-720P-diffusers",
-     "model_revision": "17c30769b1e0b5dcaa1799b117bf20a9c31f59d7",
      "model_ability": [
        "firstlastframe2video"
      ],
@@ -171,6 +230,16 @@
          "imageio",
          "#system_numpy#"
        ]
+     },
+     "model_src": {
+       "huggingface": {
+         "model_id": "Wan-AI/Wan2.1-FLF2V-14B-720P-diffusers",
+         "model_revision": "17c30769b1e0b5dcaa1799b117bf20a9c31f59d7"
+       },
+       "modelscope": {
+         "model_id": "Wan-AI/Wan2.1-FLF2V-14B-720P-diffusers",
+         "model_revision": "master"
+       }
      }
    }
  ]
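
With the separate *_modelscope.json catalogs removed, every entry in this file now carries "version": 2 and declares both download sources under model_src. A small hedged sketch of how such an entry could be resolved to a concrete model_id/model_revision for a chosen hub (the real resolution lives in the new spec and cache machinery, which is not part of this excerpt):

import json

entry = json.loads("""
{
  "version": 2,
  "model_name": "CogVideoX-2b",
  "model_family": "CogVideoX",
  "model_ability": ["text2video"],
  "model_src": {
    "huggingface": {
      "model_id": "THUDM/CogVideoX-2b",
      "model_revision": "4bbfb1de622b80bc1b77b6e9aced75f816be0e38"
    },
    "modelscope": {
      "model_id": "ZhipuAI/CogVideoX-2b",
      "model_revision": "master"
    }
  }
}
""")

def resolve_source(spec: dict, hub: str) -> tuple:
    # Pick the per-hub id/revision from model_src, falling back to huggingface.
    src = spec["model_src"].get(hub) or spec["model_src"]["huggingface"]
    return src["model_id"], src["model_revision"]

print(resolve_source(entry, "modelscope"))  # ('ZhipuAI/CogVideoX-2b', 'master')
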
xinference/types.py CHANGED
@@ -436,9 +436,11 @@ class PeftModelConfig:

      def to_dict(self):
          return {
-             "lora_list": [lora.to_dict() for lora in self.peft_model]
-             if self.peft_model
-             else None,
+             "lora_list": (
+                 [lora.to_dict() for lora in self.peft_model]
+                 if self.peft_model
+                 else None
+             ),
              "image_lora_load_kwargs": self.image_lora_load_kwargs,
              "image_lora_fuse_kwargs": self.image_lora_fuse_kwargs,
          }
xinference/web/ui/build/asset-manifest.json CHANGED
@@ -1,14 +1,14 @@
  {
    "files": {
      "main.css": "./static/css/main.013f296b.css",
-     "main.js": "./static/js/main.9b12b7f9.js",
+     "main.js": "./static/js/main.7d24df53.js",
      "static/media/icon.webp": "./static/media/icon.4603d52c63041e5dfbfd.webp",
      "index.html": "./index.html",
      "main.013f296b.css.map": "./static/css/main.013f296b.css.map",
-     "main.9b12b7f9.js.map": "./static/js/main.9b12b7f9.js.map"
+     "main.7d24df53.js.map": "./static/js/main.7d24df53.js.map"
    },
    "entrypoints": [
      "static/css/main.013f296b.css",
-     "static/js/main.9b12b7f9.js"
+     "static/js/main.7d24df53.js"
    ]
  }
xinference/web/ui/build/index.html CHANGED
@@ -1 +1 @@
- <!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="Web site created using create-react-app"/><link rel="apple-touch-icon" href="./logo192.png"/><link rel="manifest" href="./manifest.json"/><title>Xinference</title><script defer="defer" src="./static/js/main.9b12b7f9.js"></script><link href="./static/css/main.013f296b.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
+ <!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="Web site created using create-react-app"/><link rel="apple-touch-icon" href="./logo192.png"/><link rel="manifest" href="./manifest.json"/><title>Xinference</title><script defer="defer" src="./static/js/main.7d24df53.js"></script><link href="./static/css/main.013f296b.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>