bizyengine 1.2.45__py3-none-any.whl → 1.2.71__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. bizyengine/bizy_server/errno.py +21 -0
  2. bizyengine/bizy_server/server.py +130 -160
  3. bizyengine/bizy_server/utils.py +3 -0
  4. bizyengine/bizyair_extras/__init__.py +38 -31
  5. bizyengine/bizyair_extras/third_party_api/__init__.py +15 -0
  6. bizyengine/bizyair_extras/third_party_api/nodes_doubao.py +535 -0
  7. bizyengine/bizyair_extras/third_party_api/nodes_flux.py +173 -0
  8. bizyengine/bizyair_extras/third_party_api/nodes_gemini.py +403 -0
  9. bizyengine/bizyair_extras/third_party_api/nodes_gpt.py +101 -0
  10. bizyengine/bizyair_extras/third_party_api/nodes_hailuo.py +115 -0
  11. bizyengine/bizyair_extras/third_party_api/nodes_kling.py +404 -0
  12. bizyengine/bizyair_extras/third_party_api/nodes_sora.py +218 -0
  13. bizyengine/bizyair_extras/third_party_api/nodes_veo3.py +193 -0
  14. bizyengine/bizyair_extras/third_party_api/nodes_wan_api.py +198 -0
  15. bizyengine/bizyair_extras/third_party_api/trd_nodes_base.py +183 -0
  16. bizyengine/bizyair_extras/utils/aliyun_oss.py +92 -0
  17. bizyengine/bizyair_extras/utils/audio.py +88 -0
  18. bizyengine/bizybot/__init__.py +12 -0
  19. bizyengine/bizybot/client.py +774 -0
  20. bizyengine/bizybot/config.py +129 -0
  21. bizyengine/bizybot/coordinator.py +556 -0
  22. bizyengine/bizybot/exceptions.py +186 -0
  23. bizyengine/bizybot/mcp/__init__.py +3 -0
  24. bizyengine/bizybot/mcp/manager.py +520 -0
  25. bizyengine/bizybot/mcp/models.py +46 -0
  26. bizyengine/bizybot/mcp/registry.py +129 -0
  27. bizyengine/bizybot/mcp/routing.py +378 -0
  28. bizyengine/bizybot/models.py +344 -0
  29. bizyengine/core/__init__.py +1 -0
  30. bizyengine/core/commands/servers/prompt_server.py +10 -1
  31. bizyengine/core/common/client.py +8 -7
  32. bizyengine/core/common/utils.py +30 -1
  33. bizyengine/core/image_utils.py +12 -283
  34. bizyengine/misc/llm.py +32 -15
  35. bizyengine/misc/utils.py +179 -2
  36. bizyengine/version.txt +1 -1
  37. {bizyengine-1.2.45.dist-info → bizyengine-1.2.71.dist-info}/METADATA +3 -1
  38. {bizyengine-1.2.45.dist-info → bizyengine-1.2.71.dist-info}/RECORD +40 -16
  39. {bizyengine-1.2.45.dist-info → bizyengine-1.2.71.dist-info}/WHEEL +0 -0
  40. {bizyengine-1.2.45.dist-info → bizyengine-1.2.71.dist-info}/top_level.txt +0 -0
bizyengine/core/image_utils.py CHANGED
@@ -1,292 +1,21 @@
- import base64
- import io
- import json
- import os
- import pickle
- import zlib
  from enum import Enum
- from functools import singledispatch
- from typing import Any, List, Union

- import numpy as np
- import torch
- from PIL import Image
+ from bizyairsdk import (
+     base64_to_tensor,
+     decode_base64_to_image,
+     decode_base64_to_np,
+     decode_comfy_image,
+     decode_data,
+     encode_comfy_image,
+     encode_data,
+     encode_image_to_base64,
+     numpy_to_base64,
+ )

- from .common.env_var import BIZYAIR_DEBUG
-
- # Marker to identify base64-encoded tensors
- TENSOR_MARKER = "TENSOR:"
- IMAGE_MARKER = "IMAGE:"
+ # from .common.env_var import BIZYAIR_DEBUG


  class TaskStatus(Enum):
      PENDING = "pending"
      PROCESSING = "processing"
      COMPLETED = "completed"
-
-
- def convert_image_to_rgb(image: Image.Image) -> Image.Image:
-     if image.mode != "RGB":
-         return image.convert("RGB")
-     return image
-
-
- def encode_image_to_base64(
-     image: Image.Image, format: str = "png", quality: int = 100, lossless=False
- ) -> str:
-     image = convert_image_to_rgb(image)
-     with io.BytesIO() as output:
-         image.save(output, format=format, quality=quality, lossless=lossless)
-         output.seek(0)
-         img_bytes = output.getvalue()
-         if BIZYAIR_DEBUG:
-             print(f"encode_image_to_base64: {format_bytes(len(img_bytes))}")
-         return base64.b64encode(img_bytes).decode("utf-8")
-
-
- def decode_base64_to_np(img_data: str, format: str = "png") -> np.ndarray:
-     img_bytes = base64.b64decode(img_data)
-     if BIZYAIR_DEBUG:
-         print(f"decode_base64_to_np: {format_bytes(len(img_bytes))}")
-     with io.BytesIO(img_bytes) as input_buffer:
-         img = Image.open(input_buffer)
-         # https://github.com/comfyanonymous/ComfyUI/blob/a178e25912b01abf436eba1cfaab316ba02d272d/nodes.py#L1511
-         img = img.convert("RGB")
-         return np.array(img)
-
-
- def decode_base64_to_image(img_data: str) -> Image.Image:
-     img_bytes = base64.b64decode(img_data)
-     with io.BytesIO(img_bytes) as input_buffer:
-         img = Image.open(input_buffer)
-         if BIZYAIR_DEBUG:
-             format_info = img.format.upper() if img.format else "Unknown"
-             print(f"decode image format: {format_info}")
-         return img
-
-
- def format_bytes(num_bytes: int) -> str:
-     """
-     Converts a number of bytes to a human-readable string with units (B, KB, or MB).
-
-     :param num_bytes: The number of bytes to convert.
-     :return: A string representing the number of bytes in a human-readable format.
-     """
-     if num_bytes < 1024:
-         return f"{num_bytes} B"
-     elif num_bytes < 1024 * 1024:
-         return f"{num_bytes / 1024:.2f} KB"
-     else:
-         return f"{num_bytes / (1024 * 1024):.2f} MB"
-
-
- def _legacy_encode_comfy_image(image: torch.Tensor, image_format="png") -> str:
-     input_image = image.cpu().detach().numpy()
-     i = 255.0 * input_image[0]
-     input_image = np.clip(i, 0, 255).astype(np.uint8)
-     base64ed_image = encode_image_to_base64(
-         Image.fromarray(input_image), format=image_format
-     )
-     return base64ed_image
-
-
- def _legacy_decode_comfy_image(
-     img_data: Union[List, str], image_format="png"
- ) -> torch.tensor:
-     if isinstance(img_data, List):
-         decoded_imgs = [decode_comfy_image(x, old_version=True) for x in img_data]
-
-         combined_imgs = torch.cat(decoded_imgs, dim=0)
-         return combined_imgs
-
-     out = decode_base64_to_np(img_data, format=image_format)
-     out = np.array(out).astype(np.float32) / 255.0
-     output = torch.from_numpy(out)[None,]
-     return output
-
-
- def _new_encode_comfy_image(images: torch.Tensor, image_format="WEBP", **kwargs) -> str:
-     """https://docs.comfy.org/essentials/custom_node_snippets#save-an-image-batch
-     Encode a batch of images to base64 strings.
-
-     Args:
-         images (torch.Tensor): A batch of images.
-         image_format (str, optional): The format of the images. Defaults to "WEBP".
-
-     Returns:
-         str: A JSON string containing the base64-encoded images.
-     """
-     results = {}
-     for batch_number, image in enumerate(images):
-         i = 255.0 * image.cpu().numpy()
-         img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
-         base64ed_image = encode_image_to_base64(img, format=image_format, **kwargs)
-         results[batch_number] = base64ed_image
-
-     return json.dumps(results)
-
-
- def _new_decode_comfy_image(img_datas: str, image_format="WEBP") -> torch.tensor:
-     """
-     Decode a batch of base64-encoded images.
-
-     Args:
-         img_datas (str): A JSON string containing the base64-encoded images.
-         image_format (str, optional): The format of the images. Defaults to "WEBP".
-
-     Returns:
-         torch.Tensor: A tensor containing the decoded images.
-     """
-     img_datas = json.loads(img_datas)
-
-     decoded_imgs = []
-     for img_data in img_datas.values():
-         decoded_image = decode_base64_to_np(img_data, format=image_format)
-         decoded_image = np.array(decoded_image).astype(np.float32) / 255.0
-         decoded_imgs.append(torch.from_numpy(decoded_image)[None,])
-
-     return torch.cat(decoded_imgs, dim=0)
-
-
- def encode_comfy_image(
-     image: torch.Tensor, image_format="WEBP", old_version=False, lossless=False
- ) -> str:
-     if old_version:
-         return _legacy_encode_comfy_image(image, image_format)
-     return _new_encode_comfy_image(image, image_format, lossless=lossless)
-
-
- def decode_comfy_image(
-     img_data: Union[List, str], image_format="WEBP", old_version=False
- ) -> torch.tensor:
-     if old_version:
-         return _legacy_decode_comfy_image(img_data, image_format)
-     return _new_decode_comfy_image(img_data, image_format)
-
-
- def tensor_to_base64(tensor: torch.Tensor, compress=True) -> str:
-     tensor_np = tensor.cpu().detach().numpy()
-
-     tensor_bytes = pickle.dumps(tensor_np)
-     if compress:
-         tensor_bytes = zlib.compress(tensor_bytes)
-
-     tensor_b64 = base64.b64encode(tensor_bytes).decode("utf-8")
-     return tensor_b64
-
-
- def base64_to_tensor(tensor_b64: str, compress=True) -> torch.Tensor:
-     tensor_bytes = base64.b64decode(tensor_b64)
-
-     if compress:
-         tensor_bytes = zlib.decompress(tensor_bytes)
-
-     tensor_np = pickle.loads(tensor_bytes)
-
-     tensor = torch.from_numpy(tensor_np)
-     return tensor
-
-
- @singledispatch
- def decode_data(input, old_version=False):
-     raise NotImplementedError(f"Unsupported type: {type(input)}")
-
-
- @decode_data.register(int)
- @decode_data.register(float)
- @decode_data.register(bool)
- def _(input, **kwargs):
-     return input
-
-
- @decode_data.register(type(None))
- def _(input, **kwargs):
-     return [None]
-
-
- @decode_data.register(dict)
- def _(input, **kwargs):
-     return {k: decode_data(v, **kwargs) for k, v in input.items()}
-
-
- @decode_data.register(list)
- def _(input, **kwargs):
-     return [decode_data(x, **kwargs) for x in input]
-
-
- @decode_data.register(str)
- def _(input: str, **kwargs):
-     if input.startswith(TENSOR_MARKER):
-         tensor_b64 = input[len(TENSOR_MARKER) :]
-         return base64_to_tensor(tensor_b64)
-     elif input.startswith(IMAGE_MARKER):
-         tensor_b64 = input[len(IMAGE_MARKER) :]
-         old_version = kwargs.get("old_version", False)
-         return decode_comfy_image(tensor_b64, old_version=old_version)
-     return input
-
-
- @singledispatch
- def encode_data(output, disable_image_marker=False, old_version=False):
-     raise NotImplementedError(f"Unsupported type: {type(output)}")
-
-
- @encode_data.register(dict)
- def _(output, **kwargs):
-     return {k: encode_data(v, **kwargs) for k, v in output.items()}
-
-
- @encode_data.register(list)
- def _(output, **kwargs):
-     return [encode_data(x, **kwargs) for x in output]
-
-
- def is_image_tensor(tensor) -> bool:
-     """https://docs.comfy.org/essentials/custom_node_datatypes#image
-
-     Check if the given tensor is in the format of an IMAGE (shape [B, H, W, C] where C=3).
-
-     `Args`:
-         tensor (torch.Tensor): The tensor to check.
-
-     `Returns`:
-         bool: True if the tensor is in the IMAGE format, False otherwise.
-     """
-     try:
-         if not isinstance(tensor, torch.Tensor):
-             return False
-
-         if len(tensor.shape) != 4:
-             return False
-
-         B, H, W, C = tensor.shape
-         if C != 3:
-             return False
-
-         return True
-     except:
-         return False
-
-
- @encode_data.register(torch.Tensor)
- def _(output, **kwargs):
-     if is_image_tensor(output) and not kwargs.get("disable_image_marker", False):
-         old_version = kwargs.get("old_version", False)
-         lossless = kwargs.get("lossless", True)
-         return IMAGE_MARKER + encode_comfy_image(
-             output, image_format="WEBP", old_version=old_version, lossless=lossless
-         )
-     return TENSOR_MARKER + tensor_to_base64(output)
-
-
- @encode_data.register(int)
- @encode_data.register(float)
- @encode_data.register(bool)
- @encode_data.register(type(None))
- def _(output, **kwargs):
-     return output
-
-
- @encode_data.register(str)
- def _(output, **kwargs):
-     return output
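Note: the tensor/image codec helpers deleted above now ship in the external bizyairsdk package (added under Requires-Dist in the METADATA diff below), so call sites keep working unchanged. A minimal round-trip sketch, assuming the SDK functions keep the signatures of the removed local helpers:

import torch
from bizyairsdk import decode_comfy_image, encode_comfy_image

# ComfyUI IMAGE layout: [B, H, W, C] float tensor with C == 3
batch = torch.rand(1, 64, 64, 3)

# In the removed implementation, encode_comfy_image returned a JSON string of
# base64-encoded frames; we assume the SDK version behaves the same way.
encoded = encode_comfy_image(batch, image_format="WEBP", lossless=True)
decoded = decode_comfy_image(encoded, image_format="WEBP")
assert decoded.shape == batch.shape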
bizyengine/misc/llm.py CHANGED
@@ -381,22 +381,39 @@ class BizyAirJoyCaption2(BizyAirMiscBaseNode):
          )

          try:
-             if "result" in ret:
-                 ret = json.loads(ret["result"])
-             if ret["type"] == "error":
-                 raise Exception(ret["message"])
+             if isinstance(ret, dict) and "result" in ret:
+                 if isinstance(ret["result"], str):
+                     ret = json.loads(ret["result"])
+
+             if not isinstance(ret, dict):
+                 raise ValueError(
+                     f"Response is not a dictionary after processing: {ret}"
+                 )
+
+             if ret.get("type") == "error":
+                 error_message = ret.get("message", "Unknown error from API")
+                 raise Exception(f"API returned an error: {error_message}")
+
+             msg = ret["data"]
+             if not isinstance(msg, dict) or msg.get("type") not in (
+                 "comfyair",
+                 "bizyair",
+             ):
+                 raise Exception(f"Unexpected response type: {msg}")
+
+             caption = msg["data"]
+             return (caption,)
+
+         except json.JSONDecodeError as e:
+             raise Exception(f"Failed to parse JSON response: {ret}. Error: {e}")
+         except (KeyError, TypeError) as e:
+             raise Exception(
+                 f"Invalid response format, missing key or wrong type. Response: {ret}. Error: {e}"
+             )
          except Exception as e:
-             raise Exception(f"Unexpected response: {ret} {e=}")
-
-         msg = ret["data"]
-         if msg["type"] not in (
-             "comfyair",
-             "bizyair",
-         ):
-             raise Exception(f"Unexpected response type: {msg}")
-
-         caption = msg["data"]
-         return (caption,)
+             raise Exception(
+                 f"An unexpected error occurred while processing the response: {ret}. Error: {e}"
+             )


  class BizyAirJoyCaption3(BizyAirJoyCaption2):
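The reworked handler separates JSON decoding errors, schema violations, and everything else instead of collapsing them into one generic "Unexpected response" exception. A standalone sketch of the response shape it accepts, reconstructed from the checks above (the payload values are illustrative, not taken from the API):

import json

def parse_caption(ret):
    # Mirrors the checks above: unwrap a JSON-encoded "result", reject
    # type == "error", then require data.type in ("comfyair", "bizyair").
    if isinstance(ret, dict) and isinstance(ret.get("result"), str):
        ret = json.loads(ret["result"])
    if ret.get("type") == "error":
        raise RuntimeError(ret.get("message", "Unknown error from API"))
    msg = ret["data"]
    if not isinstance(msg, dict) or msg.get("type") not in ("comfyair", "bizyair"):
        raise RuntimeError(f"Unexpected response type: {msg}")
    return msg["data"]

raw = {"result": json.dumps({"type": "ok", "data": {"type": "bizyair", "data": "a cat"}})}
print(parse_caption(raw))  # -> a cat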
bizyengine/misc/utils.py CHANGED
@@ -1,14 +1,21 @@
+ import asyncio
  import base64
+ import concurrent.futures
  import json
+ import logging
  import os
  import pickle
+ import re
+ import threading
+ import time
  import urllib.parse
  import urllib.request
  import zlib
- from typing import List, Tuple, Union
+ from typing import Callable, Dict, Generic, List, Optional, Tuple, TypeVar, Union

  import numpy as np

+ from bizyengine.bizy_server.api_client import APIClient
  from bizyengine.core import pop_api_key_and_prompt_id
  from bizyengine.core.common import client
  from bizyengine.core.common.env_var import BIZYAIR_SERVER_ADDRESS
@@ -36,7 +43,7 @@ def send_post_request(api_url, payload, headers):
      try:
          data = json.dumps(payload).encode("utf-8")
          req = urllib.request.Request(api_url, data=data, headers=headers, method="POST")
-         with urllib.request.urlopen(req) as response:
+         with urllib.request.urlopen(req, timeout=3600) as response:
              response_data = response.read().decode("utf-8")
              return response_data
      except urllib.error.URLError as e:
@@ -143,6 +150,18 @@ def get_llm_response(
      extra_data = pop_api_key_and_prompt_id(kwargs)
      headers = client.headers(api_key=extra_data["api_key"])

+     # If the model is no longer available, pick the first available one
+     if _MODELS_CACHE.get("llm_models") is None:
+         cache_models(extra_data["api_key"])
+     llm_models = _MODELS_CACHE.get("llm_models")
+     if llm_models is None:
+         logging.warning(f"No LLM models available, keeping the original model {model}")
+     elif model not in llm_models:
+         logging.warning(
+             f"Model {model} is not available, using the first available model {llm_models[0]}"
+         )
+         model = llm_models[0]
+
      payload = {
          "model": model,
          "messages": [
@@ -183,6 +202,18 @@ def get_vlm_response(
      extra_data = pop_api_key_and_prompt_id(kwargs)
      headers = client.headers(api_key=extra_data["api_key"])

+     # If the model is no longer available, pick the first available one
+     if _MODELS_CACHE.get("vlm_models") is None:
+         cache_models(extra_data["api_key"])
+     vlm_models = _MODELS_CACHE.get("vlm_models")
+     if vlm_models is None:
+         logging.warning(f"No VLM models available, keeping the original model {model}")
+     elif model not in vlm_models:
+         logging.warning(
+             f"Model {model} is not available, using the first available model {vlm_models[0]}"
+         )
+         model = vlm_models[0]
+
      messages = [
          {
              "role": "system",
@@ -230,3 +261,149 @@ def get_vlm_response(
          callback=None,
      )
      return response
+
+
+ K = TypeVar("K")
+ V = TypeVar("V")
+ R = TypeVar("R")
+
+
+ class TTLCache(Generic[K, V]):
+     """Thread-safe in-memory TTL cache (standard library only)"""
+
+     def __init__(self, ttl_sec: float):
+         self.ttl = ttl_sec
+         self._data: Dict[K, tuple[V, float]] = {}
+         self._lock = threading.RLock()
+         self._stop_evt = threading.Event()
+         # Background sweeper thread
+         self._cleaner = threading.Thread(target=self._cleanup, daemon=True)
+         self._cleaner.start()
+
+     # ---------- public API ----------
+     def set(self, key: K, value: V) -> None:
+         """Write or refresh a key/value pair"""
+         with self._lock:
+             self._data[key] = (value, time.time() + self.ttl)
+
+     def get(self, key: K) -> Optional[V]:
+         """Read a value; returns None if the key is missing or expired"""
+         with self._lock:
+             val, expire = self._data.get(key, (None, 0))
+             if val is None or time.time() > expire:
+                 self._data.pop(key, None)
+                 return None
+             return val
+
+     def delete(self, key: K) -> None:
+         """Delete a key manually"""
+         with self._lock:
+             self._data.pop(key, None)
+
+     def stop(self):
+         """Stop the background thread (call before program exit)"""
+         self._stop_evt.set()
+         self._cleaner.join(timeout=self.ttl + 1)
+
+     # ---------- internals ----------
+     def _cleanup(self):
+         """Periodically sweep expired keys"""
+         while not self._stop_evt.wait(self.ttl / 2):
+             with self._lock:
+                 now = time.time()
+                 for key, (_, expire) in list(self._data.items()):
+                     if now > expire:
+                         self._data.pop(key, None)
+
+
+ class SingleFlight(Generic[R]):
+     """A Python take on Go's singleflight.Group (thread-safe)"""
+
+     def __init__(self):
+         self._lock = threading.Lock()
+         self._call_map: dict[str, SingleFlight._Call[R]] = {}
+
+     class _Call(Generic[R]):
+         __slots__ = ("mu", "done", "result", "err", "waiters")
+
+         def __init__(self):
+             self.mu = threading.Lock()
+             self.done = False
+             self.result: Optional[R] = None
+             self.err: Optional[BaseException] = None
+             self.waiters = 0
+
+     def do(
+         self, key: str, fn: Callable[[], R]
+     ) -> tuple[R, bool, Optional[BaseException]]:
+         """
+         Returns: (result, shared?, exception)
+         shared=True means this call did not actually run fn and reused another caller's result
+         """
+         with self._lock:
+             call = self._call_map.get(key)
+             if call is None:  # we are the first caller
+                 call = self._Call[R]()
+                 call.waiters = 1
+                 self._call_map[key] = call
+                 first = True
+             else:  # a concurrent call is already in flight
+                 call.waiters += 1
+                 first = False
+
+         if first:  # only the first caller actually runs fn
+             try:
+                 result = fn()
+                 with call.mu:
+                     call.result = result
+                     call.done = True
+             except BaseException as e:
+                 with call.mu:
+                     call.err = e
+                     call.done = True
+                 raise
+             finally:  # remove ourselves from the map
+                 with self._lock:
+                     if call.waiters == 0:
+                         self._call_map.pop(key, None)
+         else:  # everyone else blocks and waits
+             with call.mu:
+                 while not call.done:
+                     call.mu.wait()
+         # read the result
+         with call.mu:
+             if call.err is not None:
+                 return call.result, True, call.err
+             return call.result, True, None
+
+
+ _MODELS_CACHE = TTLCache[str, list[str]](ttl_sec=600)
+ _SF = SingleFlight[None]()
+
+
+ def cache_models(request_api_key: str):
+     # TODO: effect still to be verified; for now ComfyUI executes nodes serially, so no race occurs
+     _SF.do("_cache_models", lambda: _cache_models(request_api_key))
+
+
+ def _cache_models(request_api_key: str):
+     # ① Run the coroutine on a dedicated new thread - no lock should be needed at the prompt layer, since concurrency is 1
+     with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
+         api_client = APIClient()
+         all_models = pool.submit(
+             asyncio.run, api_client.fetch_all_llm_models(request_api_key)
+         ).result()
+     if len(all_models) == 0:
+         return
+     llm_models = [
+         model
+         for model in all_models
+         if not (re.search(r"\d+(\.\d+)?v", model.lower()) or "vl" in model.lower())
+     ]
+     vlm_models = [
+         model
+         for model in all_models
+         if re.search(r"\d+(\.\d+)?v", model.lower()) or "vl" in model.lower()
+     ]
+     _MODELS_CACHE.set("llm_models", llm_models)
+     _MODELS_CACHE.set("vlm_models", vlm_models)
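get_llm_response and get_vlm_response now self-heal when a saved model name disappears from the server: _cache_models refreshes the model list at most once per 10-minute TTL window (deduplicated through SingleFlight) and splits it with a name heuristic, where a "<digits>v" version tag or a "vl" substring marks a vision-language model. A self-contained sketch of that predicate, applied to hypothetical model IDs:

import re

def is_vlm(model: str) -> bool:
    # Same predicate as in _cache_models above
    name = model.lower()
    return bool(re.search(r"\d+(\.\d+)?v", name) or "vl" in name)

# Hypothetical model IDs, for illustration only:
for m in ("Qwen/Qwen2.5-72B-Instruct", "Qwen/Qwen2-VL-72B-Instruct", "THUDM/glm-4v-9b"):
    print(m, "->", "vlm" if is_vlm(m) else "llm")
# -> llm, then vlm ("vl" substring), then vlm ("4v" matches the version-tag pattern)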
bizyengine/version.txt CHANGED
@@ -1 +1 @@
- 1.2.45
+ 1.2.71
{bizyengine-1.2.45.dist-info → bizyengine-1.2.71.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: bizyengine
- Version: 1.2.45
+ Version: 1.2.71
  Summary: [a/BizyAir](https://github.com/siliconflow/BizyAir) Comfy Nodes that can run in any environment.
  Author-email: SiliconFlow <yaochi@siliconflow.cn>
  Project-URL: Repository, https://github.com/siliconflow/BizyAir
@@ -13,6 +13,8 @@ Requires-Dist: requests
  Requires-Dist: inputimeout
  Requires-Dist: openai>=1.77.0
  Requires-Dist: pycryptodome
+ Requires-Dist: mcp>=1.18.0
+ Requires-Dist: bizyairsdk>=0.1.5

  ## BizyEngine