bizyengine 1.2.45__py3-none-any.whl → 1.2.71__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- bizyengine/bizy_server/errno.py +21 -0
- bizyengine/bizy_server/server.py +130 -160
- bizyengine/bizy_server/utils.py +3 -0
- bizyengine/bizyair_extras/__init__.py +38 -31
- bizyengine/bizyair_extras/third_party_api/__init__.py +15 -0
- bizyengine/bizyair_extras/third_party_api/nodes_doubao.py +535 -0
- bizyengine/bizyair_extras/third_party_api/nodes_flux.py +173 -0
- bizyengine/bizyair_extras/third_party_api/nodes_gemini.py +403 -0
- bizyengine/bizyair_extras/third_party_api/nodes_gpt.py +101 -0
- bizyengine/bizyair_extras/third_party_api/nodes_hailuo.py +115 -0
- bizyengine/bizyair_extras/third_party_api/nodes_kling.py +404 -0
- bizyengine/bizyair_extras/third_party_api/nodes_sora.py +218 -0
- bizyengine/bizyair_extras/third_party_api/nodes_veo3.py +193 -0
- bizyengine/bizyair_extras/third_party_api/nodes_wan_api.py +198 -0
- bizyengine/bizyair_extras/third_party_api/trd_nodes_base.py +183 -0
- bizyengine/bizyair_extras/utils/aliyun_oss.py +92 -0
- bizyengine/bizyair_extras/utils/audio.py +88 -0
- bizyengine/bizybot/__init__.py +12 -0
- bizyengine/bizybot/client.py +774 -0
- bizyengine/bizybot/config.py +129 -0
- bizyengine/bizybot/coordinator.py +556 -0
- bizyengine/bizybot/exceptions.py +186 -0
- bizyengine/bizybot/mcp/__init__.py +3 -0
- bizyengine/bizybot/mcp/manager.py +520 -0
- bizyengine/bizybot/mcp/models.py +46 -0
- bizyengine/bizybot/mcp/registry.py +129 -0
- bizyengine/bizybot/mcp/routing.py +378 -0
- bizyengine/bizybot/models.py +344 -0
- bizyengine/core/__init__.py +1 -0
- bizyengine/core/commands/servers/prompt_server.py +10 -1
- bizyengine/core/common/client.py +8 -7
- bizyengine/core/common/utils.py +30 -1
- bizyengine/core/image_utils.py +12 -283
- bizyengine/misc/llm.py +32 -15
- bizyengine/misc/utils.py +179 -2
- bizyengine/version.txt +1 -1
- {bizyengine-1.2.45.dist-info → bizyengine-1.2.71.dist-info}/METADATA +3 -1
- {bizyengine-1.2.45.dist-info → bizyengine-1.2.71.dist-info}/RECORD +40 -16
- {bizyengine-1.2.45.dist-info → bizyengine-1.2.71.dist-info}/WHEEL +0 -0
- {bizyengine-1.2.45.dist-info → bizyengine-1.2.71.dist-info}/top_level.txt +0 -0
bizyengine/bizyair_extras/third_party_api/trd_nodes_base.py
@@ -0,0 +1,183 @@
+import abc
+import io
+import json
+import logging
+import time
+from typing import List, Tuple
+
+import requests
+import torch
+from bizyairsdk import bytesio_to_image_tensor, common_upscale
+from comfy_api.latest._input_impl import VideoFromFile
+
+from bizyengine.core import (
+    BizyAirMiscBaseNode,
+    pop_api_key_and_prompt_id,
+    register_node,
+)
+from bizyengine.core.common import client
+from bizyengine.core.common.client import send_request
+from bizyengine.core.common.env_var import BIZYAIR_X_SERVER
+from bizyengine.core.nodes_base import PREFIX
+
+from ..utils.aliyun_oss import parse_upload_token, upload_file_without_sdk
+
+
+class TrdBase(abc.ABC):
+    @abc.abstractmethod
+    # Return: data, model, prompt
+    def handle_inputs(self, headers, prompt_id, **kwargs) -> Tuple[dict, str]:
+        pass
+
+    @abc.abstractmethod
+    # Return: videos, images, texts
+    def handle_outputs(
+        self, outputs: Tuple[List[VideoFromFile], List[torch.Tensor], List[str]]
+    ) -> Tuple:
+        pass
+
+
+class BizyAirTrdApiBaseNode(BizyAirMiscBaseNode, TrdBase):
+    FUNCTION = "api_call"
+    OUTPUT_NODE = False
+
+    def __init_subclass__(cls, **kwargs):
+        super().__init_subclass__(**kwargs)
+        register_node(cls, PREFIX)
+
+    def api_call(self, **kwargs):
+        extra_data = pop_api_key_and_prompt_id(kwargs)
+        headers = client.headers(api_key=extra_data["api_key"])
+        prompt_id = extra_data["prompt_id"]
+        headers["X-BIZYAIR-PROMPT-ID"] = prompt_id
+
+        data, model = self.handle_inputs(headers, prompt_id, **kwargs)
+        outputs = self.create_task_and_wait_for_completion(data, model, headers)
+        return self.handle_outputs(outputs)
+
+    def create_task_and_wait_for_completion(
+        self, data, model, headers
+    ) -> Tuple[List[VideoFromFile], List[torch.Tensor], List[str]]:
+        # Create the task
+        create_task_url = f"{BIZYAIR_X_SERVER}/trd_api/{model}"
+        json_payload = json.dumps(data).encode("utf-8")
+        logging.debug(f"json_payload: {json_payload}")
+        create_api_resp = send_request(
+            url=create_task_url,
+            data=json_payload,
+            headers=headers,
+        )
+        logging.debug(
+            f"{self.NODE_DISPLAY_NAME} create task api resp: {create_api_resp}"
+        )
+
+        # Check whether task creation succeeded
+        if "data" not in create_api_resp or "request_id" not in create_api_resp["data"]:
+            raise ValueError(f"Invalid response: {create_api_resp}")
+
+        # Poll for the result, waiting at most one hour
+        request_id = create_api_resp["data"]["request_id"]
+        logging.info(f"{self.NODE_DISPLAY_NAME} task created, request_id: {request_id}")
+        start_time = time.time()
+        status_url = f"{BIZYAIR_X_SERVER}/trd_api/{request_id}"
+        while time.time() - start_time < 3600:
+            time.sleep(10)
+            try:
+                status_api_resp = send_request(
+                    method="GET",
+                    url=status_url,
+                    headers=headers,
+                )
+            except Exception as e:
+                logging.error(
+                    f"{self.NODE_DISPLAY_NAME} task {request_id} status api error: {e}"
+                )
+                continue
+
+            if "data" not in status_api_resp:
+                logging.error(
+                    f"{self.NODE_DISPLAY_NAME} task {request_id} status api resp no data: {status_api_resp}"
+                )
+                continue
+            if "status" not in status_api_resp["data"]:
+                logging.error(
+                    f"{self.NODE_DISPLAY_NAME} task {request_id} status api resp no status: {status_api_resp}"
+                )
+                continue
+            status = status_api_resp["data"]["status"]
+            if status == "failed":
+                raise ValueError(
+                    f"{self.NODE_DISPLAY_NAME} task {request_id} failed: {status_api_resp}"
+                )
+            if status == "running":
+                continue
+
+            # Success: collect the outputs
+            if "outputs" not in status_api_resp["data"]:
+                raise ValueError(
+                    f"{self.NODE_DISPLAY_NAME} task {request_id} no outputs: {status_api_resp}"
+                )
+            logging.info(
+                f"{self.NODE_DISPLAY_NAME} task {request_id} success: {status_api_resp}"
+            )
+            # Handle videos, images, and texts separately
+            videos = []
+            images = []
+            texts = []
+            outputs = status_api_resp["data"]["outputs"]
+            try:
+                if "videos" in outputs:
+                    for video_url in outputs["videos"]:
+                        video_resp = requests.get(video_url, stream=True, timeout=3600)
+                        video_resp.raise_for_status()  # raises on non-2xx responses
+                        videos.append(VideoFromFile(io.BytesIO(video_resp.content)))
+                if "images" in outputs:
+                    for image_url in outputs["images"]:
+                        image_resp = requests.get(image_url, stream=True, timeout=3600)
+                        image_resp.raise_for_status()  # raises on non-2xx responses
+                        images.append(
+                            bytesio_to_image_tensor(io.BytesIO(image_resp.content))
+                        )
+                if "texts" in outputs:
+                    for text in outputs["texts"]:
+                        texts.append(text)
+            except Exception as e:
+                logging.error(
+                    f"{self.NODE_DISPLAY_NAME} task {request_id} handle outputs error: {e}"
+                )
+                raise ValueError(
+                    f"{self.NODE_DISPLAY_NAME} task {request_id} handle outputs error: {e}, please download the outputs manually, outputs: {outputs}"
+                )
+
+            return (videos, images, texts)
+
+        raise ValueError(
+            f"{self.NODE_DISPLAY_NAME} task timed out, request ID: {request_id}"
+        )
+
+    def upload_file(self, bytes, file_name, headers):
+        oss_token_url = (
+            f"{BIZYAIR_X_SERVER}/upload/token?file_name={file_name}&file_type=inputs"
+        )
+        token_resp = send_request("GET", oss_token_url, headers=headers)
+        auth_info = parse_upload_token(token_resp)
+        return upload_file_without_sdk(file_content=bytes, **auth_info)
+
+    def combine_images(self, images: List[torch.Tensor]) -> torch.Tensor:
+        s = None
+        if images is not None and len(images) > 0:
+            for _, image in enumerate(images):
+                if s is None:
+                    s = image
+                else:
+                    # ComfyUI BatchImage logic
+                    if s.shape[1:] != image.shape[1:]:
+                        image = common_upscale(
+                            image.movedim(-1, 1),
+                            s.shape[2],
+                            image.shape[1],
+                            "bilinear",
+                            "center",
+                        ).movedim(1, -1)
+                    s = torch.cat((s, image), dim=0)
+        return s
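
A concrete third-party API node only has to supply the two abstract hooks; task creation, polling, and media download come from the base class. The sketch below is illustrative only: the class name, display name, input schema, payload fields, and the "example_model" route are assumptions, not taken from this release.

# Hypothetical subclass sketch; names and payload fields are assumed for illustration.
from typing import List, Tuple

import torch
from comfy_api.latest._input_impl import VideoFromFile

from bizyengine.bizyair_extras.third_party_api.trd_nodes_base import BizyAirTrdApiBaseNode


class ExampleTextToVideo(BizyAirTrdApiBaseNode):
    NODE_DISPLAY_NAME = "Example Text To Video"  # assumed
    RETURN_TYPES = ("VIDEO",)

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {"prompt": ("STRING", {"multiline": True})}}

    def handle_inputs(self, headers, prompt_id, **kwargs) -> Tuple[dict, str]:
        # Build the request payload and choose the third-party model route.
        data = {"prompt": kwargs["prompt"]}  # payload shape assumed
        return data, "example_model"  # POSTed to {BIZYAIR_X_SERVER}/trd_api/example_model

    def handle_outputs(
        self, outputs: Tuple[List[VideoFromFile], List[torch.Tensor], List[str]]
    ) -> Tuple:
        videos, _images, _texts = outputs
        return (videos[0],)

Because __init_subclass__ calls register_node(cls, PREFIX), defining such a subclass is enough to register it as a BizyAir node; no separate registration step is needed.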
bizyengine/bizyair_extras/utils/aliyun_oss.py
@@ -0,0 +1,92 @@
+import base64
+import datetime
+import hashlib
+import hmac
+import io
+import logging
+
+import requests
+
+
+def sign_request(method, bucket, object_key, headers, access_key_id, access_key_secret):
+    # This is a simplified representation. Actual OSS signing is more complex.
+    # It involves creating a canonicalized resource and headers string.
+    # Refer to Alibaba Cloud OSS documentation for precise signing details.
+
+    canonical_string = f"{method}\n"
+    canonical_string += f"{headers.get('Content-MD5', '')}\n"
+    canonical_string += f"{headers.get('Content-Type', '')}\n"
+    canonical_string += f"{headers.get('Date', '')}\n"
+
+    # Add canonicalized OSS headers if present (e.g., x-oss-meta-*)
+    for key in sorted(headers.keys()):
+        if key.lower().startswith("x-oss-"):
+            canonical_string += f"{key.lower()}:{headers[key]}\n"
+
+    canonical_string += f"/{bucket}/{object_key}"
+
+    h = hmac.new(
+        access_key_secret.encode("utf-8"),
+        canonical_string.encode("utf-8"),
+        hashlib.sha1,
+    )
+    signature = base64.b64encode(h.digest()).decode("utf-8")
+    return f"OSS {access_key_id}:{signature}"
+
+
+def upload_file_without_sdk(
+    file_content: io.BytesIO,
+    bucket,
+    object_key,
+    endpoint,
+    access_key_id,
+    access_key_secret,
+    security_token,
+    **kwargs,
+):
+    logging.info(f"Uploading file to {bucket}.{endpoint}/{object_key}")
+    date = datetime.datetime.now(datetime.timezone.utc).strftime(
+        "%a, %d %b %Y %H:%M:%S GMT"
+    )
+
+    headers = {
+        "Host": f"{bucket}.{endpoint}",
+        "Date": date,
+        "Content-Type": "application/octet-stream",
+        "Content-Length": str(file_content.getbuffer().nbytes),
+        "x-oss-security-token": security_token,
+    }
+
+    headers["Authorization"] = sign_request(
+        "PUT", bucket, object_key, headers, access_key_id, access_key_secret
+    )
+
+    url = f"https://{bucket}.{endpoint}/{object_key}"
+
+    try:
+        response = requests.put(url, headers=headers, data=file_content)
+        response.raise_for_status()  # Raise an exception for bad status codes
+        logging.info(f"File '{object_key}' uploaded successfully.")
+        return url
+    except requests.exceptions.RequestException as e:
+        logging.error(f"Error uploading file: {e}")
+        if response is not None:
+            logging.error(f"Response content: {response.text}")
+        raise e
+
+
+def parse_upload_token(resp) -> dict:
+    logging.debug(f"parsing token resp: {resp}")
+    if "data" not in resp:
+        logging.error(f"Invalid response, data not found: {resp}")
+        raise ValueError(f"Invalid response: {resp}")
+    data = resp["data"]
+    if "file" not in data:
+        logging.error(f"Invalid response, file not found: {resp}")
+        raise ValueError(f"Invalid response: {resp}")
+    file = data["file"]
+    if "storage" not in data:
+        logging.error(f"Invalid response, storage not found: {resp}")
+        raise ValueError(f"Invalid response: {resp}")
+    storage = data["storage"]
+    return file | storage
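
These helpers are designed to be chained: the token response from the BizyAir upload endpoint is parsed into a flat dict (file | storage), which is then splatted straight into upload_file_without_sdk. A minimal sketch follows; the exact field names inside "file" and "storage" are assumptions inferred from the upload_file_without_sdk signature, not documented in this diff.

# Illustrative only: the token_resp shape below is assumed.
import io

from bizyengine.bizyair_extras.utils.aliyun_oss import (
    parse_upload_token,
    upload_file_without_sdk,
)

# In the package, token_resp comes from GET {BIZYAIR_X_SERVER}/upload/token?...
# (see BizyAirTrdApiBaseNode.upload_file). Field names here are assumed.
token_resp = {
    "data": {
        "file": {"object_key": "inputs/example.png"},
        "storage": {
            "bucket": "example-bucket",
            "endpoint": "oss-cn-hangzhou.aliyuncs.com",
            "access_key_id": "AKID...",
            "access_key_secret": "SECRET...",
            "security_token": "STS...",
        },
    }
}

auth_info = parse_upload_token(token_resp)  # merges data["file"] | data["storage"]
url = upload_file_without_sdk(file_content=io.BytesIO(b"..."), **auth_info)
print(url)  # https://{bucket}.{endpoint}/{object_key}

The keys of the merged dict must match upload_file_without_sdk's parameter names for the ** expansion to work; any extra keys are absorbed by **kwargs.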
bizyengine/bizyair_extras/utils/audio.py
@@ -0,0 +1,88 @@
+import io
+
+import av
+import torchaudio
+
+
+# Adapted from ComfyUI save_audio function
+def save_audio(
+    audio,
+    filename_prefix="ComfyUI",
+    format="flac",
+    prompt=None,
+    extra_pnginfo=None,
+    quality="128k",
+) -> io.BytesIO:
+    # Opus supported sample rates
+    OPUS_RATES = [8000, 12000, 16000, 24000, 48000]
+
+    for batch_number, waveform in enumerate(audio["waveform"].cpu()):
+        # Use original sample rate initially
+        sample_rate = audio["sample_rate"]
+
+        # Handle Opus sample rate requirements
+        if format == "opus":
+            if sample_rate > 48000:
+                sample_rate = 48000
+            elif sample_rate not in OPUS_RATES:
+                # Find the next highest supported rate
+                for rate in sorted(OPUS_RATES):
+                    if rate > sample_rate:
+                        sample_rate = rate
+                        break
+                if sample_rate not in OPUS_RATES:  # Fallback if still not supported
+                    sample_rate = 48000
+
+        # Resample if necessary
+        if sample_rate != audio["sample_rate"]:
+            waveform = torchaudio.functional.resample(
+                waveform, audio["sample_rate"], sample_rate
+            )
+
+        # Create output with specified format
+        output_buffer = io.BytesIO()
+        output_container = av.open(output_buffer, mode="w", format=format)
+
+        # Set up the output stream with appropriate properties
+        if format == "opus":
+            out_stream = output_container.add_stream("libopus", rate=sample_rate)
+            if quality == "64k":
+                out_stream.bit_rate = 64000
+            elif quality == "96k":
+                out_stream.bit_rate = 96000
+            elif quality == "128k":
+                out_stream.bit_rate = 128000
+            elif quality == "192k":
+                out_stream.bit_rate = 192000
+            elif quality == "320k":
+                out_stream.bit_rate = 320000
+        elif format == "mp3":
+            out_stream = output_container.add_stream("libmp3lame", rate=sample_rate)
+            if quality == "V0":
+                # TODO i would really love to support V3 and V5 but there doesn't seem to be a way to set the qscale level, the property below is a bool
+                out_stream.codec_context.qscale = 1
+            elif quality == "128k":
+                out_stream.bit_rate = 128000
+            elif quality == "320k":
+                out_stream.bit_rate = 320000
+        else:  # format == "flac":
+            out_stream = output_container.add_stream("flac", rate=sample_rate)
+
+        frame = av.AudioFrame.from_ndarray(
+            waveform.movedim(0, 1).reshape(1, -1).float().numpy(),
+            format="flt",
+            layout="mono" if waveform.shape[0] == 1 else "stereo",
+        )
+        frame.sample_rate = sample_rate
+        frame.pts = 0
+        output_container.mux(out_stream.encode(frame))
+
+        # Flush encoder
+        output_container.mux(out_stream.encode(None))
+
+        # Close containers
+        output_container.close()
+
+        # Write the output to file
+        output_buffer.seek(0)
+        return output_buffer
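
save_audio consumes a ComfyUI-style AUDIO dict ({"waveform": tensor of shape [batch, channels, samples], "sample_rate": int}) and returns the encoded file as an in-memory buffer, returning on the first batch item. A minimal usage sketch, assuming nothing beyond that dict shape; the tone generation and output filename are illustrative:

# Illustrative only: encode one second of a 440 Hz tone to FLAC in memory.
import math

import torch

from bizyengine.bizyair_extras.utils.audio import save_audio

sample_rate = 44100
t = torch.arange(sample_rate, dtype=torch.float32) / sample_rate
waveform = torch.sin(2 * math.pi * 440.0 * t).reshape(1, 1, -1)  # [batch, channels, samples]

audio = {"waveform": waveform, "sample_rate": sample_rate}
buffer = save_audio(audio, format="flac")  # io.BytesIO, already seeked to 0
with open("tone.flac", "wb") as f:
    f.write(buffer.getvalue())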
bizyengine/bizybot/__init__.py
@@ -0,0 +1,12 @@
+"""
+MCP Coordinator - A lightweight AI application coordinator
+"""
+
+__version__ = "0.1.0"
+
+from .config import Config, LLMConfig
+
+# Import main modules to make them available
+from .coordinator import Coordinator
+
+__all__ = ["Coordinator", "Config", "LLMConfig"]
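
The new bizybot package re-exports its main classes at the top level. Only the import surface below is grounded in this hunk; constructor arguments for Coordinator, Config, and LLMConfig are not shown in the diff and are therefore omitted.

# Illustrative only: just the re-exported names from bizyengine.bizybot.
import bizyengine.bizybot as bizybot

print(bizybot.__version__)  # "0.1.0"
print(bizybot.__all__)      # ["Coordinator", "Config", "LLMConfig"]
coordinator_cls = bizybot.Coordinator  # defined in bizyengine/bizybot/coordinator.py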