bizyengine 1.2.68__py3-none-any.whl → 1.2.69__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,193 @@
+ from bizyairsdk import tensor_to_bytesio
+
+ from .trd_nodes_base import BizyAirTrdApiBaseNode
+
+
+ class Veo_V3_1_I2V_API(BizyAirTrdApiBaseNode):
+     @classmethod
+     def INPUT_TYPES(cls):
+         return {
+             "required": {
+                 "prompt": (
+                     "STRING",
+                     {
+                         "multiline": True,
+                         "default": "",
+                     },
+                 ),
+                 "first_frame_image": ("IMAGE", {"tooltip": "First frame image"}),
+                 "model": (["veo3.1-fast", "veo3.1-pro"], {"default": "veo3.1-fast"}),
+             },
+             "optional": {
+                 "last_frame_image": ("IMAGE", {"tooltip": "Last frame image"}),
+                 "aspect_ratio": (
+                     ["9:16", "16:9"],
+                     {"default": "16:9"},
+                 ),
+             },
+         }
+
+     NODE_DISPLAY_NAME = "Veo3.1 Image To Video"
+     RETURN_TYPES = ("VIDEO",)
+     RETURN_NAMES = ("video",)
+     CATEGORY = "☁️BizyAir/External APIs/Veo"
+
+     def handle_inputs(self, headers, prompt_id, **kwargs):
+         # Parameters
+         model = kwargs.get("model", "veo3.1-fast")
+         prompt = kwargs.get("prompt", "")
+         aspect_ratio = kwargs.get("aspect_ratio", "16:9")
+         first_frame_image = kwargs.get("first_frame_image", None)
+         last_frame_image = kwargs.get("last_frame_image", None)
+
+         if prompt is None or prompt.strip() == "":
+             raise ValueError("Prompt is required")
+
+         # Upload images
+         if first_frame_image is None:
+             raise ValueError("First frame image is required")
+         first_frame_image_url = self.upload_file(
+             tensor_to_bytesio(image=first_frame_image, total_pixels=4096 * 4096),
+             f"{prompt_id}_first.png",
+             headers,
+         )
+         data = {
+             "aspect_ratio": aspect_ratio,
+             "model": model,
+             "prompt": prompt,
+             "first_frame_image": first_frame_image_url,
+         }
+         if last_frame_image is not None:
+             last_frame_image_url = self.upload_file(
+                 tensor_to_bytesio(image=last_frame_image, total_pixels=4096 * 4096),
+                 f"{prompt_id}_last.png",
+                 headers,
+             )
+             data["last_frame_image"] = last_frame_image_url
+
+         return data, "veo3.1"
+
+     def handle_outputs(self, outputs):
+         return (outputs[0][0],)
+
+
+ class Veo_V3_1_I2V_REF_API(BizyAirTrdApiBaseNode):
+     @classmethod
+     def INPUT_TYPES(cls):
+         return {
+             "required": {
+                 "prompt": (
+                     "STRING",
+                     {
+                         "multiline": True,
+                         "default": "",
+                     },
+                 ),
+                 "ref_image_1": ("IMAGE", {"tooltip": "Reference image 1"}),
+                 "model": (["veo3.1-fast"], {"default": "veo3.1-fast"}),
+             },
+             "optional": {
+                 "ref_image_2": ("IMAGE", {"tooltip": "Reference image 2"}),
+                 "ref_image_3": ("IMAGE", {"tooltip": "Reference image 3"}),
+                 "aspect_ratio": (["16:9"], {"default": "16:9"}),
+             },
+         }
+
+     NODE_DISPLAY_NAME = "Veo3.1 Image To Video (Reference Images)"
+     RETURN_TYPES = ("VIDEO",)
+     RETURN_NAMES = ("video",)
+     CATEGORY = "☁️BizyAir/External APIs/Veo"
+     FUNCTION = "api_call"
+
+     def handle_inputs(self, headers, prompt_id, **kwargs):
+         # Parameters
+         model = kwargs.get("model", "veo3.1-fast")
+         prompt = kwargs.get("prompt", "")
+         aspect_ratio = kwargs.get("aspect_ratio", "16:9")
+         ref_image_1 = kwargs.get("ref_image_1", None)
+         ref_image_2 = kwargs.get("ref_image_2", None)
+         ref_image_3 = kwargs.get("ref_image_3", None)
+
+         if prompt is None or prompt.strip() == "":
+             raise ValueError("Prompt is required")
+
+         # Upload images
+         ref_images = []
+         if ref_image_1 is not None:
+             ref_image_1_url = self.upload_file(
+                 tensor_to_bytesio(image=ref_image_1, total_pixels=4096 * 4096),
+                 f"{prompt_id}_ref_1.png",
+                 headers,
+             )
+             ref_images.append(ref_image_1_url)
+         if ref_image_2 is not None:
+             ref_image_2_url = self.upload_file(
+                 tensor_to_bytesio(image=ref_image_2, total_pixels=4096 * 4096),
+                 f"{prompt_id}_ref_2.png",
+                 headers,
+             )
+             ref_images.append(ref_image_2_url)
+         if ref_image_3 is not None:
+             ref_image_3_url = self.upload_file(
+                 tensor_to_bytesio(image=ref_image_3, total_pixels=4096 * 4096),
+                 f"{prompt_id}_ref_3.png",
+                 headers,
+             )
+             ref_images.append(ref_image_3_url)
+         if len(ref_images) == 0:
+             raise ValueError("At least one reference image is required")
+         data = {
+             "aspect_ratio": aspect_ratio,
+             "model": model,
+             "prompt": prompt,
+             "urls": ref_images,
+         }
+         return data, "veo3.1"
+
+     def handle_outputs(self, outputs):
+         return (outputs[0][0],)
+
+
+ class Veo_V3_1_T2V_API(BizyAirTrdApiBaseNode):
+     @classmethod
+     def INPUT_TYPES(cls):
+         return {
+             "required": {
+                 "prompt": (
+                     "STRING",
+                     {
+                         "multiline": True,
+                         "default": "",
+                     },
+                 ),
+                 "model": (["veo3.1-fast", "veo3.1-pro"], {"default": "veo3.1-fast"}),
+             },
+             "optional": {
+                 "aspect_ratio": (
+                     ["9:16", "16:9"],
+                     {"default": "16:9"},
+                 ),
+             },
+         }
+
+     NODE_DISPLAY_NAME = "Veo3.1 Text To Video"
+     RETURN_TYPES = ("VIDEO",)
+     RETURN_NAMES = ("video",)
+     CATEGORY = "☁️BizyAir/External APIs/Veo"
+
+     def handle_inputs(self, headers, prompt_id, **kwargs):
+         model = kwargs.get("model", "veo3.1-fast")
+         prompt = kwargs.get("prompt", "")
+         aspect_ratio = kwargs.get("aspect_ratio", "16:9")
+
+         if prompt is None or prompt.strip() == "":
+             raise ValueError("Prompt is required")
+         data = {
+             "aspect_ratio": aspect_ratio,
+             "model": model,
+             "prompt": prompt,
+         }
+         return data, "veo3.1"
+
+     def handle_outputs(self, outputs):
+         return (outputs[0][0],)
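
For orientation, a minimal sketch (not an authoritative API reference) of the request body the Veo3.1 Image To Video node assembles in handle_inputs above. The URL values are placeholders standing in for whatever upload_file returns, and the endpoint path follows from the base class shown further below:

# Illustrative only: field names come from handle_inputs above; URLs are placeholders.
payload = {
    "aspect_ratio": "16:9",
    "model": "veo3.1-fast",
    "prompt": "a corgi surfing at sunset, cinematic",
    "first_frame_image": "https://<oss-bucket>/<prompt_id>_first.png",  # always present
    "last_frame_image": "https://<oss-bucket>/<prompt_id>_last.png",  # only when the optional input is wired
}
# handle_inputs returns (payload, "veo3.1"), so the base class POSTs this JSON to
# f"{BIZYAIR_X_SERVER}/trd_api/veo3.1" and then polls until the generated video is ready.
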
@@ -0,0 +1,198 @@
+ from bizyairsdk import tensor_to_bytesio
+
+ from bizyengine.bizyair_extras.utils.audio import save_audio
+
+ from .trd_nodes_base import BizyAirTrdApiBaseNode
+
+
+ class Wan_V2_5_I2V_API(BizyAirTrdApiBaseNode):
+     @classmethod
+     def INPUT_TYPES(cls):
+         return {
+             "required": {
+                 "image": ("IMAGE",),
+             },
+             "optional": {
+                 "audio": ("AUDIO",),
+                 "prompt": (
+                     "STRING",
+                     {
+                         "multiline": True,
+                         "default": "",
+                     },
+                 ),
+                 "negative_prompt": (
+                     "STRING",
+                     {
+                         "multiline": True,
+                         "default": "",
+                     },
+                 ),
+                 "resolution": (
+                     ["480P", "720P", "1080P"],
+                     {"default": "1080P"},
+                 ),
+                 "duration": ([5, 10], {"default": 5}),
+                 "prompt_extend": (
+                     "BOOLEAN",
+                     {
+                         "default": True,
+                         "tooltip": "Whether to enable intelligent prompt rewriting. When enabled, a large language model rewrites the input prompt; this noticeably improves results for short prompts but increases latency.",
+                     },
+                 ),
+                 "auto_audio": (
+                     "BOOLEAN",
+                     {
+                         "default": True,
+                         "tooltip": "Whether the model generates audio automatically; lower priority than the audio parameter.",
+                     },
+                 ),
+             },
+         }
+
+     NODE_DISPLAY_NAME = "Wan2.5 Image To Video"
+     RETURN_TYPES = ("VIDEO", "STRING")
+     RETURN_NAMES = ("video", "actual_prompt")
+     CATEGORY = "☁️BizyAir/External APIs/WanVideo"
+
+     def handle_inputs(self, headers, prompt_id, **kwargs):
+         # Parameters
+         prompt = kwargs.get("prompt", "")
+         negative_prompt = kwargs.get("negative_prompt", "")
+         audio = kwargs.get("audio", None)
+         resolution = kwargs.get("resolution", "1080P")
+         duration = kwargs.get("duration", 5)
+         prompt_extend = kwargs.get("prompt_extend", True)
+         auto_audio = kwargs.get("auto_audio", True)
+         image = kwargs.get("image", None)
+
+         model = "wan2.5-i2v-preview"
+         input = {
+             "resolution": resolution,
+             "prompt_extend": prompt_extend,
+             "duration": duration,
+             "audio": auto_audio,
+             "model": model,
+         }
+         if prompt is not None and prompt.strip() != "":
+             input["prompt"] = prompt
+         if negative_prompt is not None and negative_prompt.strip() != "":
+             input["negative_prompt"] = negative_prompt
+
+         # Upload image & audio
+         if image is not None:
+             image_url = self.upload_file(
+                 tensor_to_bytesio(image=image, total_pixels=4096 * 4096),
+                 f"{prompt_id}.png",
+                 headers,
+             )
+             input["img_url"] = image_url
+         if audio is not None:
+             audio_url = self.upload_file(
+                 save_audio(audio=audio, format="mp3"), f"{prompt_id}.mp3", headers
+             )
+             input["audio_url"] = audio_url
+
+         return input, model
+
+     def handle_outputs(self, outputs):
+         return (outputs[0][0],)
+
+
+ class Wan_V2_5_T2V_API(BizyAirTrdApiBaseNode):
+     @classmethod
+     def INPUT_TYPES(cls):
+         return {
+             "required": {
+                 "prompt": (
+                     "STRING",
+                     {
+                         "multiline": True,
+                         "default": "",
+                     },
+                 ),
+             },
+             "optional": {
+                 "audio": ("AUDIO",),
+                 "negative_prompt": (
+                     "STRING",
+                     {
+                         "multiline": True,
+                         "default": "",
+                     },
+                 ),
+                 "size": (
+                     [
+                         "832*480",
+                         "480*832",
+                         "624*624",
+                         "1280*720",
+                         "720*1280",
+                         "960*960",
+                         "1088*832",
+                         "832*1088",
+                         "1920*1080",
+                         "1080*1920",
+                         "1440*1440",
+                         "1632*1248",
+                         "1248*1632",
+                     ],
+                     {"default": "1920*1080"},
+                 ),
+                 "duration": ([5, 10], {"default": 5}),
+                 "prompt_extend": (
+                     "BOOLEAN",
+                     {
+                         "default": True,
+                         "tooltip": "Whether to enable intelligent prompt rewriting. When enabled, a large language model rewrites the input prompt; this noticeably improves results for short prompts but increases latency.",
+                     },
+                 ),
+                 "auto_audio": (
+                     "BOOLEAN",
+                     {
+                         "default": True,
+                         "tooltip": "Whether the model generates audio automatically; lower priority than the audio parameter.",
+                     },
+                 ),
+             },
+         }
+
+     NODE_DISPLAY_NAME = "Wan2.5 Text To Video"
+     RETURN_TYPES = ("VIDEO", "STRING")
+     RETURN_NAMES = ("video", "actual_prompt")
+     CATEGORY = "☁️BizyAir/External APIs/WanVideo"
+
+     def handle_inputs(self, headers, prompt_id, **kwargs):
+         # Parameters
+         model = "wan2.5-t2v-preview"
+         negative_prompt = kwargs.get("negative_prompt", "")
+         audio = kwargs.get("audio", None)
+         size = kwargs.get("size", "1920*1080")
+         duration = kwargs.get("duration", 5)
+         prompt_extend = kwargs.get("prompt_extend", True)
+         auto_audio = kwargs.get("auto_audio", True)
+         prompt = kwargs.get("prompt", "")
+
+         input = {
+             "size": size,
+             "prompt_extend": prompt_extend,
+             "duration": duration,
+             "audio": auto_audio,
+             "model": model,
+         }
+         if prompt is not None and prompt.strip() != "":
+             input["prompt"] = prompt
+         if negative_prompt is not None and negative_prompt.strip() != "":
+             input["negative_prompt"] = negative_prompt
+
+         # Upload audio
+         if audio is not None:
+             audio_url = self.upload_file(
+                 save_audio(audio=audio, format="mp3"), f"{prompt_id}.mp3", headers
+             )
+             input["audio_url"] = audio_url
+
+         return input, model
+
+     def handle_outputs(self, outputs):
+         return (outputs[0][0],)
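
Likewise, a hedged sketch of the body the Wan2.5 Image To Video node builds (keys taken from handle_inputs above; values and URLs are illustrative placeholders):

# Illustrative only. Note that "audio" carries the auto_audio boolean, while a connected
# AUDIO input is uploaded separately and sent as "audio_url", which the tooltip says
# takes priority over automatically generated audio.
payload = {
    "model": "wan2.5-i2v-preview",
    "resolution": "1080P",
    "duration": 5,
    "prompt_extend": True,
    "audio": True,  # the auto_audio flag
    "prompt": "a paper boat drifting down a rainy street",  # omitted when empty
    "img_url": "https://<oss-bucket>/<prompt_id>.png",
    "audio_url": "https://<oss-bucket>/<prompt_id>.mp3",  # only when an AUDIO input is connected
}
# handle_inputs returns (payload, "wan2.5-i2v-preview"); the model string doubles as the trd_api route.
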
@@ -0,0 +1,182 @@
+ import abc
+ import io
+ import json
+ import logging
+ import time
+ from typing import List, Tuple
+
+ import requests
+ import torch
+ from bizyairsdk import bytesio_to_image_tensor, common_upscale
+ from comfy_api.latest._input_impl import VideoFromFile
+
+ from bizyengine.core import (
+     BizyAirMiscBaseNode,
+     pop_api_key_and_prompt_id,
+     register_node,
+ )
+ from bizyengine.core.common import client
+ from bizyengine.core.common.client import send_request
+ from bizyengine.core.common.env_var import BIZYAIR_X_SERVER
+ from bizyengine.core.nodes_base import PREFIX
+
+ from ..utils.aliyun_oss import parse_upload_token, upload_file_without_sdk
+
+
+ class TrdBase(abc.ABC):
+     @abc.abstractmethod
+     # Return: data, model
+     def handle_inputs(self, headers, prompt_id, **kwargs) -> Tuple[dict, str]:
+         pass
+
+     @abc.abstractmethod
+     # outputs is (videos, images, texts); return the node's output tuple
+     def handle_outputs(
+         self, outputs: Tuple[List[VideoFromFile], List[torch.Tensor], List[str]]
+     ) -> Tuple:
+         pass
+
+
+ class BizyAirTrdApiBaseNode(BizyAirMiscBaseNode, TrdBase):
+     FUNCTION = "api_call"
+     OUTPUT_NODE = False
+
+     def __init_subclass__(cls, **kwargs):
+         register_node(cls, PREFIX)
+
+     def api_call(self, **kwargs):
+         extra_data = pop_api_key_and_prompt_id(kwargs)
+         headers = client.headers(api_key=extra_data["api_key"])
+         prompt_id = extra_data["prompt_id"]
+         headers["X-BIZYAIR-PROMPT-ID"] = prompt_id
+
+         data, model = self.handle_inputs(headers, prompt_id, **kwargs)
+         outputs = self.create_task_and_wait_for_completion(data, model, headers)
+         return self.handle_outputs(outputs)
+
+     def create_task_and_wait_for_completion(
+         self, data, model, headers
+     ) -> Tuple[List[VideoFromFile], List[torch.Tensor], List[str]]:
+         # Create the task
+         create_task_url = f"{BIZYAIR_X_SERVER}/trd_api/{model}"
+         json_payload = json.dumps(data).encode("utf-8")
+         logging.debug(f"json_payload: {json_payload}")
+         create_api_resp = send_request(
+             url=create_task_url,
+             data=json_payload,
+             headers=headers,
+         )
+         logging.debug(
+             f"{self.NODE_DISPLAY_NAME} create task api resp: {create_api_resp}"
+         )
+
+         # Check that task creation succeeded
+         if "data" not in create_api_resp or "request_id" not in create_api_resp["data"]:
+             raise ValueError(f"Invalid response: {create_api_resp}")
+
+         # Poll for the result, waiting at most 1 hour
+         request_id = create_api_resp["data"]["request_id"]
+         logging.info(f"{self.NODE_DISPLAY_NAME} task created, request_id: {request_id}")
+         start_time = time.time()
+         status_url = f"{BIZYAIR_X_SERVER}/trd_api/{request_id}"
+         while time.time() - start_time < 3600:
+             time.sleep(10)
+             try:
+                 status_api_resp = send_request(
+                     method="GET",
+                     url=status_url,
+                     headers=headers,
+                 )
+             except Exception as e:
+                 logging.error(
+                     f"{self.NODE_DISPLAY_NAME} task {request_id} status api error: {e}"
+                 )
+                 continue
+
+             if "data" not in status_api_resp:
+                 logging.error(
+                     f"{self.NODE_DISPLAY_NAME} task {request_id} status api resp no data: {status_api_resp}"
+                 )
+                 continue
+             if "status" not in status_api_resp["data"]:
+                 logging.error(
+                     f"{self.NODE_DISPLAY_NAME} task {request_id} status api resp no status: {status_api_resp}"
+                 )
+                 continue
+             status = status_api_resp["data"]["status"]
+             if status == "failed":
+                 raise ValueError(
+                     f"{self.NODE_DISPLAY_NAME} task {request_id} failed: {status_api_resp}"
+                 )
+             if status == "running":
+                 continue
+
+             # Success: fetch the outputs
+             if "outputs" not in status_api_resp["data"]:
+                 raise ValueError(
+                     f"{self.NODE_DISPLAY_NAME} task {request_id} no outputs: {status_api_resp}"
+                 )
+             logging.info(
+                 f"{self.NODE_DISPLAY_NAME} task {request_id} success: {status_api_resp}"
+             )
+             # Handle videos, images, and texts separately
+             videos = []
+             images = []
+             texts = []
+             outputs = status_api_resp["data"]["outputs"]
+             try:
+                 if "videos" in outputs:
+                     for video_url in outputs["videos"]:
+                         video_resp = requests.get(video_url, stream=True, timeout=3600)
+                         video_resp.raise_for_status()  # raises on non-2xx responses
+                         videos.append(VideoFromFile(io.BytesIO(video_resp.content)))
+                 if "images" in outputs:
+                     for image_url in outputs["images"]:
+                         image_resp = requests.get(image_url, stream=True, timeout=3600)
+                         image_resp.raise_for_status()  # raises on non-2xx responses
+                         images.append(
+                             bytesio_to_image_tensor(io.BytesIO(image_resp.content))
+                         )
+                 if "texts" in outputs:
+                     for text in outputs["texts"]:
+                         texts.append(text)
+             except Exception as e:
+                 logging.error(
+                     f"{self.NODE_DISPLAY_NAME} task {request_id} handle outputs error: {e}"
+                 )
+                 raise ValueError(
+                     f"{self.NODE_DISPLAY_NAME} task {request_id} handle outputs error: {e}, please download the outputs manually, outputs: {outputs}"
+                 )
+
+             return (videos, images, texts)
+
+         raise ValueError(
+             f"{self.NODE_DISPLAY_NAME} task timed out, request ID: {request_id}"
+         )
+
+     def upload_file(self, bytes, file_name, headers):
+         oss_token_url = (
+             f"{BIZYAIR_X_SERVER}/upload/token?file_name={file_name}&file_type=inputs"
+         )
+         token_resp = send_request("GET", oss_token_url, headers=headers)
+         auth_info = parse_upload_token(token_resp)
+         return upload_file_without_sdk(file_content=bytes, **auth_info)
+
+     def combine_images(self, images: List[torch.Tensor]) -> torch.Tensor:
+         s = None
+         if images is not None and len(images) > 0:
+             for image in images:
+                 if s is None:
+                     s = image
+                 else:
+                     # ComfyUI ImageBatch logic: scale to the first image's size before concatenating
+                     if s.shape[1:] != image.shape[1:]:
+                         image = common_upscale(
+                             image.movedim(-1, 1),
+                             s.shape[2],
+                             s.shape[1],
+                             "bilinear",
+                             "center",
+                         ).movedim(1, -1)
+                     s = torch.cat((s, image), dim=0)
+         return s
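
To show how the pieces fit together, here is a minimal, hypothetical subclass of BizyAirTrdApiBaseNode. The class name, display name, and "example-t2v" route are invented for illustration; the hook signatures and the (videos, images, texts) output tuple come from the code above:

# Hypothetical example, not part of the package. Subclasses only implement the two
# TrdBase hooks; task creation, polling, and output download are inherited from
# create_task_and_wait_for_completion, and __init_subclass__ registers the node.
class Example_T2V_API(BizyAirTrdApiBaseNode):
    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {"prompt": ("STRING", {"multiline": True, "default": ""})}}

    NODE_DISPLAY_NAME = "Example Text To Video"
    RETURN_TYPES = ("VIDEO",)
    RETURN_NAMES = ("video",)
    CATEGORY = "☁️BizyAir/External APIs/Example"

    def handle_inputs(self, headers, prompt_id, **kwargs):
        prompt = kwargs.get("prompt", "")
        if prompt.strip() == "":
            raise ValueError("Prompt is required")
        # The second element picks the route: POST {BIZYAIR_X_SERVER}/trd_api/example-t2v
        return {"prompt": prompt, "model": "example-t2v"}, "example-t2v"

    def handle_outputs(self, outputs):
        videos, images, texts = outputs
        # Return the first downloaded video as the node's single output
        return (videos[0],)
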
@@ -6,5 +6,6 @@ from bizyengine.core.nodes_base import (
      BizyAirBaseNode,
      BizyAirMiscBaseNode,
      pop_api_key_and_prompt_id,
+     register_node,
  )
  from bizyengine.core.nodes_io import BizyAirNodeIO, create_node_data
bizyengine/version.txt CHANGED
@@ -1 +1 @@
- 1.2.68
+ 1.2.69
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: bizyengine
- Version: 1.2.68
+ Version: 1.2.69
  Summary: [a/BizyAir](https://github.com/siliconflow/BizyAir) Comfy Nodes that can run in any environment.
  Author-email: SiliconFlow <yaochi@siliconflow.cn>
  Project-URL: Repository, https://github.com/siliconflow/BizyAir
@@ -14,7 +14,7 @@ Requires-Dist: inputimeout
  Requires-Dist: openai>=1.77.0
  Requires-Dist: pycryptodome
  Requires-Dist: mcp>=1.18.0
- Requires-Dist: bizyairsdk>=0.1.3
+ Requires-Dist: bizyairsdk>=0.1.5

  ## BizyEngine