bizyengine 1.2.45__py3-none-any.whl → 1.2.71__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- bizyengine/bizy_server/errno.py +21 -0
- bizyengine/bizy_server/server.py +130 -160
- bizyengine/bizy_server/utils.py +3 -0
- bizyengine/bizyair_extras/__init__.py +38 -31
- bizyengine/bizyair_extras/third_party_api/__init__.py +15 -0
- bizyengine/bizyair_extras/third_party_api/nodes_doubao.py +535 -0
- bizyengine/bizyair_extras/third_party_api/nodes_flux.py +173 -0
- bizyengine/bizyair_extras/third_party_api/nodes_gemini.py +403 -0
- bizyengine/bizyair_extras/third_party_api/nodes_gpt.py +101 -0
- bizyengine/bizyair_extras/third_party_api/nodes_hailuo.py +115 -0
- bizyengine/bizyair_extras/third_party_api/nodes_kling.py +404 -0
- bizyengine/bizyair_extras/third_party_api/nodes_sora.py +218 -0
- bizyengine/bizyair_extras/third_party_api/nodes_veo3.py +193 -0
- bizyengine/bizyair_extras/third_party_api/nodes_wan_api.py +198 -0
- bizyengine/bizyair_extras/third_party_api/trd_nodes_base.py +183 -0
- bizyengine/bizyair_extras/utils/aliyun_oss.py +92 -0
- bizyengine/bizyair_extras/utils/audio.py +88 -0
- bizyengine/bizybot/__init__.py +12 -0
- bizyengine/bizybot/client.py +774 -0
- bizyengine/bizybot/config.py +129 -0
- bizyengine/bizybot/coordinator.py +556 -0
- bizyengine/bizybot/exceptions.py +186 -0
- bizyengine/bizybot/mcp/__init__.py +3 -0
- bizyengine/bizybot/mcp/manager.py +520 -0
- bizyengine/bizybot/mcp/models.py +46 -0
- bizyengine/bizybot/mcp/registry.py +129 -0
- bizyengine/bizybot/mcp/routing.py +378 -0
- bizyengine/bizybot/models.py +344 -0
- bizyengine/core/__init__.py +1 -0
- bizyengine/core/commands/servers/prompt_server.py +10 -1
- bizyengine/core/common/client.py +8 -7
- bizyengine/core/common/utils.py +30 -1
- bizyengine/core/image_utils.py +12 -283
- bizyengine/misc/llm.py +32 -15
- bizyengine/misc/utils.py +179 -2
- bizyengine/version.txt +1 -1
- {bizyengine-1.2.45.dist-info → bizyengine-1.2.71.dist-info}/METADATA +3 -1
- {bizyengine-1.2.45.dist-info → bizyengine-1.2.71.dist-info}/RECORD +40 -16
- {bizyengine-1.2.45.dist-info → bizyengine-1.2.71.dist-info}/WHEEL +0 -0
- {bizyengine-1.2.45.dist-info → bizyengine-1.2.71.dist-info}/top_level.txt +0 -0
bizyengine/bizyair_extras/third_party_api/nodes_sora.py

@@ -0,0 +1,218 @@
from bizyairsdk import tensor_to_bytesio

from .trd_nodes_base import BizyAirTrdApiBaseNode


class Sora_V2_I2V_API(BizyAirTrdApiBaseNode):
    NODE_DISPLAY_NAME = "Sora2 Image To Video"
    RETURN_TYPES = ("VIDEO",)
    RETURN_NAMES = ("video",)
    CATEGORY = "☁️BizyAir/External APIs/Sora"

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "prompt": (
                    "STRING",
                    {
                        "multiline": True,
                        "default": "",
                    },
                ),
                "image": ("IMAGE", {"tooltip": "First frame image"}),
                "model": (["sora-2", "sora-2-pro"], {"default": "sora-2"}),
            },
            "optional": {
                "aspect_ratio": (
                    ["9:16", "16:9"],
                    {"default": "16:9"},
                ),
                "duration": ([10, 15], {"default": 10}),
                "size": (["small", "large"], {"default": "small"}),
            },
        }

    def handle_inputs(self, headers, prompt_id, **kwargs):
        # Parameters
        aspect_ratio = kwargs.get("aspect_ratio", "16:9")
        duration = kwargs.get("duration", 10)
        size = kwargs.get("size", "small")
        model = kwargs.get("model", "sora-2")
        prompt = kwargs.get("prompt", "")
        image = kwargs.get("image", None)
        if image is None:
            raise ValueError("Image is required")
        # Upload the image
        url = self.upload_file(
            tensor_to_bytesio(image=image, total_pixels=4096 * 4096),
            f"{prompt_id}.png",
            headers,
        )

        data = {
            "model": model,
            "aspect_ratio": aspect_ratio,
            "duration": duration,
            "size": size,
            "prompt": prompt,
            "url": url,
        }
        return data, model

    def handle_outputs(self, outputs):
        return (outputs[0][0],)


class Sora_V2_T2V_API(BizyAirTrdApiBaseNode):
    NODE_DISPLAY_NAME = "Sora2 Text To Video"
    RETURN_TYPES = ("VIDEO",)
    RETURN_NAMES = ("video",)
    CATEGORY = "☁️BizyAir/External APIs/Sora"

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "prompt": (
                    "STRING",
                    {
                        "multiline": True,
                        "default": "",
                    },
                ),
                "model": (["sora-2", "sora-2-pro"], {"default": "sora-2"}),
            },
            "optional": {
                "aspect_ratio": (
                    ["9:16", "16:9"],
                    {"default": "16:9"},
                ),
                "duration": ([10, 15], {"default": 10}),
                "size": (["small", "large"], {"default": "small"}),
            },
        }

    def handle_inputs(self, headers, prompt_id, **kwargs):
        model = kwargs.get("model", "sora-2")
        duration = kwargs.get("duration", 10)
        aspect_ratio = kwargs.get("aspect_ratio", "16:9")
        size = kwargs.get("size", "small")
        prompt = kwargs.get("prompt", "")
        data = {
            "model": model,
            "duration": duration,
            "size": size,
            "prompt": prompt,
            "aspect_ratio": aspect_ratio,
        }
        return data, model

    def handle_outputs(self, outputs):
        return (outputs[0][0],)


# class Sora_V2_PRO_I2V_API(BizyAirTrdApiBaseNode):
#     NODE_DISPLAY_NAME = "Sora2 Pro Image To Video"
#     RETURN_TYPES = ("VIDEO",)
#     RETURN_NAMES = ("video",)
#     CATEGORY = "☁️BizyAir/External APIs/Sora"

#     @classmethod
#     def INPUT_TYPES(cls):
#         return {
#             "required": {
#                 "prompt": (
#                     "STRING",
#                     {
#                         "multiline": True,
#                         "default": "",
#                     },
#                 ),
#                 "image": ("IMAGE", {"tooltip": "First frame image"}),
#                 "model": (["sora-2-pro"], {"default": "sora-2-pro"}),
#             },
#             "optional": {
#                 "aspect_ratio": (
#                     ["9:16", "16:9"],
#                     {"default": "16:9"},
#                 ),
#                 "duration": ([10, 15, 25], {"default": 10}),
#                 "size": (["standard", "high"], {"default": "standard"}),
#             },
#         }

#     def handle_inputs(self, headers, prompt_id, **kwargs):
#         # Parameters
#         aspect_ratio = kwargs.get("aspect_ratio", "16:9")
#         duration = kwargs.get("duration", 10)
#         size = kwargs.get("size", "standard")
#         model = kwargs.get("model", "sora-2-pro")
#         prompt = kwargs.get("prompt", "")
#         image = kwargs.get("image", None)
#         if image is None:
#             raise ValueError("Image is required")
#         # Upload the image
#         url = self.upload_file(
#             tensor_to_bytesio(image=image, total_pixels=4096 * 4096),
#             f"{prompt_id}.png",
#             headers,
#         )

#         data = {
#             "model": model,
#             "aspect_ratio": aspect_ratio,
#             "duration": duration,
#             "size": size,
#             "prompt": prompt,
#             "url": url,
#         }
#         return data, model


# class Sora_V2_PRO_T2V_API(BizyAirTrdApiBaseNode):
#     NODE_DISPLAY_NAME = "Sora2 Pro Text To Video"
#     RETURN_TYPES = ("VIDEO",)
#     RETURN_NAMES = ("video",)
#     CATEGORY = "☁️BizyAir/External APIs/Sora"

#     @classmethod
#     def INPUT_TYPES(cls):
#         return {
#             "required": {
#                 "prompt": (
#                     "STRING",
#                     {
#                         "multiline": True,
#                         "default": "",
#                     },
#                 ),
#                 "model": (["sora-2-pro"], {"default": "sora-2-pro"}),
#             },
#             "optional": {
#                 "aspect_ratio": (
#                     ["9:16", "16:9"],
#                     {"default": "16:9"},
#                 ),
#                 "duration": ([10, 15, 25], {"default": 10}),
#                 "size": (["standard", "high"], {"default": "standard"}),
#             },
#         }

#     def handle_inputs(self, headers, prompt_id, **kwargs):
#         model = kwargs.get("model", "sora-2-pro")
#         duration = kwargs.get("duration", 10)
#         aspect_ratio = kwargs.get("aspect_ratio", "16:9")
#         size = kwargs.get("size", "standard")
#         prompt = kwargs.get("prompt", "")
#         data = {
#             "model": model,
#             "duration": duration,
#             "size": size,
#             "prompt": prompt,
#             "aspect_ratio": aspect_ratio,
#         }
#         return data, model

#     def handle_outputs(self, outputs):
#         return (outputs[0][0],)
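All of the new third-party API nodes in this release (the Sora classes above and the Veo and Wan classes below) follow the same pattern: they subclass BizyAirTrdApiBaseNode from the new trd_nodes_base.py (added in this diff but not shown in this section) and implement only handle_inputs and handle_outputs. A minimal sketch of the contract these subclasses appear to rely on follows; the api_call entry point, the upload_file signature, and the _submit_and_poll helper are illustrative assumptions, not the actual base-class implementation.

import io


class TrdApiBaseNodeSketch:
    """Hypothetical sketch of the BizyAirTrdApiBaseNode contract.

    Illustration only: it mirrors how the subclasses in this diff are
    written, not the real code in trd_nodes_base.py.
    """

    FUNCTION = "api_call"  # ComfyUI invokes this method on the node
    RETURN_TYPES = ()

    def handle_inputs(self, headers, prompt_id, **kwargs):
        # Subclasses build the provider payload and return (payload, model_name).
        raise NotImplementedError

    def handle_outputs(self, outputs):
        # Subclasses map the provider response onto their RETURN_TYPES tuple.
        raise NotImplementedError

    def upload_file(self, data: io.BytesIO, filename: str, headers: dict) -> str:
        # Assumed helper: store the bytes (e.g. via the new aliyun_oss.py
        # utility) and return a URL the third-party API can fetch.
        raise NotImplementedError

    def api_call(self, prompt_id="", **kwargs):
        headers = {}  # real code would attach BizyAir auth headers here
        payload, model = self.handle_inputs(headers, prompt_id, **kwargs)
        outputs = self._submit_and_poll(model, payload, headers)  # hypothetical
        return self.handle_outputs(outputs)

    def _submit_and_poll(self, model, payload, headers):
        # Assumed helper: submit the job, poll until it completes, and
        # return the raw outputs consumed by handle_outputs.
        raise NotImplementedError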
bizyengine/bizyair_extras/third_party_api/nodes_veo3.py

@@ -0,0 +1,193 @@
from bizyairsdk import tensor_to_bytesio

from .trd_nodes_base import BizyAirTrdApiBaseNode


class Veo_V3_1_I2V_API(BizyAirTrdApiBaseNode):
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "prompt": (
                    "STRING",
                    {
                        "multiline": True,
                        "default": "",
                    },
                ),
                "first_frame_image": ("IMAGE", {"tooltip": "First frame image"}),
                "model": (["veo3.1-fast", "veo3.1-pro"], {"default": "veo3.1-fast"}),
            },
            "optional": {
                "last_frame_image": ("IMAGE", {"tooltip": "Last frame image"}),
                "aspect_ratio": (
                    ["9:16", "16:9"],
                    {"default": "16:9"},
                ),
            },
        }

    NODE_DISPLAY_NAME = "Veo3.1 Image To Video"
    RETURN_TYPES = ("VIDEO",)
    RETURN_NAMES = ("video",)
    CATEGORY = "☁️BizyAir/External APIs/Veo"

    def handle_inputs(self, headers, prompt_id, **kwargs):
        # Parameters
        model = kwargs.get("model", "veo3.1-fast")
        prompt = kwargs.get("prompt", "")
        aspect_ratio = kwargs.get("aspect_ratio", "16:9")
        first_frame_image = kwargs.get("first_frame_image", None)
        last_frame_image = kwargs.get("last_frame_image", None)

        if prompt is None or prompt.strip() == "":
            raise ValueError("Prompt is required")

        # Upload the images
        if first_frame_image is None:
            raise ValueError("First frame image is required")
        first_frame_image_url = self.upload_file(
            tensor_to_bytesio(image=first_frame_image, total_pixels=4096 * 4096),
            f"{prompt_id}_first.png",
            headers,
        )
        data = {
            "aspect_ratio": aspect_ratio,
            "model": model,
            "prompt": prompt,
            "first_frame_image": first_frame_image_url,
        }
        if last_frame_image is not None:
            last_frame_image_url = self.upload_file(
                tensor_to_bytesio(image=last_frame_image, total_pixels=4096 * 4096),
                f"{prompt_id}_last.png",
                headers,
            )
            data["last_frame_image"] = last_frame_image_url

        return data, "veo3.1"

    def handle_outputs(self, outputs):
        return (outputs[0][0],)


class Veo_V3_1_I2V_REF_API(BizyAirTrdApiBaseNode):
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "prompt": (
                    "STRING",
                    {
                        "multiline": True,
                        "default": "",
                    },
                ),
                "ref_image_1": ("IMAGE", {"tooltip": "Reference image 1"}),
                "model": (["veo3.1-fast"], {"default": "veo3.1-fast"}),
            },
            "optional": {
                "ref_image_2": ("IMAGE", {"tooltip": "Reference image 2"}),
                "ref_image_3": ("IMAGE", {"tooltip": "Reference image 3"}),
                "aspect_ratio": (["16:9"], {"default": "16:9"}),
            },
        }

    NODE_DISPLAY_NAME = "Veo3.1 Image To Video (Reference Images)"
    RETURN_TYPES = ("VIDEO",)
    RETURN_NAMES = ("video",)
    CATEGORY = "☁️BizyAir/External APIs/Veo"
    FUNCTION = "api_call"

    def handle_inputs(self, headers, prompt_id, **kwargs):
        # Parameters
        model = kwargs.get("model", "veo3.1-fast")
        prompt = kwargs.get("prompt", "")
        aspect_ratio = kwargs.get("aspect_ratio", "16:9")
        ref_image_1 = kwargs.get("ref_image_1", None)
        ref_image_2 = kwargs.get("ref_image_2", None)
        ref_image_3 = kwargs.get("ref_image_3", None)

        if prompt is None or prompt.strip() == "":
            raise ValueError("Prompt is required")

        # Upload the reference images
        ref_images = []
        if ref_image_1 is not None:
            ref_image_1_url = self.upload_file(
                tensor_to_bytesio(image=ref_image_1, total_pixels=4096 * 4096),
                f"{prompt_id}_ref_1.png",
                headers,
            )
            ref_images.append(ref_image_1_url)
        if ref_image_2 is not None:
            ref_image_2_url = self.upload_file(
                tensor_to_bytesio(image=ref_image_2, total_pixels=4096 * 4096),
                f"{prompt_id}_ref_2.png",
                headers,
            )
            ref_images.append(ref_image_2_url)
        if ref_image_3 is not None:
            ref_image_3_url = self.upload_file(
                tensor_to_bytesio(image=ref_image_3, total_pixels=4096 * 4096),
                f"{prompt_id}_ref_3.png",
                headers,
            )
            ref_images.append(ref_image_3_url)
        if len(ref_images) == 0:
            raise ValueError("At least one reference image is required")
        data = {
            "aspect_ratio": aspect_ratio,
            "model": model,
            "prompt": prompt,
            "urls": ref_images,
        }
        return data, "veo3.1"

    def handle_outputs(self, outputs):
        return (outputs[0][0],)


class Veo_V3_1_T2V_API(BizyAirTrdApiBaseNode):
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "prompt": (
                    "STRING",
                    {
                        "multiline": True,
                        "default": "",
                    },
                ),
                "model": (["veo3.1-fast", "veo3.1-pro"], {"default": "veo3.1-fast"}),
            },
            "optional": {
                "aspect_ratio": (
                    ["9:16", "16:9"],
                    {"default": "16:9"},
                ),
            },
        }

    NODE_DISPLAY_NAME = "Veo3.1 Text To Video"
    RETURN_TYPES = ("VIDEO",)
    RETURN_NAMES = ("video",)
    CATEGORY = "☁️BizyAir/External APIs/Veo"

    def handle_inputs(self, headers, prompt_id, **kwargs):
        model = kwargs.get("model", "veo3.1-fast")
        prompt = kwargs.get("prompt", "")
        aspect_ratio = kwargs.get("aspect_ratio", "16:9")

        if prompt is None or prompt.strip() == "":
            raise ValueError("Prompt is required")
        data = {
            "aspect_ratio": aspect_ratio,
            "model": model,
            "prompt": prompt,
        }
        return data, "veo3.1"

    def handle_outputs(self, outputs):
        return (outputs[0][0],)
bizyengine/bizyair_extras/third_party_api/nodes_wan_api.py

@@ -0,0 +1,198 @@
from bizyairsdk import tensor_to_bytesio

from bizyengine.bizyair_extras.utils.audio import save_audio

from .trd_nodes_base import BizyAirTrdApiBaseNode


class Wan_V2_5_I2V_API(BizyAirTrdApiBaseNode):
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),
            },
            "optional": {
                "audio": ("AUDIO",),
                "prompt": (
                    "STRING",
                    {
                        "multiline": True,
                        "default": "",
                    },
                ),
                "negative_prompt": (
                    "STRING",
                    {
                        "multiline": True,
                        "default": "",
                    },
                ),
                "resolution": (
                    ["480P", "720P", "1080P"],
                    {"default": "1080P"},
                ),
                "duration": ([5, 10], {"default": 5}),
                "prompt_extend": (
                    "BOOLEAN",
                    {
                        "default": True,
                        "tooltip": "Whether to enable intelligent prompt rewriting. When enabled, a large language model rewrites the input prompt; this noticeably improves results for short prompts but adds latency.",
                    },
                ),
                "auto_audio": (
                    "BOOLEAN",
                    {
                        "default": True,
                        "tooltip": "Whether the model generates audio automatically; lower priority than the audio parameter.",
                    },
                ),
            },
        }

    NODE_DISPLAY_NAME = "Wan2.5 Image To Video"
    RETURN_TYPES = ("VIDEO", "STRING")
    RETURN_NAMES = ("video", "actual_prompt")
    CATEGORY = "☁️BizyAir/External APIs/WanVideo"

    def handle_inputs(self, headers, prompt_id, **kwargs):
        # Parameters
        prompt = kwargs.get("prompt", "")
        negative_prompt = kwargs.get("negative_prompt", "")
        audio = kwargs.get("audio", None)
        resolution = kwargs.get("resolution", "1080P")
        duration = kwargs.get("duration", 5)
        prompt_extend = kwargs.get("prompt_extend", True)
        auto_audio = kwargs.get("auto_audio", True)
        image = kwargs.get("image", None)

        model = "wan2.5-i2v-preview"
        input = {
            "resolution": resolution,
            "prompt_extend": prompt_extend,
            "duration": duration,
            "audio": auto_audio,
            "model": model,
        }
        if prompt is not None and prompt.strip() != "":
            input["prompt"] = prompt
        if negative_prompt is not None and negative_prompt.strip() != "":
            input["negative_prompt"] = negative_prompt

        # Upload the image & audio
        if image is not None:
            image_url = self.upload_file(
                tensor_to_bytesio(image=image, total_pixels=4096 * 4096),
                f"{prompt_id}.png",
                headers,
            )
            input["img_url"] = image_url
        if audio is not None:
            audio_url = self.upload_file(
                save_audio(audio=audio, format="mp3"), f"{prompt_id}.mp3", headers
            )
            input["audio_url"] = audio_url

        return input, model

    def handle_outputs(self, outputs):
        return (outputs[0][0],)


class Wan_V2_5_T2V_API(BizyAirTrdApiBaseNode):
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "prompt": (
                    "STRING",
                    {
                        "multiline": True,
                        "default": "",
                    },
                ),
            },
            "optional": {
                "audio": ("AUDIO",),
                "negative_prompt": (
                    "STRING",
                    {
                        "multiline": True,
                        "default": "",
                    },
                ),
                "size": (
                    [
                        "832*480",
                        "480*832",
                        "624*624",
                        "1280*720",
                        "720*1280",
                        "960*960",
                        "1088*832",
                        "832*1088",
                        "1920*1080",
                        "1080*1920",
                        "1440*1440",
                        "1632*1248",
                        "1248*1632",
                    ],
                    {"default": "1920*1080"},
                ),
                "duration": ([5, 10], {"default": 5}),
                "prompt_extend": (
                    "BOOLEAN",
                    {
                        "default": True,
                        "tooltip": "Whether to enable intelligent prompt rewriting. When enabled, a large language model rewrites the input prompt; this noticeably improves results for short prompts but adds latency.",
                    },
                ),
                "auto_audio": (
                    "BOOLEAN",
                    {
                        "default": True,
                        "tooltip": "Whether the model generates audio automatically; lower priority than the audio parameter.",
                    },
                ),
            },
        }

    NODE_DISPLAY_NAME = "Wan2.5 Text To Video"
    RETURN_TYPES = ("VIDEO", "STRING")
    RETURN_NAMES = ("video", "actual_prompt")
    CATEGORY = "☁️BizyAir/External APIs/WanVideo"

    def handle_inputs(self, headers, prompt_id, **kwargs):
        # Parameters
        model = "wan2.5-t2v-preview"
        negative_prompt = kwargs.get("negative_prompt", "")
        audio = kwargs.get("audio", None)
        size = kwargs.get("size", "1920*1080")
        duration = kwargs.get("duration", 5)
        prompt_extend = kwargs.get("prompt_extend", True)
        auto_audio = kwargs.get("auto_audio", True)
        prompt = kwargs.get("prompt", "")

        input = {
            "size": size,
            "prompt_extend": prompt_extend,
            "duration": duration,
            "audio": auto_audio,
            "model": model,
        }
        if prompt is not None and prompt.strip() != "":
            input["prompt"] = prompt
        if negative_prompt is not None and negative_prompt.strip() != "":
            input["negative_prompt"] = negative_prompt

        # Upload the audio
        if audio is not None:
            audio_url = self.upload_file(
                save_audio(audio=audio, format="mp3"), f"{prompt_id}.mp3", headers
            )
            input["audio_url"] = audio_url

        return input, model

    def handle_outputs(self, outputs):
        return (outputs[0][0],)
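The text-to-video variants build their request payload without any upload step, so their handle_inputs can be exercised on its own. A minimal sketch, assuming the installed 1.2.71 package layout matches the file list above and that the base class has a no-argument constructor; headers and prompt_id are placeholder values:

# Hypothetical smoke test of the payload built by Wan_V2_5_T2V_API.handle_inputs.
from bizyengine.bizyair_extras.third_party_api.nodes_wan_api import Wan_V2_5_T2V_API

node = Wan_V2_5_T2V_API()
payload, model = node.handle_inputs(
    headers={},              # unused here: with no audio input, upload_file is never called
    prompt_id="debug-0001",
    prompt="A corgi surfing at sunset",
    size="1280*720",
    duration=5,
)

assert model == "wan2.5-t2v-preview"
# payload == {"size": "1280*720", "prompt_extend": True, "duration": 5,
#             "audio": True, "model": "wan2.5-t2v-preview",
#             "prompt": "A corgi surfing at sunset"}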