bizyengine 0.4.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- bizyengine/__init__.py +35 -0
- bizyengine/bizy_server/__init__.py +7 -0
- bizyengine/bizy_server/api_client.py +763 -0
- bizyengine/bizy_server/errno.py +122 -0
- bizyengine/bizy_server/error_handler.py +3 -0
- bizyengine/bizy_server/execution.py +55 -0
- bizyengine/bizy_server/resp.py +24 -0
- bizyengine/bizy_server/server.py +898 -0
- bizyengine/bizy_server/utils.py +93 -0
- bizyengine/bizyair_extras/__init__.py +24 -0
- bizyengine/bizyair_extras/nodes_advanced_refluxcontrol.py +62 -0
- bizyengine/bizyair_extras/nodes_cogview4.py +31 -0
- bizyengine/bizyair_extras/nodes_comfyui_detail_daemon.py +180 -0
- bizyengine/bizyair_extras/nodes_comfyui_instantid.py +164 -0
- bizyengine/bizyair_extras/nodes_comfyui_layerstyle_advance.py +141 -0
- bizyengine/bizyair_extras/nodes_comfyui_pulid_flux.py +88 -0
- bizyengine/bizyair_extras/nodes_controlnet.py +50 -0
- bizyengine/bizyair_extras/nodes_custom_sampler.py +130 -0
- bizyengine/bizyair_extras/nodes_dataset.py +99 -0
- bizyengine/bizyair_extras/nodes_differential_diffusion.py +16 -0
- bizyengine/bizyair_extras/nodes_flux.py +69 -0
- bizyengine/bizyair_extras/nodes_image_utils.py +93 -0
- bizyengine/bizyair_extras/nodes_ip2p.py +20 -0
- bizyengine/bizyair_extras/nodes_ipadapter_plus/__init__.py +1 -0
- bizyengine/bizyair_extras/nodes_ipadapter_plus/nodes_ipadapter_plus.py +1598 -0
- bizyengine/bizyair_extras/nodes_janus_pro.py +81 -0
- bizyengine/bizyair_extras/nodes_kolors_mz/__init__.py +86 -0
- bizyengine/bizyair_extras/nodes_model_advanced.py +62 -0
- bizyengine/bizyair_extras/nodes_sd3.py +52 -0
- bizyengine/bizyair_extras/nodes_segment_anything.py +256 -0
- bizyengine/bizyair_extras/nodes_segment_anything_utils.py +134 -0
- bizyengine/bizyair_extras/nodes_testing_utils.py +139 -0
- bizyengine/bizyair_extras/nodes_trellis.py +199 -0
- bizyengine/bizyair_extras/nodes_ultimatesdupscale.py +137 -0
- bizyengine/bizyair_extras/nodes_upscale_model.py +32 -0
- bizyengine/bizyair_extras/nodes_wan_video.py +49 -0
- bizyengine/bizyair_extras/oauth_callback/main.py +118 -0
- bizyengine/core/__init__.py +8 -0
- bizyengine/core/commands/__init__.py +1 -0
- bizyengine/core/commands/base.py +27 -0
- bizyengine/core/commands/invoker.py +4 -0
- bizyengine/core/commands/processors/model_hosting_processor.py +0 -0
- bizyengine/core/commands/processors/prompt_processor.py +123 -0
- bizyengine/core/commands/servers/model_server.py +0 -0
- bizyengine/core/commands/servers/prompt_server.py +234 -0
- bizyengine/core/common/__init__.py +8 -0
- bizyengine/core/common/caching.py +198 -0
- bizyengine/core/common/client.py +262 -0
- bizyengine/core/common/env_var.py +101 -0
- bizyengine/core/common/utils.py +93 -0
- bizyengine/core/configs/conf.py +112 -0
- bizyengine/core/configs/models.json +101 -0
- bizyengine/core/configs/models.yaml +329 -0
- bizyengine/core/data_types.py +20 -0
- bizyengine/core/image_utils.py +288 -0
- bizyengine/core/nodes_base.py +159 -0
- bizyengine/core/nodes_io.py +97 -0
- bizyengine/core/path_utils/__init__.py +9 -0
- bizyengine/core/path_utils/path_manager.py +276 -0
- bizyengine/core/path_utils/utils.py +34 -0
- bizyengine/misc/__init__.py +0 -0
- bizyengine/misc/auth.py +83 -0
- bizyengine/misc/llm.py +431 -0
- bizyengine/misc/mzkolors.py +93 -0
- bizyengine/misc/nodes.py +1208 -0
- bizyengine/misc/nodes_controlnet_aux.py +491 -0
- bizyengine/misc/nodes_controlnet_union_sdxl.py +171 -0
- bizyengine/misc/route_sam.py +60 -0
- bizyengine/misc/segment_anything.py +276 -0
- bizyengine/misc/supernode.py +182 -0
- bizyengine/misc/utils.py +218 -0
- bizyengine/version.txt +1 -0
- bizyengine-0.4.2.dist-info/METADATA +12 -0
- bizyengine-0.4.2.dist-info/RECORD +76 -0
- bizyengine-0.4.2.dist-info/WHEEL +5 -0
- bizyengine-0.4.2.dist-info/top_level.txt +1 -0
bizyengine/misc/llm.py
ADDED
|
@@ -0,0 +1,431 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import json
|
|
3
|
+
|
|
4
|
+
import aiohttp
|
|
5
|
+
from aiohttp import web
|
|
6
|
+
from bizyengine.core.common.env_var import BIZYAIR_SERVER_ADDRESS
|
|
7
|
+
from bizyengine.core.image_utils import decode_data, encode_comfy_image, encode_data
|
|
8
|
+
from server import PromptServer
|
|
9
|
+
|
|
10
|
+
from .utils import (
|
|
11
|
+
decode_and_deserialize,
|
|
12
|
+
get_api_key,
|
|
13
|
+
get_llm_response,
|
|
14
|
+
get_vlm_response,
|
|
15
|
+
send_post_request,
|
|
16
|
+
serialize_and_encode,
|
|
17
|
+
)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
async def fetch_all_models(api_key):
    """Fetch the list of chat-capable model ids from the SiliconFlow API.

    Args:
        api_key: Bearer token used to authorize the request.

    Returns:
        A list of model-id strings, or an empty list when the request fails,
        times out, or the payload does not have the expected shape.
    """
    url = "https://api.siliconflow.cn/v1/models"
    headers = {"accept": "application/json", "authorization": f"Bearer {api_key}"}
    params = {"type": "text", "sub_type": "chat"}

    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(
                url, headers=headers, params=params, timeout=10
            ) as response:
                if response.status != 200:
                    print(f"Error fetching models: HTTP Status {response.status}")
                    return []
                data = await response.json()
                # Guard against an unexpected payload shape instead of letting
                # a KeyError escape into the endpoint handlers.
                return [model["id"] for model in data.get("data", [])]
    except aiohttp.ClientError as e:
        print(f"Error fetching models: {e}")
        return []
    except asyncio.TimeoutError:
        # Same class as asyncio.exceptions.TimeoutError; the short alias is
        # the conventional spelling.
        print("Request to fetch models timed out")
        return []
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
@PromptServer.instance.routes.post("/bizyair/get_silicon_cloud_llm_models")
async def get_silicon_cloud_llm_models_endpoint(request):
    """Return SiliconCloud chat (LLM) model ids plus an opt-out sentinel.

    Reads an optional "api_key" from the JSON body and falls back to the
    locally configured key only when the client did not supply one.
    """
    data = await request.json()
    # dict.get(key, default) evaluates the fallback eagerly, so the original
    # called get_api_key() even when the client sent its own key; evaluate the
    # fallback lazily instead.
    api_key = data.get("api_key")
    if not api_key:
        api_key = get_api_key()
    all_models = await fetch_all_models(api_key)
    # Heuristic: model ids containing "vl" are vision models; exclude them here.
    llm_models = [model for model in all_models if "vl" not in model.lower()]
    llm_models.append("No LLM Enhancement")
    return web.json_response(llm_models)
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
@PromptServer.instance.routes.post("/bizyair/get_silicon_cloud_vlm_models")
async def get_silicon_cloud_vlm_models_endpoint(request):
    """Return SiliconCloud vision (VLM) model ids plus an opt-out sentinel.

    Reads an optional "api_key" from the JSON body and falls back to the
    locally configured key only when the client did not supply one.
    """
    data = await request.json()
    # Evaluate the get_api_key() fallback lazily; dict.get(key, default)
    # would call it even when the client provided a key.
    api_key = data.get("api_key")
    if not api_key:
        api_key = get_api_key()
    all_models = await fetch_all_models(api_key)
    # Heuristic: model ids containing "vl" are vision models; keep only those.
    vlm_models = [model for model in all_models if "vl" in model.lower()]
    vlm_models.append("No VLM Enhancement")
    return web.json_response(vlm_models)
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
class SiliconCloudLLMAPI:
    """ComfyUI node that rewrites a user prompt via a SiliconCloud LLM.

    Selecting "No LLM Enhancement" bypasses the remote call and passes the
    user prompt through unchanged.
    """

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        default_system_prompt = """你是一个 stable diffusion prompt 专家,为我生成适用于 Stable Diffusion 模型的prompt。 我给你相关的单词,你帮我扩写为适合 Stable Diffusion 文生图的 prompt。要求: 1. 英文输出 2. 除了 prompt 外,不要输出任何其它的信息 """
        return {
            "required": {
                # Empty combo: the frontend populates the model list via the
                # /bizyair/get_silicon_cloud_llm_models endpoint.
                "model": ((), {}),
                "system_prompt": (
                    "STRING",
                    {
                        "default": default_system_prompt,
                        "multiline": True,
                        "dynamicPrompts": True,
                    },
                ),
                "user_prompt": (
                    "STRING",
                    {
                        "default": "小猫,梵高风格",
                        "multiline": True,
                        "dynamicPrompts": True,
                    },
                ),
                # Int literal for an INT widget bound (was the float 1e5).
                "max_tokens": ("INT", {"default": 512, "min": 100, "max": 100000}),
                "temperature": (
                    "FLOAT",
                    {"default": 0.7, "min": 0.0, "max": 2.0, "step": 0.01},
                ),
            }
        }

    RETURN_TYPES = ("STRING",)
    FUNCTION = "get_llm_model_response"
    OUTPUT_NODE = False
    CATEGORY = "☁️BizyAir/AI Assistants"

    def get_llm_model_response(
        self, model, system_prompt, user_prompt, max_tokens, temperature
    ):
        """Call the selected LLM and return the generated prompt text.

        Returns the raw user prompt (with a UI echo) when enhancement is
        disabled; otherwise the first choice's message content.
        """
        if model == "No LLM Enhancement":
            return {"ui": {"text": (user_prompt,)}, "result": (user_prompt,)}
        response = get_llm_response(
            model,
            system_prompt,
            user_prompt,
            max_tokens,
            temperature,
        )
        ret = json.loads(response)
        text = ret["choices"][0]["message"]["content"]
        return (text,)  # if update ui: {"ui": {"text": (text,)}, "result": (text,)}
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
class SiliconCloudVLMAPI:
    """ComfyUI node that answers a prompt about input images via a
    SiliconCloud vision-language model.

    Selecting "No VLM Enhancement" bypasses the remote call and passes the
    user prompt through unchanged.
    """

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                # Empty combo: the frontend populates the model list via the
                # /bizyair/get_silicon_cloud_vlm_models endpoint.
                "model": ((), {}),
                "system_prompt": (
                    "STRING",
                    {
                        "default": "你是一个能分析图像的AI助手。请仔细观察图像,并根据用户的问题提供详细、准确的描述。",
                        "multiline": True,
                    },
                ),
                "user_prompt": (
                    "STRING",
                    {
                        "default": "请描述这张图片的内容,并指出任何有趣或不寻常的细节。",
                        "multiline": True,
                    },
                ),
                "images": ("IMAGE",),
                # Int literal for an INT widget bound (was the float 1e5).
                "max_tokens": ("INT", {"default": 512, "min": 100, "max": 100000}),
                "temperature": (
                    "FLOAT",
                    {"default": 0.7, "min": 0.0, "max": 2.0, "step": 0.01},
                ),
                "detail": (["auto", "low", "high"], {"default": "auto"}),
            }
        }

    RETURN_TYPES = ("STRING",)
    FUNCTION = "get_vlm_model_response"
    OUTPUT_NODE = False
    CATEGORY = "☁️BizyAir/AI Assistants"

    def get_vlm_model_response(
        self, model, system_prompt, user_prompt, images, max_tokens, temperature, detail
    ):
        """Encode the image batch, query the VLM, and return the answer text.

        Returns the raw user prompt when enhancement is disabled.
        """
        if model == "No VLM Enhancement":
            return (user_prompt,)

        # Encode the whole image batch as lossless WEBP for transport.
        encoded_images_json = encode_comfy_image(
            images, image_format="WEBP", lossless=True
        )
        encoded_images_dict = json.loads(encoded_images_json)

        # Extract all encoded images (dict values are the base64 payloads).
        base64_images = list(encoded_images_dict.values())

        response = get_vlm_response(
            model,
            system_prompt,
            user_prompt,
            base64_images,
            max_tokens,
            temperature,
            detail,
        )
        ret = json.loads(response)
        text = ret["choices"][0]["message"]["content"]
        return (text,)
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
class BizyAirJoyCaption:
    """ComfyUI node that captions an image via the remote JoyCaption service.

    refer to: https://huggingface.co/spaces/fancyfeast/joy-caption-pre-alpha
    """

    API_URL = f"{BIZYAIR_SERVER_ADDRESS}/supernode/joycaption2"

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "do_sample": (["enable", "disable"],),
                "temperature": (
                    "FLOAT",
                    {
                        "default": 0.5,
                        "min": 0.0,
                        "max": 2.0,
                        "step": 0.01,
                        "round": 0.001,
                        "display": "number",
                    },
                ),
                "max_tokens": (
                    "INT",
                    {
                        "default": 256,
                        "min": 16,
                        "max": 512,
                        "step": 16,
                        "display": "number",
                    },
                ),
            }
        }

    RETURN_TYPES = ("STRING",)
    FUNCTION = "joycaption"

    CATEGORY = "☁️BizyAir/AI Assistants"

    def joycaption(self, image, do_sample, temperature, max_tokens):
        """Encode *image*, POST it to the caption service, return the caption.

        Raises:
            Exception: when the service reports an error or replies with an
                unexpected payload.
        """
        API_KEY = get_api_key()
        SIZE_LIMIT = 1536
        # NOTE(review): unpacked as (_, w, h, c); if the tensor is the usual
        # (batch, height, width, channels) layout the w/h names are swapped,
        # but both sides are checked against the same limit so the guard holds.
        _, w, h, c = image.shape
        assert (
            w <= SIZE_LIMIT and h <= SIZE_LIMIT
        ), f"width and height must be less than {SIZE_LIMIT}x{SIZE_LIMIT}, but got {w} and {h}"

        payload = {
            "image": encode_data(image, disable_image_marker=True),
            "do_sample": do_sample == "enable",
            "temperature": temperature,
            "max_new_tokens": max_tokens,
            "caption_type": "Descriptive",
            "caption_length": "any",
            "extra_options": [],
            "name_input": "",
            "custom_prompt": "A descriptive caption for this image:\n",
        }
        headers = {
            "accept": "application/json",
            "content-type": "application/json",
            "authorization": f"Bearer {API_KEY}",
        }

        ret = json.loads(
            send_post_request(self.API_URL, payload=payload, headers=headers)
        )

        # Unwrap the optional "result" envelope.  Only decoding problems count
        # as "unexpected response"; a deliberate error report from the service
        # is re-raised with its own message.  (The original except caught the
        # block's own raise and re-wrapped it, losing the clean message.)
        try:
            if "result" in ret:
                ret = json.loads(ret["result"])
        except (json.JSONDecodeError, TypeError) as e:
            raise Exception(f"Unexpected response: {ret} {e=}")
        if isinstance(ret, dict) and ret.get("type") == "error":
            raise Exception(ret["message"])

        msg = ret["data"]
        if msg["type"] not in (
            "comfyair",
            "bizyair",
        ):
            raise Exception(f"Unexpected response type: {msg}")

        caption = msg["data"]
        return (caption,)
|
|
276
|
+
|
|
277
|
+
|
|
278
|
+
class BizyAirJoyCaption2:
    """ComfyUI node that captions an image via the remote JoyCaption2 service,
    exposing the full set of caption style/length options.

    refer to: https://huggingface.co/spaces/fancyfeast/joy-caption-pre-alpha
    """

    def __init__(self):
        pass

    API_URL = f"{BIZYAIR_SERVER_ADDRESS}/supernode/joycaption2"

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "do_sample": ([True, False],),
                "temperature": (
                    "FLOAT",
                    {
                        "default": 0.5,
                        "min": 0.0,
                        "max": 2.0,
                        "step": 0.01,
                        "round": 0.001,
                        "display": "number",
                    },
                ),
                "max_tokens": (
                    "INT",
                    {
                        "default": 256,
                        "min": 16,
                        "max": 512,
                        "step": 16,
                        "display": "number",
                    },
                ),
                "caption_type": (
                    [
                        "Descriptive",
                        "Descriptive (Informal)",
                        "Training Prompt",
                        "MidJourney",
                        "Booru tag list",
                        "Booru-like tag list",
                        "Art Critic",
                        "Product Listing",
                        "Social Media Post",
                    ],
                ),
                # Named lengths plus numeric word counts 20..260 step 10.
                "caption_length": (
                    ["any", "very short", "short", "medium-length", "long", "very long"]
                    + [str(i) for i in range(20, 261, 10)],
                ),
                "extra_options": (
                    "STRING",
                    {
                        "default": "If there is a person/character in the image you must refer to them as {name}.",
                        "tooltip": "Extra options for the model",
                        "multiline": True,
                    },
                ),
                "name_input": (
                    "STRING",
                    {
                        "default": "Jack",
                        "tooltip": "Name input is only used if an Extra Option is selected that requires it.",
                    },
                ),
                "custom_prompt": (
                    "STRING",
                    {
                        "default": "",
                        "multiline": True,
                    },
                ),
            }
        }

    RETURN_TYPES = ("STRING",)
    FUNCTION = "joycaption2"

    CATEGORY = "☁️BizyAir/AI Assistants"

    def joycaption2(
        self,
        image,
        do_sample,
        temperature,
        max_tokens,
        caption_type,
        caption_length,
        extra_options,
        name_input,
        custom_prompt,
    ):
        """Encode *image*, POST it with the caption options, return the caption.

        Raises:
            Exception: when the service reports an error or replies with an
                unexpected payload.
        """
        API_KEY = get_api_key()
        SIZE_LIMIT = 1536
        # NOTE(review): unpacked as (_, w, h, c); if the tensor is the usual
        # (batch, height, width, channels) layout the w/h names are swapped,
        # but both sides are checked against the same limit so the guard holds.
        _, w, h, c = image.shape
        assert (
            w <= SIZE_LIMIT and h <= SIZE_LIMIT
        ), f"width and height must be less than {SIZE_LIMIT}x{SIZE_LIMIT}, but got {w} and {h}"

        payload = {
            "image": encode_data(image, disable_image_marker=True),
            # The widget supplies a boolean; "do_sample == True" was a
            # redundant comparison (flake8 E712).
            "do_sample": bool(do_sample),
            "temperature": temperature,
            "max_new_tokens": max_tokens,
            "caption_type": caption_type,
            "caption_length": caption_length,
            "extra_options": [extra_options],
            "name_input": name_input,
            "custom_prompt": custom_prompt,
        }
        headers = {
            "accept": "application/json",
            "content-type": "application/json",
            "authorization": f"Bearer {API_KEY}",
        }

        ret = json.loads(
            send_post_request(self.API_URL, payload=payload, headers=headers)
        )

        # Unwrap the optional "result" envelope.  Only decoding problems count
        # as "unexpected response"; a deliberate error report from the service
        # is re-raised with its own message.  (The original except caught the
        # block's own raise and re-wrapped it, losing the clean message.)
        try:
            if "result" in ret:
                ret = json.loads(ret["result"])
        except (json.JSONDecodeError, TypeError) as e:
            raise Exception(f"Unexpected response: {ret} {e=}")
        if isinstance(ret, dict) and ret.get("type") == "error":
            raise Exception(ret["message"])

        msg = ret["data"]
        if msg["type"] not in (
            "comfyair",
            "bizyair",
        ):
            raise Exception(f"Unexpected response type: {msg}")

        caption = msg["data"]
        return (caption,)
|
|
418
|
+
|
|
419
|
+
|
|
420
|
+
# Registration table consumed by ComfyUI: node class name -> implementation.
NODE_CLASS_MAPPINGS = {
    "BizyAirSiliconCloudLLMAPI": SiliconCloudLLMAPI,
    "BizyAirSiliconCloudVLMAPI": SiliconCloudVLMAPI,
    "BizyAirJoyCaption": BizyAirJoyCaption,
    "BizyAirJoyCaption2": BizyAirJoyCaption2,
}
# Node class name -> human-readable title shown in the ComfyUI node menu.
NODE_DISPLAY_NAME_MAPPINGS = {
    "BizyAirSiliconCloudLLMAPI": "☁️BizyAir SiliconCloud LLM API",
    "BizyAirSiliconCloudVLMAPI": "☁️BizyAir SiliconCloud VLM API",
    "BizyAirJoyCaption": "☁️BizyAir Joy Caption",
    "BizyAirJoyCaption2": "☁️BizyAir Joy Caption2",
}
|
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import uuid
|
|
3
|
+
|
|
4
|
+
import torch
|
|
5
|
+
from bizyengine.core import BizyAirBaseNode, BizyAirNodeIO, create_node_data
|
|
6
|
+
from bizyengine.core.common.env_var import BIZYAIR_SERVER_ADDRESS
|
|
7
|
+
from bizyengine.core.data_types import CONDITIONING
|
|
8
|
+
from bizyengine.core.image_utils import encode_data
|
|
9
|
+
|
|
10
|
+
from .utils import (
|
|
11
|
+
decode_and_deserialize,
|
|
12
|
+
get_api_key,
|
|
13
|
+
send_post_request,
|
|
14
|
+
serialize_and_encode,
|
|
15
|
+
)
|
|
16
|
+
|
|
17
|
+
# Menu category shared by the Kolors nodes in this module.
CATEGORY_NAME = "☁️BizyAir/Kolors"
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class BizyAirMZChatGLM3TextEncode:
    """Encode a text prompt into CONDITIONING via the remote ChatGLM3 service."""

    API_URL = f"{BIZYAIR_SERVER_ADDRESS}/supernode/mzkolorschatglm3"

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "text": ("STRING", {"multiline": True, "dynamicPrompts": True}),
            }
        }

    RETURN_TYPES = ("CONDITIONING",)

    FUNCTION = "encode"
    CATEGORY = CATEGORY_NAME

    def encode(self, text):
        """POST *text* to the encode endpoint and rebuild torch conditioning.

        The service returns serialized numpy arrays; each entry is converted
        back into the [tensor, extras-dict] pairs ComfyUI expects.
        """
        api_key = get_api_key()
        assert len(text) <= 4096, f"the prompt is too long, length: {len(text)}"

        request_headers = {
            "accept": "application/json",
            "content-type": "application/json",
            "authorization": f"Bearer {api_key}",
        }
        raw_response: str = send_post_request(
            self.API_URL, payload={"text": text}, headers=request_headers
        )
        arrays = decode_and_deserialize(raw_response)

        # Each item is (cond ndarray, dict of extra ndarrays); convert both
        # halves back to torch tensors.
        conditioning = [
            [
                torch.from_numpy(cond),
                {key: torch.from_numpy(value) for key, value in extras.items()},
            ]
            for cond, extras in arrays
        ]

        return (conditioning,)
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
class BizyAir_MinusZoneChatGLM3TextEncode(BizyAirMZChatGLM3TextEncode, BizyAirBaseNode):
    """BizyAir-graph variant of the ChatGLM3 encoder: wraps the encoded
    conditioning in a BizyAirNodeIO relay node instead of returning it raw."""

    RETURN_TYPES = (CONDITIONING,)

    FUNCTION = "mz_encode"

    def mz_encode(self, text):
        """Encode *text* and relay the conditioning through a load-data node."""
        conditioning = self.encode(text)[0]

        node_data = create_node_data(
            class_type="ComfyAirLoadData",
            inputs={"conditioning": {"relay": conditioning}},
            outputs={"slot_index": 3},
        )
        # Random token defeats ComfyUI's result caching for this node.
        node_data["is_changed"] = uuid.uuid4().hex

        encoded = encode_data(node_data, old_version=True)
        node_io = BizyAirNodeIO(
            self.assigned_id,
            nodes={self.assigned_id: encoded},
        )
        return (node_io,)
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
# Registration table consumed by ComfyUI: node class name -> implementation.
NODE_CLASS_MAPPINGS = {
    "BizyAir_MinusZoneChatGLM3TextEncode": BizyAir_MinusZoneChatGLM3TextEncode,
}
# Node class name -> human-readable title shown in the ComfyUI node menu.
NODE_DISPLAY_NAME_MAPPINGS = {
    "BizyAir_MinusZoneChatGLM3TextEncode": "☁️BizyAir MinusZone ChatGLM3 Text Encode",
}
|