vectorvein 0.3.1__py3-none-any.whl → 0.3.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vectorvein/api/client.py +81 -103
- vectorvein/api/exceptions.py +1 -3
- vectorvein/api/models.py +11 -11
- vectorvein/chat_clients/anthropic_client.py +157 -169
- vectorvein/chat_clients/base_client.py +257 -198
- vectorvein/chat_clients/openai_compatible_client.py +150 -161
- vectorvein/chat_clients/utils.py +44 -24
- vectorvein/server/token_server.py +1 -1
- vectorvein/settings/__init__.py +27 -27
- vectorvein/types/defaults.py +32 -16
- vectorvein/types/llm_parameters.py +40 -34
- vectorvein/types/settings.py +10 -10
- vectorvein/utilities/media_processing.py +1 -1
- vectorvein/utilities/rate_limiter.py +5 -6
- vectorvein/utilities/retry.py +6 -5
- vectorvein/workflow/graph/edge.py +3 -3
- vectorvein/workflow/graph/node.py +14 -26
- vectorvein/workflow/graph/port.py +40 -39
- vectorvein/workflow/graph/workflow.py +13 -25
- vectorvein/workflow/nodes/audio_generation.py +5 -7
- vectorvein/workflow/nodes/control_flows.py +7 -9
- vectorvein/workflow/nodes/file_processing.py +4 -6
- vectorvein/workflow/nodes/image_generation.py +20 -22
- vectorvein/workflow/nodes/llms.py +13 -15
- vectorvein/workflow/nodes/media_editing.py +26 -40
- vectorvein/workflow/nodes/media_processing.py +19 -21
- vectorvein/workflow/nodes/output.py +10 -12
- vectorvein/workflow/nodes/relational_db.py +3 -5
- vectorvein/workflow/nodes/text_processing.py +8 -10
- vectorvein/workflow/nodes/tools.py +8 -10
- vectorvein/workflow/nodes/triggers.py +1 -3
- vectorvein/workflow/nodes/vector_db.py +3 -5
- vectorvein/workflow/nodes/video_generation.py +4 -6
- vectorvein/workflow/nodes/web_crawlers.py +4 -6
- vectorvein/workflow/utils/analyse.py +5 -13
- vectorvein/workflow/utils/check.py +6 -16
- vectorvein/workflow/utils/json_to_code.py +6 -14
- vectorvein/workflow/utils/layout.py +3 -5
- {vectorvein-0.3.1.dist-info → vectorvein-0.3.3.dist-info}/METADATA +1 -1
- vectorvein-0.3.3.dist-info/RECORD +68 -0
- {vectorvein-0.3.1.dist-info → vectorvein-0.3.3.dist-info}/WHEEL +1 -1
- vectorvein-0.3.1.dist-info/RECORD +0 -68
- {vectorvein-0.3.1.dist-info → vectorvein-0.3.3.dist-info}/entry_points.txt +0 -0
vectorvein/workflow/nodes/media_processing.py

```diff
@@ -1,11 +1,9 @@
-from typing import Optional
-
 from ..graph.node import Node
 from ..graph.port import PortType, InputPort, OutputPort
 
 
 class ClaudeVision(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="ClaudeVision",
             category="media_processing",
@@ -53,7 +51,7 @@ class ClaudeVision(Node):
                 "images": InputPort(
                     name="images",
                     port_type=PortType.FILE,
-                    value=
+                    value=[],
                     multiple=True,
                     support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                     condition="fields_data.get('images_or_urls') == 'images'",
@@ -73,7 +71,7 @@ class ClaudeVision(Node):
 
 
 class DeepseekVl(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="DeepseekVl",
             category="media_processing",
@@ -105,7 +103,7 @@ class DeepseekVl(Node):
                 "images": InputPort(
                     name="images",
                     port_type=PortType.FILE,
-                    value=
+                    value=[],
                     multiple=True,
                     support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                     condition="fields_data.get('images_or_urls') == 'images'",
@@ -125,7 +123,7 @@ class DeepseekVl(Node):
 
 
 class GeminiVision(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="GeminiVision",
             category="media_processing",
@@ -167,7 +165,7 @@ class GeminiVision(Node):
                 "images": InputPort(
                     name="images",
                     port_type=PortType.FILE,
-                    value=
+                    value=[],
                     multiple=True,
                     support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                     condition="fields_data.get('images_or_urls') == 'images'",
@@ -187,7 +185,7 @@ class GeminiVision(Node):
 
 
 class GlmVision(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="GlmVision",
             category="media_processing",
@@ -221,7 +219,7 @@ class GlmVision(Node):
                 "images": InputPort(
                     name="images",
                     port_type=PortType.FILE,
-                    value=
+                    value=[],
                     multiple=True,
                     support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                     condition="fields_data.images_or_urls.value == 'images'",
@@ -241,7 +239,7 @@ class GlmVision(Node):
 
 
 class GptVision(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="GptVision",
             category="media_processing",
@@ -277,7 +275,7 @@ class GptVision(Node):
                 "images": InputPort(
                     name="images",
                     port_type=PortType.FILE,
-                    value=
+                    value=[],
                     multiple=True,
                     support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                     condition="fields_data.get('images_or_urls') == 'images'",
@@ -307,7 +305,7 @@ class GptVision(Node):
 
 
 class InternVision(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="InternVision",
             category="media_processing",
@@ -340,7 +338,7 @@ class InternVision(Node):
                 "images": InputPort(
                     name="images",
                     port_type=PortType.FILE,
-                    value=
+                    value=[],
                     multiple=True,
                     support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                     condition="fields_data.get('images_or_urls') == 'images'",
@@ -360,7 +358,7 @@ class InternVision(Node):
 
 
 class Ocr(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="Ocr",
             category="media_processing",
@@ -389,7 +387,7 @@ class Ocr(Node):
                 "images": InputPort(
                     name="images",
                     port_type=PortType.FILE,
-                    value=
+                    value=[],
                     multiple=True,
                     support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                     condition="fields_data.get('images_or_urls') == 'images'",
@@ -416,7 +414,7 @@ class Ocr(Node):
                 ),
                 "output_words_info": OutputPort(
                     name="output_words_info",
-                    value=
+                    value=[],
                     condition="fields_data.get('ocr_type') in ['general', 'business_license']",
                     condition_python=lambda ports: ports["ocr_type"].value in ["general", "business_license"],
                     has_tooltip=True,
@@ -426,7 +424,7 @@ class Ocr(Node):
 
 
 class QwenVision(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="QwenVision",
             category="media_processing",
@@ -468,7 +466,7 @@ class QwenVision(Node):
                 "images": InputPort(
                     name="images",
                     port_type=PortType.FILE,
-                    value=
+                    value=[],
                     multiple=True,
                     support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                     condition="fields_data.get('images_or_urls') == 'images'",
@@ -488,7 +486,7 @@ class QwenVision(Node):
 
 
 class SpeechRecognition(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="SpeechRecognition",
             category="media_processing",
@@ -507,7 +505,7 @@ class SpeechRecognition(Node):
                 "files": InputPort(
                     name="files",
                     port_type=PortType.FILE,
-                    value=
+                    value=[],
                     multiple=True,
                     support_file_types=[".wav", ".mp3", ".mp4", ".m4a", ".wma", ".aac", ".ogg", ".amr", ".flac"],
                     condition="fields_data.get('files_or_urls') == 'files'",
```
vectorvein/workflow/nodes/output.py

```diff
@@ -1,11 +1,9 @@
-from typing import Optional
-
 from ..graph.node import Node
 from ..graph.port import PortType, InputPort, OutputPort
 
 
 class Audio(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="Audio",
             category="outputs",
@@ -63,7 +61,7 @@ class Audio(Node):
 
 
 class Text(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="Text",
             category="outputs",
@@ -93,7 +91,7 @@ class Text(Node):
 
 
 class Table(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="Table",
             category="outputs",
@@ -131,7 +129,7 @@ class Table(Node):
 
 
 class Document(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="Document",
             category="outputs",
@@ -186,7 +184,7 @@ class Document(Node):
 
 
 class Echarts(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="Echarts",
             category="outputs",
@@ -208,7 +206,7 @@ class Echarts(Node):
 
 
 class Email(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="Email",
             category="outputs",
@@ -240,7 +238,7 @@ class Email(Node):
 
 
 class Html(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="Html",
             category="outputs",
@@ -258,7 +256,7 @@ class Html(Node):
 
 
 class Mermaid(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="Mermaid",
             category="outputs",
@@ -280,7 +278,7 @@ class Mermaid(Node):
 
 
 class Mindmap(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="Mindmap",
             category="outputs",
@@ -302,7 +300,7 @@ class Mindmap(Node):
 
 
 class PictureRender(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="PictureRender",
             category="outputs",
```
vectorvein/workflow/nodes/relational_db.py

```diff
@@ -1,11 +1,9 @@
-from typing import Optional
-
 from ..graph.node import Node
 from ..graph.port import PortType, InputPort, OutputPort
 
 
 class GetTableInfo(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="GetTableInfo",
             category="relational_db",
@@ -39,7 +37,7 @@ class GetTableInfo(Node):
 
 
 class RunSql(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="RunSql",
             category="relational_db",
@@ -90,7 +88,7 @@ class RunSql(Node):
 
 
 class SmartQuery(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="SmartQuery",
             category="relational_db",
```
vectorvein/workflow/nodes/text_processing.py

```diff
@@ -1,11 +1,9 @@
-from typing import Optional
-
 from ..graph.node import Node
 from ..graph.port import PortType, InputPort, OutputPort
 
 
 class TextInOut(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="TextInOut",
             category="text_processing",
@@ -33,7 +31,7 @@ class TextInOut(Node):
 
 
 class TextReplace(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="TextReplace",
             category="text_processing",
@@ -57,7 +55,7 @@ class TextReplace(Node):
 
 
 class TextSplitters(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="TextSplitters",
             category="text_processing",
@@ -106,7 +104,7 @@ class TextSplitters(Node):
 
 
 class TextTruncation(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="TextTruncation",
             category="text_processing",
@@ -144,7 +142,7 @@ class TextTruncation(Node):
 
 
 class MarkdownToHtml(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="MarkdownToHtml",
             category="text_processing",
@@ -165,7 +163,7 @@ class MarkdownToHtml(Node):
 
 
 class ListRender(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="ListRender",
             category="text_processing",
@@ -199,7 +197,7 @@ class ListRender(Node):
 
 
 class TemplateCompose(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="TemplateCompose",
             category="text_processing",
@@ -219,7 +217,7 @@ class TemplateCompose(Node):
 
 
 class RegexExtract(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="RegexExtract",
             category="text_processing",
```
vectorvein/workflow/nodes/tools.py

```diff
@@ -1,11 +1,9 @@
-from typing import Optional
-
 from ..graph.node import Node
 from ..graph.port import PortType, InputPort, OutputPort
 
 
 class CodebaseAnalysis(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="CodebaseAnalysis",
             category="tools",
@@ -24,7 +22,7 @@ class CodebaseAnalysis(Node):
                 "codebase_file": InputPort(
                     name="codebase_file",
                     port_type=PortType.FILE,
-                    value=
+                    value=[],
                     support_file_types=[".zip"],
                     multiple=False,
                     condition="return fieldsData.input_type.value === 'file'",
@@ -65,7 +63,7 @@ class CodebaseAnalysis(Node):
                 "ignore_patterns": InputPort(
                     name="ignore_patterns",
                     port_type=PortType.INPUT,
-                    value=
+                    value=[],
                     multiple=True,
                 ),
                 "output": OutputPort(),
@@ -74,7 +72,7 @@ class CodebaseAnalysis(Node):
 
 
 class TextTranslation(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="TextTranslation",
             category="tools",
@@ -142,7 +140,7 @@ class TextTranslation(Node):
 
 
 class TextSearch(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="TextSearch",
             category="tools",
@@ -264,7 +262,7 @@ class TextSearch(Node):
 
 
 class ProgrammingFunction(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="ProgrammingFunction",
             category="tools",
@@ -330,7 +328,7 @@ class ProgrammingFunction(Node):
 
 
 class ImageSearch(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="ImageSearch",
             category="tools",
@@ -375,7 +373,7 @@ class ImageSearch(Node):
 
 
 class WorkflowInvoke(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="WorkflowInvoke",
             category="tools",
```
vectorvein/workflow/nodes/triggers.py

```diff
@@ -1,11 +1,9 @@
-from typing import Optional
-
 from ..graph.node import Node
 from ..graph.port import PortType, InputPort
 
 
 class ButtonTrigger(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="ButtonTrigger",
             category="triggers",
```
vectorvein/workflow/nodes/vector_db.py

```diff
@@ -1,11 +1,9 @@
-from typing import Optional
-
 from ..graph.node import Node
 from ..graph.port import PortType, InputPort, OutputPort
 
 
 class AddData(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="AddData",
             category="vector_db",
@@ -91,7 +89,7 @@ class AddData(Node):
 
 
 class DeleteData(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="DeleteData",
             category="vector_db",
@@ -115,7 +113,7 @@ class DeleteData(Node):
 
 
 class Search(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="Search",
             category="vector_db",
```
vectorvein/workflow/nodes/video_generation.py

```diff
@@ -1,11 +1,9 @@
-from typing import Optional
-
 from ..graph.node import Node
 from ..graph.port import PortType, InputPort, OutputPort
 
 
 class KlingVideo(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="KlingVideo",
             category="video_generation",
@@ -20,7 +18,7 @@ class KlingVideo(Node):
                 "image": InputPort(
                     name="image",
                     port_type=PortType.FILE,
-                    value=
+                    value=[],
                     support_file_types=[".jpg", ".jpeg", ".png"],
                     multiple=True,
                 ),
@@ -67,7 +65,7 @@ class KlingVideo(Node):
 
 
 class CogVideoX(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="CogVideoX",
             category="video_generation",
@@ -82,7 +80,7 @@ class CogVideoX(Node):
                 "image": InputPort(
                     name="image",
                     port_type=PortType.FILE,
-                    value=
+                    value=[],
                     support_file_types=[".jpg", ".jpeg", ".png"],
                     multiple=True,
                 ),
```
vectorvein/workflow/nodes/web_crawlers.py

```diff
@@ -1,11 +1,9 @@
-from typing import Optional
-
 from ..graph.node import Node
 from ..graph.port import PortType, InputPort, OutputPort
 
 
 class TextCrawler(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="TextCrawler",
             category="web_crawlers",
@@ -43,7 +41,7 @@ class TextCrawler(Node):
 
 
 class BilibiliCrawler(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="BilibiliCrawler",
             category="web_crawlers",
@@ -84,7 +82,7 @@ class BilibiliCrawler(Node):
 
 
 class DouyinCrawler(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="DouyinCrawler",
             category="web_crawlers",
@@ -111,7 +109,7 @@ class DouyinCrawler(Node):
 
 
 class YoutubeCrawler(Node):
-    def __init__(self, id:
+    def __init__(self, id: str | None = None):
         super().__init__(
             node_type="YoutubeCrawler",
             category="web_crawlers",
```
vectorvein/workflow/utils/analyse.py

```diff
@@ -34,9 +34,7 @@ class AnalyseResult(TypedDict):
     nodes: list[NodeRecord]
 
 
-def analyse_workflow_record(
-    json_str: str, connected_only: bool = False, reserver_programming_function_ports: bool = False
-) -> AnalyseResult:
+def analyse_workflow_record(json_str: str, connected_only: bool = False, reserver_programming_function_ports: bool = False) -> AnalyseResult:
     """
     分析工作流JSON字符串,提取节点和端口信息
 
@@ -274,9 +272,7 @@ def format_analysis_result(analysis_result: AnalyseResult, max_value_length: int
 
             port_summary.append(port_info)
 
-        truncated_node["ports_summary"] = (
-            f"{len(ports)}个端口,其中{sum(1 for p in ports if p.get('connected'))}个已连接"
-        )
+        truncated_node["ports_summary"] = f"{len(ports)}个端口,其中{sum(1 for p in ports if p.get('connected'))}个已连接"
 
         node_str = f"节点{idx + 1}: {json.dumps(truncated_node, ensure_ascii=False, indent=2)}"
         formatted_parts.append(node_str)
@@ -284,7 +280,7 @@ def format_analysis_result(analysis_result: AnalyseResult, max_value_length: int
     # 添加其他可能的顶级信息
     for key, value in analysis_result.items():
         if key != "nodes":
-            if isinstance(value,
+            if isinstance(value, dict | list):
                 summary = f"{key}: 包含{len(value)}个项目"
             else:
                 summary = f"{key}: {prettify_value(value, max_value_length)}"
@@ -350,16 +346,12 @@ def format_workflow_analysis_for_llm(analysis_result: AnalyseResult, max_value_l
                 # 列表值,使用Python风格显示
                 truncated_list = []
                 item_max_length = max(max_value_length // len(value) if value else max_value_length, 10)
-                node_info.append(
-                    f" - value(list): {json.dumps(prettify_value(value, item_max_length), ensure_ascii=False)}"
-                )
+                node_info.append(f" - value(list): {json.dumps(prettify_value(value, item_max_length), ensure_ascii=False)}")
             elif isinstance(value, dict):
                 # 字典值,使用Python风格显示
                 dict_items = []
                 key_max_length = max(max_value_length // len(value) if value else max_value_length, 10)
-                node_info.append(
-                    f" - value(dict): {json.dumps(prettify_value(value, key_max_length), ensure_ascii=False)}"
-                )
+                node_info.append(f" - value(dict): {json.dumps(prettify_value(value, key_max_length), ensure_ascii=False)}")
             elif isinstance(value, str):
                 # 字符串值,带引号
                 truncated = prettify_value(value, max_value_length)
```
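The `isinstance(value, dict | list)` check in `format_analysis_result` relies on `isinstance` accepting PEP 604 union objects, which requires Python 3.10 or newer. A standalone illustration of the equivalence, not part of the package:

```python
# Standalone illustration: on Python 3.10+, a union type such as `dict | list`
# can be passed to isinstance() in place of the classic tuple of classes.
for value in ([1, 2], {"a": 1}, "text"):
    assert isinstance(value, dict | list) == isinstance(value, (dict, list))
    print(type(value).__name__, isinstance(value, dict | list))
# list True, dict True, str False
```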
vectorvein/workflow/utils/check.py

```diff
@@ -39,17 +39,11 @@ def check_dag(workflow: "Workflow"):
     result = {"no_cycle": True, "no_isolated_nodes": True}
 
     # 过滤掉触发器节点和辅助节点
-    trigger_nodes = [
-        node.id
-        for node in workflow.nodes
-        if hasattr(node, "category") and (node.category == "triggers" or node.category == "assistedNodes")
-    ]
+    trigger_nodes = [node.id for node in workflow.nodes if hasattr(node, "category") and (node.category == "triggers" or node.category == "assistedNodes")]
 
     # 获取需要检查的节点和边
     regular_nodes = [node.id for node in workflow.nodes if node.id not in trigger_nodes]
-    regular_edges = [
-        edge for edge in workflow.edges if edge.source not in trigger_nodes and edge.target not in trigger_nodes
-    ]
+    regular_edges = [edge for edge in workflow.edges if edge.source not in trigger_nodes and edge.target not in trigger_nodes]
 
     # ---------- 检查有向图是否有环 ----------
     # 构建邻接表
@@ -59,7 +53,7 @@ def check_dag(workflow: "Workflow"):
         adjacency[edge.source].append(edge.target)
 
     # 三种状态: 0 = 未访问, 1 = 正在访问, 2 = 已访问完成
-    visited =
+    visited = dict.fromkeys(regular_nodes, 0)
 
     def dfs_cycle_detection(node_id):
         # 如果节点正在被访问,说明找到了环
@@ -157,9 +151,7 @@ def check_ui(workflow: "Workflow") -> UIWarning:
 
             # 检查显示的端口是否也被连接
             if (node.id, port_name) in connected_ports:
-                warnings["input_ports_shown_but_connected"].append(
-                    {"node_id": node.id, "node_type": node.type, "port_name": port_name}
-                )
+                warnings["input_ports_shown_but_connected"].append({"node_id": node.id, "node_type": node.type, "port_name": port_name})
 
         # 如果没有任何显示的输入端口
         warnings["has_shown_input_ports"] = has_shown_input_ports
@@ -174,7 +166,7 @@ def check_useless_nodes(workflow: "Workflow") -> list["Node"]:
     1. 节点非 output 类节点,并且节点的输出端口没有任何连线,说明该节点数据不会传给下一个节点或者显示出来。
     """
     useless_nodes = []
-    source_nodes =
+    source_nodes = {edge.source for edge in workflow.edges}
 
     for node in workflow.nodes:
         if hasattr(node, "category") and node.category == "outputs":
@@ -206,9 +198,7 @@ def check_required_ports(workflow: "Workflow") -> "list[tuple[Node, Port]]":
                 # 如果是函数,传入ports作为参数
                 condition_result = port.condition_python(ports)
             else:
-                raise ValueError(
-                    f"condition_python 必须是可调用函数,当前类型为 {type(port.condition_python)}"
-                )
+                raise ValueError(f"condition_python 必须是可调用函数,当前类型为 {type(port.condition_python)}")
 
             if not condition_result:
                 condition_applies = False
```