vectorvein 0.2.55__py3-none-any.whl → 0.2.57__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, exactly as they were published to a supported registry. It is provided for informational purposes only.
- vectorvein/chat_clients/anthropic_client.py +12 -2
- vectorvein/workflow/graph/port.py +15 -3
- vectorvein/workflow/graph/workflow.py +12 -1
- vectorvein/workflow/nodes/control_flows.py +2 -0
- vectorvein/workflow/nodes/file_processing.py +5 -0
- vectorvein/workflow/nodes/image_generation.py +21 -0
- vectorvein/workflow/nodes/llms.py +19 -0
- vectorvein/workflow/nodes/media_editing.py +54 -0
- vectorvein/workflow/nodes/media_processing.py +21 -0
- vectorvein/workflow/nodes/output.py +7 -0
- vectorvein/workflow/nodes/relational_db.py +1 -0
- vectorvein/workflow/nodes/text_processing.py +4 -0
- vectorvein/workflow/nodes/tools.py +360 -356
- vectorvein/workflow/nodes/vector_db.py +4 -0
- vectorvein/workflow/nodes/web_crawlers.py +1 -0
- vectorvein/workflow/utils/check.py +79 -0
- {vectorvein-0.2.55.dist-info → vectorvein-0.2.57.dist-info}/METADATA +1 -1
- {vectorvein-0.2.55.dist-info → vectorvein-0.2.57.dist-info}/RECORD +20 -20
- {vectorvein-0.2.55.dist-info → vectorvein-0.2.57.dist-info}/WHEEL +0 -0
- {vectorvein-0.2.55.dist-info → vectorvein-0.2.57.dist-info}/entry_points.txt +0 -0
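
Nearly every addition in this release range follows one pattern: ports whose visibility was previously defined only by a frontend JavaScript `condition` string gain a parallel `condition_python` callable expressing the same rule over the node's ports, so visibility can be evaluated in Python without a JS runtime (plausibly what the new `workflow/utils/check.py` consumes). A minimal sketch of the idea — `Port` and `is_visible` below are hypothetical stand-ins, not vectorvein's actual classes; only the lambda mirrors the hunks that follow:

```python
# Illustrative sketch only: Port and is_visible are hypothetical stand-ins,
# not vectorvein APIs; the lambda matches the pattern added in this diff.
from types import SimpleNamespace as Port  # stand-in exposing a .value attribute
from typing import Callable, Mapping


def is_visible(rule: Callable[[Mapping[str, Port]], object], ports: Mapping[str, Port]) -> bool:
    """Evaluate a Python-side visibility rule against a node's ports."""
    return bool(rule(ports))


# The same show/hide rule twice, as throughout the diff below: a JS string
# for the frontend, and the new Python lambda for server-side checks.
condition = "return fieldsData.trim.value"
condition_python = lambda ports: ports["trim"].value

print(is_visible(condition_python, {"trim": Port(value=True)}))   # True
print(is_visible(condition_python, {"trim": Port(value=False)}))  # False
```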
vectorvein/workflow/nodes/media_editing.py

```diff
@@ -45,46 +45,61 @@ class AudioEditing(Node):
                         {"value": "start_end_time", "label": "start_end_time"},
                     ],
                     condition="return fieldsData.trim.value",
+                    condition_python=lambda ports: ports["trim"].value,
                 ),
                 "trim_length": InputPort(
                     name="trim_length",
                     port_type=PortType.NUMBER,
                     value=0,
                     condition="return fieldsData.trim.value && (fieldsData.trim_method.value === 'start_duration' || fieldsData.trim_method.value === 'end_duration')",
+                    condition_python=lambda ports: ports["trim"].value
+                    and (
+                        ports["trim_method"].value == "start_duration" or ports["trim_method"].value == "end_duration"
+                    ),
                 ),
                 "trim_start_time": InputPort(
                     name="trim_start_time",
                     port_type=PortType.INPUT,
                     value="00:00:00",
                     condition="return fieldsData.trim.value && fieldsData.trim_method.value === 'start_end_time'",
+                    condition_python=lambda ports: ports["trim"].value
+                    and ports["trim_method"].value == "start_end_time",
                 ),
                 "trim_end_time": InputPort(
                     name="trim_end_time",
                     port_type=PortType.INPUT,
                     value="00:01:00",
                     condition="return fieldsData.trim.value && fieldsData.trim_method.value === 'start_end_time'",
+                    condition_python=lambda ports: ports["trim"].value
+                    and ports["trim_method"].value == "start_end_time",
                 ),
                 "adjust_volume": InputPort(
                     name="adjust_volume",
                     port_type=PortType.CHECKBOX,
                     value=False,
+                    condition="return fieldsData.adjust_volume.value",
+                    condition_python=lambda ports: ports["adjust_volume"].value,
                 ),
                 "volume_adjustment_ratio": InputPort(
                     name="volume_adjustment_ratio",
                     port_type=PortType.NUMBER,
                     value=1.0,
                     condition="return fieldsData.adjust_volume.value",
+                    condition_python=lambda ports: ports["adjust_volume"].value,
                 ),
                 "fade_in_out": InputPort(
                     name="fade_in_out",
                     port_type=PortType.CHECKBOX,
                     value=False,
+                    condition="return fieldsData.fade_in_out.value",
+                    condition_python=lambda ports: ports["fade_in_out"].value,
                 ),
                 "fade_in_out_duration": InputPort(
                     name="fade_in_out_duration",
                     port_type=PortType.NUMBER,
                     value=1,
                     condition="return fieldsData.fade_in_out.value",
+                    condition_python=lambda ports: ports["fade_in_out"].value,
                 ),
                 "adjust_speed": InputPort(
                     name="adjust_speed",
@@ -100,18 +115,23 @@ class AudioEditing(Node):
                         {"value": "specified_final_length", "label": "specified_final_length"},
                     ],
                     condition="return fieldsData.adjust_speed.value",
+                    condition_python=lambda ports: ports["adjust_speed"].value,
                 ),
                 "specified_speed": InputPort(
                     name="specified_speed",
                     port_type=PortType.NUMBER,
                     value=1.0,
                     condition="return fieldsData.adjust_speed.value && fieldsData.speed_adjustment_method.value === 'specified_speed'",
+                    condition_python=lambda ports: ports["adjust_speed"].value
+                    and ports["speed_adjustment_method"].value == "specified_speed",
                 ),
                 "specified_final_length": InputPort(
                     name="specified_final_length",
                     port_type=PortType.NUMBER,
                     value=10,
                     condition="return fieldsData.adjust_speed.value && fieldsData.speed_adjustment_method.value === 'specified_final_length'",
+                    condition_python=lambda ports: ports["adjust_speed"].value
+                    and ports["speed_adjustment_method"].value == "specified_final_length",
                 ),
                 "adjust_channels": InputPort(
                     name="adjust_channels",
@@ -127,6 +147,7 @@ class AudioEditing(Node):
                         {"value": "mono_to_stereo", "label": "mono_to_stereo"},
                     ],
                     condition="return fieldsData.adjust_channels.value",
+                    condition_python=lambda ports: ports["adjust_channels"].value,
                 ),
                 "output_audio_format": InputPort(
                     name="output_audio_format",
@@ -190,6 +211,7 @@ class ImageBackgroundRemoval(Node):
                     port_type=PortType.INPUT,
                     value="#ffffff",
                     condition="return !fieldsData.transparent_background.value",
+                    condition_python=lambda ports: not ports["transparent_background"].value,
                 ),
                 "crop_to_subject": InputPort(
                     name="crop_to_subject",
@@ -241,6 +263,7 @@ class ImageEditing(Node):
                         {"value": "fixed", "label": "fixed"},
                     ],
                     condition="return fieldsData.crop.value",
+                    condition_python=lambda ports: ports["crop"].value,
                 ),
                 "crop_position": InputPort(
                     name="crop_position",
@@ -259,42 +282,51 @@ class ImageEditing(Node):
                         {"value": "absolute", "label": "absolute"},
                     ],
                     condition="return fieldsData.crop.value",
+                    condition_python=lambda ports: ports["crop"].value,
                 ),
                 "crop_x": InputPort(
                     name="crop_x",
                     port_type=PortType.NUMBER,
                     value=1,
                     condition="return fieldsData.crop_position.value == 'absolute' && fieldsData.crop.value",
+                    condition_python=lambda ports: ports["crop_position"].value == "absolute" and ports["crop"].value,
                 ),
                 "crop_y": InputPort(
                     name="crop_y",
                     port_type=PortType.NUMBER,
                     value=1,
                     condition="return fieldsData.crop_position.value == 'absolute' && fieldsData.crop.value",
+                    condition_python=lambda ports: ports["crop_position"].value == "absolute" and ports["crop"].value,
                 ),
                 "crop_width": InputPort(
                     name="crop_width",
                     port_type=PortType.NUMBER,
                     value=300,
                     condition="return fieldsData.crop.value && fieldsData.crop_method.value == 'fixed'",
+                    condition_python=lambda ports: ports["crop"].value and ports["crop_method"].value == "fixed",
                 ),
                 "crop_height": InputPort(
                     name="crop_height",
                     port_type=PortType.NUMBER,
                     value=300,
                     condition="return fieldsData.crop.value && fieldsData.crop_method.value == 'fixed'",
+                    condition_python=lambda ports: ports["crop"].value and ports["crop_method"].value == "fixed",
                 ),
                 "crop_width_ratio": InputPort(
                     name="crop_width_ratio",
                     port_type=PortType.NUMBER,
                     value=1,
                     condition="return fieldsData.crop.value && fieldsData.crop_method.value == 'proportional'",
+                    condition_python=lambda ports: ports["crop"].value
+                    and ports["crop_method"].value == "proportional",
                 ),
                 "crop_height_ratio": InputPort(
                     name="crop_height_ratio",
                     port_type=PortType.NUMBER,
                     value=1,
                     condition="return fieldsData.crop.value && fieldsData.crop_method.value == 'proportional'",
+                    condition_python=lambda ports: ports["crop"].value
+                    and ports["crop_method"].value == "proportional",
                 ),
                 "scale": InputPort(
                     name="scale",
@@ -310,24 +342,30 @@ class ImageEditing(Node):
                         {"value": "fixed_width_height", "label": "fixed_width_height"},
                     ],
                     condition="return fieldsData.scale.value",
+                    condition_python=lambda ports: ports["scale"].value,
                 ),
                 "scale_ratio": InputPort(
                     name="scale_ratio",
                     port_type=PortType.NUMBER,
                     value=1,
                     condition="return fieldsData.scale.value && fieldsData.scale_method.value == 'proportional_scale'",
+                    condition_python=lambda ports: ports["scale"].value
+                    and ports["scale_method"].value == "proportional_scale",
                 ),
                 "scale_width": InputPort(
                     name="scale_width",
                     port_type=PortType.NUMBER,
                     value=0,
                     condition="return fieldsData.scale.value && fieldsData.scale_method.value == 'fixed_width_height'",
+                    condition_python=lambda ports: ports["scale"].value
+                    and ports["scale_method"].value == "fixed_width_height",
                 ),
                 "scale_height": InputPort(
                     name="scale_height",
                     port_type=PortType.NUMBER,
                     value=0,
                     condition="return fieldsData.scale.value && fieldsData.scale_method.value == 'fixed_width_height'",
+                    condition_python=lambda ports: ports["scale"].value,
                 ),
                 "compress": InputPort(
                     name="compress",
@@ -384,12 +422,14 @@ class ImageSegmentation(Node):
                     port_type=PortType.TEXTAREA,
                     value="",
                     condition="return fieldsData.selection_method.value === 'prompt'",
+                    condition_python=lambda ports: ports["selection_method"].value == "prompt",
                 ),
                 "coordinates": InputPort(
                     name="coordinates",
                     port_type=PortType.TEXTAREA,
                     value="",
                     condition="return fieldsData.selection_method.value === 'coordinates'",
+                    condition_python=lambda ports: ports["selection_method"].value == "coordinates",
                 ),
                 "remove_coordinates": InputPort(
                     name="remove_coordinates",
@@ -447,24 +487,28 @@ class ImageWatermark(Node):
                     value=list(),
                     support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                     condition="return fieldsData.image_or_text.value == 'image'",
+                    condition_python=lambda ports: ports["image_or_text"].value == "image",
                 ),
                 "watermark_image_width_ratio": InputPort(
                     name="watermark_image_width_ratio",
                     port_type=PortType.NUMBER,
                     value=0.3,
                     condition="return fieldsData.image_or_text.value == 'image'",
+                    condition_python=lambda ports: ports["image_or_text"].value == "image",
                 ),
                 "watermark_image_height_ratio": InputPort(
                     name="watermark_image_height_ratio",
                     port_type=PortType.NUMBER,
                     value=0,
                     condition="return fieldsData.image_or_text.value == 'image'",
+                    condition_python=lambda ports: ports["image_or_text"].value == "image",
                 ),
                 "watermark_text": InputPort(
                     name="watermark_text",
                     port_type=PortType.TEXTAREA,
                     value="",
                     condition="return fieldsData.image_or_text.value == 'text'",
+                    condition_python=lambda ports: ports["image_or_text"].value == "text",
                 ),
                 "watermark_text_font": InputPort(
                     name="watermark_text_font",
@@ -484,6 +528,7 @@ class ImageWatermark(Node):
                         {"value": "custom", "label": "custom"},
                     ],
                     condition="return fieldsData.image_or_text.value == 'text'",
+                    condition_python=lambda ports: ports["image_or_text"].value == "text",
                 ),
                 "watermark_text_font_custom": InputPort(
                     name="watermark_text_font_custom",
@@ -491,18 +536,22 @@ class ImageWatermark(Node):
                     value=list(),
                     support_file_types=[".otf", ".ttf", ".ttc", ".otc"],
                     condition="return fieldsData.image_or_text.value == 'text' && fieldsData.watermark_text_font.value == 'custom'",
+                    condition_python=lambda ports: ports["image_or_text"].value == "text"
+                    and ports["watermark_text_font"].value == "custom",
                 ),
                 "watermark_text_font_size": InputPort(
                     name="watermark_text_font_size",
                     port_type=PortType.NUMBER,
                     value=20,
                     condition="return fieldsData.image_or_text.value == 'text'",
+                    condition_python=lambda ports: ports["image_or_text"].value == "text",
                 ),
                 "watermark_text_font_color": InputPort(
                     name="watermark_text_font_color",
                     port_type=PortType.INPUT,
                     value="#ffffff",
                     condition="return fieldsData.image_or_text.value == 'text'",
+                    condition_python=lambda ports: ports["image_or_text"].value == "text",
                 ),
                 "opacity": InputPort(
                     name="opacity",
@@ -585,12 +634,14 @@ class VideoEditing(Node):
                     port_type=PortType.INPUT,
                     value="00:00:00",
                     condition="return fieldsData.trim_video.value",
+                    condition_python=lambda ports: ports["trim_video"].value,
                 ),
                 "trim_end_time": InputPort(
                     name="trim_end_time",
                     port_type=PortType.INPUT,
                     value="00:01:00",
                     condition="return fieldsData.trim_video.value",
+                    condition_python=lambda ports: ports["trim_video"].value,
                 ),
                 "rotate_video": InputPort(
                     name="rotate_video",
@@ -613,6 +664,7 @@ class VideoEditing(Node):
                     port_type=PortType.INPUT,
                     value="",
                     condition="return fieldsData.add_watermark.value",
+                    condition_python=lambda ports: ports["add_watermark"].value,
                 ),
                 "output_video_format": InputPort(
                     name="output_video_format",
@@ -659,12 +711,14 @@ class VideoScreenshot(Node):
                     port_type=PortType.NUMBER,
                     value=10,
                     condition="return fieldsData.screenshot_method.value === 'interval'",
+                    condition_python=lambda ports: ports["screenshot_method"].value == "interval",
                 ),
                 "screenshot_timestamps": InputPort(
                     name="screenshot_timestamps",
                     port_type=PortType.INPUT,
                     value="",
                     condition="return fieldsData.screenshot_method.value === 'timestamps'",
+                    condition_python=lambda ports: ports["screenshot_method"].value == "timestamps",
                 ),
                 "output_type": InputPort(
                     name="output_type",
```
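
The compound rules in the hunks above chain the same clauses as their JavaScript counterparts. As a quick worked example, the `trim_length` rule from `AudioEditing` evaluated under two port states (`Port` again being a hypothetical stand-in for the real port class):

```python
from types import SimpleNamespace as Port  # hypothetical stand-in with a .value attribute

# The trim_length visibility rule exactly as added in the AudioEditing hunk.
trim_length_visible = lambda ports: ports["trim"].value and (
    ports["trim_method"].value == "start_duration"
    or ports["trim_method"].value == "end_duration"
)

ports = {"trim": Port(value=True), "trim_method": Port(value="start_duration")}
print(trim_length_visible(ports))  # True

ports["trim_method"].value = "start_end_time"
print(trim_length_visible(ports))  # False: the duration clause no longer matches
```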
vectorvein/workflow/nodes/media_processing.py

```diff
@@ -51,6 +51,7 @@ class ClaudeVision(Node):
                     multiple=True,
                     support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                     condition="fields_data.get('images_or_urls') == 'images'",
+                    condition_python=lambda ports: ports["images_or_urls"].value == "images",
                     show=True,
                 ),
                 "urls": InputPort(
@@ -58,6 +59,7 @@ class ClaudeVision(Node):
                     port_type=PortType.TEXT,
                     value="",
                     condition="fields_data.get('images_or_urls') == 'urls'",
+                    condition_python=lambda ports: ports["images_or_urls"].value == "urls",
                 ),
                 "output": OutputPort(),
             },
@@ -101,6 +103,7 @@ class DeepseekVl(Node):
                     multiple=True,
                     support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                     condition="fields_data.get('images_or_urls') == 'images'",
+                    condition_python=lambda ports: ports["images_or_urls"].value == "images",
                     show=True,
                 ),
                 "urls": InputPort(
@@ -108,6 +111,7 @@ class DeepseekVl(Node):
                     port_type=PortType.TEXT,
                     value="",
                     condition="fields_data.get('images_or_urls') == 'urls'",
+                    condition_python=lambda ports: ports["images_or_urls"].value == "urls",
                 ),
                 "output": OutputPort(),
             },
@@ -161,6 +165,7 @@ class GeminiVision(Node):
                     multiple=True,
                     support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                     condition="fields_data.get('images_or_urls') == 'images'",
+                    condition_python=lambda ports: ports["images_or_urls"].value == "images",
                     show=True,
                 ),
                 "urls": InputPort(
@@ -168,6 +173,7 @@ class GeminiVision(Node):
                     port_type=PortType.TEXT,
                     value="",
                     condition="fields_data.get('images_or_urls') == 'urls'",
+                    condition_python=lambda ports: ports["images_or_urls"].value == "urls",
                 ),
                 "output": OutputPort(),
             },
@@ -213,6 +219,7 @@ class GlmVision(Node):
                     multiple=True,
                     support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                     condition="fields_data.images_or_urls.value == 'images'",
+                    condition_python=lambda ports: ports["images_or_urls"].value == "images",
                     show=True,
                 ),
                 "urls": InputPort(
@@ -220,6 +227,7 @@ class GlmVision(Node):
                     port_type=PortType.TEXT,
                     value="",
                     condition="fields_data.images_or_urls.value == 'urls'",
+                    condition_python=lambda ports: ports["images_or_urls"].value == "urls",
                 ),
                 "output": OutputPort(),
             },
@@ -264,6 +272,7 @@ class GptVision(Node):
                     multiple=True,
                     support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                     condition="fields_data.get('images_or_urls') == 'images'",
+                    condition_python=lambda ports: ports["images_or_urls"].value == "images",
                     show=True,
                 ),
                 "urls": InputPort(
@@ -271,6 +280,7 @@ class GptVision(Node):
                     port_type=PortType.TEXT,
                     value="",
                     condition="fields_data.get('images_or_urls') == 'urls'",
+                    condition_python=lambda ports: ports["images_or_urls"].value == "urls",
                 ),
                 "detail_type": InputPort(
                     name="detail_type",
@@ -325,6 +335,7 @@ class InternVision(Node):
                     multiple=True,
                     support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                     condition="fields_data.get('images_or_urls') == 'images'",
+                    condition_python=lambda ports: ports["images_or_urls"].value == "images",
                     show=True,
                 ),
                 "urls": InputPort(
@@ -332,6 +343,7 @@ class InternVision(Node):
                     port_type=PortType.TEXT,
                     value="",
                     condition="fields_data.get('images_or_urls') == 'urls'",
+                    condition_python=lambda ports: ports["images_or_urls"].value == "urls",
                 ),
                 "output": OutputPort(),
             },
@@ -372,6 +384,7 @@ class Ocr(Node):
                     multiple=True,
                     support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                     condition="fields_data.get('images_or_urls') == 'images'",
+                    condition_python=lambda ports: ports["images_or_urls"].value == "images",
                     show=True,
                 ),
                 "urls": InputPort(
@@ -379,20 +392,24 @@ class Ocr(Node):
                     port_type=PortType.TEXT,
                     value="",
                     condition="fields_data.get('images_or_urls') == 'urls'",
+                    condition_python=lambda ports: ports["images_or_urls"].value == "urls",
                 ),
                 "output_table": OutputPort(
                     name="output_table",
                     condition="fields_data.get('ocr_type') == 'table'",
+                    condition_python=lambda ports: ports["ocr_type"].value == "table",
                     has_tooltip=True,
                 ),
                 "output_content": OutputPort(
                     name="output_content",
                     condition="fields_data.get('ocr_type') in ['general', 'business_license']",
+                    condition_python=lambda ports: ports["ocr_type"].value in ["general", "business_license"],
                 ),
                 "output_words_info": OutputPort(
                     name="output_words_info",
                     value=list(),
                     condition="fields_data.get('ocr_type') in ['general', 'business_license']",
+                    condition_python=lambda ports: ports["ocr_type"].value in ["general", "business_license"],
                     has_tooltip=True,
                 ),
             },
@@ -446,6 +463,7 @@ class QwenVision(Node):
                     multiple=True,
                     support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
                     condition="fields_data.get('images_or_urls') == 'images'",
+                    condition_python=lambda ports: ports["images_or_urls"].value == "images",
                     show=True,
                 ),
                 "urls": InputPort(
@@ -453,6 +471,7 @@ class QwenVision(Node):
                     port_type=PortType.TEXT,
                     value="",
                     condition="fields_data.get('images_or_urls') == 'urls'",
+                    condition_python=lambda ports: ports["images_or_urls"].value == "urls",
                 ),
                 "output": OutputPort(),
             },
@@ -483,6 +502,7 @@ class SpeechRecognition(Node):
                     multiple=True,
                     support_file_types=[".wav", ".mp3", ".mp4", ".m4a", ".wma", ".aac", ".ogg", ".amr", ".flac"],
                     condition="fields_data.get('files_or_urls') == 'files'",
+                    condition_python=lambda ports: ports["files_or_urls"].value == "files",
                     show=True,
                 ),
                 "urls": InputPort(
@@ -490,6 +510,7 @@ class SpeechRecognition(Node):
                     port_type=PortType.TEXT,
                     value="",
                     condition="fields_data.get('files_or_urls') == 'urls'",
+                    condition_python=lambda ports: ports["files_or_urls"].value == "urls",
                 ),
                 "output_type": InputPort(
                     name="output_type",
```
vectorvein/workflow/nodes/output.py

```diff
@@ -26,18 +26,21 @@ class Audio(Node):
                     port_type=PortType.TEXTAREA,
                     value="",
                     condition="return fieldsData.audio_type.value == 'play_audio'",
+                    condition_python=lambda ports: ports["audio_type"].value == "play_audio",
                 ),
                 "is_midi": InputPort(
                     name="is_midi",
                     port_type=PortType.CHECKBOX,
                     value=False,
                     condition="return fieldsData.audio_type.value == 'play_audio'",
+                    condition_python=lambda ports: ports["audio_type"].value == "play_audio",
                 ),
                 "content": InputPort(
                     name="content",
                     port_type=PortType.TEXTAREA,
                     value="",
                     condition="return fieldsData.audio_type.value == 'text_to_speech'",
+                    condition_python=lambda ports: ports["audio_type"].value == "text_to_speech",
                 ),
                 "show_player": InputPort(
                     name="show_player",
@@ -329,12 +332,16 @@ class PictureRender(Node):
                     port_type=PortType.NUMBER,
                     value=1200,
                     condition="return ['url', 'html_code', 'markdown', 'mindmap', 'mermaid'].includes(fieldsData.render_type.value)",
+                    condition_python=lambda ports: ports["render_type"].value
+                    in ["url", "html_code", "markdown", "mindmap", "mermaid"],
                 ),
                 "height": InputPort(
                     name="height",
                     port_type=PortType.NUMBER,
                     value=800,
                     condition="return ['url', 'html_code', 'markdown', 'mindmap', 'mermaid'].includes(fieldsData.render_type.value)",
+                    condition_python=lambda ports: ports["render_type"].value
+                    in ["url", "html_code", "markdown", "mindmap", "mermaid"],
                 ),
                 "base64_encode": InputPort(
                     name="base64_encode",
```
vectorvein/workflow/nodes/text_processing.py

```diff
@@ -84,18 +84,21 @@ class TextSplitters(Node):
                     port_type=PortType.NUMBER,
                     value=500,
                     condition="return ['general', 'markdown'].includes(fieldsData.split_method.value)",
+                    condition_python=lambda ports: ports["split_method"].value in ["general", "markdown"],
                 ),
                 "chunk_overlap": InputPort(
                     name="chunk_overlap",
                     port_type=PortType.NUMBER,
                     value=30,
                     condition="return ['general', 'markdown'].includes(fieldsData.split_method.value)",
+                    condition_python=lambda ports: ports["split_method"].value in ["general", "markdown"],
                 ),
                 "delimiter": InputPort(
                     name="delimiter",
                     port_type=PortType.INPUT,
                     value="\\n",
                     condition="return fieldsData.split_method.value == 'delimiter'",
+                    condition_python=lambda ports: ports["split_method"].value == "delimiter",
                 ),
                 "output": OutputPort(list=True),
             },
@@ -179,6 +182,7 @@ class ListRender(Node):
                     port_type=PortType.INPUT,
                     value="\\n\\n",
                     condition="return fieldsData.output_type.value == 'text'",
+                    condition_python=lambda ports: ports["output_type"].value == "text",
                 ),
                 "output_type": InputPort(
                     name="output_type",
```
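
Hunks for the remaining files in the list above (including the new `vectorvein/workflow/utils/check.py`, +79 lines) are not shown in this extract. As a rough, purely illustrative guess at the kind of validation that per-port `condition_python` callables enable — not the module's actual API — a checker could evaluate every rule defensively and report ports it cannot resolve:

```python
# Purely illustrative: check.py's real contents are not shown in this diff.
from types import SimpleNamespace


def check_port_conditions(ports: dict) -> list[str]:
    """Evaluate each port's condition_python and collect unresolvable references."""
    problems = []
    for name, port in ports.items():
        rule = getattr(port, "condition_python", None)
        if rule is None:
            continue
        try:
            rule(ports)
        except KeyError as missing:
            problems.append(f"{name}: references unknown port {missing}")
    return problems


ports = {
    "trim": SimpleNamespace(value=True, condition_python=None),
    "trim_length": SimpleNamespace(
        value=0,
        # Deliberately references a port that does not exist:
        condition_python=lambda p: p["trim"].value and p["trim_mode"].value,
    ),
}
print(check_port_conditions(ports))  # ["trim_length: references unknown port 'trim_mode'"]
```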