vectorvein 0.2.56__py3-none-any.whl → 0.2.57__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
@@ -1,5 +1,5 @@
  from enum import Enum
- from typing import Optional, Any, Dict, List, Union
+ from typing import Optional, Any, Dict, List, Union, Callable


  class PortType(Enum):
@@ -28,6 +28,7 @@ class Port:
  field_type: Optional[str] = None,
  is_output: bool = False,
  condition: Optional[str] = None,
+ condition_python: Optional[Callable[[Dict[str, "Port"]], bool]] = None,
  max_length: Optional[int] = None,
  support_file_types: Optional[List[str]] = None,
  multiple: Optional[bool] = None,
@@ -48,6 +49,7 @@ class Port:
  self.field_type = field_type
  self.is_output = is_output
  self.condition = condition
+ self.condition_python = condition_python
  self.max_length = max_length
  self.support_file_types = support_file_types
  self.multiple = multiple
@@ -75,7 +77,7 @@ class Port:
  "name": self.name,
  "display_name": self.name,
  "field_type": self.port_type.value if isinstance(self.port_type, PortType) else self.port_type,
- "required": self.required,
+ "required": False if not isinstance(self.value, bool) and self.value else self.required,
  "show": self.show,
  "value": self._value,
  "options": self.options,
@@ -104,6 +106,12 @@ class Port:
  raise ValueError(f"Value `{value}` is not in Port `{self.name}` options {self.options}")
  self._value = value

+ def __str__(self) -> str:
+ return f"Port(name={self.name}, port_type={self.port_type})"
+
+ def __repr__(self) -> str:
+ return self.__str__()
+

  class InputPort(Port):
  def __init__(
@@ -116,6 +124,7 @@ class InputPort(Port):
  options: Optional[List[Any]] = None,
  field_type: Optional[str] = None,
  condition: Optional[str] = None,
+ condition_python: Optional[Callable[[Dict[str, "Port"]], bool]] = None,
  max_length: Optional[int] = None,
  support_file_types: Optional[List[str]] = None,
  multiple: Optional[bool] = None,
@@ -137,6 +146,7 @@ class InputPort(Port):
  field_type=field_type,
  is_output=False,
  condition=condition,
+ condition_python=condition_python,
  max_length=max_length,
  support_file_types=support_file_types,
  multiple=multiple,
@@ -155,12 +165,13 @@ class OutputPort(Port):
  self,
  name: str = "output",
  port_type: Union[PortType, str] = PortType.TEXT,
- required: bool = True,
+ required: bool = False,
  show: bool = False,
  value: Any = None,
  options: Optional[List[Any]] = None,
  field_type: Optional[str] = None,
  condition: Optional[str] = None,
+ condition_python: Optional[Callable[[Dict[str, "Port"]], bool]] = None,
  max_length: Optional[int] = None,
  support_file_types: Optional[List[str]] = None,
  multiple: Optional[bool] = None,
@@ -182,6 +193,7 @@ class OutputPort(Port):
  field_type=field_type,
  is_output=True,
  condition=condition,
+ condition_python=condition_python,
  max_length=max_length,
  support_file_types=support_file_types,
  multiple=multiple,
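Taken together, the port changes add an optional `condition_python` callback next to the existing JavaScript `condition` string: the callback receives the node's ports as a dict and returns a bool, so the same visibility rule can also be evaluated on the Python side. Below is a minimal, hedged sketch of the pattern that the node definitions in the following hunks repeat; the port names `mode` and `key` are illustrative, and the import path is assumed from the file layout listed in the RECORD section at the end of this diff.

from vectorvein.workflow.graph.port import InputPort, PortType  # assumed import path

ports = {
    "mode": InputPort(
        name="mode",
        port_type=PortType.INPUT,
        value="get_value",
    ),
    "key": InputPort(
        name="key",
        port_type=PortType.INPUT,
        value="",
        # Front-end rule, evaluated in the browser (unchanged):
        condition="return fieldsData.mode.value == 'get_value'",
        # New: the same rule, evaluated in Python against the ports dict.
        condition_python=lambda ports: ports["mode"].value == "get_value",
    ),
}

show_key = ports["key"].condition_python(ports)  # True while mode == "get_value"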
@@ -4,7 +4,14 @@ from typing import List, Union, Dict, Any, Optional
  from .node import Node
  from .edge import Edge
  from ..utils.layout import layout
- from ..utils.check import WorkflowCheckResult, check_dag, check_ui, check_useless_nodes
+ from ..utils.check import (
+ WorkflowCheckResult,
+ check_dag,
+ check_ui,
+ check_useless_nodes,
+ check_required_ports,
+ check_override_ports,
+ )


  class Workflow:
@@ -128,6 +135,8 @@ class Workflow:
  dag_check = check_dag(self)  # Check that the flow graph is a DAG and detect isolated nodes.
  ui_check = check_ui(self)
  useless_nodes = check_useless_nodes(self)
+ required_ports = check_required_ports(self)
+ override_ports = check_override_ports(self)

  # Merge the results
  result: WorkflowCheckResult = {
@@ -135,6 +144,8 @@ class Workflow:
  "no_isolated_nodes": dag_check["no_isolated_nodes"],
  "ui_warnings": ui_check,
  "useless_nodes": useless_nodes,
+ "required_ports": required_ports,
+ "override_ports": override_ports,
  }

  return result
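The two new entries end up in the WorkflowCheckResult returned by the workflow's check routine; the enclosing method name is not visible in this hunk, so `check()` below is an assumption. A short sketch of consuming the result:

result = workflow.check()  # assumed method name; the hunk only shows its body
for node, port in result["required_ports"]:
    # required ports that are still empty and have no incoming edge
    print(f"Missing required input: node {node.id}, port {port.name}")
for node, port in result["override_ports"]:
    # ports holding a non-empty, non-boolean value that an incoming edge will overwrite
    print(f"Value will be overridden: node {node.id}, port {port.name}")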
@@ -135,6 +135,7 @@ class JsonProcess(Node):
  port_type=PortType.INPUT,
  value="",
  condition="return fieldsData.process_mode.value == 'get_value'",
+ condition_python=lambda ports: ports["process_mode"].value == "get_value",
  ),
  "keys": InputPort(
  name="keys",
@@ -146,6 +147,7 @@ class JsonProcess(Node):
  port_type=PortType.INPUT,
  value="",
  condition="return fieldsData.process_mode.value == 'get_value'",
+ condition_python=lambda ports: ports["process_mode"].value == "get_value",
  ),
  "output": OutputPort(),
  },
@@ -33,24 +33,28 @@ class FileLoader(Node):
  port_type=PortType.CHECKBOX,
  value=True,
  condition="return fieldsData.parse_quality.value === 'default'",
+ condition_python=lambda ports: ports["parse_quality"].value == "default",
  ),
  "remove_url_and_email": InputPort(
  name="remove_url_and_email",
  port_type=PortType.CHECKBOX,
  value=True,
  condition="return fieldsData.parse_quality.value === 'default'",
+ condition_python=lambda ports: ports["parse_quality"].value == "default",
  ),
  "parse_table": InputPort(
  name="parse_table",
  port_type=PortType.CHECKBOX,
  value=True,
  condition="return fieldsData.parse_quality.value === 'high'",
+ condition_python=lambda ports: ports["parse_quality"].value == "high",
  ),
  "parse_formula": InputPort(
  name="parse_formula",
  port_type=PortType.CHECKBOX,
  value=False,
  condition="return fieldsData.parse_quality.value === 'high'",
+ condition_python=lambda ports: ports["parse_quality"].value == "high",
  ),
  "multiple": InputPort(
  name="multiple",
@@ -92,6 +96,7 @@ class FileUpload(Node):
  {"value": "dict", "label": "dict"},
  ],
  condition="return fieldsData.unzip_files.value",
+ condition_python=lambda ports: ports["unzip_files"].value,
  ),
  "allowed_file_types": InputPort(
  name="allowed_file_types",
@@ -35,6 +35,7 @@ class BackgroundGeneration(Node):
  {"value": "portrait", "label": "portrait"},
  ],
  condition="return fieldsData.remove_background.value",
+ condition_python=lambda ports: ports["remove_background"].value,
  ),
  "ref_image_url": InputPort(
  name="ref_image_url",
@@ -199,6 +200,7 @@ class Flux1(Node):
  value=1024,
  max=1536,
  condition="return fieldsData.model.value !== 'FLUX.1 [pro] ultra'",
+ condition_python=lambda ports: ports["model"].value != "FLUX.1 [pro] ultra",
  ),
  "height": InputPort(
  name="height",
@@ -206,6 +208,7 @@ class Flux1(Node):
  value=1024,
  max=1536,
  condition="return fieldsData.model.value !== 'FLUX.1 [pro] ultra'",
+ condition_python=lambda ports: ports["model"].value != "FLUX.1 [pro] ultra",
  ),
  "aspect_ratio": InputPort(
  name="aspect_ratio",
@@ -221,12 +224,14 @@ class Flux1(Node):
  {"value": "9:21", "label": "9:21"},
  ],
  condition="return fieldsData.model.value === 'FLUX.1 [pro] ultra'",
+ condition_python=lambda ports: ports["model"].value == "FLUX.1 [pro] ultra",
  ),
  "raw": InputPort(
  name="raw",
  port_type=PortType.CHECKBOX,
  value=False,
  condition="return fieldsData.model.value === 'FLUX.1 [pro] ultra'",
+ condition_python=lambda ports: ports["model"].value == "FLUX.1 [pro] ultra",
  ),
  "steps": InputPort(
  name="steps",
@@ -284,6 +289,7 @@ class Inpainting(Node):
  value=list(),
  support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
  condition="return fieldsData.inpainting_method.value === 'custom'",
+ condition_python=lambda ports: ports["inpainting_method"].value == "custom",
  multiple=True,
  ),
  "prompt": InputPort(
@@ -314,6 +320,7 @@ class Inpainting(Node):
  "output": OutputPort(),
  "output_mask": OutputPort(
  condition="return fieldsData.inpainting_method.value === 'smart'",
+ condition_python=lambda ports: ports["inpainting_method"].value == "smart",
  ),
  },
  )
@@ -437,12 +444,14 @@ class Pulid(Node):
  port_type=PortType.NUMBER,
  value=1024,
  condition="return fieldsData.image_size.value === 'custom'",
+ condition_python=lambda ports: ports["image_size"].value == "custom",
  ),
  "custom_height": InputPort(
  name="custom_height",
  port_type=PortType.NUMBER,
  value=768,
  condition="return fieldsData.image_size.value === 'custom'",
+ condition_python=lambda ports: ports["image_size"].value == "custom",
  ),
  "num_inference_steps": InputPort(
  name="num_inference_steps",
@@ -518,12 +527,14 @@ class Recraft(Node):
  support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
  multiple=True,
  condition="return fieldsData.generation_type.value === 'image_to_vector'",
+ condition_python=lambda ports: ports["generation_type"].value == "image_to_vector",
  ),
  "prompt": InputPort(
  name="prompt",
  port_type=PortType.TEXTAREA,
  value="",
  condition="return fieldsData.generation_type.value === 'text_to_image'",
+ condition_python=lambda ports: ports["generation_type"].value == "text_to_image",
  multiple=True,
  ),
  "base_style": InputPort(
@@ -537,6 +548,7 @@ class Recraft(Node):
  {"value": "vector_illustration", "label": "vector_illustration"},
  ],
  condition="return fieldsData.generation_type.value === 'text_to_image'",
+ condition_python=lambda ports: ports["generation_type"].value == "text_to_image",
  multiple=True,
  ),
  "substyle_realistic_image": InputPort(
@@ -554,6 +566,8 @@ class Recraft(Node):
  {"value": "motion_blur", "label": "motion_blur"},
  ],
  condition="return fieldsData.generation_type.value === 'text_to_image' && fieldsData.base_style.value === 'realistic_image'",
+ condition_python=lambda ports: ports["generation_type"].value == "text_to_image"
+ and ports["base_style"].value == "realistic_image",
  multiple=True,
  ),
  "substyle_digital_illustration": InputPort(
@@ -573,6 +587,8 @@ class Recraft(Node):
  {"value": "2d_art_poster_2", "label": "2d_art_poster_2"},
  ],
  condition="return fieldsData.generation_type.value === 'text_to_image' && fieldsData.base_style.value === 'digital_illustration'",
+ condition_python=lambda ports: ports["generation_type"].value == "text_to_image"
+ and ports["base_style"].value == "digital_illustration",
  multiple=True,
  ),
  "substyle_vector_illustration": InputPort(
@@ -587,6 +603,8 @@ class Recraft(Node):
  {"value": "linocut", "label": "linocut"},
  ],
  condition="return fieldsData.generation_type.value === 'text_to_image' && fieldsData.base_style.value === 'vector_illustration'",
+ condition_python=lambda ports: ports["generation_type"].value == "text_to_image"
+ and ports["base_style"].value == "vector_illustration",
  multiple=True,
  ),
  "size": InputPort(
@@ -611,6 +629,7 @@ class Recraft(Node):
  {"value": "1707x1024", "label": "1707x1024"},
  ],
  condition="return fieldsData.generation_type.value === 'text_to_image'",
+ condition_python=lambda ports: ports["generation_type"].value == "text_to_image",
  ),
  "colors": InputPort(
  name="colors",
@@ -618,6 +637,7 @@ class Recraft(Node):
  value=list(),
  multiple=True,
  condition="return fieldsData.generation_type.value === 'text_to_image'",
+ condition_python=lambda ports: ports["generation_type"].value == "text_to_image",
  ),
  "background_color": InputPort(
  name="background_color",
@@ -626,6 +646,7 @@ class Recraft(Node):
  multiple=True,
  max_count=1,
  condition="return fieldsData.generation_type.value === 'text_to_image'",
+ condition_python=lambda ports: ports["generation_type"].value == "text_to_image",
  ),
  "output_type": InputPort(
  name="output_type",
@@ -146,10 +146,12 @@ class Baichuan(Node):
  "function_call_output": OutputPort(
  name="function_call_output",
  condition="return fieldsData.use_function_call.value",
+ condition_python=lambda ports: ports["use_function_call"].value,
  ),
  "function_call_arguments": OutputPort(
  name="function_call_arguments",
  condition="return fieldsData.use_function_call.value",
+ condition_python=lambda ports: ports["use_function_call"].value,
  ),
  },
  )
@@ -269,10 +271,12 @@ class ChatGLM(Node):
  "function_call_output": OutputPort(
  name="function_call_output",
  condition="return fieldsData.use_function_call.value",
+ condition_python=lambda ports: ports["use_function_call"].value,
  ),
  "function_call_arguments": OutputPort(
  name="function_call_arguments",
  condition="return fieldsData.use_function_call.value",
+ condition_python=lambda ports: ports["use_function_call"].value,
  ),
  },
  )
@@ -404,14 +408,17 @@ class Deepseek(Node):
  "reasoning_content": OutputPort(
  name="reasoning_content",
  condition="return fieldsData.llm_model.value === 'deepseek-reasoner'",
+ condition_python=lambda ports: ports["llm_model"].value == "deepseek-reasoner",
  ),
  "function_call_output": OutputPort(
  name="function_call_output",
  condition="return fieldsData.use_function_call.value",
+ condition_python=lambda ports: ports["use_function_call"].value,
  ),
  "function_call_arguments": OutputPort(
  name="function_call_arguments",
  condition="return fieldsData.use_function_call.value",
+ condition_python=lambda ports: ports["use_function_call"].value,
  ),
  },
  )
@@ -505,10 +512,12 @@ class Gemini(Node):
  "function_call_output": OutputPort(
  name="function_call_output",
  condition="return fieldsData.use_function_call.value",
+ condition_python=lambda ports: ports["use_function_call"].value,
  ),
  "function_call_arguments": OutputPort(
  name="function_call_arguments",
  condition="return fieldsData.use_function_call.value",
+ condition_python=lambda ports: ports["use_function_call"].value,
  ),
  },
  )
@@ -636,10 +645,12 @@ class MiniMax(Node):
  "function_call_output": OutputPort(
  name="function_call_output",
  condition="return fieldsData.use_function_call.value",
+ condition_python=lambda ports: ports["use_function_call"].value,
  ),
  "function_call_arguments": OutputPort(
  name="function_call_arguments",
  condition="return fieldsData.use_function_call.value",
+ condition_python=lambda ports: ports["use_function_call"].value,
  ),
  },
  )
@@ -723,10 +734,12 @@ class Moonshot(Node):
  "function_call_output": OutputPort(
  name="function_call_output",
  condition="return fieldsData.use_function_call.value",
+ condition_python=lambda ports: ports["use_function_call"].value,
  ),
  "function_call_arguments": OutputPort(
  name="function_call_arguments",
  condition="return fieldsData.use_function_call.value",
+ condition_python=lambda ports: ports["use_function_call"].value,
  ),
  },
  )
@@ -814,10 +827,12 @@ class OpenAI(Node):
  "function_call_output": OutputPort(
  name="function_call_output",
  condition="return fieldsData.use_function_call.value",
+ condition_python=lambda ports: ports["use_function_call"].value,
  ),
  "function_call_arguments": OutputPort(
  name="function_call_arguments",
  condition="return fieldsData.use_function_call.value",
+ condition_python=lambda ports: ports["use_function_call"].value,
  ),
  },
  )
@@ -899,10 +914,12 @@ class XAi(Node):
  "function_call_output": OutputPort(
  name="function_call_output",
  condition="return fieldsData.use_function_call.value",
+ condition_python=lambda ports: ports["use_function_call"].value,
  ),
  "function_call_arguments": OutputPort(
  name="function_call_arguments",
  condition="return fieldsData.use_function_call.value",
+ condition_python=lambda ports: ports["use_function_call"].value,
  ),
  },
  )
@@ -988,10 +1005,12 @@ class CustomModel(Node):
  "function_call_output": OutputPort(
  name="function_call_output",
  condition="return fieldsData.use_function_call.value",
+ condition_python=lambda ports: ports["use_function_call"].value,
  ),
  "function_call_arguments": OutputPort(
  name="function_call_arguments",
  condition="return fieldsData.use_function_call.value",
+ condition_python=lambda ports: ports["use_function_call"].value,
  ),
  },
  )
@@ -45,46 +45,61 @@ class AudioEditing(Node):
  {"value": "start_end_time", "label": "start_end_time"},
  ],
  condition="return fieldsData.trim.value",
+ condition_python=lambda ports: ports["trim"].value,
  ),
  "trim_length": InputPort(
  name="trim_length",
  port_type=PortType.NUMBER,
  value=0,
  condition="return fieldsData.trim.value && (fieldsData.trim_method.value === 'start_duration' || fieldsData.trim_method.value === 'end_duration')",
+ condition_python=lambda ports: ports["trim"].value
+ and (
+ ports["trim_method"].value == "start_duration" or ports["trim_method"].value == "end_duration"
+ ),
  ),
  "trim_start_time": InputPort(
  name="trim_start_time",
  port_type=PortType.INPUT,
  value="00:00:00",
  condition="return fieldsData.trim.value && fieldsData.trim_method.value === 'start_end_time'",
+ condition_python=lambda ports: ports["trim"].value
+ and ports["trim_method"].value == "start_end_time",
  ),
  "trim_end_time": InputPort(
  name="trim_end_time",
  port_type=PortType.INPUT,
  value="00:01:00",
  condition="return fieldsData.trim.value && fieldsData.trim_method.value === 'start_end_time'",
+ condition_python=lambda ports: ports["trim"].value
+ and ports["trim_method"].value == "start_end_time",
  ),
  "adjust_volume": InputPort(
  name="adjust_volume",
  port_type=PortType.CHECKBOX,
  value=False,
+ condition="return fieldsData.adjust_volume.value",
+ condition_python=lambda ports: ports["adjust_volume"].value,
  ),
  "volume_adjustment_ratio": InputPort(
  name="volume_adjustment_ratio",
  port_type=PortType.NUMBER,
  value=1.0,
  condition="return fieldsData.adjust_volume.value",
+ condition_python=lambda ports: ports["adjust_volume"].value,
  ),
  "fade_in_out": InputPort(
  name="fade_in_out",
  port_type=PortType.CHECKBOX,
  value=False,
+ condition="return fieldsData.fade_in_out.value",
+ condition_python=lambda ports: ports["fade_in_out"].value,
  ),
  "fade_in_out_duration": InputPort(
  name="fade_in_out_duration",
  port_type=PortType.NUMBER,
  value=1,
  condition="return fieldsData.fade_in_out.value",
+ condition_python=lambda ports: ports["fade_in_out"].value,
  ),
  "adjust_speed": InputPort(
  name="adjust_speed",
@@ -100,18 +115,23 @@ class AudioEditing(Node):
  {"value": "specified_final_length", "label": "specified_final_length"},
  ],
  condition="return fieldsData.adjust_speed.value",
+ condition_python=lambda ports: ports["adjust_speed"].value,
  ),
  "specified_speed": InputPort(
  name="specified_speed",
  port_type=PortType.NUMBER,
  value=1.0,
  condition="return fieldsData.adjust_speed.value && fieldsData.speed_adjustment_method.value === 'specified_speed'",
+ condition_python=lambda ports: ports["adjust_speed"].value
+ and ports["speed_adjustment_method"].value == "specified_speed",
  ),
  "specified_final_length": InputPort(
  name="specified_final_length",
  port_type=PortType.NUMBER,
  value=10,
  condition="return fieldsData.adjust_speed.value && fieldsData.speed_adjustment_method.value === 'specified_final_length'",
+ condition_python=lambda ports: ports["adjust_speed"].value
+ and ports["speed_adjustment_method"].value == "specified_final_length",
  ),
  "adjust_channels": InputPort(
  name="adjust_channels",
@@ -127,6 +147,7 @@ class AudioEditing(Node):
  {"value": "mono_to_stereo", "label": "mono_to_stereo"},
  ],
  condition="return fieldsData.adjust_channels.value",
+ condition_python=lambda ports: ports["adjust_channels"].value,
  ),
  "output_audio_format": InputPort(
  name="output_audio_format",
@@ -190,6 +211,7 @@ class ImageBackgroundRemoval(Node):
  port_type=PortType.INPUT,
  value="#ffffff",
  condition="return !fieldsData.transparent_background.value",
+ condition_python=lambda ports: not ports["transparent_background"].value,
  ),
  "crop_to_subject": InputPort(
  name="crop_to_subject",
@@ -241,6 +263,7 @@ class ImageEditing(Node):
  {"value": "fixed", "label": "fixed"},
  ],
  condition="return fieldsData.crop.value",
+ condition_python=lambda ports: ports["crop"].value,
  ),
  "crop_position": InputPort(
  name="crop_position",
@@ -259,42 +282,51 @@ class ImageEditing(Node):
  {"value": "absolute", "label": "absolute"},
  ],
  condition="return fieldsData.crop.value",
+ condition_python=lambda ports: ports["crop"].value,
  ),
  "crop_x": InputPort(
  name="crop_x",
  port_type=PortType.NUMBER,
  value=1,
  condition="return fieldsData.crop_position.value == 'absolute' && fieldsData.crop.value",
+ condition_python=lambda ports: ports["crop_position"].value == "absolute" and ports["crop"].value,
  ),
  "crop_y": InputPort(
  name="crop_y",
  port_type=PortType.NUMBER,
  value=1,
  condition="return fieldsData.crop_position.value == 'absolute' && fieldsData.crop.value",
+ condition_python=lambda ports: ports["crop_position"].value == "absolute" and ports["crop"].value,
  ),
  "crop_width": InputPort(
  name="crop_width",
  port_type=PortType.NUMBER,
  value=300,
  condition="return fieldsData.crop.value && fieldsData.crop_method.value == 'fixed'",
+ condition_python=lambda ports: ports["crop"].value and ports["crop_method"].value == "fixed",
  ),
  "crop_height": InputPort(
  name="crop_height",
  port_type=PortType.NUMBER,
  value=300,
  condition="return fieldsData.crop.value && fieldsData.crop_method.value == 'fixed'",
+ condition_python=lambda ports: ports["crop"].value and ports["crop_method"].value == "fixed",
  ),
  "crop_width_ratio": InputPort(
  name="crop_width_ratio",
  port_type=PortType.NUMBER,
  value=1,
  condition="return fieldsData.crop.value && fieldsData.crop_method.value == 'proportional'",
+ condition_python=lambda ports: ports["crop"].value
+ and ports["crop_method"].value == "proportional",
  ),
  "crop_height_ratio": InputPort(
  name="crop_height_ratio",
  port_type=PortType.NUMBER,
  value=1,
  condition="return fieldsData.crop.value && fieldsData.crop_method.value == 'proportional'",
+ condition_python=lambda ports: ports["crop"].value
+ and ports["crop_method"].value == "proportional",
  ),
  "scale": InputPort(
  name="scale",
@@ -310,24 +342,30 @@ class ImageEditing(Node):
  {"value": "fixed_width_height", "label": "fixed_width_height"},
  ],
  condition="return fieldsData.scale.value",
+ condition_python=lambda ports: ports["scale"].value,
  ),
  "scale_ratio": InputPort(
  name="scale_ratio",
  port_type=PortType.NUMBER,
  value=1,
  condition="return fieldsData.scale.value && fieldsData.scale_method.value == 'proportional_scale'",
+ condition_python=lambda ports: ports["scale"].value
+ and ports["scale_method"].value == "proportional_scale",
  ),
  "scale_width": InputPort(
  name="scale_width",
  port_type=PortType.NUMBER,
  value=0,
  condition="return fieldsData.scale.value && fieldsData.scale_method.value == 'fixed_width_height'",
+ condition_python=lambda ports: ports["scale"].value
+ and ports["scale_method"].value == "fixed_width_height",
  ),
  "scale_height": InputPort(
  name="scale_height",
  port_type=PortType.NUMBER,
  value=0,
  condition="return fieldsData.scale.value && fieldsData.scale_method.value == 'fixed_width_height'",
+ condition_python=lambda ports: ports["scale"].value,
  ),
  "compress": InputPort(
  name="compress",
@@ -384,12 +422,14 @@ class ImageSegmentation(Node):
  port_type=PortType.TEXTAREA,
  value="",
  condition="return fieldsData.selection_method.value === 'prompt'",
+ condition_python=lambda ports: ports["selection_method"].value == "prompt",
  ),
  "coordinates": InputPort(
  name="coordinates",
  port_type=PortType.TEXTAREA,
  value="",
  condition="return fieldsData.selection_method.value === 'coordinates'",
+ condition_python=lambda ports: ports["selection_method"].value == "coordinates",
  ),
  "remove_coordinates": InputPort(
  name="remove_coordinates",
@@ -447,24 +487,28 @@ class ImageWatermark(Node):
  value=list(),
  support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
  condition="return fieldsData.image_or_text.value == 'image'",
+ condition_python=lambda ports: ports["image_or_text"].value == "image",
  ),
  "watermark_image_width_ratio": InputPort(
  name="watermark_image_width_ratio",
  port_type=PortType.NUMBER,
  value=0.3,
  condition="return fieldsData.image_or_text.value == 'image'",
+ condition_python=lambda ports: ports["image_or_text"].value == "image",
  ),
  "watermark_image_height_ratio": InputPort(
  name="watermark_image_height_ratio",
  port_type=PortType.NUMBER,
  value=0,
  condition="return fieldsData.image_or_text.value == 'image'",
+ condition_python=lambda ports: ports["image_or_text"].value == "image",
  ),
  "watermark_text": InputPort(
  name="watermark_text",
  port_type=PortType.TEXTAREA,
  value="",
  condition="return fieldsData.image_or_text.value == 'text'",
+ condition_python=lambda ports: ports["image_or_text"].value == "text",
  ),
  "watermark_text_font": InputPort(
  name="watermark_text_font",
@@ -484,6 +528,7 @@ class ImageWatermark(Node):
  {"value": "custom", "label": "custom"},
  ],
  condition="return fieldsData.image_or_text.value == 'text'",
+ condition_python=lambda ports: ports["image_or_text"].value == "text",
  ),
  "watermark_text_font_custom": InputPort(
  name="watermark_text_font_custom",
@@ -491,18 +536,22 @@ class ImageWatermark(Node):
  value=list(),
  support_file_types=[".otf", ".ttf", ".ttc", ".otc"],
  condition="return fieldsData.image_or_text.value == 'text' && fieldsData.watermark_text_font.value == 'custom'",
+ condition_python=lambda ports: ports["image_or_text"].value == "text"
+ and ports["watermark_text_font"].value == "custom",
  ),
  "watermark_text_font_size": InputPort(
  name="watermark_text_font_size",
  port_type=PortType.NUMBER,
  value=20,
  condition="return fieldsData.image_or_text.value == 'text'",
+ condition_python=lambda ports: ports["image_or_text"].value == "text",
  ),
  "watermark_text_font_color": InputPort(
  name="watermark_text_font_color",
  port_type=PortType.INPUT,
  value="#ffffff",
  condition="return fieldsData.image_or_text.value == 'text'",
+ condition_python=lambda ports: ports["image_or_text"].value == "text",
  ),
  "opacity": InputPort(
  name="opacity",
@@ -585,12 +634,14 @@ class VideoEditing(Node):
  port_type=PortType.INPUT,
  value="00:00:00",
  condition="return fieldsData.trim_video.value",
+ condition_python=lambda ports: ports["trim_video"].value,
  ),
  "trim_end_time": InputPort(
  name="trim_end_time",
  port_type=PortType.INPUT,
  value="00:01:00",
  condition="return fieldsData.trim_video.value",
+ condition_python=lambda ports: ports["trim_video"].value,
  ),
  "rotate_video": InputPort(
  name="rotate_video",
@@ -613,6 +664,7 @@ class VideoEditing(Node):
  port_type=PortType.INPUT,
  value="",
  condition="return fieldsData.add_watermark.value",
+ condition_python=lambda ports: ports["add_watermark"].value,
  ),
  "output_video_format": InputPort(
  name="output_video_format",
@@ -659,12 +711,14 @@ class VideoScreenshot(Node):
  port_type=PortType.NUMBER,
  value=10,
  condition="return fieldsData.screenshot_method.value === 'interval'",
+ condition_python=lambda ports: ports["screenshot_method"].value == "interval",
  ),
  "screenshot_timestamps": InputPort(
  name="screenshot_timestamps",
  port_type=PortType.INPUT,
  value="",
  condition="return fieldsData.screenshot_method.value === 'timestamps'",
+ condition_python=lambda ports: ports["screenshot_method"].value == "timestamps",
  ),
  "output_type": InputPort(
  name="output_type",
@@ -51,6 +51,7 @@ class ClaudeVision(Node):
  multiple=True,
  support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
  condition="fields_data.get('images_or_urls') == 'images'",
+ condition_python=lambda ports: ports["images_or_urls"].value == "images",
  show=True,
  ),
  "urls": InputPort(
@@ -58,6 +59,7 @@ class ClaudeVision(Node):
  port_type=PortType.TEXT,
  value="",
  condition="fields_data.get('images_or_urls') == 'urls'",
+ condition_python=lambda ports: ports["images_or_urls"].value == "urls",
  ),
  "output": OutputPort(),
  },
@@ -101,6 +103,7 @@ class DeepseekVl(Node):
  multiple=True,
  support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
  condition="fields_data.get('images_or_urls') == 'images'",
+ condition_python=lambda ports: ports["images_or_urls"].value == "images",
  show=True,
  ),
  "urls": InputPort(
@@ -108,6 +111,7 @@ class DeepseekVl(Node):
  port_type=PortType.TEXT,
  value="",
  condition="fields_data.get('images_or_urls') == 'urls'",
+ condition_python=lambda ports: ports["images_or_urls"].value == "urls",
  ),
  "output": OutputPort(),
  },
@@ -161,6 +165,7 @@ class GeminiVision(Node):
  multiple=True,
  support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
  condition="fields_data.get('images_or_urls') == 'images'",
+ condition_python=lambda ports: ports["images_or_urls"].value == "images",
  show=True,
  ),
  "urls": InputPort(
@@ -168,6 +173,7 @@ class GeminiVision(Node):
  port_type=PortType.TEXT,
  value="",
  condition="fields_data.get('images_or_urls') == 'urls'",
+ condition_python=lambda ports: ports["images_or_urls"].value == "urls",
  ),
  "output": OutputPort(),
  },
@@ -213,6 +219,7 @@ class GlmVision(Node):
  multiple=True,
  support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
  condition="fields_data.images_or_urls.value == 'images'",
+ condition_python=lambda ports: ports["images_or_urls"].value == "images",
  show=True,
  ),
  "urls": InputPort(
@@ -220,6 +227,7 @@ class GlmVision(Node):
  port_type=PortType.TEXT,
  value="",
  condition="fields_data.images_or_urls.value == 'urls'",
+ condition_python=lambda ports: ports["images_or_urls"].value == "urls",
  ),
  "output": OutputPort(),
  },
@@ -264,6 +272,7 @@ class GptVision(Node):
  multiple=True,
  support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
  condition="fields_data.get('images_or_urls') == 'images'",
+ condition_python=lambda ports: ports["images_or_urls"].value == "images",
  show=True,
  ),
  "urls": InputPort(
@@ -271,6 +280,7 @@ class GptVision(Node):
  port_type=PortType.TEXT,
  value="",
  condition="fields_data.get('images_or_urls') == 'urls'",
+ condition_python=lambda ports: ports["images_or_urls"].value == "urls",
  ),
  "detail_type": InputPort(
  name="detail_type",
@@ -325,6 +335,7 @@ class InternVision(Node):
  multiple=True,
  support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
  condition="fields_data.get('images_or_urls') == 'images'",
+ condition_python=lambda ports: ports["images_or_urls"].value == "images",
  show=True,
  ),
  "urls": InputPort(
@@ -332,6 +343,7 @@ class InternVision(Node):
  port_type=PortType.TEXT,
  value="",
  condition="fields_data.get('images_or_urls') == 'urls'",
+ condition_python=lambda ports: ports["images_or_urls"].value == "urls",
  ),
  "output": OutputPort(),
  },
@@ -372,6 +384,7 @@ class Ocr(Node):
  multiple=True,
  support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
  condition="fields_data.get('images_or_urls') == 'images'",
+ condition_python=lambda ports: ports["images_or_urls"].value == "images",
  show=True,
  ),
  "urls": InputPort(
@@ -379,20 +392,24 @@ class Ocr(Node):
  port_type=PortType.TEXT,
  value="",
  condition="fields_data.get('images_or_urls') == 'urls'",
+ condition_python=lambda ports: ports["images_or_urls"].value == "urls",
  ),
  "output_table": OutputPort(
  name="output_table",
  condition="fields_data.get('ocr_type') == 'table'",
+ condition_python=lambda ports: ports["ocr_type"].value == "table",
  has_tooltip=True,
  ),
  "output_content": OutputPort(
  name="output_content",
  condition="fields_data.get('ocr_type') in ['general', 'business_license']",
+ condition_python=lambda ports: ports["ocr_type"].value in ["general", "business_license"],
  ),
  "output_words_info": OutputPort(
  name="output_words_info",
  value=list(),
  condition="fields_data.get('ocr_type') in ['general', 'business_license']",
+ condition_python=lambda ports: ports["ocr_type"].value in ["general", "business_license"],
  has_tooltip=True,
  ),
  },
@@ -446,6 +463,7 @@ class QwenVision(Node):
  multiple=True,
  support_file_types=[".jpg", ".jpeg", ".png", ".webp"],
  condition="fields_data.get('images_or_urls') == 'images'",
+ condition_python=lambda ports: ports["images_or_urls"].value == "images",
  show=True,
  ),
  "urls": InputPort(
@@ -453,6 +471,7 @@ class QwenVision(Node):
  port_type=PortType.TEXT,
  value="",
  condition="fields_data.get('images_or_urls') == 'urls'",
+ condition_python=lambda ports: ports["images_or_urls"].value == "urls",
  ),
  "output": OutputPort(),
  },
@@ -483,6 +502,7 @@ class SpeechRecognition(Node):
  multiple=True,
  support_file_types=[".wav", ".mp3", ".mp4", ".m4a", ".wma", ".aac", ".ogg", ".amr", ".flac"],
  condition="fields_data.get('files_or_urls') == 'files'",
+ condition_python=lambda ports: ports["files_or_urls"].value == "files",
  show=True,
  ),
  "urls": InputPort(
@@ -490,6 +510,7 @@ class SpeechRecognition(Node):
  port_type=PortType.TEXT,
  value="",
  condition="fields_data.get('files_or_urls') == 'urls'",
+ condition_python=lambda ports: ports["files_or_urls"].value == "urls",
  ),
  "output_type": InputPort(
  name="output_type",
@@ -26,18 +26,21 @@ class Audio(Node):
  port_type=PortType.TEXTAREA,
  value="",
  condition="return fieldsData.audio_type.value == 'play_audio'",
+ condition_python=lambda ports: ports["audio_type"].value == "play_audio",
  ),
  "is_midi": InputPort(
  name="is_midi",
  port_type=PortType.CHECKBOX,
  value=False,
  condition="return fieldsData.audio_type.value == 'play_audio'",
+ condition_python=lambda ports: ports["audio_type"].value == "play_audio",
  ),
  "content": InputPort(
  name="content",
  port_type=PortType.TEXTAREA,
  value="",
  condition="return fieldsData.audio_type.value == 'text_to_speech'",
+ condition_python=lambda ports: ports["audio_type"].value == "text_to_speech",
  ),
  "show_player": InputPort(
  name="show_player",
@@ -329,12 +332,16 @@ class PictureRender(Node):
  port_type=PortType.NUMBER,
  value=1200,
  condition="return ['url', 'html_code', 'markdown', 'mindmap', 'mermaid'].includes(fieldsData.render_type.value)",
+ condition_python=lambda ports: ports["render_type"].value
+ in ["url", "html_code", "markdown", "mindmap", "mermaid"],
  ),
  "height": InputPort(
  name="height",
  port_type=PortType.NUMBER,
  value=800,
  condition="return ['url', 'html_code', 'markdown', 'mindmap', 'mermaid'].includes(fieldsData.render_type.value)",
+ condition_python=lambda ports: ports["render_type"].value
+ in ["url", "html_code", "markdown", "mindmap", "mermaid"],
  ),
  "base64_encode": InputPort(
  name="base64_encode",
@@ -67,6 +67,7 @@ class RunSql(Node):
  port_type=PortType.CHECKBOX,
  value=True,
  condition="return fieldsData.output_type.value == 'list'",
+ condition_python=lambda ports: ports["output_type"].value == "list",
  ),
  "max_count": InputPort(
  name="max_count",
@@ -84,18 +84,21 @@ class TextSplitters(Node):
  port_type=PortType.NUMBER,
  value=500,
  condition="return ['general', 'markdown'].includes(fieldsData.split_method.value)",
+ condition_python=lambda ports: ports["split_method"].value in ["general", "markdown"],
  ),
  "chunk_overlap": InputPort(
  name="chunk_overlap",
  port_type=PortType.NUMBER,
  value=30,
  condition="return ['general', 'markdown'].includes(fieldsData.split_method.value)",
+ condition_python=lambda ports: ports["split_method"].value in ["general", "markdown"],
  ),
  "delimiter": InputPort(
  name="delimiter",
  port_type=PortType.INPUT,
  value="\\n",
  condition="return fieldsData.split_method.value == 'delimiter'",
+ condition_python=lambda ports: ports["split_method"].value == "delimiter",
  ),
  "output": OutputPort(list=True),
  },
@@ -179,6 +182,7 @@ class ListRender(Node):
  port_type=PortType.INPUT,
  value="\\n\\n",
  condition="return fieldsData.output_type.value == 'text'",
+ condition_python=lambda ports: ports["output_type"].value == "text",
  ),
  "output_type": InputPort(
  name="output_type",
@@ -28,12 +28,14 @@ class CodebaseAnalysis(Node):
  support_file_types=[".zip"],
  multiple=False,
  condition="return fieldsData.input_type.value === 'file'",
+ condition_python=lambda ports: ports["input_type"].value == "file",
  ),
  "git_url": InputPort(
  name="git_url",
  port_type=PortType.INPUT,
  value="",
  condition="return fieldsData.input_type.value === 'git_url'",
+ condition_python=lambda ports: ports["input_type"].value == "git_url",
  ),
  "output_style": InputPort(
  name="output_style",
@@ -186,12 +188,14 @@ class TextSearch(Node):
  {"value": "custom", "label": "custom"},
  ],
  condition="return fieldsData.search_engine.value === 'bing'",
+ condition_python=lambda ports: ports["search_engine"].value == "bing",
  ),
  "custom_freshness": InputPort(
  name="custom_freshness",
  port_type=PortType.INPUT,
  value="",
  condition="return fieldsData.freshness.value === 'custom'",
+ condition_python=lambda ports: ports["freshness"].value == "custom",
  ),
  "combine_result_in_text": InputPort(
  name="combine_result_in_text",
@@ -52,24 +52,28 @@ class AddData(Node):
  {"value": "table", "label": "table"},
  ],
  condition="return fieldsData.data_type.value == 'text'",
+ condition_python=lambda ports: ports["data_type"].value == "text",
  ),
  "chunk_length": InputPort(
  name="chunk_length",
  port_type=PortType.NUMBER,
  value=500,
  condition="return ['general', 'markdown'].includes(fieldsData.split_method.value)",
+ condition_python=lambda ports: ports["split_method"].value in ["general", "markdown"],
  ),
  "chunk_overlap": InputPort(
  name="chunk_overlap",
  port_type=PortType.NUMBER,
  value=30,
  condition="return ['general', 'markdown'].includes(fieldsData.split_method.value)",
+ condition_python=lambda ports: ports["split_method"].value in ["general", "markdown"],
  ),
  "delimiter": InputPort(
  name="delimiter",
  port_type=PortType.INPUT,
  value="\\n",
  condition="return fieldsData.split_method.value == 'delimiter'",
+ condition_python=lambda ports: ports["split_method"].value == "delimiter",
  ),
  "remove_url_and_email": InputPort(
  name="remove_url_and_email",
@@ -138,6 +138,7 @@ class YoutubeCrawler(Node):
  {"value": "detailed", "label": "detailed"},
  ],
  condition="return fieldsData.get_comments.value",
+ condition_python=lambda ports: ports["get_comments"].value,
  ),
  "output_type": InputPort(
  name="output_type",
@@ -5,6 +5,7 @@ from ..graph.port import InputPort
  if TYPE_CHECKING:
  from ..graph.workflow import Workflow
  from ..graph.node import Node
+ from ..graph.port import Port


  class UIWarning(TypedDict, total=False):
@@ -22,6 +23,8 @@ class WorkflowCheckResult(TypedDict):
  no_isolated_nodes: bool # Whether the workflow contains no isolated nodes
  useless_nodes: list["Node"] # Useless nodes in the workflow
  ui_warnings: UIWarning # UI-related warnings
+ required_ports: list[tuple["Node", "Port"]] # Required ports that are not connected
+ override_ports: list[tuple["Node", "Port"]] # Ports whose values will be overridden


  def check_dag(workflow: "Workflow"):
@@ -180,3 +183,79 @@ def check_useless_nodes(workflow: "Workflow") -> list["Node"]:
  useless_nodes.append(node)

  return useless_nodes
+
+
+ def check_required_ports(workflow: "Workflow") -> "list[tuple[Node, Port]]":
+ """Check the workflow for required ports that are empty and not connected."""
+ required_but_not_connected = []
+
+ # Collect all connected target ports
+ connected_ports = {(edge.target, edge.target_handle) for edge in workflow.edges}
+
+ # Iterate over every node and its ports
+ for node in workflow.nodes:
+ ports = node.ports if hasattr(node, "ports") else {}
+ for port_name, port in ports.items():
+ # Check whether the port's condition applies
+ condition_applies = True
+ if hasattr(port, "condition_python") and port.condition_python is not None:
+ try:
+ # Make sure it is callable
+ if callable(port.condition_python):
+ # If it is a function, pass the ports dict as the argument
+ condition_result = port.condition_python(ports)
+ else:
+ raise ValueError(
+ f"condition_python must be a callable, got {type(port.condition_python)}"
+ )
+
+ if not condition_result:
+ condition_applies = False
+ except Exception:
+ # If evaluating the condition fails, assume it does not apply
+ condition_applies = False
+
+ # Check whether the port is required
+ is_required = getattr(port, "required", False)
+
+ # Check whether the value is empty
+ value_is_empty = getattr(port, "value", "") == ""
+
+ # Check whether no edge is connected to this port
+ not_connected = (node.id, port_name) not in connected_ports
+
+ # If all conditions hold, add the port to the result list
+ if condition_applies and is_required and value_is_empty and not_connected:
+ required_but_not_connected.append((node, port))
+
+ return required_but_not_connected
+
+
+ def check_override_ports(workflow: "Workflow") -> list[tuple["Node", "Port"]]:
+ """Check the workflow for ports whose values will be overridden.
+
+ If a port's value is not a boolean and is not empty (empty string, empty list,
+ empty dict), and the port is also connected as the target of an edge, its value
+ will be overridden by the incoming connection.
+ """
+ override_ports = []
+
+ # Collect all ports that are the target of an edge
+ target_ports = {(edge.target, edge.target_handle) for edge in workflow.edges}
+
+ for node in workflow.nodes:
+ for port_name, port in node.ports.items():
+ # Check whether the port is connected as an edge target
+ if (node.id, port_name) in target_ports:
+ value = port.value
+
+ # Skip boolean values
+ if isinstance(value, bool):
+ continue
+
+ # Check whether the value is empty
+ is_empty = value == "" or value == [] or value == {} or value is None
+
+ if not is_empty:
+ override_ports.append((node, port))
+
+ return override_ports
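Read concretely, check_override_ports flags a port only when it is the target of an edge and already holds a meaningful literal value, because the incoming connection will replace that value at runtime. A small illustration of the predicate applied to a single port value (a sketch, not part of the package):

def would_be_overridden(value, is_edge_target: bool) -> bool:
    # Same logic as check_override_ports, restricted to one port.
    if not is_edge_target:
        return False
    if isinstance(value, bool):
        return False  # checkbox-style boolean values are ignored
    return not (value == "" or value == [] or value == {} or value is None)

would_be_overridden("custom prompt", True)  # True: the literal text would be lost
would_be_overridden("", True)               # False: nothing meaningful to overwrite
would_be_overridden(True, True)             # False: booleans are skipped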
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: vectorvein
- Version: 0.2.56
+ Version: 0.2.57
  Summary: VectorVein Python SDK
  Author-Email: Anderson <andersonby@163.com>
  License: MIT
@@ -1,6 +1,6 @@
- vectorvein-0.2.56.dist-info/METADATA,sha256=B8IMfZGFGChU78LXUVrw4x9ArXDD4kYpuA3pmYDaGMI,4570
- vectorvein-0.2.56.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
- vectorvein-0.2.56.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
+ vectorvein-0.2.57.dist-info/METADATA,sha256=VJeKL2r_44yXAPf6bThgchug8uSWNxcx3IR2zB6iwyM,4570
+ vectorvein-0.2.57.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
+ vectorvein-0.2.57.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
  vectorvein/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  vectorvein/api/__init__.py,sha256=lfY-XA46fgD2iIZTU0VYP8i07AwA03Egj4Qua0vUKrQ,738
  vectorvein/api/client.py,sha256=xF-leKDQzVyyy9FnIRaz0k4eElYW1XbbzeRLcpnyk90,33047
@@ -43,25 +43,25 @@ vectorvein/utilities/rate_limiter.py,sha256=dwolIUVw2wP83Odqpx0AAaE77de1GzxkYDGH
  vectorvein/utilities/retry.py,sha256=6KFS9R2HdhqM3_9jkjD4F36ZSpEx2YNFGOVlpOsUetM,2208
  vectorvein/workflow/graph/edge.py,sha256=1ckyyjCue_PLm7P1ItUfKOy6AKkemOpZ9m1WJ8UXIHQ,1072
  vectorvein/workflow/graph/node.py,sha256=ZzOhl7pltPBR5gyyL2ZtYeByPxjDPnM179emliqGFi8,5192
- vectorvein/workflow/graph/port.py,sha256=_QpHCBGAu657VhYAh0Wzjri3ZZ8-WYJp99J465mqmwo,6492
- vectorvein/workflow/graph/workflow.py,sha256=lDF4LafX5dmzjQQqR-mFhqQPdF_AKodiHpM3MygoqyE,6086
+ vectorvein/workflow/graph/port.py,sha256=HcinzQqNP7ysTvBmi3c4iaWne8nV6m-BpFFX0jTrMIE,7122
+ vectorvein/workflow/graph/workflow.py,sha256=f5KAQUXTKii7UFYmSZeZZQ7JCKKPmJlUg0iFAfBEJQE,6366
  vectorvein/workflow/nodes/__init__.py,sha256=dWrWtL3q0Vsn-MLgJ7gNgLCrwZ5BrqjrN2QFPNeBMuc,3240
  vectorvein/workflow/nodes/audio_generation.py,sha256=ZRFZ_ycMTSJ2LKmekctagQdJYTl-3q4TNOIKETpS9AM,5870
- vectorvein/workflow/nodes/control_flows.py,sha256=l8CjFQlsGV3fNGM6SVzS1Kz361K1xDv1fGT7acuDXuU,6613
- vectorvein/workflow/nodes/file_processing.py,sha256=h9FZM88ZAaLZq6obGCTs6mRXF6uYkwqca5JEiFxmd_k,4044
- vectorvein/workflow/nodes/image_generation.py,sha256=MJ628Luc5CwDWTu3uYM_nkIe6JgSV6Gqv0IiOrrsKm8,33699
- vectorvein/workflow/nodes/llms.py,sha256=_q65zdfugjNh1N83dytHG69TfkmpOPw2lwxKQUQnmrs,38589
- vectorvein/workflow/nodes/media_editing.py,sha256=hqOQCqxCPYdpnswlj4XM9fLRFJ0BTWUxW_oKH650hGs,29845
- vectorvein/workflow/nodes/media_processing.py,sha256=0NuJCkcRY68yO9ZJ7xmXA8RXXxPvptu_89nR83pxTEk,20066
- vectorvein/workflow/nodes/output.py,sha256=_UQxiddHtGv2rkjhUFE-KDgrjnh0AGJQJyq9-4Aji5A,12567
- vectorvein/workflow/nodes/relational_db.py,sha256=zfzUhV25TpZGhkIzO18PmAT5xhcsJC4AXKy0zyA05w8,5408
- vectorvein/workflow/nodes/text_processing.py,sha256=MRo_-oaC65hbzMxm7TYoeiS3rgvqh9y_Rny5RCfoATE,8342
- vectorvein/workflow/nodes/tools.py,sha256=xaBmjJYtlUopoflIF7BR_l8RPVjYRPpeqa1dyQfXYas,13494
+ vectorvein/workflow/nodes/control_flows.py,sha256=fDySWek8Isbfznwn0thmbTwTP4c99w68Up9dlASAtIo,6805
+ vectorvein/workflow/nodes/file_processing.py,sha256=f4PlfgSAVFhwuqcEAvcLarNIkHUFP4FJucxnb3kekTU,4498
+ vectorvein/workflow/nodes/image_generation.py,sha256=a1ObkmvM8dwMxQvsnoYJwaURn0WwLXsqVkPybRnXT9A,35708
+ vectorvein/workflow/nodes/llms.py,sha256=ePnWAF4q-Uai5ZHgrYb7ZeoWzjIZ9B8XGAPPT5QEO10,40238
+ vectorvein/workflow/nodes/media_editing.py,sha256=ut4NN9_VUqnsqT2rlv0JrLhyxRLNUkvHb0c4QZDiKz8,34320
+ vectorvein/workflow/nodes/media_processing.py,sha256=_YuoJur2EeIeZfg8dSigDtqYcUpN6uVjGXJSVNqa6uI,22067
+ vectorvein/workflow/nodes/output.py,sha256=JHp-Y9EtuwD9qtZvVV2zHkH1OEK_6xlYh_DT1LrKuBs,13174
+ vectorvein/workflow/nodes/relational_db.py,sha256=Zg4G3xIQ94uoWE-Z4YER1bBhWgBQ6mYbJVQDeAN895I,5498
+ vectorvein/workflow/nodes/text_processing.py,sha256=BRmFSyLPADFplbUqUNjoJdmHzQvrPknJvBvvgtzaklk,8744
+ vectorvein/workflow/nodes/tools.py,sha256=ejIQO2hfuRr6m1jc9NMZEUK9ABEWPpX0PVO_UA5BtSc,13853
  vectorvein/workflow/nodes/triggers.py,sha256=BolH4X6S8HSuU2kwHmYKr-ozHbgKBmdZRcnXpK5EfGA,597
- vectorvein/workflow/nodes/vector_db.py,sha256=t6I17q6iR3yQreiDHpRrksMdWDPIvgqJs076z-7dlQQ,5712
+ vectorvein/workflow/nodes/vector_db.py,sha256=p9AT_E8ASbcYHZqHYTCIGvqkIqzxaFM4UxaUELJEe-c,6112
  vectorvein/workflow/nodes/video_generation.py,sha256=qmdg-t_idpxq1veukd-jv_ChICMOoInKxprV9Z4Vi2w,4118
- vectorvein/workflow/nodes/web_crawlers.py,sha256=BhJBX1AZH7-22Gu95Ox4qJqmH5DU-m4dbUb5N5DTA-M,5559
- vectorvein/workflow/utils/check.py,sha256=N2eHyZZVCX_fFriK4pcdDShTCwaLNDngkC_247kF3-c,6836
+ vectorvein/workflow/nodes/web_crawlers.py,sha256=FB0bTimkk___p3Z5UwQx2YarJyQCc45jjnbXbgGA_qw,5640
+ vectorvein/workflow/utils/check.py,sha256=Oj_S5WQf4_Fr_ro3ipjZt9unKFSFcuwZrrSmrS9kVLE,10193
  vectorvein/workflow/utils/json_to_code.py,sha256=F7dhDy8kGc8ndOeihGLRLGFGlquoxVlb02ENtxnQ0C8,5914
  vectorvein/workflow/utils/layout.py,sha256=j0bRD3uaXu40xCS6U6BGahBI8FrHa5MiF55GbTrZ1LM,4565
- vectorvein-0.2.56.dist-info/RECORD,,
+ vectorvein-0.2.57.dist-info/RECORD,,