kailash 0.9.2__py3-none-any.whl → 0.9.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/__init__.py +1 -1
- kailash/analysis/__init__.py +9 -0
- kailash/analysis/conditional_branch_analyzer.py +696 -0
- kailash/nodes/logic/intelligent_merge.py +475 -0
- kailash/nodes/logic/operations.py +41 -8
- kailash/planning/__init__.py +9 -0
- kailash/planning/dynamic_execution_planner.py +776 -0
- kailash/runtime/compatibility_reporter.py +497 -0
- kailash/runtime/hierarchical_switch_executor.py +548 -0
- kailash/runtime/local.py +1904 -27
- kailash/runtime/parallel.py +1 -1
- kailash/runtime/performance_monitor.py +215 -0
- kailash/runtime/validation/import_validator.py +7 -0
- kailash/workflow/cyclic_runner.py +436 -27
- {kailash-0.9.2.dist-info → kailash-0.9.4.dist-info}/METADATA +22 -12
- {kailash-0.9.2.dist-info → kailash-0.9.4.dist-info}/RECORD +20 -12
- {kailash-0.9.2.dist-info → kailash-0.9.4.dist-info}/WHEEL +0 -0
- {kailash-0.9.2.dist-info → kailash-0.9.4.dist-info}/entry_points.txt +0 -0
- {kailash-0.9.2.dist-info → kailash-0.9.4.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.9.2.dist-info → kailash-0.9.4.dist-info}/top_level.txt +0 -0
kailash/nodes/logic/intelligent_merge.py (new file)
@@ -0,0 +1,475 @@
+"""Intelligent merge node for handling conditional inputs.
+
+This module provides an enhanced MergeNode that intelligently handles
+partial inputs from conditional branches in workflows.
+"""
+
+import logging
+from typing import Any, Dict, List, Optional, Union
+
+from kailash.nodes import Node, NodeParameter
+from kailash.nodes.base import register_node
+
+logger = logging.getLogger(__name__)
+
+
+@register_node()
+class IntelligentMergeNode(Node):
+    """Enhanced merge node with intelligent handling of conditional inputs.
+
+    This node extends the basic MergeNode functionality with:
+    - Intelligent handling of None/missing inputs from skipped branches
+    - Multiple merge strategies (combine, first_available, weighted)
+    - Timeout support for async inputs
+    - Fallback handling for missing data
+
+    Strategies:
+    - combine: Merge all non-None inputs into a single output
+    - first_available: Return the first non-None input
+    - weighted: Merge inputs with weighted scoring
+    - fallback: Try inputs in order until one succeeds
+    - adaptive: Automatically choose best strategy based on input patterns
+    - consensus: Use majority consensus for decision-making
+    - priority_merge: Merge based on priority levels
+    - conditional_aware: Conditional execution optimized merge
+    """
+
+    def get_parameters(self) -> Dict[str, NodeParameter]:
+        """Define node parameters."""
+        return {
+            "method": NodeParameter(
+                name="method",
+                type=str,
+                required=False,
+                default="combine",
+                description="Merge strategy: combine, first_available, weighted, fallback, adaptive, consensus, priority_merge, conditional_aware",
+            ),
+            "handle_none": NodeParameter(
+                name="handle_none",
+                type=bool,
+                required=False,
+                default=True,
+                description="Whether to intelligently handle None inputs",
+            ),
+            "timeout": NodeParameter(
+                name="timeout",
+                type=float,
+                required=False,
+                default=None,
+                description="Timeout for waiting on async inputs",
+            ),
+            "priority_threshold": NodeParameter(
+                name="priority_threshold",
+                type=float,
+                required=False,
+                default=0.5,
+                description="Minimum priority threshold for priority_merge strategy",
+            ),
+            "consensus_threshold": NodeParameter(
+                name="consensus_threshold",
+                type=int,
+                required=False,
+                default=2,
+                description="Minimum number of inputs needed for consensus strategy",
+            ),
+            "conditional_context": NodeParameter(
+                name="conditional_context",
+                type=dict,
+                required=False,
+                default=None,
+                description="Context from conditional execution for strategy optimization",
+            ),
+            # Dynamic inputs - up to 10 for flexibility
+            **{
+                f"input{i}": NodeParameter(
+                    name=f"input{i}",
+                    type=Any,
+                    required=False,
+                    default=None,
+                    description=f"Input source {i}",
+                )
+                for i in range(1, 11)
+            },
+        }
+
+    def get_output_schema(self) -> Dict[str, NodeParameter]:
+        """Define output schema."""
+        return {
+            "output": NodeParameter(
+                name="output", type=Any, description="Merged result based on strategy"
+            ),
+            "merge_stats": NodeParameter(
+                name="merge_stats",
+                type=dict,
+                description="Statistics about the merge operation",
+            ),
+        }
+
+    def run(self, **kwargs) -> Dict[str, Any]:
+        """Execute intelligent merge operation."""
+        method = kwargs.get("method", "combine")
+        handle_none = kwargs.get("handle_none", True)
+        timeout = kwargs.get("timeout")
+
+        # Collect all non-parameter inputs
+        inputs = {}
+        for key, value in kwargs.items():
+            if key.startswith("input") and key[5:].isdigit():
+                if handle_none and value is not None:
+                    inputs[key] = value
+                elif not handle_none:
+                    inputs[key] = value
+
+        logger.debug(f"Intelligent merge with method={method}, inputs={len(inputs)}")
+
+        # Execute merge based on strategy
+        if method == "combine":
+            result = self._merge_combine(inputs)
+        elif method == "first_available":
+            result = self._merge_first_available(inputs)
+        elif method == "weighted":
+            result = self._merge_weighted(inputs)
+        elif method == "fallback":
+            result = self._merge_fallback(inputs)
+        elif method == "adaptive":
+            result = self._merge_adaptive(inputs, kwargs)
+        elif method == "consensus":
+            result = self._merge_consensus(inputs, kwargs.get("consensus_threshold", 2))
+        elif method == "priority_merge":
+            result = self._merge_priority(inputs, kwargs.get("priority_threshold", 0.5))
+        elif method == "conditional_aware":
+            result = self._merge_conditional_aware(
+                inputs, kwargs.get("conditional_context")
+            )
+        else:
+            raise ValueError(f"Unknown merge method: {method}")
+
+        # Collect statistics
+        stats = {
+            "method": method,
+            "total_inputs": len(kwargs) - 3,  # Exclude method, handle_none, timeout
+            "valid_inputs": len(inputs),
+            "skipped_inputs": len(kwargs) - 3 - len(inputs),
+        }
+
+        return {"output": result, "merge_stats": stats}
+
+    def _merge_combine(self, inputs: Dict[str, Any]) -> Any:
+        """Combine all inputs into a single structure."""
+        if not inputs:
+            return {}
+
+        # If all inputs are dicts, merge them
+        if all(isinstance(v, dict) for v in inputs.values()):
+            result = {}
+            for input_dict in inputs.values():
+                result.update(input_dict)
+            return result
+
+        # If all inputs are lists, concatenate them
+        if all(isinstance(v, list) for v in inputs.values()):
+            result = []
+            for input_list in inputs.values():
+                result.extend(input_list)
+            return result
+
+        # Mixed types - return as dict
+        return inputs
+
+    def _merge_first_available(self, inputs: Dict[str, Any]) -> Any:
+        """Return the first available (non-None) input."""
+        if not inputs:
+            return None
+
+        # Sort by input number to maintain order
+        sorted_keys = sorted(inputs.keys(), key=lambda x: int(x[5:]))
+        for key in sorted_keys:
+            if inputs[key] is not None:
+                return inputs[key]
+
+        return None
+
+    def _merge_weighted(self, inputs: Dict[str, Any]) -> Any:
+        """Merge inputs with weighted scoring."""
+        if not inputs:
+            return {"score": 0, "components": 0}
+
+        scores = []
+        weights = []
+        components = []
+
+        for key, value in inputs.items():
+            if isinstance(value, dict):
+                if "score" in value and "weight" in value:
+                    scores.append(value["score"])
+                    weights.append(value["weight"])
+                    components.append(value)
+
+        if not scores:
+            return {"score": 0, "components": 0}
+
+        # Calculate weighted average
+        weighted_sum = sum(s * w for s, w in zip(scores, weights))
+        total_weight = sum(weights)
+
+        if total_weight > 0:
+            final_score = weighted_sum / total_weight
+        else:
+            final_score = sum(scores) / len(scores)
+
+        return {
+            "score": final_score,
+            "components": len(components),
+            "details": components,
+        }
+
+    def _merge_fallback(self, inputs: Dict[str, Any]) -> Any:
+        """Try inputs in order until one succeeds."""
+        if not inputs:
+            return {"source": "none", "data": None}
+
+        # Sort by input number to maintain order
+        sorted_keys = sorted(inputs.keys(), key=lambda x: int(x[5:]))
+
+        for key in sorted_keys:
+            value = inputs[key]
+            if value is not None:
+                # Check if it's a valid response
+                if isinstance(value, dict):
+                    if value.get("available", True) or value.get("data") is not None:
+                        return value
+                else:
+                    return {"source": key, "data": value}
+
+        # No valid input found
+        return {"source": "fallback_failed", "data": None}
+
+    def _merge_adaptive(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> Any:
+        """Automatically choose the best merge strategy based on input patterns."""
+        if not inputs:
+            return {"strategy_used": "adaptive", "result": None, "reason": "no_inputs"}
+
+        input_values = list(inputs.values())
+
+        # Analyze input patterns to choose best strategy
+        if len(inputs) == 1:
+            # Single input - return directly
+            strategy_used = "first_available"
+            result = self._merge_first_available(inputs)
+        elif all(isinstance(v, dict) and "priority" in v for v in input_values):
+            # All inputs have priority - use priority merge
+            strategy_used = "priority_merge"
+            result = self._merge_priority(inputs, kwargs.get("priority_threshold", 0.5))
+        elif all(
+            isinstance(v, dict) and "score" in v and "weight" in v for v in input_values
+        ):
+            # All inputs have scores and weights - use weighted merge
+            strategy_used = "weighted"
+            result = self._merge_weighted(inputs)
+        elif len(inputs) >= kwargs.get("consensus_threshold", 2):
+            # Multiple inputs - try consensus
+            strategy_used = "consensus"
+            result = self._merge_consensus(inputs, kwargs.get("consensus_threshold", 2))
+        else:
+            # Default to combine
+            strategy_used = "combine"
+            result = self._merge_combine(inputs)
+
+        return {
+            "strategy_used": strategy_used,
+            "result": result,
+            "input_count": len(inputs),
+            "adaptation_reason": f"Selected {strategy_used} based on input analysis",
+        }
+
+    def _merge_consensus(self, inputs: Dict[str, Any], threshold: int) -> Any:
+        """Use majority consensus for decision-making."""
+        if len(inputs) < threshold:
+            return {
+                "consensus": False,
+                "result": None,
+                "reason": f"Insufficient inputs ({len(inputs)} < {threshold})",
+            }
+
+        # For boolean decisions
+        if all(isinstance(v, bool) for v in inputs.values()):
+            true_count = sum(1 for v in inputs.values() if v)
+            false_count = len(inputs) - true_count
+            consensus_result = true_count > false_count
+
+            return {
+                "consensus": True,
+                "result": consensus_result,
+                "vote_count": {"true": true_count, "false": false_count},
+                "confidence": max(true_count, false_count) / len(inputs),
+            }
+
+        # For dict inputs with decisions
+        if all(isinstance(v, dict) and "decision" in v for v in inputs.values()):
+            decisions = [v["decision"] for v in inputs.values()]
+            decision_counts = {}
+            for decision in decisions:
+                decision_counts[decision] = decision_counts.get(decision, 0) + 1
+
+            majority_decision = max(decision_counts, key=decision_counts.get)
+            majority_count = decision_counts[majority_decision]
+
+            return {
+                "consensus": majority_count >= threshold,
+                "result": majority_decision,
+                "vote_count": decision_counts,
+                "confidence": majority_count / len(inputs),
+            }
+
+        # For other types, use most common value
+        value_counts = {}
+        for value in inputs.values():
+            value_str = str(value)
+            value_counts[value_str] = value_counts.get(value_str, 0) + 1
+
+        if value_counts:
+            most_common = max(value_counts, key=value_counts.get)
+            most_common_count = value_counts[most_common]
+
+            # Find the actual value (not string representation)
+            consensus_value = None
+            for value in inputs.values():
+                if str(value) == most_common:
+                    consensus_value = value
+                    break
+
+            return {
+                "consensus": most_common_count >= threshold,
+                "result": consensus_value,
+                "vote_count": value_counts,
+                "confidence": most_common_count / len(inputs),
+            }
+
+        return {"consensus": False, "result": None, "reason": "no_valid_inputs"}
+
+    def _merge_priority(self, inputs: Dict[str, Any], threshold: float) -> Any:
+        """Merge inputs based on priority levels."""
+        if not inputs:
+            return {"result": None, "priorities_processed": 0}
+
+        prioritized_inputs = []
+
+        for key, value in inputs.items():
+            if isinstance(value, dict) and "priority" in value:
+                priority = value["priority"]
+                if priority >= threshold:
+                    prioritized_inputs.append((priority, key, value))
+            else:
+                # Default priority for non-dict inputs
+                prioritized_inputs.append((1.0, key, value))
+
+        if not prioritized_inputs:
+            return {
+                "result": None,
+                "priorities_processed": 0,
+                "reason": "no_inputs_above_threshold",
+            }
+
+        # Sort by priority (highest first)
+        prioritized_inputs.sort(key=lambda x: x[0], reverse=True)
+
+        # Merge high-priority inputs
+        high_priority_data = []
+        priorities_used = []
+
+        for priority, key, value in prioritized_inputs:
+            high_priority_data.append(value)
+            priorities_used.append(priority)
+
+        # Combine high-priority inputs
+        if len(high_priority_data) == 1:
+            result = high_priority_data[0]
+        else:
+            # Create temporary inputs dict for combining
+            temp_inputs = {
+                f"input{i}": data for i, data in enumerate(high_priority_data)
+            }
+            result = self._merge_combine(temp_inputs)
+
+        return {
+            "result": result,
+            "priorities_processed": len(high_priority_data),
+            "priorities_used": priorities_used,
+            "highest_priority": max(priorities_used) if priorities_used else 0,
+        }
+
+    def _merge_conditional_aware(
+        self, inputs: Dict[str, Any], conditional_context: Optional[Dict[str, Any]]
+    ) -> Any:
+        """Conditional execution optimized merge strategy."""
+        if not inputs:
+            return {
+                "result": None,
+                "strategy": "conditional_aware",
+                "inputs_processed": 0,
+                "conditional_context": conditional_context,
+            }
+
+        # Use conditional context to optimize merge
+        if conditional_context:
+            available_branches = conditional_context.get("available_branches", [])
+            skipped_branches = conditional_context.get("skipped_branches", [])
+            execution_confidence = conditional_context.get("execution_confidence", 1.0)
+
+            # Filter inputs based on conditional context
+            filtered_inputs = {}
+            for key, value in inputs.items():
+                # Check if this input corresponds to an available branch
+                input_branch = key.replace("input", "")
+                if not available_branches or input_branch in available_branches:
+                    filtered_inputs[key] = value
+
+            if not filtered_inputs:
+                return {
+                    "result": None,
+                    "strategy": "conditional_aware",
+                    "reason": "all_inputs_from_skipped_branches",
+                    "skipped_branches": skipped_branches,
+                    "inputs_processed": 0,
+                }
+
+            # Choose merge strategy based on execution confidence
+            if execution_confidence >= 0.8:
+                # High confidence - use standard combine
+                merge_result = self._merge_combine(filtered_inputs)
+                strategy_used = "combine"
+            elif execution_confidence >= 0.5:
+                # Medium confidence - use adaptive merge
+                temp_kwargs = {"consensus_threshold": 2, "priority_threshold": 0.5}
+                adaptive_result = self._merge_adaptive(filtered_inputs, temp_kwargs)
+                merge_result = adaptive_result["result"]
+                strategy_used = f"adaptive->{adaptive_result['strategy_used']}"
+            else:
+                # Low confidence - use first available
+                merge_result = self._merge_first_available(filtered_inputs)
+                strategy_used = "first_available"
+
+            return {
+                "result": merge_result,
+                "strategy": "conditional_aware",
+                "sub_strategy": strategy_used,
+                "execution_confidence": execution_confidence,
+                "available_branches": available_branches,
+                "inputs_processed": len(filtered_inputs),
+                "inputs_skipped": len(inputs) - len(filtered_inputs),
+            }
+        else:
+            # No conditional context - fall back to adaptive merge
+            logger.debug(
+                "No conditional context provided, falling back to adaptive merge"
+            )
+            temp_kwargs = {"consensus_threshold": 2, "priority_threshold": 0.5}
+            adaptive_result = self._merge_adaptive(inputs, temp_kwargs)
+
+            return {
+                "result": adaptive_result["result"],
+                "strategy": "conditional_aware",
+                "sub_strategy": f"fallback->{adaptive_result['strategy_used']}",
+                "reason": "no_conditional_context",
+                "inputs_processed": len(inputs),
+            }
kailash/nodes/logic/operations.py
@@ -106,9 +106,8 @@ class SwitchNode(Node):
                 auto_map_from=[
                     "data",
                     "input",
-                    "value",
                     "items",
-                ],  # Common alternatives
+                ],  # Common alternatives - removed 'value' to prevent conflict with condition value
                 workflow_alias="data",  # Preferred name in workflow connections
             ),
             "condition_field": NodeParameter(
@@ -276,13 +275,20 @@ class SwitchNode(Node):
             result = {"default": kwargs["input_data"], "condition_result": None}
             return result

-        #
+        # Handle missing input_data during conditional execution phase 1
+        # When executing switches before source nodes, we need to make routing decisions
+        # based on the configuration alone
         if "input_data" not in kwargs:
-
-
+            # During phase 1 of conditional execution, source nodes haven't run yet
+            # We can still make routing decisions based on static conditions
+            self.logger.debug(
+                "SwitchNode executing without input_data (conditional phase 1)"
             )
-
-
+            # For static comparisons (e.g., != with a value), we can assume no match
+            # This allows the workflow to proceed and execute the appropriate branches
+            input_data = None
+        else:
+            input_data = kwargs["input_data"]
         condition_field = kwargs.get("condition_field")
         operator = kwargs.get("operator", "==")
         value = kwargs.get("value")
@@ -293,7 +299,14 @@ class SwitchNode(Node):
         break_after_first_match = kwargs.get("break_after_first_match", True)

         # Extract the value to check
-        if condition_field:
+        if input_data is None:
+            # During conditional phase 1, we don't have actual data
+            # Use None as check_value which will typically not match conditions
+            check_value = None
+            self.logger.debug(
+                "No input_data available, using None for condition checks"
+            )
+        elif condition_field:
             # Handle both single dict and list of dicts
             if isinstance(input_data, dict):
                 check_value = input_data.get(condition_field)
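
Taken together, the two hunks above change how the switch derives its check value when the source node has not produced data yet. The sketch below restates that flow as a standalone function; it is a simplified model for illustration, not the actual SwitchNode method, and it omits the list-of-dicts handling from the real code.

    # Simplified restatement of the phase-1 input handling added above.
    def resolve_check_value(kwargs: dict):
        # Phase 1: the switch may run before its source node, so input_data is absent.
        if "input_data" not in kwargs:
            input_data = None  # routing is decided from configuration alone
        else:
            input_data = kwargs["input_data"]

        condition_field = kwargs.get("condition_field")
        if input_data is None:
            return None  # None will generally fail the condition check
        if condition_field and isinstance(input_data, dict):
            return input_data.get(condition_field)
        return input_data

    assert resolve_check_value({"condition_field": "status"}) is None
    assert resolve_check_value(
        {"input_data": {"status": "active"}, "condition_field": "status"}
    ) == "active"
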
@@ -417,6 +430,20 @@ class SwitchNode(Node):
             is_not_null: Check if not None
         """
         try:
+            # Handle None values gracefully during conditional execution phase 1
+            if check_value is None and operator not in [
+                "is_null",
+                "is_not_null",
+                "==",
+                "!=",
+            ]:
+                # For comparison operators with None, return False by default
+                # This ensures branches are properly evaluated when data is available
+                self.logger.debug(
+                    f"Condition check with None value for operator '{operator}', defaulting to False"
+                )
+                return False
+
             if operator == "==":
                 return check_value == compare_value
             elif operator == "!=":
@@ -430,8 +457,14 @@ class SwitchNode(Node):
             elif operator == "<=":
                 return check_value <= compare_value
             elif operator == "in":
+                # Handle None for 'in' operator
+                if check_value is None or compare_value is None:
+                    return False
                 return check_value in compare_value
             elif operator == "contains":
+                # Handle None for 'contains' operator
+                if check_value is None or compare_value is None:
+                    return False
                 return compare_value in check_value
             elif operator == "is_null":
                 return check_value is None
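
The remaining hunks make condition evaluation total over None. Below is a compact, self-contained restatement of the new behaviour; it is a sketch for illustration, not the library's actual condition-evaluation method, the is_not_null branch is assumed from the docstring, and the ordering operators are omitted for brevity.

    # Simplified model of the None handling added to condition evaluation.
    def evaluate(check_value, operator, compare_value):
        # None only makes sense for equality and null checks; everything else is False.
        if check_value is None and operator not in ("is_null", "is_not_null", "==", "!="):
            return False
        if operator == "==":
            return check_value == compare_value
        if operator == "!=":
            return check_value != compare_value
        if operator == "in":
            if check_value is None or compare_value is None:
                return False
            return check_value in compare_value
        if operator == "contains":
            if check_value is None or compare_value is None:
                return False
            return compare_value in check_value
        if operator == "is_null":
            return check_value is None
        if operator == "is_not_null":
            return check_value is not None
        raise ValueError(f"unsupported operator in this sketch: {operator}")

    assert evaluate(None, ">", 10) is False          # phase 1: no data yet, never matches
    assert evaluate(None, "is_null", None) is True
    assert evaluate("active", "==", "active") is True
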