kailash 0.9.2__py3-none-any.whl → 0.9.4__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
@@ -408,21 +408,32 @@ class CyclicWorkflowExecutor:
408
408
  """
409
409
  results = {}
410
410
 
411
+ # Track nodes that need execution after cycles
412
+ pending_post_cycle_nodes = set()
413
+
411
414
  logger.info(f"Executing plan with {len(plan.stages)} stages")
412
415
 
413
416
  for i, stage in enumerate(plan.stages):
417
+ stage_nodes = getattr(stage, "nodes", "N/A")
414
418
  logger.info(
415
- f"Executing stage {i+1}: is_cycle={stage.is_cycle}, nodes={getattr(stage, 'nodes', 'N/A')}"
419
+ f"Executing stage {i+1}: is_cycle={stage.is_cycle}, nodes={stage_nodes}"
416
420
  )
417
421
  if stage.is_cycle:
418
422
  logger.info(
419
423
  f"Stage {i+1} is a cycle group: {stage.cycle_group.cycle_id}"
420
424
  )
421
- # Execute cycle group
422
- cycle_results = self._execute_cycle_group(
425
+ # Execute cycle group and get downstream nodes
426
+ cycle_results, downstream_nodes = self._execute_cycle_group(
423
427
  workflow, stage.cycle_group, state, task_manager
424
428
  )
425
429
  results.update(cycle_results)
430
+
431
+ # Add downstream nodes to pending execution
432
+ if downstream_nodes:
433
+ pending_post_cycle_nodes.update(downstream_nodes)
434
+ logger.info(
435
+ f"Added {len(downstream_nodes)} nodes for post-cycle execution"
436
+ )
426
437
  else:
427
438
  # Execute DAG nodes using extracted method
428
439
  dag_results = self._execute_dag_portion(
@@ -430,6 +441,40 @@ class CyclicWorkflowExecutor:
430
441
  )
431
442
  results.update(dag_results)
432
443
 
444
+ # Remove executed nodes from pending
445
+ for node in stage.nodes:
446
+ pending_post_cycle_nodes.discard(node)
447
+
448
+ # Execute any remaining post-cycle nodes
449
+ if pending_post_cycle_nodes:
450
+ logger.info(f"Executing {len(pending_post_cycle_nodes)} post-cycle nodes")
451
+
452
+ # We need to include all dependencies of post-cycle nodes to ensure they get their inputs
453
+ # This includes both cycle and non-cycle dependencies
454
+ nodes_to_execute = set(pending_post_cycle_nodes)
455
+
456
+ # For each post-cycle node, check if it has unexecuted dependencies
457
+ for node in list(pending_post_cycle_nodes):
458
+ for pred in workflow.graph.predecessors(node):
459
+ if pred not in state.node_outputs and pred not in nodes_to_execute:
460
+ # This predecessor hasn't been executed yet
461
+ nodes_to_execute.add(pred)
462
+ logger.debug(
463
+ f"Adding dependency {pred} for post-cycle node {node}"
464
+ )
465
+
466
+ # Order them topologically
467
+ subgraph = workflow.graph.subgraph(nodes_to_execute)
468
+ if nx.is_directed_acyclic_graph(subgraph):
469
+ ordered_nodes = list(nx.topological_sort(subgraph))
470
+ else:
471
+ ordered_nodes = list(nodes_to_execute)
472
+
473
+ post_cycle_results = self._execute_dag_portion(
474
+ workflow, ordered_nodes, state, task_manager
475
+ )
476
+ results.update(post_cycle_results)
477
+
433
478
  return results
434
479
 
435
480
  def _execute_dag_portion(
@@ -485,7 +530,7 @@ class CyclicWorkflowExecutor:
485
530
 
486
531
  for cycle_group in cycle_groups:
487
532
  logger.info(f"Executing cycle group: {cycle_group.cycle_id}")
488
- cycle_results = self._execute_cycle_group(
533
+ cycle_results, _ = self._execute_cycle_group(
489
534
  workflow, cycle_group, state, task_manager
490
535
  )
491
536
  results.update(cycle_results)
@@ -547,9 +592,9 @@ class CyclicWorkflowExecutor:
547
592
  Cycle execution results
548
593
  """
549
594
  cycle_id = cycle_group.cycle_id
550
- logger.info(f"*** EXECUTING CYCLE GROUP: {cycle_id} ***")
551
- logger.info(f"Cycle nodes: {cycle_group.nodes}")
552
- logger.info(f"Cycle edges: {cycle_group.edges}")
595
+ logger.info(f"Executing cycle group: {cycle_id}")
596
+ logger.debug(f"Cycle nodes: {cycle_group.nodes}")
597
+ logger.debug(f"Cycle edges: {cycle_group.edges}")
553
598
 
554
599
  # Get cycle configuration from first edge
555
600
  cycle_config = {}
@@ -636,7 +681,13 @@ class CyclicWorkflowExecutor:
636
681
 
637
682
  # Execute nodes in cycle
638
683
  iteration_results = {}
639
- for node_id in cycle_group.get_execution_order(workflow.graph):
684
+ execution_order = cycle_group.get_execution_order(workflow.graph)
685
+ logger.debug(
686
+ f"Cycle {cycle_id} iteration {loop_count}: execution_order={execution_order}"
687
+ )
688
+
689
+ for node_id in execution_order:
690
+ logger.debug(f"Executing {node_id} in iteration {loop_count}")
640
691
  node_result = self._execute_node(
641
692
  workflow,
642
693
  node_id,
@@ -644,14 +695,35 @@ class CyclicWorkflowExecutor:
644
695
  cycle_state,
645
696
  cycle_edges=cycle_group.edges,
646
697
  previous_iteration_results=previous_iteration_results,
698
+ current_iteration_results=iteration_results, # CRITICAL FIX: Pass current iteration results
647
699
  task_manager=task_manager,
648
700
  iteration=loop_count,
649
701
  )
650
- iteration_results[node_id] = node_result
651
- state.node_outputs[node_id] = node_result
652
-
653
- # Update results for this iteration
654
- results.update(iteration_results)
702
+ # CRITICAL FIX: Handle None node results gracefully
703
+ if node_result is not None:
704
+ iteration_results[node_id] = node_result
705
+ else:
706
+ logger.debug(
707
+ f"Node {node_id} returned None result in iteration {loop_count}"
708
+ )
709
+ # Store None result to track execution but don't propagate
710
+ iteration_results[node_id] = None
711
+ # CRITICAL FIX: Don't update state.node_outputs during iteration
712
+ # This was causing non-deterministic behavior because later nodes
713
+ # in the same iteration could see current iteration results
714
+ # instead of previous iteration results
715
+
716
+ # Update results for this iteration - filter out None values for final results
717
+ for node_id, node_result in iteration_results.items():
718
+ if node_result is not None:
719
+ results[node_id] = node_result
720
+
721
+ # CRITICAL FIX: Update state.node_outputs AFTER the entire iteration
722
+ # This ensures all nodes in the current iteration only see previous iteration results
723
+ for node_id, node_result in iteration_results.items():
724
+ # Only update state with non-None results to avoid downstream issues
725
+ if node_result is not None:
726
+ state.node_outputs[node_id] = node_result
655
727
 
656
728
  # Store this iteration's results for next iteration
657
729
  previous_iteration_results = iteration_results.copy()
@@ -719,14 +791,243 @@ class CyclicWorkflowExecutor:
719
791
  except Exception as e:
720
792
  logger.warning(f"Failed to update iteration task: {e}")
721
793
 
794
+ # CRITICAL FIX: Check for natural termination based on cycle connection pattern
795
+ # Different patterns:
796
+ # 1. true_output → continue cycle when True, terminate when False
797
+ # 2. false_output → continue cycle when False, terminate when True
798
+ natural_termination_detected = False
799
+ termination_reasons = [] # Collect all termination reasons
800
+
801
+ for node_id in cycle_group.nodes:
802
+ if node_id in iteration_results:
803
+ node_result = iteration_results[node_id]
804
+ if (
805
+ isinstance(node_result, dict)
806
+ and "condition_result" in node_result
807
+ ):
808
+ condition_result = node_result.get("condition_result")
809
+
810
+ # Check what type of cycle connection this node has
811
+ node_has_true_output_cycle = False
812
+ node_has_false_output_cycle = False
813
+
814
+ for pred, succ, edge_data in cycle_group.edges:
815
+ if pred == node_id and edge_data.get("mapping"):
816
+ mapping = edge_data["mapping"]
817
+ if "true_output" in mapping:
818
+ node_has_true_output_cycle = True
819
+ if "false_output" in mapping:
820
+ node_has_false_output_cycle = True
821
+
822
+ # Only check nodes that are actually part of cycle connections
823
+ if (
824
+ node_has_true_output_cycle
825
+ or node_has_false_output_cycle
826
+ ):
827
+ # Determine if cycle should terminate based on connection pattern
828
+ should_terminate_naturally = False
829
+ if node_has_true_output_cycle and not condition_result:
830
+ # true_output cycle: terminate when condition becomes False
831
+ should_terminate_naturally = True
832
+ reason = f"{node_id} condition=False in true_output cycle"
833
+ termination_reasons.append(reason)
834
+ elif node_has_false_output_cycle and condition_result:
835
+ # false_output cycle: terminate when condition becomes True
836
+ should_terminate_naturally = True
837
+ reason = f"{node_id} condition=True in false_output cycle"
838
+ termination_reasons.append(reason)
839
+
840
+ if should_terminate_naturally:
841
+ natural_termination_detected = True
842
+ should_terminate = True
843
+ # DON'T break - check all SwitchNodes for comprehensive logging
844
+
845
+ # Log all termination reasons if any found
846
+ if natural_termination_detected:
847
+ combined_reason = "; ".join(termination_reasons)
848
+ logger.info(
849
+ f"Cycle {cycle_id} naturally terminating: {combined_reason}"
850
+ )
851
+
722
852
  if should_terminate:
853
+ termination_reason = (
854
+ "max_iterations"
855
+ if loop_count
856
+ >= cycle_config.get("max_iterations", float("inf"))
857
+ else (
858
+ "natural" if natural_termination_detected else "convergence"
859
+ )
860
+ )
723
861
  logger.info(
724
862
  f"Cycle {cycle_id} terminating after {loop_count} iterations"
725
863
  )
864
+
865
+ # CRITICAL FIX: Ensure final cycle results are in state for downstream nodes
866
+ # This is essential for natural cycle termination where downstream nodes
867
+ # need access to the final iteration data
868
+ for node_id in cycle_group.exit_nodes:
869
+ if node_id in iteration_results:
870
+ final_result = iteration_results[node_id]
871
+ state.node_outputs[node_id] = final_result
872
+ logger.debug(
873
+ f"Updated state.node_outputs[{node_id}] with final iteration result for downstream nodes"
874
+ )
875
+
876
+ # CRITICAL: For exit nodes that are conditional (like SwitchNode),
877
+ # we need to ensure downstream nodes can access the appropriate outputs
878
+ # This handles both max iteration termination AND natural termination
879
+ logger.info(
880
+ f"Processing exit nodes: {cycle_group.exit_nodes}, natural_termination_detected: {natural_termination_detected}"
881
+ )
882
+ for exit_node_id in cycle_group.exit_nodes:
883
+ exit_node = workflow.get_node(exit_node_id)
884
+ if exit_node and exit_node.__class__.__name__ == "SwitchNode":
885
+ if exit_node_id in iteration_results:
886
+ exit_result = iteration_results[exit_node_id]
887
+
888
+ # Check if we terminated at max iterations with condition=true
889
+ # In this case, synthesize false_output for downstream nodes
890
+ max_iterations = cycle_config.get(
891
+ "max_iterations", float("inf")
892
+ )
893
+ terminated_at_max = loop_count >= max_iterations
894
+
895
+ if (
896
+ terminated_at_max
897
+ and exit_result is not None
898
+ and exit_result.get("condition_result", False)
899
+ and exit_result.get("true_output")
900
+ ):
901
+ # Find the actual last data from the cycle
902
+ # Look for the node that feeds into this exit node
903
+ last_cycle_data = None
904
+
905
+ # Check which nodes feed into the exit node
906
+ for pred in workflow.graph.predecessors(
907
+ exit_node_id
908
+ ):
909
+ if (
910
+ pred in cycle_group.nodes
911
+ and pred in iteration_results
912
+ ):
913
+ pred_result = iteration_results[pred]
914
+ if (
915
+ isinstance(pred_result, dict)
916
+ and "result" in pred_result
917
+ ):
918
+ last_cycle_data = pred_result["result"]
919
+ logger.debug(
920
+ f"Using data from {pred} for false_output: {last_cycle_data}"
921
+ )
922
+ break
923
+
924
+ # Synthesize a false_output with the actual last iteration's data
925
+ exit_result["false_output"] = (
926
+ last_cycle_data or exit_result["true_output"]
927
+ )
928
+ state.node_outputs[exit_node_id] = exit_result
929
+ logger.debug(
930
+ f"Synthesized false_output for {exit_node_id} on max iteration termination with data: {exit_result['false_output']}"
931
+ )
932
+
933
+ # For natural termination (condition=false), the SwitchNode should already
934
+ # have the correct false_output set, so we just ensure it's in state
935
+ elif (
936
+ not terminated_at_max
937
+ and exit_result is not None
938
+ and not exit_result.get("condition_result", True)
939
+ ):
940
+ # Natural termination - condition became false
941
+ # The SwitchNode should have correctly set false_output
942
+ state.node_outputs[exit_node_id] = exit_result
943
+ logger.debug(
944
+ f"Natural termination: {exit_node_id} condition_result={exit_result.get('condition_result')}, false_output present={exit_result.get('false_output') is not None}"
945
+ )
946
+
947
+ # CRITICAL FIX: For exit nodes that have downstream connections via false_output
948
+ # but the cycle terminated due to a different node, we need to synthesize termination data
949
+ elif (
950
+ natural_termination_detected
951
+ and exit_result is not None
952
+ ):
953
+ logger.debug(
954
+ f"Processing exit node {exit_node_id} for natural termination synthesis"
955
+ )
956
+ # Check if this exit node has downstream connections via false_output
957
+ has_false_output_connections = False
958
+ for succ in workflow.graph.successors(exit_node_id):
959
+ if (
960
+ succ not in cycle_group.nodes
961
+ ): # Downstream node outside cycle
962
+ logger.debug(
963
+ f" Checking downstream node {succ}"
964
+ )
965
+ for edge_data in workflow.graph[
966
+ exit_node_id
967
+ ][succ].values():
968
+ # Handle both dict and string edge_data formats
969
+ if isinstance(edge_data, dict):
970
+ mapping = edge_data.get(
971
+ "mapping", {}
972
+ )
973
+ else:
974
+ # Old format where edge_data might be a string
975
+ logger.debug(
976
+ f" Legacy edge_data format: {edge_data} (type: {type(edge_data)})"
977
+ )
978
+ mapping = {}
979
+ logger.debug(
980
+ f" Edge mapping: {mapping}"
981
+ )
982
+ if "false_output" in mapping:
983
+ has_false_output_connections = True
984
+ logger.debug(
985
+ f" Found false_output connection to {succ}"
986
+ )
987
+ break
988
+
989
+ logger.debug(
990
+ f" Exit node {exit_node_id} has_false_output_connections: {has_false_output_connections}"
991
+ )
992
+ logger.debug(
993
+ f" Exit node {exit_node_id} current false_output: {exit_result.get('false_output')}"
994
+ )
995
+
996
+ # If this exit node has false_output connections but the cycle terminated naturally
997
+ # due to another node, synthesize appropriate termination data
998
+ if (
999
+ has_false_output_connections
1000
+ and exit_result.get("false_output") is None
1001
+ ):
1002
+ # Use the current true_output data as termination data for false_output
1003
+ termination_data = exit_result.get(
1004
+ "true_output"
1005
+ )
1006
+ if termination_data is not None:
1007
+ exit_result["false_output"] = (
1008
+ termination_data
1009
+ )
1010
+ state.node_outputs[exit_node_id] = (
1011
+ exit_result
1012
+ )
1013
+ logger.info(
1014
+ f"Synthesized false_output for {exit_node_id} on natural termination: {termination_data}"
1015
+ )
1016
+
726
1017
  break
727
1018
 
728
1019
  logger.info(f"Cycle {cycle_id} continuing to next iteration")
729
1020
 
1021
+ # Get downstream nodes if cycle has terminated
1022
+ downstream_nodes = None
1023
+ if should_terminate:
1024
+ # Get nodes that depend on cycle output
1025
+ downstream_nodes = cycle_group.get_downstream_nodes(workflow)
1026
+ if downstream_nodes:
1027
+ logger.info(
1028
+ f"Cycle {cycle_id} has downstream nodes: {downstream_nodes}"
1029
+ )
1030
+
730
1031
  # Complete cycle group task
731
1032
  if cycle_task_id and task_manager:
732
1033
  try:
@@ -751,7 +1052,7 @@ class CyclicWorkflowExecutor:
751
1052
  summary = cycle_state.get_summary()
752
1053
  logger.info(f"Cycle {cycle_id} completed: {summary}")
753
1054
 
754
- return results
1055
+ return results, downstream_nodes
755
1056
 
756
1057
  def _execute_node(
757
1058
  self,
@@ -761,6 +1062,9 @@ class CyclicWorkflowExecutor:
761
1062
  cycle_state: CycleState | None = None,
762
1063
  cycle_edges: list[tuple] | None = None,
763
1064
  previous_iteration_results: dict[str, Any] | None = None,
1065
+ current_iteration_results: (
1066
+ dict[str, Any] | None
1067
+ ) = None, # CRITICAL FIX: Current iteration results
764
1068
  task_manager: TaskManager | None = None,
765
1069
  iteration: int | None = None,
766
1070
  ) -> Any:
@@ -792,7 +1096,26 @@ class CyclicWorkflowExecutor:
792
1096
  in_cycle = cycle_state is not None
793
1097
  is_cycle_iteration = in_cycle and cycle_state.iteration > 0
794
1098
 
795
- for pred, _, edge_data in workflow.graph.in_edges(node_id, data=True):
1099
+ # CRITICAL FIX: Process edges in priority order - non-cycle edges first, then cycle edges
1100
+ # This ensures cycle data overwrites non-cycle data when mapping to the same parameter
1101
+ all_edges = list(workflow.graph.in_edges(node_id, data=True))
1102
+
1103
+ # Sort edges: non-cycle edges first, cycle edges second (for priority)
1104
+ non_cycle_edges = []
1105
+ cycle_edges = []
1106
+
1107
+ for pred, _, edge_data in all_edges:
1108
+ # CRITICAL FIX: Synthetic edges are also cycle edges and should have priority
1109
+ is_cycle_edge = edge_data.get(
1110
+ "cycle", False
1111
+ ) # Include synthetic cycle edges
1112
+ if is_cycle_edge:
1113
+ cycle_edges.append((pred, _, edge_data))
1114
+ else:
1115
+ non_cycle_edges.append((pred, _, edge_data))
1116
+
1117
+ # Process non-cycle edges first, then cycle edges (so cycle data has priority)
1118
+ for pred, _, edge_data in non_cycle_edges + cycle_edges:
796
1119
  # Check if this edge is a cycle edge (but NOT synthetic)
797
1120
  is_cycle_edge = edge_data.get("cycle", False) and not edge_data.get(
798
1121
  "synthetic", False
@@ -802,20 +1125,42 @@ class CyclicWorkflowExecutor:
802
1125
  if is_cycle_edge and is_cycle_iteration and previous_iteration_results:
803
1126
  # For cycle edges after first iteration, use previous iteration results
804
1127
  pred_output = previous_iteration_results.get(pred)
1128
+ logger.debug(
1129
+ f"Cycle edge {pred} -> {node_id}: using previous iteration results"
1130
+ )
1131
+ elif current_iteration_results and pred in current_iteration_results:
1132
+ # For non-cycle edges, prefer current iteration results over stale state
1133
+ pred_output = current_iteration_results[pred]
1134
+ logger.debug(
1135
+ f"Non-cycle edge {pred} -> {node_id}: using current iteration results"
1136
+ )
805
1137
  elif pred in state.node_outputs:
806
- # For non-cycle edges or first iteration, use normal state
1138
+ # For non-cycle edges or first iteration, use normal state as fallback
807
1139
  pred_output = state.node_outputs[pred]
1140
+ logger.debug(
1141
+ f"Non-cycle edge {pred} -> {node_id}: using state fallback"
1142
+ )
808
1143
  else:
809
1144
  # No output available
1145
+ logger.debug(f"No output available for {pred} -> {node_id}")
810
1146
  continue
811
1147
 
812
1148
  if pred_output is None:
813
1149
  continue
814
1150
 
815
- # Apply mapping
1151
+ # Apply mapping - with None safety check
816
1152
  mapping = edge_data.get("mapping", {})
1153
+ pred_output_info = (
1154
+ "None"
1155
+ if pred_output is None
1156
+ else (
1157
+ list(pred_output.keys())
1158
+ if isinstance(pred_output, dict)
1159
+ else type(pred_output)
1160
+ )
1161
+ )
817
1162
  logger.debug(
818
- f"Edge {pred} -> {node_id}: mapping = {mapping}, pred_output keys = {list(pred_output.keys()) if isinstance(pred_output, dict) else type(pred_output)}"
1163
+ f"Edge {pred} -> {node_id}: mapping = {mapping}, pred_output keys = {pred_output_info}"
819
1164
  )
820
1165
  for src_key, dst_key in mapping.items():
821
1166
  # Handle nested output access
@@ -1033,15 +1378,57 @@ class ExecutionPlan:
1033
1378
  # Track which nodes have been scheduled
1034
1379
  scheduled = set()
1035
1380
 
1381
+ # Identify nodes that depend on cycle exit nodes through specific outputs
1382
+ # that are only available when the cycle terminates (e.g., false_output of a cycle-controlling switch)
1383
+ nodes_depending_on_cycles = set()
1384
+ for cycle_id, cycle_group in self.cycle_groups.items():
1385
+ for exit_node in cycle_group.exit_nodes:
1386
+ exit_node_obj = workflow.get_node(exit_node)
1387
+ # Special handling for SwitchNodes that control cycles
1388
+ if exit_node_obj and exit_node_obj.__class__.__name__ == "SwitchNode":
1389
+ # For switch nodes, check which output is used for the cycle
1390
+ # and which would be used for exit
1391
+ for source, target, edge_data in workflow.graph.out_edges(
1392
+ exit_node, data=True
1393
+ ):
1394
+ if target not in cycle_group.nodes:
1395
+ # This edge goes outside the cycle
1396
+ mapping = edge_data.get("mapping", {})
1397
+ # Check if this uses an output that indicates cycle termination
1398
+ for src_port, _ in mapping.items():
1399
+ if (
1400
+ "false" in src_port.lower()
1401
+ or "exit" in src_port.lower()
1402
+ ):
1403
+ # This node depends on cycle termination
1404
+ nodes_depending_on_cycles.add(target)
1405
+ logger.debug(
1406
+ f"Node {target} depends on cycle {cycle_id} exit condition via {exit_node}.{src_port}"
1407
+ )
1408
+ else:
1409
+ # For non-switch nodes, use the original logic
1410
+ for successor in workflow.graph.successors(exit_node):
1411
+ if successor not in cycle_group.nodes:
1412
+ nodes_depending_on_cycles.add(successor)
1413
+ logger.debug(
1414
+ f"Node {successor} depends on cycle {cycle_id} exit node {exit_node}"
1415
+ )
1416
+
1036
1417
  logger.debug(
1037
1418
  f"Building stages - cycle_groups: {list(self.cycle_groups.keys())}"
1038
1419
  )
1039
1420
  logger.debug(f"Building stages - topo_order: {topo_order}")
1421
+ logger.debug(f"Nodes depending on cycles: {nodes_depending_on_cycles}")
1040
1422
 
1041
1423
  for node_id in topo_order:
1042
1424
  if node_id in scheduled:
1043
1425
  continue
1044
1426
 
1427
+ # Skip nodes that depend on cycle outputs - they'll be executed post-cycle
1428
+ if node_id in nodes_depending_on_cycles:
1429
+ logger.debug(f"Skipping {node_id} - depends on cycle output")
1430
+ continue
1431
+
1045
1432
  # Check if node is part of a cycle
1046
1433
  in_cycle_id = None
1047
1434
  found_cycle_group = None
@@ -1184,6 +1571,22 @@ class CycleGroup:
1184
1571
  self.exit_nodes = exit_nodes
1185
1572
  self.edges = edges
1186
1573
 
1574
+ def get_downstream_nodes(self, workflow: Workflow) -> set[str]:
1575
+ """Get all nodes that depend on this cycle's output.
1576
+
1577
+ Args:
1578
+ workflow: The workflow containing this cycle
1579
+
1580
+ Returns:
1581
+ Set of node IDs that are downstream from the cycle
1582
+ """
1583
+ downstream = set()
1584
+ for exit_node in self.exit_nodes:
1585
+ for successor in workflow.graph.successors(exit_node):
1586
+ if successor not in self.nodes: # Not part of cycle
1587
+ downstream.add(successor)
1588
+ return downstream
1589
+
1187
1590
  def get_execution_order(self, full_graph: nx.DiGraph) -> list[str]:
1188
1591
  """Get execution order for nodes in cycle.
1189
1592
 
@@ -1194,17 +1597,23 @@ class CycleGroup:
1194
1597
  Ordered list of node IDs
1195
1598
  """
1196
1599
  # Create subgraph with only cycle nodes
1197
- cycle_subgraph = full_graph.subgraph(self.nodes)
1600
+ cycle_subgraph = full_graph.subgraph(self.nodes).copy()
1198
1601
 
1199
- # Try topological sort on the subgraph (might work if cycle edges removed)
1200
- try:
1201
- # Remove cycle edges temporarily
1202
- temp_graph = cycle_subgraph.copy()
1203
- for source, target, _ in self.edges:
1204
- if temp_graph.has_edge(source, target):
1205
- temp_graph.remove_edge(source, target)
1602
+ # Remove only non-synthetic cycle edges
1603
+ # Synthetic edges represent real dependencies and should be kept
1604
+ edges_to_remove = []
1605
+ for source, target, data in cycle_subgraph.edges(data=True):
1606
+ # Only remove edges that are cycle edges and NOT synthetic
1607
+ if data.get("cycle", False) and not data.get("synthetic", False):
1608
+ edges_to_remove.append((source, target))
1206
1609
 
1207
- return list(nx.topological_sort(temp_graph))
1610
+ # Remove the identified edges
1611
+ for source, target in edges_to_remove:
1612
+ cycle_subgraph.remove_edge(source, target)
1613
+
1614
+ # Try topological sort on the subgraph
1615
+ try:
1616
+ return list(nx.topological_sort(cycle_subgraph))
1208
1617
  except (nx.NetworkXError, nx.NetworkXUnfeasible):
1209
1618
  # Fall back to entry nodes first, then others
1210
1619
  order = list(self.entry_nodes)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: kailash
3
- Version: 0.9.2
3
+ Version: 0.9.4
4
4
  Summary: Python SDK for the Kailash container-node architecture
5
5
  Home-page: https://github.com/integrum/kailash-python-sdk
6
6
  Author: Integrum
@@ -117,18 +117,28 @@ Dynamic: requires-python
117
117
 
118
118
  ---
119
119
 
120
- ## 🔥 Latest Release: v0.8.5 (January 20, 2025)
120
+ ## 🔥 Latest Release: v0.9.4 (July 31, 2025)
121
121
 
122
- **Architecture Cleanup & Enterprise Security**
122
+ **Critical DataFlow Fixes & Runtime Enhancements**
123
123
 
124
- - 🏗️ **Architecture Clarity**: Removed confusing `src/kailash/nexus` module
125
- - 🔒 **Connection Validation**: Enterprise-grade parameter validation with type safety
126
- - 🌐 **Edge Computing**: 50+ new nodes for geo-distributed infrastructure
127
- - 📊 **Advanced Monitoring**: AlertManager with proactive threshold monitoring
128
- - 🚀 **Performance**: <1ms validation overhead with intelligent caching
129
- - ✅ **Breaking Changes**: See [migration guide](sdk-users/6-reference/changelogs/releases/v0.8.5-2025-07-20.md)
124
+ ### 🛡️ DataFlow Connection String Parsing
125
+ - **Fixed**: Special characters in passwords (`#`, `$`, `@`, `?`) now work automatically
126
+ - **Before**: `invalid literal for int() with base 10` errors with special characters
127
+ - **After**: All passwords work seamlessly without manual URL encoding
128
+ - **Impact**: Major usability improvement for production deployments
130
129
 
131
- [Full Changelog](sdk-users/6-reference/changelogs/releases/v0.8.5-2025-07-20.md) | [PyPI Packages](https://pypi.org/project/kailash/0.8.5/)
130
+ ### 🎯 Runtime Content-Aware Success Detection
131
+ - **New**: LocalRuntime detects `{"success": False}` patterns in node responses
132
+ - **Benefit**: Earlier failure detection and better error reporting
133
+ - **Default**: Enabled by default, backward compatible
134
+ - **Config**: `LocalRuntime(content_aware_success_detection=True)`
135
+
136
+ ### 📚 Documentation Updates
137
+ - Updated DataFlow connection examples with special character support
138
+ - Added runtime configuration options documentation
139
+ - Enhanced troubleshooting guide with new diagnosis patterns
140
+
141
+ [Full Changelog](sdk-users/6-reference/changelogs/releases/v0.9.4-2025-07-31.md) | [Core SDK 0.9.4](https://pypi.org/project/kailash/0.9.4/) | [DataFlow 0.3.3](https://pypi.org/project/kailash-dataflow/0.3.3/)
132
142
 
133
143
  ## 🎯 What Makes Kailash Different
134
144
 
@@ -345,7 +355,7 @@ results, run_id = runtime.execute(workflow.build())
345
355
 
346
356
  ## 🚀 Applications Built with Kailash
347
357
 
348
- ### 1. DataFlow - Zero-Config Database Platform (v0.3.1)
358
+ ### 1. DataFlow - Zero-Config Database Platform (v0.3.3)
349
359
  ```bash
350
360
  pip install kailash-dataflow
351
361
  ```
@@ -353,7 +363,7 @@ pip install kailash-dataflow
353
363
  - **Redis caching** with enterprise-grade invalidation
354
364
  - **Automatic API generation** with OpenAPI documentation
355
365
  - **4 production examples** with complete deployment guides
356
- - **Latest**: v0.3.1 - Enhanced transaction support and schema management
366
+ - **Latest**: v0.3.3 - Critical connection parsing fix for special characters in passwords
357
367
 
358
368
  ### 2. Nexus - Multi-Channel Platform (v1.0.3)
359
369
  ```bash