kailash-0.4.2-py3-none-any.whl → kailash-0.6.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. kailash/__init__.py +1 -1
  2. kailash/client/__init__.py +12 -0
  3. kailash/client/enhanced_client.py +306 -0
  4. kailash/core/actors/__init__.py +16 -0
  5. kailash/core/actors/connection_actor.py +566 -0
  6. kailash/core/actors/supervisor.py +364 -0
  7. kailash/edge/__init__.py +16 -0
  8. kailash/edge/compliance.py +834 -0
  9. kailash/edge/discovery.py +659 -0
  10. kailash/edge/location.py +582 -0
  11. kailash/gateway/__init__.py +33 -0
  12. kailash/gateway/api.py +289 -0
  13. kailash/gateway/enhanced_gateway.py +357 -0
  14. kailash/gateway/resource_resolver.py +217 -0
  15. kailash/gateway/security.py +227 -0
  16. kailash/middleware/auth/models.py +2 -2
  17. kailash/middleware/database/base_models.py +1 -7
  18. kailash/middleware/database/repositories.py +3 -1
  19. kailash/middleware/gateway/__init__.py +22 -0
  20. kailash/middleware/gateway/checkpoint_manager.py +398 -0
  21. kailash/middleware/gateway/deduplicator.py +382 -0
  22. kailash/middleware/gateway/durable_gateway.py +417 -0
  23. kailash/middleware/gateway/durable_request.py +498 -0
  24. kailash/middleware/gateway/event_store.py +459 -0
  25. kailash/nodes/admin/audit_log.py +364 -6
  26. kailash/nodes/admin/permission_check.py +817 -33
  27. kailash/nodes/admin/role_management.py +1242 -108
  28. kailash/nodes/admin/schema_manager.py +438 -0
  29. kailash/nodes/admin/user_management.py +1209 -681
  30. kailash/nodes/api/http.py +95 -71
  31. kailash/nodes/base.py +281 -164
  32. kailash/nodes/base_async.py +30 -31
  33. kailash/nodes/code/__init__.py +8 -1
  34. kailash/nodes/code/async_python.py +1035 -0
  35. kailash/nodes/code/python.py +1 -0
  36. kailash/nodes/data/async_sql.py +12 -25
  37. kailash/nodes/data/sql.py +20 -11
  38. kailash/nodes/data/workflow_connection_pool.py +643 -0
  39. kailash/nodes/rag/__init__.py +1 -4
  40. kailash/resources/__init__.py +40 -0
  41. kailash/resources/factory.py +533 -0
  42. kailash/resources/health.py +319 -0
  43. kailash/resources/reference.py +288 -0
  44. kailash/resources/registry.py +392 -0
  45. kailash/runtime/async_local.py +711 -302
  46. kailash/testing/__init__.py +34 -0
  47. kailash/testing/async_test_case.py +353 -0
  48. kailash/testing/async_utils.py +345 -0
  49. kailash/testing/fixtures.py +458 -0
  50. kailash/testing/mock_registry.py +495 -0
  51. kailash/utils/resource_manager.py +420 -0
  52. kailash/workflow/__init__.py +8 -0
  53. kailash/workflow/async_builder.py +621 -0
  54. kailash/workflow/async_patterns.py +766 -0
  55. kailash/workflow/builder.py +93 -10
  56. kailash/workflow/cyclic_runner.py +111 -41
  57. kailash/workflow/graph.py +7 -2
  58. kailash/workflow/resilience.py +11 -1
  59. {kailash-0.4.2.dist-info → kailash-0.6.0.dist-info}/METADATA +12 -7
  60. {kailash-0.4.2.dist-info → kailash-0.6.0.dist-info}/RECORD +64 -28
  61. {kailash-0.4.2.dist-info → kailash-0.6.0.dist-info}/WHEEL +0 -0
  62. {kailash-0.4.2.dist-info → kailash-0.6.0.dist-info}/entry_points.txt +0 -0
  63. {kailash-0.4.2.dist-info → kailash-0.6.0.dist-info}/licenses/LICENSE +0 -0
  64. {kailash-0.4.2.dist-info → kailash-0.6.0.dist-info}/top_level.txt +0 -0
kailash/workflow/builder.py CHANGED
@@ -21,7 +21,7 @@ class WorkflowBuilder:
 
     def add_node(
         self,
-        node_type: str,
+        node_type: str | type | Any,
         node_id: str | None = None,
         config: dict[str, Any] | None = None,
     ) -> str:
@@ -29,9 +29,9 @@ class WorkflowBuilder:
         Add a node to the workflow.
 
         Args:
-            node_type: Node type name
+            node_type: Node type name (string), Node class, or Node instance
             node_id: Unique identifier for this node (auto-generated if not provided)
-            config: Configuration for the node
+            config: Configuration for the node (ignored if node_type is an instance)
 
         Returns:
             Node ID (useful for method chaining)
@@ -48,11 +48,80 @@ class WorkflowBuilder:
                 f"Node ID '{node_id}' already exists in workflow"
             )
 
-        self.nodes[node_id] = {"type": node_type, "config": config or {}}
+        # Import Node here to avoid circular imports
+        from kailash.nodes.base import Node
+
+        # Handle different input types
+        if isinstance(node_type, str):
+            # String node type name
+            self.nodes[node_id] = {"type": node_type, "config": config or {}}
+            type_name = node_type
+        elif isinstance(node_type, type) and issubclass(node_type, Node):
+            # Node class
+            self.nodes[node_id] = {
+                "type": node_type.__name__,
+                "config": config or {},
+                "class": node_type,
+            }
+            type_name = node_type.__name__
+        elif hasattr(node_type, "__class__") and issubclass(node_type.__class__, Node):
+            # Node instance
+            self.nodes[node_id] = {
+                "instance": node_type,
+                "type": node_type.__class__.__name__,
+            }
+            type_name = node_type.__class__.__name__
+        else:
+            raise WorkflowValidationError(
+                f"Invalid node type: {type(node_type)}. "
+                "Expected: str (node type name), Node class, or Node instance"
+            )
 
-        logger.info(f"Added node '{node_id}' of type '{node_type}'")
+        logger.info(f"Added node '{node_id}' of type '{type_name}'")
         return node_id
 
+    def add_node_instance(self, node_instance: Any, node_id: str | None = None) -> str:
+        """
+        Add a node instance to the workflow.
+
+        This is a convenience method for adding pre-configured node instances.
+
+        Args:
+            node_instance: Pre-configured node instance
+            node_id: Unique identifier for this node (auto-generated if not provided)
+
+        Returns:
+            Node ID
+
+        Raises:
+            WorkflowValidationError: If node_id is already used or instance is invalid
+        """
+        return self.add_node(node_instance, node_id)
+
+    def add_node_type(
+        self,
+        node_type: str,
+        node_id: str | None = None,
+        config: dict[str, Any] | None = None,
+    ) -> str:
+        """
+        Add a node by type name to the workflow.
+
+        This is the original string-based method, provided for clarity and backward compatibility.
+
+        Args:
+            node_type: Node type name as string
+            node_id: Unique identifier for this node (auto-generated if not provided)
+            config: Configuration for the node
+
+        Returns:
+            Node ID
+
+        Raises:
+            WorkflowValidationError: If node_id is already used
+        """
+        return self.add_node(node_type, node_id, config)
+
     def add_connection(
         self, from_node: str, from_output: str, to_node: str, to_input: str
     ) -> None:
@@ -149,11 +218,25 @@ class WorkflowBuilder:
         # Add nodes to workflow
         for node_id, node_info in self.nodes.items():
             try:
-                node_type = node_info["type"]
-                node_config = node_info.get("config", {})
-
-                # Add the node to workflow
-                workflow._add_node_internal(node_id, node_type, node_config)
+                if "instance" in node_info:
+                    # Node instance was provided
+                    workflow.add_node(
+                        node_id=node_id, node_or_type=node_info["instance"]
+                    )
+                elif "class" in node_info:
+                    # Node class was provided
+                    node_class = node_info["class"]
+                    node_config = node_info.get("config", {})
+                    workflow.add_node(
+                        node_id=node_id, node_or_type=node_class, **node_config
+                    )
+                else:
+                    # String node type
+                    node_type = node_info["type"]
+                    node_config = node_info.get("config", {})
+                    workflow.add_node(
+                        node_id=node_id, node_or_type=node_type, **node_config
+                    )
             except Exception as e:
                 raise WorkflowValidationError(
                     f"Failed to add node '{node_id}' to workflow: {e}"
kailash/workflow/cyclic_runner.py CHANGED
@@ -297,19 +297,29 @@ class CyclicWorkflowExecutor:
         entry_nodes = set()
         exit_nodes = set()
 
+        # First, collect all nodes in the cycle
         for source, target, data in cycle_edges:
             cycle_nodes.add(source)
             cycle_nodes.add(target)
 
+        # Then identify entry and exit nodes
+        for node in cycle_nodes:
             # Entry nodes have incoming edges from non-cycle nodes
-            for pred in workflow.graph.predecessors(target):
+            for pred in workflow.graph.predecessors(node):
                 if pred not in cycle_nodes:
-                    entry_nodes.add(target)
+                    entry_nodes.add(node)
+                    logger.debug(
+                        f"Cycle {cycle_id}: Node {node} is an entry node (has predecessor {pred})"
+                    )
 
             # Exit nodes have outgoing edges to non-cycle nodes
-            for succ in workflow.graph.successors(source):
+            for succ in workflow.graph.successors(node):
                 if succ not in cycle_nodes:
-                    exit_nodes.add(source)
+                    exit_nodes.add(node)
+
+        logger.debug(
+            f"Cycle {cycle_id}: nodes={cycle_nodes}, entry_nodes={entry_nodes}, exit_nodes={exit_nodes}"
+        )
 
         plan.add_cycle_group(
             cycle_id=cycle_id,
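Note: a self-contained sketch of the entry/exit classification the rewritten loop performs, using networkx directly; the node names are illustrative only.

    import networkx as nx

    graph = nx.DiGraph()
    graph.add_edges_from([
        ("setup", "process"),    # DAG edge into the cycle
        ("process", "check"),    # cycle edge
        ("check", "process"),    # cycle edge closing the loop
        ("check", "report"),     # DAG edge out of the cycle
    ])
    cycle_nodes = {"process", "check"}

    # Entry nodes have a predecessor outside the cycle; exit nodes have a successor outside it.
    entry_nodes = {n for n in cycle_nodes
                   if any(p not in cycle_nodes for p in graph.predecessors(n))}
    exit_nodes = {n for n in cycle_nodes
                  if any(s not in cycle_nodes for s in graph.successors(n))}

    print(entry_nodes)  # {'process'}
    print(exit_nodes)   # {'check'}
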
@@ -320,7 +330,7 @@ class CyclicWorkflowExecutor:
         )
 
         # Build execution stages
-        plan.build_stages(topo_order, dag_graph)
+        plan.build_stages(topo_order, dag_graph, workflow)
 
         return plan
 
@@ -516,12 +526,11 @@ class CyclicWorkflowExecutor:
                     f"Cycle {cycle_id} iteration now at {cycle_state.iteration} (after update)"
                 )
 
-                # Check max iterations (built into monitor.record_iteration)
-                if cycle_state.iteration >= cycle_config.get(
-                    "max_iterations", float("inf")
-                ):
+                # Check max iterations - loop_count represents actual iterations executed
+                max_iterations = cycle_config.get("max_iterations", float("inf"))
+                if loop_count >= max_iterations:
                     logger.info(
-                        f"Cycle {cycle_id} reached max iterations: {cycle_state.iteration}"
+                        f"Cycle {cycle_id} reached max iterations: {loop_count}/{max_iterations}"
                     )
                     should_terminate = True
 
@@ -629,6 +638,10 @@ class CyclicWorkflowExecutor:
         # Gather inputs from connections
         inputs = {}
 
+        logger.debug(
+            f"_execute_node {node_id}: state.node_outputs keys = {list(state.node_outputs.keys())}"
+        )
+
         # Check if we're in a cycle and this is not the first iteration
         in_cycle = cycle_state is not None
         is_cycle_iteration = in_cycle and cycle_state.iteration > 0
@@ -643,9 +656,6 @@ class CyclicWorkflowExecutor:
             if is_cycle_edge and is_cycle_iteration and previous_iteration_results:
                 # For cycle edges after first iteration, use previous iteration results
                 pred_output = previous_iteration_results.get(pred)
-                logger.debug(
-                    f"Using previous iteration result for {pred} -> {node_id}: {type(pred_output)} keys={list(pred_output.keys()) if isinstance(pred_output, dict) else 'not dict'}"
-                )
             elif pred in state.node_outputs:
                 # For non-cycle edges or first iteration, use normal state
                 pred_output = state.node_outputs[pred]
@@ -658,10 +668,9 @@ class CyclicWorkflowExecutor:
 
             # Apply mapping
             mapping = edge_data.get("mapping", {})
-            if is_cycle_edge and is_cycle_iteration:
-                logger.debug(
-                    f"Applying cycle mapping: {mapping} from {pred} to {node_id}"
-                )
+            logger.debug(
+                f"Edge {pred} -> {node_id}: mapping = {mapping}, pred_output keys = {list(pred_output.keys()) if isinstance(pred_output, dict) else type(pred_output)}"
+            )
             for src_key, dst_key in mapping.items():
                 # Handle nested output access
                 if "." in src_key:
@@ -677,10 +686,9 @@ class CyclicWorkflowExecutor:
                         inputs[dst_key] = value
                 elif isinstance(pred_output, dict) and src_key in pred_output:
                     inputs[dst_key] = pred_output[src_key]
-                    if is_cycle_edge and is_cycle_iteration:
-                        logger.debug(
-                            f"Mapped {src_key}={pred_output[src_key]} to {dst_key}"
-                        )
+                    logger.debug(
+                        f"Mapped {src_key} -> {dst_key}: {type(pred_output[src_key])}, length={len(pred_output[src_key]) if hasattr(pred_output[src_key], '__len__') else 'N/A'}"
+                    )
                 elif src_key == "output":
                     # Default output mapping
                     inputs[dst_key] = pred_output
@@ -706,20 +714,15 @@ class CyclicWorkflowExecutor:
         # Recursively filter None values from context to avoid security validation errors
         context = self._filter_none_values(context)
 
-        # Debug inputs before merging
-        if cycle_state and cycle_state.iteration > 0:
-            logger.debug(f"Inputs gathered from connections: {inputs}")
-
         # Merge node config with inputs
         # Order: config < initial_parameters < connection inputs
         merged_inputs = {**node.config}
 
-        # Add initial parameters if available and node hasn't been executed yet
+        # Add initial parameters if available
+        # For cycle nodes, initial parameters should be available throughout all iterations
         if hasattr(state, "initial_parameters") and node_id in state.initial_parameters:
-            if node_id not in state.node_outputs or (
-                cycle_state and cycle_state.iteration == 0
-            ):
-                # Use initial parameters on first execution
+            if node_id not in state.node_outputs or cycle_state is not None:
+                # Use initial parameters on first execution or for any cycle iteration
                 merged_inputs.update(state.initial_parameters[node_id])
 
         # Connection inputs override everything
@@ -728,6 +731,10 @@ class CyclicWorkflowExecutor:
         # Filter out None values to avoid security validation errors
        merged_inputs = {k: v for k, v in merged_inputs.items() if v is not None}
 
+        logger.debug(
+            f"Final merged_inputs for {node_id}: keys={list(merged_inputs.keys())}"
+        )
+
         # Create task for node execution if task manager available
         task = None
         if task_manager and state.run_id:
@@ -769,11 +776,6 @@ class CyclicWorkflowExecutor:
         logger.debug(
             f"Executing node: {node_id} (iteration: {cycle_state.iteration if cycle_state else 'N/A'})"
         )
-        logger.debug(f"Node inputs: {list(merged_inputs.keys())}")
-        if cycle_state:
-            logger.debug(
-                f"Input values - value: {merged_inputs.get('value')}, counter: {merged_inputs.get('counter')}"
-            )
 
         try:
             with collector.collect(node_id=node_id) as metrics_context:
@@ -858,12 +860,15 @@ class ExecutionPlan:
             edges=edges,
         )
 
-    def build_stages(self, topo_order: list[str], dag_graph: nx.DiGraph) -> None:
+    def build_stages(
+        self, topo_order: list[str], dag_graph: nx.DiGraph, workflow: Workflow
+    ) -> None:
         """Build execution stages.
 
         Args:
             topo_order: Topological order of DAG nodes
             dag_graph: DAG portion of the graph
+            workflow: The full workflow for checking dependencies
         """
         # Track which nodes have been scheduled
         scheduled = set()
@@ -894,18 +899,83 @@ class ExecutionPlan:
                 f"in_cycle_id value: {in_cycle_id}, found_cycle_group: {found_cycle_group is not None}"
             )
             if found_cycle_group is not None:
-                logger.debug(f"Scheduling cycle group {in_cycle_id} for node {node_id}")
-                # Schedule entire cycle group
-                self.stages.append(
-                    ExecutionStage(is_cycle=True, cycle_group=found_cycle_group)
+                # Check if all DAG dependencies of cycle entry nodes are satisfied
+                can_schedule_cycle = True
+                logger.debug(
+                    f"Checking dependencies for cycle {in_cycle_id}, entry_nodes: {found_cycle_group.entry_nodes}"
                 )
-                scheduled.update(found_cycle_group.nodes)
+                for entry_node in found_cycle_group.entry_nodes:
+                    # Check all predecessors of this entry node in the FULL workflow graph
+                    # (dag_graph only contains DAG edges, not connections to cycle nodes)
+                    preds = list(workflow.graph.predecessors(entry_node))
+                    logger.debug(
+                        f"Entry node {entry_node} has predecessors: {preds}, scheduled: {scheduled}"
+                    )
+                    for pred in preds:
+                        # Skip self-cycles and nodes within the same cycle group
+                        logger.debug(
+                            f"Checking pred {pred}: in scheduled? {pred in scheduled}, in cycle? {pred in found_cycle_group.nodes}"
+                        )
+                        if (
+                            pred not in scheduled
+                            and pred not in found_cycle_group.nodes
+                        ):
+                            # This predecessor hasn't been scheduled yet
+                            logger.debug(
+                                f"Cannot schedule cycle {in_cycle_id} yet - entry node {entry_node} "
+                                f"depends on unscheduled node {pred}"
+                            )
+                            can_schedule_cycle = False
+                            break
+                    if not can_schedule_cycle:
+                        break
+
+                if can_schedule_cycle:
+                    logger.debug(
+                        f"Scheduling cycle group {in_cycle_id} for node {node_id}"
+                    )
+                    # Schedule entire cycle group
+                    self.stages.append(
+                        ExecutionStage(is_cycle=True, cycle_group=found_cycle_group)
+                    )
+                    scheduled.update(found_cycle_group.nodes)
+                else:
+                    # Skip this node for now, it will be scheduled when its dependencies are met
+                    logger.debug(
+                        f"Deferring cycle group {in_cycle_id} - dependencies not met"
+                    )
+                    continue
             else:
                 logger.debug(f"Scheduling DAG node {node_id}")
                 # Schedule DAG node
                 self.stages.append(ExecutionStage(is_cycle=False, nodes=[node_id]))
                 scheduled.add(node_id)
 
+        # After processing all nodes in topological order, check for any unscheduled cycle groups
+        for cycle_id, cycle_group in self.cycle_groups.items():
+            if not any(node in scheduled for node in cycle_group.nodes):
+                # This cycle group hasn't been scheduled yet
+                # Check if all dependencies are now satisfied
+                can_schedule = True
+                for entry_node in cycle_group.entry_nodes:
+                    for pred in workflow.graph.predecessors(entry_node):
+                        if pred not in scheduled and pred not in cycle_group.nodes:
+                            logger.warning(
+                                f"Cycle group {cycle_id} has unsatisfied dependency: "
+                                f"{entry_node} depends on {pred}"
+                            )
+                            can_schedule = False
+                            break
+                    if not can_schedule:
+                        break
+
+                if can_schedule:
+                    logger.debug(f"Scheduling deferred cycle group {cycle_id}")
+                    self.stages.append(
+                        ExecutionStage(is_cycle=True, cycle_group=cycle_group)
+                    )
+                    scheduled.update(cycle_group.nodes)
+
 
 class ExecutionStage:
     """Single stage in execution plan."""
kailash/workflow/graph.py CHANGED
@@ -657,9 +657,14 @@ class Workflow:
         cycle_groups = {}
         _, cycle_edges = self.separate_dag_and_cycle_edges()
 
-        # First pass: group by cycle_id as before
+        # First pass: group by cycle_id, using edge-based IDs when not specified
         for source, target, data in cycle_edges:
-            cycle_id = data.get("cycle_id", "default")
+            # Generate unique cycle_id based on edge if not provided
+            cycle_id = data.get("cycle_id")
+            if cycle_id is None:
+                # Create unique ID based on the cycle edge
+                cycle_id = f"cycle_{source}_{target}"
+
             if cycle_id not in cycle_groups:
                 cycle_groups[cycle_id] = []
             cycle_groups[cycle_id].append((source, target, data))
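Note: with this change, unlabeled cycle edges no longer collapse into a single "default" group; each edge gets its own ID. A quick illustration with made-up node names:

    for source, target, data in [("a", "b", {}), ("c", "d", {})]:
        cycle_id = data.get("cycle_id")
        if cycle_id is None:
            cycle_id = f"cycle_{source}_{target}"
        print(cycle_id)   # prints "cycle_a_b", then "cycle_c_d" (previously both were "default")
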
kailash/workflow/resilience.py CHANGED
@@ -6,7 +6,7 @@ including retry policies, fallback nodes, and circuit breakers.
 
 import asyncio
 import time
-from dataclasses import dataclass, field
+from dataclasses import asdict, dataclass, field
 from datetime import datetime
 from enum import Enum
 from typing import Any, Callable, Dict, List, Optional, Union
@@ -50,6 +50,16 @@ class RetryPolicy:
 
         return min(delay, self.max_delay)
 
+    def to_dict(self) -> dict:
+        """Convert RetryPolicy to dictionary."""
+        return {
+            "max_retries": self.max_retries,
+            "strategy": self.strategy.value,
+            "base_delay": self.base_delay,
+            "max_delay": self.max_delay,
+            "retry_on": [cls.__name__ for cls in self.retry_on],
+        }
+
 
 @dataclass
 class CircuitBreakerConfig:
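Note: to_dict makes the retry policy serializable (only the strategy needs unwrapping via .value, and exception classes are stored by name). A minimal standalone sketch follows; the field defaults and the RetryStrategy enum are assumptions based solely on the names visible in this hunk.

    from dataclasses import dataclass, field
    from enum import Enum

    class RetryStrategy(Enum):               # assumed enum; the diff only implies .value exists
        EXPONENTIAL = "exponential"

    @dataclass
    class RetryPolicy:
        max_retries: int = 3                 # defaults here are assumptions, not the SDK's values
        strategy: RetryStrategy = RetryStrategy.EXPONENTIAL
        base_delay: float = 1.0
        max_delay: float = 60.0
        retry_on: list = field(default_factory=lambda: [Exception])

        def to_dict(self) -> dict:
            return {
                "max_retries": self.max_retries,
                "strategy": self.strategy.value,
                "base_delay": self.base_delay,
                "max_delay": self.max_delay,
                "retry_on": [cls.__name__ for cls in self.retry_on],
            }

    assert RetryPolicy(max_retries=5).to_dict()["strategy"] == "exponential"
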
{kailash-0.4.2.dist-info → kailash-0.6.0.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: kailash
-Version: 0.4.2
+Version: 0.6.0
 Summary: Python SDK for the Kailash container-node architecture
 Home-page: https://github.com/integrum/kailash-python-sdk
 Author: Integrum
@@ -61,6 +61,8 @@ Requires-Dist: asyncpg>=0.30.0
 Requires-Dist: aiomysql>=0.2.0
 Requires-Dist: twilio>=9.6.3
 Requires-Dist: qrcode>=8.2
+Requires-Dist: aiofiles>=24.1.0
+Requires-Dist: bcrypt>=4.3.0
 Provides-Extra: dev
 Requires-Dist: pytest>=7.0; extra == "dev"
 Requires-Dist: pytest-cov>=3.0; extra == "dev"
@@ -80,8 +82,9 @@ Dynamic: requires-python
   <a href="https://pepy.tech/project/kailash"><img src="https://static.pepy.tech/badge/kailash" alt="Downloads"></a>
   <img src="https://img.shields.io/badge/license-MIT-green.svg" alt="MIT License">
   <img src="https://img.shields.io/badge/code%20style-black-000000.svg" alt="Code style: black">
-  <img src="https://img.shields.io/badge/tests-127%20organized-brightgreen.svg" alt="Tests: 127 organized">
-  <img src="https://img.shields.io/badge/test%20structure-reorganized-blue.svg" alt="Test structure: reorganized">
+  <img src="https://img.shields.io/badge/tests-production%20quality-brightgreen.svg" alt="Tests: Production Quality">
+  <img src="https://img.shields.io/badge/docker-integrated-blue.svg" alt="Docker: Integrated">
+  <img src="https://img.shields.io/badge/AI-ollama%20validated-purple.svg" alt="AI: Ollama Validated">
 </p>
 
 <p align="center">
@@ -120,12 +123,14 @@ Dynamic: requires-python
 - 🏭 **Session 067 Enhancements**: Business workflow templates, data lineage tracking, automatic credential rotation
 - 🔄 **Zero-Downtime Operations**: Automatic credential rotation with enterprise notifications and audit trails
 - 🌉 **Enterprise Middleware (v0.4.0)**: Production-ready middleware architecture with real-time agent-frontend communication, dynamic workflows, and AI chat integration
+- ⚡ **Performance Revolution (v0.5.0)**: 10-100x faster parameter resolution, clear async/sync separation, automatic resource management
+- 🧪 **Production-Quality Testing (v0.5.0)**: Comprehensive testing infrastructure with Docker integration, AI workflows, and real-world business scenarios
 
 ## 🏗️ Project Architecture
 
 The Kailash project is organized into three distinct layers:
 
-### Core Architecture (v0.4.0)
+### Core Architecture (v0.5.0)
 ```
 ┌─────────────────┐ ┌──────────────────┐ ┌─────────────────┐
 │ Frontend │ │ Middleware │ │ Kailash Core │
@@ -150,9 +155,10 @@ kailash_python_sdk/
 1. **SDK Layer** (`src/kailash/`) - The core framework providing:
    - Nodes: Reusable computational units (100+ built-in)
    - Workflows: DAG-based orchestration with cyclic support
-   - Runtime: Unified execution engine (async + enterprise)
-   - Middleware: Enterprise communication layer (NEW in v0.4.0)
+   - Runtime: Unified execution engine with optimized async/sync separation (v0.5.0)
+   - Middleware: Enterprise communication layer (v0.4.0)
    - Security: RBAC/ABAC access control with audit logging
+   - Performance: LRU parameter caching, automatic resource pooling (NEW in v0.5.0)
 
 2. **Application Layer** (`apps/`) - Complete applications including:
    - User Management System (Django++ capabilities)
@@ -175,7 +181,6 @@ pip install kailash[user-management]
 # Everything
 pip install kailash[all]
 ```
->>>>>>> origin/main
 
 ## 🎯 Who Is This For?