kailash 0.3.0__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/__init__.py +1 -1
- kailash/access_control.py +40 -39
- kailash/api/auth.py +26 -32
- kailash/api/custom_nodes.py +29 -29
- kailash/api/custom_nodes_secure.py +35 -35
- kailash/api/database.py +17 -17
- kailash/api/gateway.py +19 -19
- kailash/api/mcp_integration.py +24 -23
- kailash/api/studio.py +45 -45
- kailash/api/workflow_api.py +8 -8
- kailash/cli/commands.py +5 -8
- kailash/manifest.py +42 -42
- kailash/mcp/__init__.py +1 -1
- kailash/mcp/ai_registry_server.py +20 -20
- kailash/mcp/client.py +9 -11
- kailash/mcp/client_new.py +10 -10
- kailash/mcp/server.py +1 -2
- kailash/mcp/server_enhanced.py +449 -0
- kailash/mcp/servers/ai_registry.py +6 -6
- kailash/mcp/utils/__init__.py +31 -0
- kailash/mcp/utils/cache.py +267 -0
- kailash/mcp/utils/config.py +263 -0
- kailash/mcp/utils/formatters.py +293 -0
- kailash/mcp/utils/metrics.py +418 -0
- kailash/nodes/ai/agents.py +9 -9
- kailash/nodes/ai/ai_providers.py +33 -34
- kailash/nodes/ai/embedding_generator.py +31 -32
- kailash/nodes/ai/intelligent_agent_orchestrator.py +62 -66
- kailash/nodes/ai/iterative_llm_agent.py +48 -48
- kailash/nodes/ai/llm_agent.py +32 -33
- kailash/nodes/ai/models.py +13 -13
- kailash/nodes/ai/self_organizing.py +44 -44
- kailash/nodes/api/auth.py +11 -11
- kailash/nodes/api/graphql.py +13 -13
- kailash/nodes/api/http.py +19 -19
- kailash/nodes/api/monitoring.py +20 -20
- kailash/nodes/api/rate_limiting.py +9 -13
- kailash/nodes/api/rest.py +29 -29
- kailash/nodes/api/security.py +44 -47
- kailash/nodes/base.py +21 -23
- kailash/nodes/base_async.py +7 -7
- kailash/nodes/base_cycle_aware.py +12 -12
- kailash/nodes/base_with_acl.py +5 -5
- kailash/nodes/code/python.py +56 -55
- kailash/nodes/data/directory.py +6 -6
- kailash/nodes/data/event_generation.py +10 -10
- kailash/nodes/data/file_discovery.py +28 -31
- kailash/nodes/data/readers.py +8 -8
- kailash/nodes/data/retrieval.py +10 -10
- kailash/nodes/data/sharepoint_graph.py +17 -17
- kailash/nodes/data/sources.py +5 -5
- kailash/nodes/data/sql.py +13 -13
- kailash/nodes/data/streaming.py +25 -25
- kailash/nodes/data/vector_db.py +22 -22
- kailash/nodes/data/writers.py +7 -7
- kailash/nodes/logic/async_operations.py +17 -17
- kailash/nodes/logic/convergence.py +11 -11
- kailash/nodes/logic/loop.py +4 -4
- kailash/nodes/logic/operations.py +11 -11
- kailash/nodes/logic/workflow.py +8 -9
- kailash/nodes/mixins/mcp.py +17 -17
- kailash/nodes/mixins.py +8 -10
- kailash/nodes/transform/chunkers.py +3 -3
- kailash/nodes/transform/formatters.py +7 -7
- kailash/nodes/transform/processors.py +10 -10
- kailash/runtime/access_controlled.py +18 -18
- kailash/runtime/async_local.py +17 -19
- kailash/runtime/docker.py +20 -22
- kailash/runtime/local.py +16 -16
- kailash/runtime/parallel.py +23 -23
- kailash/runtime/parallel_cyclic.py +27 -27
- kailash/runtime/runner.py +6 -6
- kailash/runtime/testing.py +20 -20
- kailash/sdk_exceptions.py +0 -58
- kailash/security.py +14 -26
- kailash/tracking/manager.py +38 -38
- kailash/tracking/metrics_collector.py +15 -14
- kailash/tracking/models.py +53 -53
- kailash/tracking/storage/base.py +7 -17
- kailash/tracking/storage/database.py +22 -23
- kailash/tracking/storage/filesystem.py +38 -40
- kailash/utils/export.py +21 -21
- kailash/utils/templates.py +2 -3
- kailash/visualization/api.py +30 -34
- kailash/visualization/dashboard.py +17 -17
- kailash/visualization/performance.py +16 -16
- kailash/visualization/reports.py +25 -27
- kailash/workflow/builder.py +8 -8
- kailash/workflow/convergence.py +13 -12
- kailash/workflow/cycle_analyzer.py +30 -32
- kailash/workflow/cycle_builder.py +12 -12
- kailash/workflow/cycle_config.py +16 -15
- kailash/workflow/cycle_debugger.py +40 -40
- kailash/workflow/cycle_exceptions.py +29 -29
- kailash/workflow/cycle_profiler.py +21 -21
- kailash/workflow/cycle_state.py +20 -22
- kailash/workflow/cyclic_runner.py +44 -44
- kailash/workflow/graph.py +40 -40
- kailash/workflow/mermaid_visualizer.py +9 -11
- kailash/workflow/migration.py +22 -22
- kailash/workflow/mock_registry.py +6 -6
- kailash/workflow/runner.py +9 -9
- kailash/workflow/safety.py +12 -13
- kailash/workflow/state.py +8 -11
- kailash/workflow/templates.py +19 -19
- kailash/workflow/validation.py +14 -14
- kailash/workflow/visualization.py +22 -22
- {kailash-0.3.0.dist-info → kailash-0.3.1.dist-info}/METADATA +53 -5
- kailash-0.3.1.dist-info/RECORD +136 -0
- kailash-0.3.0.dist-info/RECORD +0 -130
- {kailash-0.3.0.dist-info → kailash-0.3.1.dist-info}/WHEEL +0 -0
- {kailash-0.3.0.dist-info → kailash-0.3.1.dist-info}/entry_points.txt +0 -0
- {kailash-0.3.0.dist-info → kailash-0.3.1.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.3.0.dist-info → kailash-0.3.1.dist-info}/top_level.txt +0 -0
kailash/workflow/state.py
CHANGED
@@ -6,7 +6,7 @@ making it easier to handle state transitions in a predictable manner.
 
 import logging
 from copy import deepcopy
-from typing import Any, Generic,
+from typing import Any, Generic, TypeVar
 
 from pydantic import BaseModel
 
@@ -24,7 +24,7 @@ class StateManager:
     """
 
     @staticmethod
-    def update_in(state_obj: BaseModel, path:
+    def update_in(state_obj: BaseModel, path: list[str], value: Any) -> BaseModel:
        """Update a nested property in the state and return a new state object.
 
        Args:
@@ -61,10 +61,7 @@ class StateManager:
            if isinstance(next_obj, BaseModel):
                next_obj = next_obj.model_copy(deep=True)
                setattr(current, key, next_obj)
-           elif isinstance(next_obj, dict):
-               next_obj = deepcopy(next_obj)
-               setattr(current, key, next_obj)
-           elif isinstance(next_obj, list):
+           elif isinstance(next_obj, dict) or isinstance(next_obj, list):
                next_obj = deepcopy(next_obj)
                setattr(current, key, next_obj)
 
@@ -80,7 +77,7 @@ class StateManager:
 
     @staticmethod
     def batch_update(
-        state_obj: BaseModel, updates:
+        state_obj: BaseModel, updates: list[tuple[list[str], Any]]
     ) -> BaseModel:
        """Apply multiple updates to the state atomically.
 
@@ -108,7 +105,7 @@ class StateManager:
        return new_state
 
     @staticmethod
-    def get_in(state_obj: BaseModel, path:
+    def get_in(state_obj: BaseModel, path: list[str]) -> Any:
        """Get the value at a nested path.
 
        Args:
@@ -175,7 +172,7 @@ class WorkflowStateWrapper(Generic[StateT]):
        """
        self._state = state
 
-    def update_in(self, path:
+    def update_in(self, path: list[str], value: Any) -> "WorkflowStateWrapper[StateT]":
        """Update state at path and return new wrapper.
 
        Args:
@@ -189,7 +186,7 @@ class WorkflowStateWrapper(Generic[StateT]):
        return WorkflowStateWrapper(new_state)
 
     def batch_update(
-        self, updates:
+        self, updates: list[tuple[list[str], Any]]
     ) -> "WorkflowStateWrapper[StateT]":
        """Apply multiple updates to the state atomically.
 
@@ -202,7 +199,7 @@ class WorkflowStateWrapper(Generic[StateT]):
        new_state = StateManager.batch_update(self._state, updates)
        return WorkflowStateWrapper(new_state)
 
-    def get_in(self, path:
+    def get_in(self, path: list[str]) -> Any:
        """Get the value at a nested path.
 
        Args:
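The state.py changes pin down the previously truncated signatures: paths are `list[str]`, batch updates are `list[tuple[list[str], Any]]`, and the dict/list branches collapse into one deepcopy branch. A minimal usage sketch of that API follows; the `PipelineState` and `Counters` models are invented here for illustration, not part of the SDK:

```python
from pydantic import BaseModel

from kailash.workflow.state import StateManager, WorkflowStateWrapper


class Counters(BaseModel):  # hypothetical model for the example
    retries: int = 0


class PipelineState(BaseModel):  # hypothetical model for the example
    status: str = "pending"
    counters: Counters = Counters()


state = PipelineState()

# update_in takes an explicit list[str] path and returns a new state object
new_state = StateManager.update_in(state, ["counters", "retries"], 3)
assert state.counters.retries == 0  # the original state is not mutated
assert StateManager.get_in(new_state, ["counters", "retries"]) == 3

# batch_update applies several (path, value) pairs atomically
wrapper = WorkflowStateWrapper(new_state)
wrapper = wrapper.batch_update(
    [(["status"], "running"), (["counters", "retries"], 0)]
)
```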
kailash/workflow/templates.py
CHANGED
@@ -113,7 +113,7 @@ See Also:
 import math
 import time
 from dataclasses import dataclass
-from typing import Any
+from typing import Any
 
 from ..nodes.code import PythonCodeNode
 from . import Workflow
@@ -125,11 +125,11 @@ class CycleTemplate:
 
     name: str
     description: str
-    nodes:
-    convergence_condition:
+    nodes: list[str]
+    convergence_condition: str | None = None
     max_iterations: int = 100
-    timeout:
-    parameters:
+    timeout: float | None = None
+    parameters: dict[str, Any] | None = None
 
 
 class CycleTemplates:
@@ -142,7 +142,7 @@ class CycleTemplates:
        evaluator_node: str,
        convergence: str = "quality > 0.9",
        max_iterations: int = 50,
-       cycle_id:
+       cycle_id: str | None = None,
     ) -> str:
        """
        Add an optimization cycle pattern to workflow.
@@ -195,7 +195,7 @@ class CycleTemplates:
        max_retries: int = 3,
        backoff_strategy: str = "exponential",
        success_condition: str = "success == True",
-       cycle_id:
+       cycle_id: str | None = None,
     ) -> str:
        """
        Add a retry cycle pattern to workflow.
@@ -300,7 +300,7 @@ result = {{
        validator_node: str,
        quality_threshold: float = 0.95,
        max_iterations: int = 10,
-       cycle_id:
+       cycle_id: str | None = None,
     ) -> str:
        """
        Add a data quality improvement cycle to workflow.
@@ -353,7 +353,7 @@ result = {{
        target_accuracy: float = 0.95,
        max_epochs: int = 100,
        early_stopping_patience: int = 10,
-       cycle_id:
+       cycle_id: str | None = None,
     ) -> str:
        """
        Add a machine learning training cycle to workflow.
@@ -462,7 +462,7 @@ result = {{
        processor_node: str,
        tolerance: float = 0.001,
        max_iterations: int = 1000,
-       cycle_id:
+       cycle_id: str | None = None,
     ) -> str:
        """
        Add a numerical convergence cycle to workflow.
@@ -560,8 +560,8 @@ previous_value = current_value
        workflow: Workflow,
        processor_node: str,
        batch_size: int = 100,
-       total_items:
-       cycle_id:
+       total_items: int | None = None,
+       cycle_id: str | None = None,
     ) -> str:
        """
        Add a batch processing cycle to workflow.
@@ -671,7 +671,7 @@ def add_optimization_cycle(
    evaluator_node: str,
    convergence: str = "quality > 0.9",
    max_iterations: int = 50,
-   cycle_id:
+   cycle_id: str | None = None,
 ) -> str:
    """Add an optimization cycle pattern to this workflow."""
    return CycleTemplates.optimization_cycle(
@@ -685,7 +685,7 @@ def add_retry_cycle(
    max_retries: int = 3,
    backoff_strategy: str = "exponential",
    success_condition: str = "success == True",
-   cycle_id:
+   cycle_id: str | None = None,
 ) -> str:
    """Add a retry cycle pattern to this workflow."""
    return CycleTemplates.retry_cycle(
@@ -699,7 +699,7 @@ def add_data_quality_cycle(
    validator_node: str,
    quality_threshold: float = 0.95,
    max_iterations: int = 10,
-   cycle_id:
+   cycle_id: str | None = None,
 ) -> str:
    """Add a data quality improvement cycle to this workflow."""
    return CycleTemplates.data_quality_cycle(
@@ -714,7 +714,7 @@ def add_learning_cycle(
    target_accuracy: float = 0.95,
    max_epochs: int = 100,
    early_stopping_patience: int = 10,
-   cycle_id:
+   cycle_id: str | None = None,
 ) -> str:
    """Add a machine learning training cycle to this workflow."""
    return CycleTemplates.learning_cycle(
@@ -733,7 +733,7 @@ def add_convergence_cycle(
    processor_node: str,
    tolerance: float = 0.001,
    max_iterations: int = 1000,
-   cycle_id:
+   cycle_id: str | None = None,
 ) -> str:
    """Add a numerical convergence cycle to this workflow."""
    return CycleTemplates.convergence_cycle(
@@ -745,8 +745,8 @@ def add_batch_processing_cycle(
    self,
    processor_node: str,
    batch_size: int = 100,
-   total_items:
-   cycle_id:
+   total_items: int | None = None,
+   cycle_id: str | None = None,
 ) -> str:
    """Add a batch processing cycle to this workflow."""
    return CycleTemplates.batch_processing_cycle(
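As the signatures above show, every template now takes an optional `cycle_id: str | None = None` and returns the cycle id as a `str`. A hedged sketch of calling one template — the leading parameters are truncated in this diff, so `optimizer_node` is an assumed parameter name, and the node names are placeholders:

```python
from kailash.workflow import Workflow
from kailash.workflow.templates import CycleTemplates

workflow = Workflow("opt-demo", "Optimization Demo")
# ... "optimizer" and "evaluator" nodes added here ...

# Omitting cycle_id (None) lets the template derive one; the id is returned
cycle_id = CycleTemplates.optimization_cycle(
    workflow,
    optimizer_node="optimizer",  # assumed parameter name (truncated in diff)
    evaluator_node="evaluator",
    convergence="quality > 0.9",
    max_iterations=50,
)
```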
kailash/workflow/validation.py
CHANGED
@@ -116,7 +116,7 @@ See Also:
 import re
 from dataclasses import dataclass
 from enum import Enum
-from typing import Any
+from typing import Any
 
 from . import Workflow
 
@@ -137,10 +137,10 @@ class ValidationIssue:
     category: str
     code: str
     message: str
-    node_id:
-    cycle_id:
-    suggestion:
-    documentation_link:
+    node_id: str | None = None
+    cycle_id: str | None = None
+    suggestion: str | None = None
+    documentation_link: str | None = None
 
 
 class CycleLinter:
@@ -160,9 +160,9 @@ class CycleLinter:
        """
        self.workflow = workflow
        self.graph = workflow.graph
-       self.issues:
+       self.issues: list[ValidationIssue] = []
 
-    def check_all(self) ->
+    def check_all(self) -> list[ValidationIssue]:
        """
        Run all validation checks on the workflow.
 
@@ -536,7 +536,7 @@ class CycleLinter:
            )
        )
 
-    def _get_cycle_id(self, cycle_nodes:
+    def _get_cycle_id(self, cycle_nodes: list[str]) -> str:
        """Generate a cycle identifier from cycle nodes."""
        return f"cycle_{'-'.join(sorted(cycle_nodes))}"
 
@@ -658,23 +658,23 @@ class CycleLinter:
        node_id_lower = node_id.lower()
        return any(keyword in node_id_lower for keyword in file_keywords)
 
-    def get_issues_by_severity(self, severity: IssueSeverity) ->
+    def get_issues_by_severity(self, severity: IssueSeverity) -> list[ValidationIssue]:
        """Get all issues of a specific severity level."""
        return [issue for issue in self.issues if issue.severity == severity]
 
-    def get_issues_by_category(self, category: str) ->
+    def get_issues_by_category(self, category: str) -> list[ValidationIssue]:
        """Get all issues of a specific category."""
        return [issue for issue in self.issues if issue.category == category]
 
-    def get_issues_for_cycle(self, cycle_id: str) ->
+    def get_issues_for_cycle(self, cycle_id: str) -> list[ValidationIssue]:
        """Get all issues for a specific cycle."""
        return [issue for issue in self.issues if issue.cycle_id == cycle_id]
 
-    def get_issues_for_node(self, node_id: str) ->
+    def get_issues_for_node(self, node_id: str) -> list[ValidationIssue]:
        """Get all issues for a specific node."""
        return [issue for issue in self.issues if issue.node_id == node_id]
 
-    def generate_report(self) ->
+    def generate_report(self) -> dict[str, Any]:
        """
        Generate comprehensive validation report.
 
@@ -724,7 +724,7 @@ class CycleLinter:
            "recommendations": self._generate_recommendations(),
        }
 
-    def _generate_recommendations(self) ->
+    def _generate_recommendations(self) -> list[str]:
        """Generate high-level recommendations based on found issues."""
        recommendations = []
 
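A short sketch of the CycleLinter API as reconstructed above; `workflow` is an already-built `Workflow`, and `IssueSeverity.ERROR` is an assumed member of the enum (only the enum's name appears in this diff):

```python
from kailash.workflow.validation import CycleLinter, IssueSeverity

linter = CycleLinter(workflow)
issues = linter.check_all()  # list[ValidationIssue]

# Filter by severity, category, cycle, or node
errors = linter.get_issues_by_severity(IssueSeverity.ERROR)  # assumed member
for issue in errors:
    print(f"[{issue.code}] {issue.message} (node={issue.node_id})")

report = linter.generate_report()  # dict[str, Any]
print(report["recommendations"])   # list[str] from _generate_recommendations()
```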
kailash/workflow/visualization.py
CHANGED
@@ -4,7 +4,7 @@ import matplotlib
 
 matplotlib.use("Agg")  # Use non-interactive backend
 from pathlib import Path
-from typing import Any
+from typing import Any
 
 import matplotlib.pyplot as plt
 import networkx as nx
@@ -19,8 +19,8 @@ class WorkflowVisualizer:
     def __init__(
        self,
        workflow: Workflow,
-       node_colors:
-       edge_colors:
+       node_colors: dict[str, str] | None = None,
+       edge_colors: dict[str, str] | None = None,
        layout: str = "hierarchical",
     ):
        """Initialize visualizer.
@@ -36,7 +36,7 @@ class WorkflowVisualizer:
        self.edge_colors = edge_colors or self._default_edge_colors()
        self.layout = layout
 
-    def _default_node_colors(self) ->
+    def _default_node_colors(self) -> dict[str, str]:
        """Get default node color map."""
        return {
            "data": "lightblue",
@@ -46,7 +46,7 @@ class WorkflowVisualizer:
            "default": "lightgray",
        }
 
-    def _default_edge_colors(self) ->
+    def _default_edge_colors(self) -> dict[str, str]:
        """Get default edge color map."""
        return {"default": "gray", "error": "red", "conditional": "orange"}
 
@@ -66,7 +66,7 @@ class WorkflowVisualizer:
            return self.node_colors["ai"]
        return self.node_colors["default"]
 
-    def _get_node_colors(self) ->
+    def _get_node_colors(self) -> list[str]:
        """Get colors for all nodes in workflow."""
        colors = []
        for node_id in self.workflow.graph.nodes():
@@ -75,7 +75,7 @@ class WorkflowVisualizer:
            colors.append(self._get_node_color(node_type))
        return colors
 
-    def _get_node_labels(self) ->
+    def _get_node_labels(self) -> dict[str, str]:
        """Get labels for nodes in workflow."""
        labels = {}
        for node_id in self.workflow.graph.nodes():
@@ -93,7 +93,7 @@ class WorkflowVisualizer:
            labels[node_id] = node_id
        return labels
 
-    def _get_edge_labels(self) ->
+    def _get_edge_labels(self) -> dict[tuple[str, str], str]:
        """Get labels for edges in workflow."""
        edge_labels = {}
 
@@ -119,7 +119,7 @@ class WorkflowVisualizer:
 
        return edge_labels
 
-    def _calculate_layout(self) ->
+    def _calculate_layout(self) -> dict[str, tuple[float, float]]:
        """Calculate node positions for visualization."""
        # Try to use stored positions first
        pos = {}
@@ -148,7 +148,7 @@ class WorkflowVisualizer:
 
        return pos
 
-    def _create_layers(self) ->
+    def _create_layers(self) -> dict[int, list]:
        """Create layers of nodes for hierarchical layout."""
        layers = {}
        remaining = set(self.workflow.graph.nodes())
@@ -173,8 +173,8 @@ class WorkflowVisualizer:
        return layers
 
     def _hierarchical_layout(
-       self, layers:
-    ) ->
+       self, layers: dict[int, list]
+    ) -> dict[str, tuple[float, float]]:
        """Create hierarchical layout from layers."""
        pos = {}
        layer_height = 2.0
@@ -196,8 +196,8 @@ class WorkflowVisualizer:
 
     def _draw_graph(
        self,
-       pos:
-       node_colors:
+       pos: dict[str, tuple[float, float]],
+       node_colors: list[str],
        show_labels: bool,
        show_connections: bool,
     ) -> None:
@@ -235,9 +235,9 @@ class WorkflowVisualizer:
 
     def visualize(
        self,
-       output_path:
-       figsize:
-       title:
+       output_path: str | None = None,
+       figsize: tuple[int, int] = (12, 8),
+       title: str | None = None,
        show_labels: bool = True,
        show_connections: bool = True,
        dpi: int = 300,
@@ -303,7 +303,7 @@ class WorkflowVisualizer:
        self.visualize(output_path=output_path, **kwargs)
 
     def create_execution_graph(
-       self, run_id: str, task_manager: Any, output_path:
+       self, run_id: str, task_manager: Any, output_path: str | None = None
     ) -> str:
        """Create a Mermaid visualization showing execution status.
 
@@ -423,8 +423,8 @@ class WorkflowVisualizer:
        return str(output_path)
 
     def create_performance_dashboard(
-       self, run_id: str, task_manager: TaskManager, output_dir:
-    ) ->
+       self, run_id: str, task_manager: TaskManager, output_dir: Path | None = None
+    ) -> dict[str, Path]:
        """Create integrated performance dashboard with workflow visualization.
 
        Args:
@@ -461,7 +461,7 @@ class WorkflowVisualizer:
        return outputs
 
     def _create_dashboard_html(
-       self, run_id: str, outputs:
+       self, run_id: str, outputs: dict[str, Path], dashboard_path: Path
     ) -> None:
        """Create HTML dashboard integrating all visualizations."""
        html_content = f"""
@@ -578,7 +578,7 @@ class WorkflowVisualizer:
 def add_visualization_to_workflow():
     """Add visualization method to Workflow class."""
 
-    def visualize(self, output_path:
+    def visualize(self, output_path: str | None = None, **kwargs) -> None:
        """Visualize the workflow.
 
        Args:
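A sketch of the visualizer API with the signatures above; note that per `__init__`, a supplied `node_colors` map replaces the default map rather than merging with it. The output file name and title are illustrative values:

```python
from kailash.workflow.visualization import WorkflowVisualizer

viz = WorkflowVisualizer(
    workflow,
    node_colors=None,        # None -> falls back to _default_node_colors()
    layout="hierarchical",
)
viz.visualize(
    output_path="workflow.png",  # str | None
    figsize=(12, 8),
    title="Customer Analysis",   # illustrative
    show_labels=True,
    dpi=300,
)
```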
{kailash-0.3.0.dist-info → kailash-0.3.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: kailash
-Version: 0.3.0
+Version: 0.3.1
 Summary: Python SDK for the Kailash container-node architecture
 Home-page: https://github.com/integrum/kailash-python-sdk
 Author: Integrum
@@ -109,7 +109,8 @@ Dynamic: requires-python
 - 🔁 **Cyclic Workflows (v0.2.0)**: Universal Hybrid Cyclic Graph Architecture with 30,000+ iterations/second performance
 - 🛠️ **Developer Tools**: CycleAnalyzer, CycleDebugger, CycleProfiler for production-ready cyclic workflows
 - 📈 **High Performance**: Optimized execution engine supporting 100,000+ iteration workflows
-- 📁 **
+- 📁 **Complete Finance Workflow Library (v0.3.1)**: Production-ready financial workflows with AI analysis
+- 💼 **Enterprise Workflow Patterns**: Credit risk, portfolio optimization, trading signals, fraud detection
 
 ## 🎯 Who Is This For?
 
@@ -197,6 +198,52 @@ from kailash.utils.export import export_workflow
 export_workflow(workflow, "customer_analysis.yaml")
 ```
 
+## 💼 Finance Workflow Library (New in v0.3.1)
+
+Complete production-ready financial workflows using AI and modern quantitative methods:
+
+### Credit Risk Assessment
+
+```python
+from kailash.workflow import Workflow
+from kailash.nodes.data import CSVReaderNode
+from kailash.nodes.code import PythonCodeNode
+from kailash.nodes.ai import LLMAgentNode
+
+def calculate_risk_metrics(customers, transactions):
+    """Calculate comprehensive risk metrics."""
+    # Modern risk scoring with AI analysis
+    # 100+ lines of production risk calculation
+    return {"result": risk_scores}
+
+workflow = Workflow("credit-risk", "Credit Risk Assessment")
+workflow.add_node("customer_reader", CSVReaderNode())
+workflow.add_node("risk_calculator", PythonCodeNode.from_function(func=calculate_risk_metrics))
+workflow.add_node("ai_analyzer", LLMAgentNode(model="gpt-4",
+    system_prompt="You are a financial risk expert..."))
+```
+
+### Portfolio Optimization
+
+```python
+def optimize_portfolio(holdings, market_data, risk_profile="moderate"):
+    """Modern Portfolio Theory optimization with rebalancing."""
+    # Sharpe ratio optimization, correlation analysis
+    # Risk-adjusted returns with AI market insights
+    return {"result": optimization_plan}
+
+workflow = Workflow("portfolio-opt", "Portfolio Optimization")
+workflow.add_node("optimizer", PythonCodeNode.from_function(func=optimize_portfolio))
+# Generates rebalancing trades, risk metrics, AI market analysis
+```
+
+### Trading Signals & Fraud Detection
+
+- **Trading Signals**: Technical indicators (RSI, MACD, Bollinger Bands) + AI sentiment
+- **Fraud Detection**: Real-time transaction monitoring with velocity analysis
+
+**See complete examples**: `sdk-users/workflows/by-industry/finance/`
+
 ## 📚 Documentation
 
 ### For SDK Users
@@ -204,9 +251,10 @@ export_workflow(workflow, "customer_analysis.yaml")
 **Build solutions with the SDK:**
 - `sdk-users/` - Everything you need to build with Kailash
   - `developer/` - Node creation patterns and troubleshooting
-  - `workflows/` -
+  - `workflows/` - Complete production workflow library (v0.3.1)
+    - Finance workflows: Credit risk, portfolio optimization, trading signals, fraud detection
   - Quick-start patterns (30-second workflows)
-  - Industry-specific solutions
+  - Industry-specific solutions by vertical
   - Enterprise integration patterns
   - `essentials/` - Quick reference and cheatsheets
   - `nodes/` - Comprehensive node catalog (66+ nodes)
@@ -347,7 +395,7 @@ assert results["analyze"]["result"]["total_customers"] == len(test_data)
 3. **Monitor in real-time**:
 ```python
 from kailash.visualization import DashboardServer
-
+
 server = DashboardServer(port=8080)
 server.start()
 # Open http://localhost:8080 for live monitoring