kailash 0.2.2__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
- kailash/__init__.py +1 -1
- kailash/access_control.py +40 -39
- kailash/api/auth.py +26 -32
- kailash/api/custom_nodes.py +29 -29
- kailash/api/custom_nodes_secure.py +35 -35
- kailash/api/database.py +17 -17
- kailash/api/gateway.py +19 -19
- kailash/api/mcp_integration.py +24 -23
- kailash/api/studio.py +45 -45
- kailash/api/workflow_api.py +8 -8
- kailash/cli/commands.py +5 -8
- kailash/manifest.py +42 -42
- kailash/mcp/__init__.py +1 -1
- kailash/mcp/ai_registry_server.py +20 -20
- kailash/mcp/client.py +9 -11
- kailash/mcp/client_new.py +10 -10
- kailash/mcp/server.py +1 -2
- kailash/mcp/server_enhanced.py +449 -0
- kailash/mcp/servers/ai_registry.py +6 -6
- kailash/mcp/utils/__init__.py +31 -0
- kailash/mcp/utils/cache.py +267 -0
- kailash/mcp/utils/config.py +263 -0
- kailash/mcp/utils/formatters.py +293 -0
- kailash/mcp/utils/metrics.py +418 -0
- kailash/nodes/ai/agents.py +9 -9
- kailash/nodes/ai/ai_providers.py +33 -34
- kailash/nodes/ai/embedding_generator.py +31 -32
- kailash/nodes/ai/intelligent_agent_orchestrator.py +62 -66
- kailash/nodes/ai/iterative_llm_agent.py +48 -48
- kailash/nodes/ai/llm_agent.py +32 -33
- kailash/nodes/ai/models.py +13 -13
- kailash/nodes/ai/self_organizing.py +44 -44
- kailash/nodes/api/__init__.py +5 -0
- kailash/nodes/api/auth.py +11 -11
- kailash/nodes/api/graphql.py +13 -13
- kailash/nodes/api/http.py +19 -19
- kailash/nodes/api/monitoring.py +463 -0
- kailash/nodes/api/rate_limiting.py +9 -13
- kailash/nodes/api/rest.py +29 -29
- kailash/nodes/api/security.py +819 -0
- kailash/nodes/base.py +24 -26
- kailash/nodes/base_async.py +7 -7
- kailash/nodes/base_cycle_aware.py +12 -12
- kailash/nodes/base_with_acl.py +5 -5
- kailash/nodes/code/python.py +56 -55
- kailash/nodes/data/__init__.py +6 -0
- kailash/nodes/data/directory.py +6 -6
- kailash/nodes/data/event_generation.py +297 -0
- kailash/nodes/data/file_discovery.py +598 -0
- kailash/nodes/data/readers.py +8 -8
- kailash/nodes/data/retrieval.py +10 -10
- kailash/nodes/data/sharepoint_graph.py +17 -17
- kailash/nodes/data/sources.py +5 -5
- kailash/nodes/data/sql.py +13 -13
- kailash/nodes/data/streaming.py +25 -25
- kailash/nodes/data/vector_db.py +22 -22
- kailash/nodes/data/writers.py +7 -7
- kailash/nodes/logic/async_operations.py +17 -17
- kailash/nodes/logic/convergence.py +11 -11
- kailash/nodes/logic/loop.py +4 -4
- kailash/nodes/logic/operations.py +11 -11
- kailash/nodes/logic/workflow.py +8 -9
- kailash/nodes/mixins/mcp.py +17 -17
- kailash/nodes/mixins.py +8 -10
- kailash/nodes/transform/chunkers.py +3 -3
- kailash/nodes/transform/formatters.py +7 -7
- kailash/nodes/transform/processors.py +11 -11
- kailash/runtime/access_controlled.py +18 -18
- kailash/runtime/async_local.py +18 -20
- kailash/runtime/docker.py +24 -26
- kailash/runtime/local.py +55 -31
- kailash/runtime/parallel.py +25 -25
- kailash/runtime/parallel_cyclic.py +29 -29
- kailash/runtime/runner.py +6 -6
- kailash/runtime/testing.py +22 -22
- kailash/sdk_exceptions.py +0 -58
- kailash/security.py +14 -26
- kailash/tracking/manager.py +38 -38
- kailash/tracking/metrics_collector.py +15 -14
- kailash/tracking/models.py +53 -53
- kailash/tracking/storage/base.py +7 -17
- kailash/tracking/storage/database.py +22 -23
- kailash/tracking/storage/filesystem.py +38 -40
- kailash/utils/export.py +21 -21
- kailash/utils/templates.py +8 -9
- kailash/visualization/api.py +30 -34
- kailash/visualization/dashboard.py +17 -17
- kailash/visualization/performance.py +32 -19
- kailash/visualization/reports.py +30 -28
- kailash/workflow/builder.py +8 -8
- kailash/workflow/convergence.py +13 -12
- kailash/workflow/cycle_analyzer.py +38 -33
- kailash/workflow/cycle_builder.py +12 -12
- kailash/workflow/cycle_config.py +16 -15
- kailash/workflow/cycle_debugger.py +40 -40
- kailash/workflow/cycle_exceptions.py +29 -29
- kailash/workflow/cycle_profiler.py +21 -21
- kailash/workflow/cycle_state.py +20 -22
- kailash/workflow/cyclic_runner.py +45 -45
- kailash/workflow/graph.py +57 -45
- kailash/workflow/mermaid_visualizer.py +9 -11
- kailash/workflow/migration.py +22 -22
- kailash/workflow/mock_registry.py +6 -6
- kailash/workflow/runner.py +9 -9
- kailash/workflow/safety.py +12 -13
- kailash/workflow/state.py +8 -11
- kailash/workflow/templates.py +19 -19
- kailash/workflow/validation.py +14 -14
- kailash/workflow/visualization.py +32 -24
- kailash-0.3.1.dist-info/METADATA +476 -0
- kailash-0.3.1.dist-info/RECORD +136 -0
- kailash-0.2.2.dist-info/METADATA +0 -121
- kailash-0.2.2.dist-info/RECORD +0 -126
- {kailash-0.2.2.dist-info → kailash-0.3.1.dist-info}/WHEEL +0 -0
- {kailash-0.2.2.dist-info → kailash-0.3.1.dist-info}/entry_points.txt +0 -0
- {kailash-0.2.2.dist-info → kailash-0.3.1.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.2.2.dist-info → kailash-0.3.1.dist-info}/top_level.txt +0 -0
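Almost every change below follows one modernization pattern: `Optional[X]`, `Dict`, and `List` from `typing` become PEP 604 unions (`X | None`) and PEP 585 builtin generics (`dict`, `list`), and `timezone.utc` becomes the `UTC` alias. A minimal sketch of the target style — the function and names here are illustrative, not taken from the package; note that bare `X | None` annotations require Python 3.10+ and `from datetime import UTC` requires 3.11+:

from datetime import UTC, datetime
from typing import Any

def stamp(metadata: dict[str, Any] | None = None) -> dict[str, Any]:
    """Return a copy of metadata with a timezone-aware creation time."""
    data = dict(metadata or {})             # builtin generics replace typing.Dict
    data["created_at"] = datetime.now(UTC)  # datetime.UTC: 3.11+ alias for timezone.utc
    return data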
kailash/tracking/manager.py
CHANGED
@@ -1,8 +1,8 @@
 """Task manager for workflow execution tracking."""
 
 import logging
-from datetime import datetime, timedelta, timezone
-from typing import Any, Dict, List, Optional
+from datetime import UTC, datetime, timedelta
+from typing import Any
 
 from kailash.sdk_exceptions import StorageException, TaskException, TaskStateError
 
@@ -23,7 +23,7 @@ logger = logging.getLogger(__name__)
 class TaskManager:
     """Manages task tracking for workflow executions."""
 
-    def __init__(self, storage_backend: Optional[StorageBackend] = None):
+    def __init__(self, storage_backend: StorageBackend | None = None):
         """Initialize task manager.
 
         Args:
@@ -37,13 +37,13 @@ class TaskManager:
         self.logger = logger
 
         # In-memory caches
-        self._runs: Dict[str, WorkflowRun] = {}
-        self._tasks: Dict[str, TaskRun] = {}
+        self._runs: dict[str, WorkflowRun] = {}
+        self._tasks: dict[str, TaskRun] = {}
         except Exception as e:
             raise TaskException(f"Failed to initialize task manager: {e}") from e
 
     def create_run(
-        self, workflow_name: str, metadata: Optional[Dict[str, Any]] = None
+        self, workflow_name: str, metadata: dict[str, Any] | None = None
     ) -> str:
         """Create a new workflow run.
 
@@ -80,7 +80,7 @@ class TaskManager:
         return run.run_id
 
     def update_run_status(
-        self, run_id: str, status: str, error: Optional[str] = None
+        self, run_id: str, status: str, error: str | None = None
     ) -> None:
         """Update workflow run status.
 
@@ -129,12 +129,12 @@ class TaskManager:
     def create_task(
         self,
         node_id: str,
-        input_data: Optional[Dict[str, Any]] = None,
-        metadata: Optional[Dict[str, Any]] = None,
+        input_data: dict[str, Any] | None = None,
+        metadata: dict[str, Any] | None = None,
         run_id: str = "test-run-id",
         node_type: str = "default-node-type",
-        dependencies: Optional[List[str]] = None,
-        started_at: Optional[datetime] = None,
+        dependencies: list[str] | None = None,
+        started_at: datetime | None = None,
     ) -> TaskRun:
         """Create a new task.
 
@@ -197,10 +197,10 @@
         self,
         task_id: str,
         status: TaskStatus,
-        result: Optional[Dict[str, Any]] = None,
-        error: Optional[str] = None,
-        ended_at: Optional[datetime] = None,
-        metadata: Optional[Dict[str, Any]] = None,
+        result: dict[str, Any] | None = None,
+        error: str | None = None,
+        ended_at: datetime | None = None,
+        metadata: dict[str, Any] | None = None,
     ) -> None:
         """Update task status.
 
@@ -249,7 +249,7 @@
 
         self.logger.info(f"Updated task {task_id} status to: {status}")
 
-    def get_run(self, run_id: str) -> Optional[WorkflowRun]:
+    def get_run(self, run_id: str) -> WorkflowRun | None:
         """Get workflow run by ID.
 
         Args:
@@ -276,7 +276,7 @@
         self._runs[run_id] = run
         return run
 
-    def get_task(self, task_id: str) -> Optional[TaskRun]:
+    def get_task(self, task_id: str) -> TaskRun | None:
         """Get task by ID.
 
         Args:
@@ -304,8 +304,8 @@
         return task
 
     def list_runs(
-        self, workflow_name: Optional[str] = None, status: Optional[str] = None
-    ) -> List[RunSummary]:
+        self, workflow_name: str | None = None, status: str | None = None
+    ) -> list[RunSummary]:
         """List workflow runs.
 
         Args:
@@ -353,9 +353,9 @@
     def list_tasks(
         self,
         run_id: str,
-        node_id: Optional[str] = None,
-        status: Optional[TaskStatus] = None,
-    ) -> List[TaskSummary]:
+        node_id: str | None = None,
+        status: TaskStatus | None = None,
+    ) -> list[TaskSummary]:
         """List tasks for a run.
 
         Args:
@@ -392,7 +392,7 @@
 
         return summaries
 
-    def get_run_summary(self, run_id: str) -> Optional[RunSummary]:
+    def get_run_summary(self, run_id: str) -> RunSummary | None:
         """Get summary for a specific run.
 
         Args:
@@ -441,7 +441,7 @@
         self.logger.info("Cleared task manager cache")
 
     def complete_task(
-        self, task_id: str, output_data: Optional[Dict[str, Any]] = None
+        self, task_id: str, output_data: dict[str, Any] | None = None
     ) -> None:
         """Complete a task successfully.
 
@@ -567,7 +567,7 @@
 
         self.logger.info(f"Deleted task {task_id}")
 
-    def get_tasks_by_status(self, status: TaskStatus) -> List[TaskRun]:
+    def get_tasks_by_status(self, status: TaskStatus) -> list[TaskRun]:
         """Get tasks by status.
 
         Args:
@@ -588,7 +588,7 @@
         except Exception as e:
             raise StorageException(f"Failed to query tasks by status: {e}") from e
 
-    def get_tasks_by_node(self, node_id: str) -> List[TaskRun]:
+    def get_tasks_by_node(self, node_id: str) -> list[TaskRun]:
         """Get tasks by node ID.
 
         Args:
@@ -609,7 +609,7 @@
         except Exception as e:
             raise StorageException(f"Failed to query tasks by node: {e}") from e
 
-    def get_task_history(self, task_id: str) -> List[TaskRun]:
+    def get_task_history(self, task_id: str) -> list[TaskRun]:
         """Get task history (original task and all retries).
 
         Args:
@@ -658,7 +658,7 @@
 
     def get_tasks_by_timerange(
         self, start_time: datetime, end_time: datetime
-    ) -> List[TaskRun]:
+    ) -> list[TaskRun]:
         """Get tasks created between start_time and end_time.
 
         Args:
@@ -683,15 +683,15 @@
             # Ensure timezone-aware comparison
             task_created_at = t.created_at
             if task_created_at and task_created_at.tzinfo is None:
-                task_created_at = task_created_at.replace(tzinfo=timezone.utc)
+                task_created_at = task_created_at.replace(tzinfo=UTC)
 
             start_aware = start_time
             if start_aware.tzinfo is None:
-                start_aware = start_aware.replace(tzinfo=timezone.utc)
+                start_aware = start_aware.replace(tzinfo=UTC)
 
             end_aware = end_time
             if end_aware.tzinfo is None:
-                end_aware = end_aware.replace(tzinfo=timezone.utc)
+                end_aware = end_aware.replace(tzinfo=UTC)
 
             if (
                 task_created_at
@@ -703,7 +703,7 @@
         except Exception as e:
             raise StorageException(f"Failed to query tasks by timerange: {e}") from e
 
-    def get_task_statistics(self) -> Dict[str, Any]:
+    def get_task_statistics(self) -> dict[str, Any]:
         """Get task statistics.
 
         Returns:
@@ -751,7 +751,7 @@
         except Exception as e:
             raise StorageException(f"Failed to get tasks for cleanup: {e}") from e
 
-        cutoff = datetime.now(timezone.utc) - timedelta(days=days)
+        cutoff = datetime.now(UTC) - timedelta(days=days)
         deleted = 0
 
         for task in tasks:
@@ -759,7 +759,7 @@
             # Ensure timezone-aware comparison
             task_created_at = task.created_at
             if task_created_at.tzinfo is None:
-                task_created_at = task_created_at.replace(tzinfo=timezone.utc)
+                task_created_at = task_created_at.replace(tzinfo=UTC)
 
             if task_created_at < cutoff:
                 try:
@@ -796,7 +796,7 @@
 
         self.logger.info(f"Updated metrics for task {task_id}")
 
-    def get_running_tasks(self) -> List[TaskRun]:
+    def get_running_tasks(self) -> list[TaskRun]:
         """Get all currently running tasks.
 
         Returns:
@@ -807,7 +807,7 @@
         """
         return self.get_tasks_by_status(TaskStatus.RUNNING)
 
-    def get_task_dependencies(self, task_id: str) -> List[TaskRun]:
+    def get_task_dependencies(self, task_id: str) -> list[TaskRun]:
         """Get tasks that are dependencies for the given task.
 
         Args:
@@ -861,7 +861,7 @@
         except Exception as e:
             raise StorageException(f"Failed to save task: {e}") from e
 
-    def get_run_tasks(self, run_id: str) -> List[TaskRun]:
+    def get_run_tasks(self, run_id: str) -> list[TaskRun]:
         """Get all tasks for a specific run.
 
         Args:
@@ -882,7 +882,7 @@
 
         return tasks
 
-    def get_workflow_tasks(self, workflow_id: str) -> List[TaskRun]:
+    def get_workflow_tasks(self, workflow_id: str) -> list[TaskRun]:
         """Get all tasks for a workflow.
 
         This is a compatibility method that returns all tasks across all runs for a workflow.
kailash/tracking/metrics_collector.py
CHANGED
@@ -20,9 +20,10 @@ Downstream Consumers:
 import asyncio
 import threading
 import time
+from collections.abc import Callable
 from contextlib import contextmanager
 from dataclasses import dataclass, field
-from typing import Any, Callable, Dict, Optional
+from typing import Any
 
 try:
     import psutil
@@ -60,9 +61,9 @@ class PerformanceMetrics:
     io_write_count: int = 0
     thread_count: int = 1
     context_switches: int = 0
-    custom: Dict[str, Any] = field(default_factory=dict)
+    custom: dict[str, Any] = field(default_factory=dict)
 
-    def to_task_metrics(self) -> Dict[str, Any]:
+    def to_task_metrics(self) -> dict[str, Any]:
         """Convert to TaskMetrics compatible format."""
         return {
             "duration": self.duration,
@@ -114,7 +115,7 @@ class MetricsCollector:
         )
 
     @contextmanager
-    def collect(self, node_id: Optional[str] = None):
+    def collect(self, node_id: str | None = None):
         """Context manager for collecting metrics during execution.
 
         Args:
@@ -135,7 +136,7 @@
         finally:
             context.stop()
 
-    async def collect_async(self, coro, node_id: Optional[str] = None):
+    async def collect_async(self, coro, node_id: str | None = None):
         """Collect metrics for async execution.
 
         Args:
@@ -163,20 +164,20 @@ class MetricsContext:
     """Context for collecting metrics during a specific execution."""
 
     def __init__(
-        self, node_id: Optional[str], sampling_interval: float, monitoring_enabled: bool
+        self, node_id: str | None, sampling_interval: float, monitoring_enabled: bool
     ):
         self.node_id = node_id
         self.sampling_interval = sampling_interval
         self.monitoring_enabled = monitoring_enabled
 
-        self.start_time: Optional[float] = None
-        self.end_time: Optional[float] = None
-        self.process: Optional[Any] = None
-        self.initial_io: Optional[Any] = None
-        self.initial_memory: Optional[float] = None
+        self.start_time: float | None = None
+        self.end_time: float | None = None
+        self.process: Any | None = None
+        self.initial_io: Any | None = None
+        self.initial_memory: float | None = None
         self.peak_memory: float = 0.0
         self.cpu_samples: list = []
-        self.monitoring_thread: Optional[threading.Thread] = None
+        self.monitoring_thread: threading.Thread | None = None
         self._stop_monitoring = threading.Event()
 
     def start(self):
@@ -293,7 +294,7 @@ class MetricsContext:
         self._custom_metrics = {}
         self._custom_metrics[name] = value
 
-    def get_custom_metrics(self) -> Dict[str, Any]:
+    def get_custom_metrics(self) -> dict[str, Any]:
         """Get custom metrics."""
         return getattr(self, "_custom_metrics", {})
 
@@ -302,7 +303,7 @@
 default_collector = MetricsCollector()
 
 
-def collect_metrics(func: Optional[Callable] = None, *, node_id: Optional[str] = None):
+def collect_metrics(func: Callable | None = None, *, node_id: str | None = None):
     """Decorator for collecting metrics on function execution.
 
     Can be used as @collect_metrics or @collect_metrics(node_id="my_node")
kailash/tracking/models.py
CHANGED
@@ -1,8 +1,8 @@
 """Data models for task tracking."""
 
-from datetime import datetime, timezone
+from datetime import UTC, datetime
 from enum import Enum
-from typing import Any, Dict, List, Optional
+from typing import Any
 from uuid import uuid4
 
 from pydantic import BaseModel, Field, field_validator
@@ -14,11 +14,11 @@ from kailash.sdk_exceptions import KailashValidationError, TaskException, TaskStateError
 class TaskMetrics(BaseModel):
     """Metrics for task execution."""
 
-    duration: Optional[float] = 0.0
-    memory_usage: Optional[float] = 0.0  # Legacy field name
-    memory_usage_mb: Optional[float] = 0.0  # New field name
-    cpu_usage: Optional[float] = 0.0
-    custom_metrics: Dict[str, Any] = Field(default_factory=dict)
+    duration: float | None = 0.0
+    memory_usage: float | None = 0.0  # Legacy field name
+    memory_usage_mb: float | None = 0.0  # New field name
+    cpu_usage: float | None = 0.0
+    custom_metrics: dict[str, Any] = Field(default_factory=dict)
 
     def __init__(self, **data):
         """Initialize metrics with unified memory field handling."""
@@ -37,12 +37,12 @@ class TaskMetrics(BaseModel):
             raise ValueError("Metric values must be non-negative")
         return v
 
-    def to_dict(self) -> Dict[str, Any]:
+    def to_dict(self) -> dict[str, Any]:
         """Convert metrics to dictionary representation."""
         return self.model_dump()
 
     @classmethod
-    def from_dict(cls, data: Dict[str, Any]) -> "TaskMetrics":
+    def from_dict(cls, data: dict[str, Any]) -> "TaskMetrics":
         """Create metrics from dictionary representation."""
         return cls.model_validate(data)
 
@@ -86,20 +86,20 @@ class TaskRun(BaseModel):
         default="default-node-type", description="Type of node"
     )  # Default for backward compatibility
     status: TaskStatus = Field(default=TaskStatus.PENDING)
-    started_at: Optional[datetime] = None
-    ended_at: Optional[datetime] = None
-    completed_at: Optional[datetime] = (
+    started_at: datetime | None = None
+    ended_at: datetime | None = None
+    completed_at: datetime | None = (
         None  # Alias for ended_at for backward compatibility
     )
-    created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
-    result: Optional[Dict[str, Any]] = None
-    error: Optional[str] = None
-    metadata: Dict[str, Any] = Field(default_factory=dict)
-    input_data: Optional[Dict[str, Any]] = None
-    output_data: Optional[Dict[str, Any]] = None
-    metrics: Optional[TaskMetrics] = None  # For storing task metrics
-    dependencies: List[str] = Field(default_factory=list)
-    parent_task_id: Optional[str] = None
+    created_at: datetime = Field(default_factory=lambda: datetime.now(UTC))
+    result: dict[str, Any] | None = None
+    error: str | None = None
+    metadata: dict[str, Any] = Field(default_factory=dict)
+    input_data: dict[str, Any] | None = None
+    output_data: dict[str, Any] | None = None
+    metrics: TaskMetrics | None = None  # For storing task metrics
+    dependencies: list[str] = Field(default_factory=list)
+    parent_task_id: str | None = None
     retry_count: int = 0
 
     @field_validator("run_id", "node_id", "node_type")
@@ -134,26 +134,26 @@ class TaskRun(BaseModel):
     def start(self) -> None:
         """Start the task."""
         self.update_status(TaskStatus.RUNNING)
-        self.started_at = datetime.now(timezone.utc)
+        self.started_at = datetime.now(UTC)
 
-    def complete(self, output_data: Optional[Dict[str, Any]] = None) -> None:
+    def complete(self, output_data: dict[str, Any] | None = None) -> None:
         """Complete the task successfully."""
         if output_data is not None:
             self.output_data = output_data
         self.update_status(TaskStatus.COMPLETED)
-        self.completed_at = datetime.now(timezone.utc)
+        self.completed_at = datetime.now(UTC)
 
     def fail(self, error_message: str) -> None:
         """Mark the task as failed."""
         self.error = error_message
         self.update_status(TaskStatus.FAILED)
-        self.completed_at = datetime.now(timezone.utc)
+        self.completed_at = datetime.now(UTC)
 
     def cancel(self, reason: str) -> None:
         """Cancel the task."""
         self.error = reason
         self.update_status(TaskStatus.CANCELLED)
-        self.completed_at = datetime.now(timezone.utc)
+        self.completed_at = datetime.now(UTC)
 
     def create_retry(self) -> "TaskRun":
         """Create a new task as a retry of this task."""
@@ -171,7 +171,7 @@ class TaskRun(BaseModel):
         return retry_task
 
     @property
-    def duration(self) -> Optional[float]:
+    def duration(self) -> float | None:
         """Get task duration in seconds."""
         if self.started_at and self.ended_at:
             return (self.ended_at - self.started_at).total_seconds()
@@ -204,7 +204,7 @@ class TaskRun(BaseModel):
         # Check other validation rules as needed
 
     @classmethod
-    def from_dict(cls, data: Dict[str, Any]) -> "TaskRun":
+    def from_dict(cls, data: dict[str, Any]) -> "TaskRun":
         """Create from dictionary representation."""
         # Make a copy to avoid modifying the original
         data_copy = data.copy()
@@ -234,10 +234,10 @@ class TaskRun(BaseModel):
     def update_status(
         self,
         status: TaskStatus,
-        result: Optional[Dict[str, Any]] = None,
-        error: Optional[str] = None,
-        ended_at: Optional[datetime] = None,
-        metadata: Optional[Dict[str, Any]] = None,
+        result: dict[str, Any] | None = None,
+        error: str | None = None,
+        ended_at: datetime | None = None,
+        metadata: dict[str, Any] | None = None,
     ) -> None:
         """Update task status.
 
@@ -275,15 +275,15 @@ class TaskRun(BaseModel):
         if ended_at is not None:
             self.ended_at = ended_at
         elif status in [TaskStatus.COMPLETED, TaskStatus.FAILED, TaskStatus.SKIPPED]:
-            self.ended_at = datetime.now(timezone.utc)
+            self.ended_at = datetime.now(UTC)
 
         if status == TaskStatus.RUNNING and self.started_at is None:
-            self.started_at = datetime.now(timezone.utc)
+            self.started_at = datetime.now(UTC)
 
         if metadata is not None:
             self.metadata.update(metadata)
 
-    def get_duration(self) -> Optional[float]:
+    def get_duration(self) -> float | None:
         """Get task duration in seconds.
 
         Returns:
@@ -293,7 +293,7 @@ class TaskRun(BaseModel):
             return (self.ended_at - self.started_at).total_seconds()
         return None
 
-    def to_dict(self) -> Dict[str, Any]:
+    def to_dict(self) -> dict[str, Any]:
         """Convert to dictionary representation."""
         try:
             data = self.model_dump()
@@ -335,11 +335,11 @@ class WorkflowRun(BaseModel):
     run_id: str = Field(default_factory=lambda: str(uuid4()))
     workflow_name: str = Field(..., description="Name of the workflow")
     status: str = Field(default="running", description="Run status")
-    started_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
-    ended_at: Optional[datetime] = None
-    tasks: List[str] = Field(default_factory=list, description="Task IDs")
-    metadata: Dict[str, Any] = Field(default_factory=dict)
-    error: Optional[str] = None
+    started_at: datetime = Field(default_factory=lambda: datetime.now(UTC))
+    ended_at: datetime | None = None
+    tasks: list[str] = Field(default_factory=list, description="Task IDs")
+    metadata: dict[str, Any] = Field(default_factory=dict)
+    error: str | None = None
 
     @field_validator("workflow_name")
     @classmethod
@@ -360,7 +360,7 @@ class WorkflowRun(BaseModel):
         )
         return v
 
-    def update_status(self, status: str, error: Optional[str] = None) -> None:
+    def update_status(self, status: str, error: str | None = None) -> None:
         """Update run status.
 
         Args:
@@ -387,7 +387,7 @@ class WorkflowRun(BaseModel):
             self.error = error
 
         if status in ["completed", "failed"] and self.ended_at is None:
-            self.ended_at = datetime.now(timezone.utc)
+            self.ended_at = datetime.now(UTC)
 
     def add_task(self, task_id: str) -> None:
         """Add a task to this run.
@@ -404,7 +404,7 @@ class WorkflowRun(BaseModel):
         if task_id not in self.tasks:
             self.tasks.append(task_id)
 
-    def get_duration(self) -> Optional[float]:
+    def get_duration(self) -> float | None:
         """Get run duration in seconds.
 
         Returns:
@@ -414,7 +414,7 @@ class WorkflowRun(BaseModel):
             return (self.ended_at - self.started_at).total_seconds()
         return None
 
-    def to_dict(self) -> Dict[str, Any]:
+    def to_dict(self) -> dict[str, Any]:
         """Convert to dictionary representation."""
         try:
             data = self.model_dump()
@@ -434,10 +434,10 @@ class TaskSummary(BaseModel):
     node_id: str
     node_type: str
     status: TaskStatus
-    duration: Optional[float] = None
-    started_at: Optional[str] = None
-    ended_at: Optional[str] = None
-    error: Optional[str] = None
+    duration: float | None = None
+    started_at: str | None = None
+    ended_at: str | None = None
+    error: str | None = None
 
     @classmethod
     def from_task_run(cls, task: TaskRun) -> "TaskSummary":
@@ -473,16 +473,16 @@ class RunSummary(BaseModel):
     run_id: str
     workflow_name: str
    status: str
-    duration: Optional[float] = None
+    duration: float | None = None
     started_at: str
-    ended_at: Optional[str] = None
+    ended_at: str | None = None
     task_count: int = 0
     completed_tasks: int = 0
     failed_tasks: int = 0
-    error: Optional[str] = None
+    error: str | None = None
 
     @classmethod
-    def from_workflow_run(cls, run: WorkflowRun, tasks: List[TaskRun]) -> "RunSummary":
+    def from_workflow_run(cls, run: WorkflowRun, tasks: list[TaskRun]) -> "RunSummary":
         """Create summary from a WorkflowRun and its tasks.
 
         Args:
kailash/tracking/storage/base.py
CHANGED
@@ -1,7 +1,6 @@
 """Abstract base class for storage backends."""
 
 from abc import ABC, abstractmethod
-from typing import List, Optional
 
 from ..models import TaskRun, TaskStatus, WorkflowRun
 
@@ -16,10 +15,9 @@ class StorageBackend(ABC):
         Args:
             run: WorkflowRun to save
         """
-        pass
 
     @abstractmethod
-    def load_run(self, run_id: str) -> Optional[WorkflowRun]:
+    def load_run(self, run_id: str) -> WorkflowRun | None:
         """Load a workflow run by ID.
 
         Args:
@@ -28,12 +26,11 @@ class StorageBackend(ABC):
         Returns:
             WorkflowRun or None if not found
         """
-        pass
 
     @abstractmethod
     def list_runs(
-        self, workflow_name: Optional[str] = None, status: Optional[str] = None
-    ) -> List[WorkflowRun]:
+        self, workflow_name: str | None = None, status: str | None = None
+    ) -> list[WorkflowRun]:
         """List workflow runs.
 
         Args:
@@ -43,7 +40,6 @@ class StorageBackend(ABC):
         Returns:
             List of WorkflowRun instances
         """
-        pass
 
     @abstractmethod
     def save_task(self, task: TaskRun) -> None:
@@ -52,10 +48,9 @@ class StorageBackend(ABC):
         Args:
             task: TaskRun to save
         """
-        pass
 
     @abstractmethod
-    def load_task(self, task_id: str) -> Optional[TaskRun]:
+    def load_task(self, task_id: str) -> TaskRun | None:
         """Load a task by ID.
 
         Args:
@@ -64,15 +59,14 @@ class StorageBackend(ABC):
         Returns:
             TaskRun or None if not found
         """
-        pass
 
     @abstractmethod
     def list_tasks(
         self,
         run_id: str,
-        node_id: Optional[str] = None,
-        status: Optional[TaskStatus] = None,
-    ) -> List[TaskRun]:
+        node_id: str | None = None,
+        status: TaskStatus | None = None,
+    ) -> list[TaskRun]:
         """List tasks for a run.
 
         Args:
@@ -83,12 +77,10 @@ class StorageBackend(ABC):
         Returns:
             List of TaskRun instances
         """
-        pass
 
     @abstractmethod
     def clear(self) -> None:
         """Clear all stored data."""
-        pass
 
     @abstractmethod
     def export_run(self, run_id: str, output_path: str) -> None:
@@ -98,7 +90,6 @@ class StorageBackend(ABC):
             run_id: Run ID to export
             output_path: Path to write export
         """
-        pass
 
     @abstractmethod
     def import_run(self, input_path: str) -> str:
@@ -110,4 +101,3 @@
         Returns:
             Imported run ID
         """
-        pass