kailash 0.8.4__py3-none-any.whl → 0.8.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/__init__.py +5 -11
- kailash/channels/__init__.py +2 -1
- kailash/channels/mcp_channel.py +23 -4
- kailash/cli/__init__.py +11 -1
- kailash/cli/validate_imports.py +202 -0
- kailash/cli/validation_audit.py +570 -0
- kailash/core/actors/supervisor.py +1 -1
- kailash/core/resilience/bulkhead.py +15 -5
- kailash/core/resilience/circuit_breaker.py +74 -1
- kailash/core/resilience/health_monitor.py +433 -33
- kailash/edge/compliance.py +33 -0
- kailash/edge/consistency.py +609 -0
- kailash/edge/coordination/__init__.py +30 -0
- kailash/edge/coordination/global_ordering.py +355 -0
- kailash/edge/coordination/leader_election.py +217 -0
- kailash/edge/coordination/partition_detector.py +296 -0
- kailash/edge/coordination/raft.py +485 -0
- kailash/edge/discovery.py +63 -1
- kailash/edge/migration/__init__.py +19 -0
- kailash/edge/migration/edge_migration_service.py +384 -0
- kailash/edge/migration/edge_migrator.py +832 -0
- kailash/edge/monitoring/__init__.py +21 -0
- kailash/edge/monitoring/edge_monitor.py +736 -0
- kailash/edge/prediction/__init__.py +10 -0
- kailash/edge/prediction/predictive_warmer.py +591 -0
- kailash/edge/resource/__init__.py +102 -0
- kailash/edge/resource/cloud_integration.py +796 -0
- kailash/edge/resource/cost_optimizer.py +949 -0
- kailash/edge/resource/docker_integration.py +919 -0
- kailash/edge/resource/kubernetes_integration.py +893 -0
- kailash/edge/resource/platform_integration.py +913 -0
- kailash/edge/resource/predictive_scaler.py +959 -0
- kailash/edge/resource/resource_analyzer.py +824 -0
- kailash/edge/resource/resource_pools.py +610 -0
- kailash/integrations/dataflow_edge.py +261 -0
- kailash/mcp_server/registry_integration.py +1 -1
- kailash/mcp_server/server.py +351 -8
- kailash/mcp_server/transports.py +305 -0
- kailash/middleware/gateway/event_store.py +1 -0
- kailash/monitoring/__init__.py +18 -0
- kailash/monitoring/alerts.py +646 -0
- kailash/monitoring/metrics.py +677 -0
- kailash/nodes/__init__.py +2 -0
- kailash/nodes/ai/semantic_memory.py +2 -2
- kailash/nodes/base.py +622 -1
- kailash/nodes/code/python.py +44 -3
- kailash/nodes/data/async_sql.py +42 -20
- kailash/nodes/edge/__init__.py +36 -0
- kailash/nodes/edge/base.py +240 -0
- kailash/nodes/edge/cloud_node.py +710 -0
- kailash/nodes/edge/coordination.py +239 -0
- kailash/nodes/edge/docker_node.py +825 -0
- kailash/nodes/edge/edge_data.py +582 -0
- kailash/nodes/edge/edge_migration_node.py +396 -0
- kailash/nodes/edge/edge_monitoring_node.py +421 -0
- kailash/nodes/edge/edge_state.py +673 -0
- kailash/nodes/edge/edge_warming_node.py +393 -0
- kailash/nodes/edge/kubernetes_node.py +652 -0
- kailash/nodes/edge/platform_node.py +766 -0
- kailash/nodes/edge/resource_analyzer_node.py +378 -0
- kailash/nodes/edge/resource_optimizer_node.py +501 -0
- kailash/nodes/edge/resource_scaler_node.py +397 -0
- kailash/nodes/governance.py +410 -0
- kailash/nodes/ports.py +676 -0
- kailash/nodes/rag/registry.py +1 -1
- kailash/nodes/transaction/distributed_transaction_manager.py +48 -1
- kailash/nodes/transaction/saga_state_storage.py +2 -1
- kailash/nodes/validation.py +8 -8
- kailash/runtime/local.py +374 -1
- kailash/runtime/validation/__init__.py +12 -0
- kailash/runtime/validation/connection_context.py +119 -0
- kailash/runtime/validation/enhanced_error_formatter.py +202 -0
- kailash/runtime/validation/error_categorizer.py +164 -0
- kailash/runtime/validation/import_validator.py +446 -0
- kailash/runtime/validation/metrics.py +380 -0
- kailash/runtime/validation/performance.py +615 -0
- kailash/runtime/validation/suggestion_engine.py +212 -0
- kailash/testing/fixtures.py +2 -2
- kailash/utils/data_paths.py +74 -0
- kailash/workflow/builder.py +413 -8
- kailash/workflow/contracts.py +418 -0
- kailash/workflow/edge_infrastructure.py +369 -0
- kailash/workflow/mermaid_visualizer.py +3 -1
- kailash/workflow/migration.py +3 -3
- kailash/workflow/templates.py +6 -6
- kailash/workflow/type_inference.py +669 -0
- kailash/workflow/validation.py +134 -3
- {kailash-0.8.4.dist-info → kailash-0.8.6.dist-info}/METADATA +52 -34
- {kailash-0.8.4.dist-info → kailash-0.8.6.dist-info}/RECORD +93 -42
- kailash/nexus/__init__.py +0 -21
- kailash/nexus/cli/__init__.py +0 -5
- kailash/nexus/cli/__main__.py +0 -6
- kailash/nexus/cli/main.py +0 -176
- kailash/nexus/factory.py +0 -413
- kailash/nexus/gateway.py +0 -545
- {kailash-0.8.4.dist-info → kailash-0.8.6.dist-info}/WHEEL +0 -0
- {kailash-0.8.4.dist-info → kailash-0.8.6.dist-info}/entry_points.txt +0 -0
- {kailash-0.8.4.dist-info → kailash-0.8.6.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.8.4.dist-info → kailash-0.8.6.dist-info}/top_level.txt +0 -0
kailash/workflow/edge_infrastructure.py ADDED
@@ -0,0 +1,369 @@
+"""Edge infrastructure management for WorkflowBuilder.
+
+This module provides a singleton EdgeInfrastructure class that manages
+shared edge computing resources across workflows, including EdgeDiscovery
+and ComplianceRouter instances.
+"""
+
+import asyncio
+import logging
+import threading
+import time
+from collections import defaultdict
+from typing import Any, Dict, List, Optional
+
+from kailash.edge.compliance import ComplianceRouter
+from kailash.edge.discovery import EdgeDiscovery
+from kailash.edge.location import EdgeLocation
+from kailash.utils.resource_manager import AsyncResourcePool
+
+logger = logging.getLogger(__name__)
+
+
+class EdgeInfrastructure:
+    """Singleton class managing shared edge infrastructure for workflows.
+
+    This class provides centralized management of edge computing resources
+    including discovery service, compliance routing, and connection pooling.
+    It follows the singleton pattern to ensure resource sharing across
+    multiple workflows and edge nodes.
+    """
+
+    _instance: Optional["EdgeInfrastructure"] = None
+    _lock = threading.Lock()
+
+    def __new__(cls, config: Optional[Dict[str, Any]] = None):
+        """Create or return the singleton instance.
+
+        Args:
+            config: Edge infrastructure configuration
+
+        Returns:
+            The singleton EdgeInfrastructure instance
+        """
+        if cls._instance is None:
+            with cls._lock:
+                # Double-check locking pattern
+                if cls._instance is None:
+                    cls._instance = super().__new__(cls)
+                    cls._instance._initialized = False
+        return cls._instance
+
+    def __init__(self, config: Optional[Dict[str, Any]] = None):
+        """Initialize the edge infrastructure.
+
+        Args:
+            config: Edge infrastructure configuration
+        """
+        # Only initialize once
+        if self._initialized:
+            return
+
+        with self._lock:
+            if self._initialized:
+                return
+
+            logger.info("Initializing EdgeInfrastructure singleton")
+
+            # Merge with defaults
+            self._config = self._merge_with_defaults(config or {})
+
+            # Lazy-initialized components
+            self._discovery: Optional[EdgeDiscovery] = None
+            self._compliance_router: Optional[ComplianceRouter] = None
+            self._connection_pools: Dict[str, AsyncResourcePool] = {}
+
+            # Metrics tracking
+            self._metrics = {
+                "edge_nodes_registered": 0,
+                "active_connections": 0,
+                "total_requests": 0,
+                "latency_by_location": defaultdict(
+                    lambda: {"count": 0, "total": 0, "avg": 0}
+                ),
+            }
+
+            # Node registry
+            self._edge_nodes: Dict[str, Dict[str, Any]] = {}
+
+            # Infrastructure state
+            self._start_time = time.time()
+            self._initialized = True
+
+    def _merge_with_defaults(self, config: Dict[str, Any]) -> Dict[str, Any]:
+        """Merge user config with default values.
+
+        Args:
+            config: User-provided configuration
+
+        Returns:
+            Merged configuration
+        """
+        defaults = {
+            "discovery": {
+                "locations": [],
+                "refresh_interval": 300,
+                "selection_strategy": "balanced",
+            },
+            "compliance": {
+                "strict_mode": True,
+                "default_classification": "public",
+                "audit_logging": True,
+            },
+            "performance": {
+                "connection_pool_size": 10,
+                "health_check_interval": 60,
+                "request_timeout": 30,
+            },
+        }
+
+        # Deep merge
+        merged = defaults.copy()
+        for key, value in config.items():
+            if (
+                key in merged
+                and isinstance(merged[key], dict)
+                and isinstance(value, dict)
+            ):
+                merged[key].update(value)
+            else:
+                merged[key] = value
+
+        return merged
+
+    def get_discovery(self) -> EdgeDiscovery:
+        """Get or create the EdgeDiscovery instance.
+
+        Returns:
+            The shared EdgeDiscovery instance
+        """
+        if self._discovery is None:
+            with self._lock:
+                if self._discovery is None:
+                    logger.debug("Creating EdgeDiscovery instance")
+
+                    # Load predefined locations from config
+                    locations = []
+                    for location_id in self._config["discovery"]["locations"]:
+                        # Import here to avoid circular dependency
+                        from kailash.edge.location import get_predefined_location
+
+                        location = get_predefined_location(location_id)
+                        if location:
+                            locations.append(location)
+
+                    self._discovery = EdgeDiscovery(locations=locations)
+
+        return self._discovery
+
+    def get_compliance_router(self) -> ComplianceRouter:
+        """Get or create the ComplianceRouter instance.
+
+        Returns:
+            The shared ComplianceRouter instance
+        """
+        if self._compliance_router is None:
+            with self._lock:
+                if self._compliance_router is None:
+                    logger.debug("Creating ComplianceRouter instance")
+                    self._compliance_router = ComplianceRouter()
+
+        return self._compliance_router
+
+    def get_connection_pool(self, location_id: str) -> AsyncResourcePool:
+        """Get or create a connection pool for an edge location.
+
+        Args:
+            location_id: Edge location identifier
+
+        Returns:
+            Connection pool for the location
+        """
+        if location_id not in self._connection_pools:
+            with self._lock:
+                if location_id not in self._connection_pools:
+                    logger.debug(f"Creating connection pool for {location_id}")
+
+                    async def create_connection():
+                        # Placeholder for actual edge connection creation
+                        # In real implementation, this would create connections
+                        # to the edge location's services
+                        return {"location": location_id, "connected": True}
+
+                    async def cleanup_connection(conn):
+                        # Placeholder for connection cleanup
+                        logger.debug(f"Cleaning up connection to {location_id}")
+
+                    pool = AsyncResourcePool(
+                        factory=create_connection,
+                        max_size=self._config["performance"]["connection_pool_size"],
+                        timeout=self._config["performance"]["request_timeout"],
+                        cleanup=cleanup_connection,
+                    )
+
+                    self._connection_pools[location_id] = pool
+
+        return self._connection_pools[location_id]
+
+    def is_edge_node(self, node_type: str) -> bool:
+        """Check if a node type is an edge node.
+
+        Args:
+            node_type: The node type to check
+
+        Returns:
+            True if the node is an edge node
+        """
+        # Check exact matches and subclasses
+        edge_prefixes = ["Edge", "edge"]
+        edge_suffixes = [
+            "EdgeNode",
+            "EdgeDataNode",
+            "EdgeStateMachine",
+            "EdgeCacheNode",
+        ]
+        edge_keywords = ["Edge", "edge"]
+
+        # Exact match
+        if node_type in edge_suffixes:
+            return True
+
+        # Check if it starts with Edge/edge
+        for prefix in edge_prefixes:
+            if node_type.startswith(prefix):
+                return True
+
+        # Check if it ends with EdgeNode (for custom edge nodes)
+        if node_type.endswith("EdgeNode"):
+            return True
+
+        # Check if it contains Edge keywords (for variations like MyEdgeDataNode)
+        for keyword in edge_keywords:
+            if keyword in node_type:
+                return True
+
+        return False
+
+    def register_edge_node(self, node_id: str, node_info: Dict[str, Any]):
+        """Register an edge node with the infrastructure.
+
+        Args:
+            node_id: Unique node identifier
+            node_info: Node information including type and config
+        """
+        with self._lock:
+            self._edge_nodes[node_id] = {**node_info, "registered_at": time.time()}
+            self._metrics["edge_nodes_registered"] = len(self._edge_nodes)
+            logger.debug(f"Registered edge node: {node_id}")
+
+    def add_location(self, location: EdgeLocation):
+        """Add an edge location to the discovery service.
+
+        Args:
+            location: EdgeLocation to add
+        """
+        discovery = self.get_discovery()
+        discovery.add_location(location)
+        logger.info(f"Added edge location: {location.location_id}")
+
+    def get_all_locations(self) -> List[EdgeLocation]:
+        """Get all registered edge locations.
+
+        Returns:
+            List of EdgeLocation instances
+        """
+        discovery = self.get_discovery()
+        return discovery.get_all_edges()
+
+    async def select_edge(self, criteria: Dict[str, Any]) -> Optional[EdgeLocation]:
+        """Select an edge location based on criteria.
+
+        Args:
+            criteria: Selection criteria
+
+        Returns:
+            Selected EdgeLocation or None
+        """
+        discovery = self.get_discovery()
+        return await discovery.select_edge(criteria)
+
+    def record_request(self, location_id: str, latency_ms: float):
+        """Record a request for metrics tracking.
+
+        Args:
+            location_id: Edge location that handled the request
+            latency_ms: Request latency in milliseconds
+        """
+        with self._lock:
+            self._metrics["total_requests"] += 1
+
+            location_metrics = self._metrics["latency_by_location"][location_id]
+            location_metrics["count"] += 1
+            location_metrics["total"] += latency_ms
+            location_metrics["avg"] = (
+                location_metrics["total"] / location_metrics["count"]
+            )
+
+    def get_metrics(self) -> Dict[str, Any]:
+        """Get infrastructure metrics.
+
+        Returns:
+            Dictionary of metrics
+        """
+        with self._lock:
+            return {
+                **self._metrics,
+                "uptime_seconds": time.time() - self._start_time,
+                "connection_pools": len(self._connection_pools),
+            }
+
+    def get_health_status(self) -> Dict[str, Any]:
+        """Get health status of the infrastructure.
+
+        Returns:
+            Health status dictionary
+        """
+        with self._lock:
+            discovery_initialized = self._discovery is not None
+            compliance_initialized = self._compliance_router is not None
+
+            health = {
+                "status": "healthy",
+                "uptime_seconds": time.time() - self._start_time,
+                "discovery": {
+                    "initialized": discovery_initialized,
+                    "location_count": (
+                        len(self.get_all_locations()) if discovery_initialized else 0
+                    ),
+                },
+                "compliance": {
+                    "initialized": compliance_initialized,
+                    "strict_mode": self._config["compliance"]["strict_mode"],
+                },
+                "metrics": {
+                    "edge_nodes": self._metrics["edge_nodes_registered"],
+                    "total_requests": self._metrics["total_requests"],
+                },
+            }
+
+            return health
+
+    async def cleanup(self):
+        """Clean up all resources asynchronously."""
+        logger.info("Cleaning up EdgeInfrastructure resources")
+
+        # Clean up connection pools
+        cleanup_tasks = []
+        for location_id, pool in self._connection_pools.items():
+            cleanup_tasks.append(pool.cleanup_all())
+
+        if cleanup_tasks:
+            await asyncio.gather(*cleanup_tasks, return_exceptions=True)
+
+        with self._lock:
+            self._connection_pools.clear()
+            self._discovery = None
+            self._compliance_router = None
+            self._edge_nodes.clear()
+
+        logger.info("EdgeInfrastructure cleanup complete")
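Since edge_infrastructure.py is by far the largest addition to the workflow package, a minimal usage sketch may help orient readers. It relies only on the APIs visible in the diff above; the node and location identifiers ("edge-1", "us-east-1") are hypothetical, and this is not official package documentation:

from kailash.workflow.edge_infrastructure import EdgeInfrastructure

infra = EdgeInfrastructure({"performance": {"connection_pool_size": 5}})
assert infra is EdgeInfrastructure()  # __new__ returns the same singleton

infra.register_edge_node("edge-1", {"type": "EdgeDataNode"})
infra.record_request("us-east-1", latency_ms=12.5)

print(infra.is_edge_node("EdgeDataNode"))    # True (exact suffix match)
print(infra.is_edge_node("CSVReaderNode"))   # False (no Edge keyword)
print(infra.get_metrics()["total_requests"]) # 1
print(infra.get_health_status()["status"])   # "healthy"

Note the design choice: get_discovery, get_compliance_router, and get_connection_pool all use the same double-checked locking pattern as __new__, so every workflow in a process shares one EdgeDiscovery, one ComplianceRouter, and one connection pool per location.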
kailash/workflow/mermaid_visualizer.py CHANGED
@@ -215,7 +215,9 @@ class MermaidVisualizer:
             Sanitized node ID safe for Mermaid
         """
         # Replace special characters with underscores
-        sanitized = re.sub(r"[^a-zA-Z0-9_]", "_", node_id)
+        import re
+
+        sanitized = re.sub(r"[^a-zA-Z0-9_]", "_", node_id)
         # Ensure it starts with a letter
         if sanitized and sanitized[0].isdigit():
            sanitized = f"node_{sanitized}"
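The hunk adds a function-local import re immediately before the re.sub call, so the sanitizer no longer depends on re being in scope elsewhere. Its behavior can be reproduced standalone; sanitize below is a hypothetical free function, whereas in the package this logic lives inside MermaidVisualizer:

import re

def sanitize(node_id: str) -> str:
    # Replace special characters with underscores, as in the hunk above
    sanitized = re.sub(r"[^a-zA-Z0-9_]", "_", node_id)
    # Ensure the ID starts with a letter, per the method's own comment
    if sanitized and sanitized[0].isdigit():
        sanitized = f"node_{sanitized}"
    return sanitized

print(sanitize("2-stage.pipeline"))  # node_2_stage_pipeline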
kailash/workflow/migration.py CHANGED
@@ -205,7 +205,7 @@ class DAGToCycleConverter:
                 pattern_type="retry_cycle",
                 confidence=0.7,
                 description=f"Node '{node_id}' appears to implement retry logic manually",
-                suggested_convergence="success
+                suggested_convergence="success",
                 estimated_benefit="improved_reliability",
                 implementation_complexity="low",
             )
@@ -290,7 +290,7 @@ class DAGToCycleConverter:
                 pattern_type="batch_processing_cycle",
                 confidence=0.6,
                 description=f"'{node_id}' appears to process data in batches",
-                suggested_convergence="all_batches_processed
+                suggested_convergence="all_batches_processed",
                 estimated_benefit="memory_efficiency",
                 implementation_complexity="medium",
             )
@@ -398,7 +398,7 @@ cycle_id = workflow.add_retry_cycle(
     target_node="{main_node}",
     max_retries=3,
     backoff_strategy="exponential",
-    success_condition="success
+    success_condition="success"
 )
 
 print(f"Created retry cycle: {{cycle_id}}")
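All three migration.py hunks close string literals that were previously left unterminated, so the retry-cycle and batch-processing suggestions now produce valid Python. A quick standalone check of the fixed template from the third hunk, with my_node substituted for the {main_node} placeholder as a hypothetical target:

import ast

fixed = '''
cycle_id = workflow.add_retry_cycle(
    target_node="my_node",
    max_retries=3,
    backoff_strategy="exponential",
    success_condition="success"
)
'''
ast.parse(fixed)  # parses cleanly with the closing quote in place

try:
    ast.parse(fixed.replace('"success"', '"success'))  # the pre-0.8.6 form
except SyntaxError as exc:
    print("pre-fix template was invalid:", exc.msg)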
kailash/workflow/templates.py CHANGED
@@ -773,8 +773,8 @@ validation_result = {
 
     # Add report generation node if not exists
     if output not in workflow.nodes:
-        from kailash.examples.utils.data_paths import get_output_data_path
         from kailash.nodes.data import JSONWriterNode
+        from kailash.utils.data_paths import get_output_data_path
 
         workflow.add_node(
             output,
@@ -814,8 +814,8 @@ validation_result = {
     """
     # Add document reader if not exists
     if document_reader not in workflow.nodes:
-        from kailash.examples.utils.data_paths import get_input_data_path
         from kailash.nodes.data import DirectoryReaderNode
+        from kailash.utils.data_paths import get_input_data_path
 
         workflow.add_node(
             document_reader,
@@ -907,8 +907,8 @@ result = {
 
     # Add output writer if not exists
     if output not in workflow.nodes:
-        from kailash.examples.utils.data_paths import get_output_data_path
         from kailash.nodes.data import JSONWriterNode
+        from kailash.utils.data_paths import get_output_data_path
 
         workflow.add_node(
             output,
@@ -1060,8 +1060,8 @@ validated_result = {
 
     # Add output node if not exists
     if output not in workflow.nodes:
-        from kailash.examples.utils.data_paths import get_output_data_path
         from kailash.nodes.data import JSONWriterNode
+        from kailash.utils.data_paths import get_output_data_path
 
         workflow.add_node(
             output,
@@ -1106,8 +1106,8 @@ validated_result = {
     """
     # Add data reader if not exists
    if data_reader not in workflow.nodes:
-        from kailash.examples.utils.data_paths import get_input_data_path
         from kailash.nodes.data import CSVReaderNode
+        from kailash.utils.data_paths import get_input_data_path
 
         workflow.add_node(
             data_reader,
@@ -1304,8 +1304,8 @@ result = {
 
     # Add data writer if not exists
     if writer not in workflow.nodes:
-        from kailash.examples.utils.data_paths import get_output_data_path
         from kailash.nodes.data import JSONWriterNode
+        from kailash.utils.data_paths import get_output_data_path
 
         workflow.add_node(
             writer,
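Every templates.py hunk makes the same swap: the data-path helpers now come from kailash.utils.data_paths, the module added in this release (see kailash/utils/data_paths.py, +74 lines, in the file list), instead of the old kailash.examples.utils.data_paths location; the new import lands after the kailash.nodes.data import simply because kailash.utils sorts later alphabetically. A sketch of the corrected import, with hypothetical file names; the assumption that the helpers take a file name and return a resolved path is mine, since the diff truncates the actual call sites:

from kailash.utils.data_paths import get_input_data_path, get_output_data_path

# Hypothetical file names; signatures assumed, not confirmed by the diff.
source_csv = get_input_data_path("customers.csv")
report_json = get_output_data_path("validation_report.json")
print(source_csv, report_json)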