kailash 0.1.3__py3-none-any.whl → 0.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. kailash/__init__.py +1 -1
  2. kailash/api/__init__.py +11 -1
  3. kailash/api/gateway.py +394 -0
  4. kailash/api/mcp_integration.py +478 -0
  5. kailash/api/workflow_api.py +29 -13
  6. kailash/nodes/ai/__init__.py +40 -4
  7. kailash/nodes/ai/a2a.py +1143 -0
  8. kailash/nodes/ai/agents.py +120 -6
  9. kailash/nodes/ai/ai_providers.py +224 -30
  10. kailash/nodes/ai/embedding_generator.py +34 -38
  11. kailash/nodes/ai/intelligent_agent_orchestrator.py +2114 -0
  12. kailash/nodes/ai/llm_agent.py +351 -356
  13. kailash/nodes/ai/self_organizing.py +1624 -0
  14. kailash/nodes/api/http.py +106 -25
  15. kailash/nodes/api/rest.py +116 -21
  16. kailash/nodes/base.py +60 -64
  17. kailash/nodes/code/python.py +61 -42
  18. kailash/nodes/data/__init__.py +10 -10
  19. kailash/nodes/data/readers.py +117 -66
  20. kailash/nodes/data/retrieval.py +1 -1
  21. kailash/nodes/data/sharepoint_graph.py +23 -25
  22. kailash/nodes/data/sql.py +24 -26
  23. kailash/nodes/data/writers.py +41 -44
  24. kailash/nodes/logic/__init__.py +9 -3
  25. kailash/nodes/logic/async_operations.py +60 -21
  26. kailash/nodes/logic/operations.py +43 -22
  27. kailash/nodes/logic/workflow.py +26 -18
  28. kailash/nodes/mcp/client.py +29 -33
  29. kailash/nodes/transform/__init__.py +8 -1
  30. kailash/nodes/transform/formatters.py +1 -1
  31. kailash/nodes/transform/processors.py +119 -4
  32. kailash/tracking/metrics_collector.py +6 -7
  33. kailash/utils/export.py +2 -2
  34. kailash/utils/templates.py +16 -16
  35. {kailash-0.1.3.dist-info → kailash-0.1.5.dist-info}/METADATA +293 -29
  36. {kailash-0.1.3.dist-info → kailash-0.1.5.dist-info}/RECORD +40 -35
  37. {kailash-0.1.3.dist-info → kailash-0.1.5.dist-info}/WHEEL +0 -0
  38. {kailash-0.1.3.dist-info → kailash-0.1.5.dist-info}/entry_points.txt +0 -0
  39. {kailash-0.1.3.dist-info → kailash-0.1.5.dist-info}/licenses/LICENSE +0 -0
  40. {kailash-0.1.3.dist-info → kailash-0.1.5.dist-info}/top_level.txt +0 -0
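The most visible change in 0.1.5 is the rename of node classes to carry a Node suffix (CSVReader → CSVReaderNode, Switch → SwitchNode, and so on, as the hunks below show). A minimal before/after sketch of the affected imports — module paths are inferred from the file paths in this diff and may differ from the package's public re-exports:

    # kailash 0.1.3 (old names, removed in this release)
    # from kailash.nodes.data.readers import CSVReader
    # from kailash.nodes.logic.operations import Merge, Switch

    # kailash 0.1.5 (renamed classes, per the hunks below)
    from kailash.nodes.data.readers import CSVReaderNode
    from kailash.nodes.logic.operations import MergeNode, SwitchNode

    reader = CSVReaderNode()
    result = reader.run(file_path="customers.csv", headers=True)  # {'data': [...]}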
kailash/nodes/data/readers.py CHANGED
@@ -12,9 +12,9 @@ Design Philosophy:
  5. Type-safe parameter validation

  Node Categories:
- - CSVReader: Tabular data from CSV files
- - JSONReader: Structured data from JSON files
- - TextReader: Raw text from any text file
+ - CSVReaderNode: Tabular data from CSV files
+ - JSONReaderNode: Structured data from JSON files
+ - TextReaderNode: Raw text from any text file

  Upstream Components:
  - FileSystem: Provides files to read
@@ -36,61 +36,113 @@ from kailash.nodes.base import Node, NodeParameter, register_node


  @register_node()
- class CSVReader(Node):
- """Reads data from a CSV file.
-
- This node provides robust CSV file reading capabilities with support for
- various delimiters, header detection, and encoding options. It's designed
- to handle common CSV formats and edge cases.
-
- Design Features:
- 1. Automatic header detection
- 2. Configurable delimiters
- 3. Memory-efficient line-by-line reading
- 4. Consistent dictionary output format
- 5. Unicode support through encoding parameter
-
- Data Flow:
- - Input: File path and configuration parameters
- - Processing: Reads CSV line by line, converting to dictionaries
- - Output: List of dictionaries (with headers) or list of lists
-
- Common Usage Patterns:
- 1. Reading data exports from databases
- 2. Processing spreadsheet data
- 3. Loading configuration from CSV
- 4. Ingesting sensor data logs
-
- Upstream Sources:
- - File system paths from user input
- - Output paths from previous nodes
- - Configuration management systems
+ class CSVReaderNode(Node):
+ """
+ Reads data from CSV files with automatic header detection and type inference.
+
+ This node provides comprehensive CSV file reading capabilities, handling various
+ formats, encodings, and edge cases. It automatically detects headers, infers data
+ types, and provides consistent structured output for downstream processing in
+ Kailash workflows.
+
+ Design Philosophy:
+ The CSVReaderNode embodies the principle of "data accessibility without
+ complexity." It abstracts the intricacies of CSV parsing while providing
+ flexibility for various formats. The design prioritizes memory efficiency,
+ automatic format detection, and consistent output structure, making it easy
+ to integrate diverse CSV data sources into workflows.
+
+ Upstream Dependencies:
+ - File system providing CSV files
+ - Workflow orchestrators specifying file paths
+ - Configuration systems providing parsing options
+ - Previous nodes generating CSV file paths
+ - User inputs defining data sources

  Downstream Consumers:
- - DataTransformer: Processes tabular data
- - Aggregator: Summarizes data
- - CSVWriter: Reformats and saves
- - Visualizer: Creates charts from data
+ - DataTransformNode: Processes tabular data
+ - FilterNode: Applies row/column filtering
+ - AggregatorNode: Summarizes data
+ - PythonCodeNode: Custom data processing
+ - WriterNodes: Exports to other formats
+ - Visualization nodes: Creates charts
+ - ML nodes: Uses as training data
+
+ Configuration:
+ The node supports extensive CSV parsing options:
+ - Delimiter detection (comma, tab, pipe, etc.)
+ - Header row identification
+ - Encoding specification (UTF-8, Latin-1, etc.)
+ - Quote character handling
+ - Skip rows/comments functionality
+ - Column type inference
+ - Missing value handling
+
+ Implementation Details:
+ - Uses Python's csv module for robust parsing
+ - Implements streaming for large files
+ - Automatic delimiter detection when not specified
+ - Header detection based on first row analysis
+ - Type inference for numeric/date columns
+ - Memory-efficient processing with generators
+ - Unicode normalization for consistent encoding

  Error Handling:
- - FileNotFoundError: Invalid file path
- - PermissionError: Insufficient read permissions
- - UnicodeDecodeError: Encoding mismatch
- - csv.Error: Malformed CSV data
-
- Example::
-
- # Read customer data with headers
- reader = CSVReader(
- file_path='customers.csv',
- headers=True,
- delimiter=','
- )
- result = reader.execute()
- # result['data'] = [
- # {'id': '1', 'name': 'John', 'age': '30'},
- # {'id': '2', 'name': 'Jane', 'age': '25'}
- # ]
+ - FileNotFoundError: Clear message with path
+ - PermissionError: Access rights guidance
+ - UnicodeDecodeError: Encoding detection hints
+ - csv.Error: Malformed data diagnostics
+ - EmptyFileError: Handles zero-byte files
+ - Partial read recovery for corrupted files
+
+ Side Effects:
+ - Reads from file system
+ - May consume significant memory for large files
+ - Creates file handles (properly closed)
+ - Updates internal read statistics
+
+ Examples:
+ >>> # Basic CSV reading with headers
+ >>> reader = CSVReaderNode()
+ >>> result = reader.run(
+ ... file_path="customers.csv",
+ ... headers=True
+ ... )
+ >>> assert isinstance(result["data"], list)
+ >>> assert all(isinstance(row, dict) for row in result["data"])
+ >>> # Example output:
+ >>> # result["data"] = [
+ >>> # {"id": "1", "name": "John Doe", "age": "30"},
+ >>> # {"id": "2", "name": "Jane Smith", "age": "25"}
+ >>> # ]
+ >>>
+ >>> # Reading with custom delimiter
+ >>> result = reader.run(
+ ... file_path="data.tsv",
+ ... delimiter="\\t",
+ ... headers=True
+ ... )
+ >>>
+ >>> # Reading without headers (returns list of lists)
+ >>> result = reader.run(
+ ... file_path="data.csv",
+ ... headers=False
+ ... )
+ >>> assert all(isinstance(row, list) for row in result["data"])
+ >>>
+ >>> # Reading with specific encoding
+ >>> result = reader.run(
+ ... file_path="european_data.csv",
+ ... encoding="iso-8859-1",
+ ... headers=True
+ ... )
+ >>>
+ >>> # Handling quoted fields
+ >>> result = reader.run(
+ ... file_path="complex.csv",
+ ... headers=True,
+ ... quotechar='"'
+ ... )
  """

  def get_parameters(self) -> Dict[str, NodeParameter]:
@@ -235,7 +287,7 @@ class CSVReader(Node):


  @register_node()
- class JSONReader(Node):
+ class JSONReaderNode(Node):
  """Reads data from a JSON file.

  This node handles JSON file reading with support for complex nested
@@ -280,7 +332,7 @@ class JSONReader(Node):

  Example:
  # Read API response data
- reader = JSONReader(file_path='api_response.json')
+ reader = JSONReaderNode(file_path='api_response.json')
  result = reader.execute()
  # result['data'] = {
  # 'status': 'success',
@@ -359,7 +411,7 @@ class JSONReader(Node):


  @register_node()
- class TextReader(Node):
+ class TextReaderNode(Node):
  """Reads text from a file.

  This node provides simple text file reading with encoding support.
@@ -403,15 +455,14 @@ class TextReader(Node):
  - UnicodeDecodeError: Wrong encoding
  - MemoryError: File too large

- Example::
-
- # Read a log file
- reader = TextReader(
- file_path='application.log',
- encoding='utf-8'
- )
- result = reader.execute()
- # result['text'] = "2024-01-01 INFO: Application started\\n..."
+ Example:
+ >>> # Read a log file
+ >>> reader = TextReaderNode(
+ ... file_path='application.log',
+ ... encoding='utf-8'
+ ... )
+ >>> result = reader.execute()
+ >>> # result['text'] = "2024-01-01 INFO: Application started\\n..."
  """

  def get_parameters(self) -> Dict[str, NodeParameter]:
kailash/nodes/data/retrieval.py CHANGED
@@ -102,7 +102,7 @@ class RelevanceScorerNode(Node):
  ) -> List[Dict]:
  """Score chunks using cosine similarity."""
  # Extract actual embedding vectors from the embedding objects
- # EmbeddingGenerator returns embeddings in format: {"embedding": [...], "text": "...", "dimensions": X}
+ # EmbeddingGeneratorNode returns embeddings in format: {"embedding": [...], "text": "...", "dimensions": X}

  # Handle query embedding - should be the first (and only) embedding in the list
  query_embedding_obj = query_embeddings[0] if query_embeddings else {}
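The one-line rename above sits inside RelevanceScorerNode's cosine-similarity scoring, which consumes EmbeddingGeneratorNode output objects of the form {"embedding": [...], "text": "...", "dimensions": X}. A rough sketch of that pattern, with hypothetical helper names rather than the actual node implementation:

    import math

    def cosine_similarity(a, b):
        # Plain cosine similarity between two embedding vectors
        dot = sum(x * y for x, y in zip(a, b))
        norm = math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(y * y for y in b))
        return dot / norm if norm else 0.0

    def score_chunks(chunks, query_embeddings, chunk_embeddings):
        # Unwrap the embedding objects; the query embedding is the first (and only) entry
        query_vec = query_embeddings[0].get("embedding", []) if query_embeddings else []
        scored = [
            {**chunk, "relevance_score": cosine_similarity(query_vec, emb.get("embedding", []))}
            for chunk, emb in zip(chunks, chunk_embeddings)
        ]
        return sorted(scored, key=lambda c: c["relevance_score"], reverse=True)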
kailash/nodes/data/sharepoint_graph.py CHANGED
@@ -56,18 +56,17 @@ class SharePointGraphReader(Node):
  3. Search for files by name
  4. Navigate folder structures

- Example::
-
- reader = SharePointGraphReader()
- result = reader.execute(
- tenant_id="your-tenant-id",
- client_id="your-client-id",
- client_secret="your-secret",
- site_url="https://company.sharepoint.com/sites/project",
- operation="list_files",
- library_name="Documents",
- folder_path="Reports/2024"
- )
+ Example:
+ >>> reader = SharePointGraphReader()
+ >>> result = reader.execute(
+ ... tenant_id="your-tenant-id",
+ ... client_id="your-client-id",
+ ... client_secret="your-secret",
+ ... site_url="https://company.sharepoint.com/sites/project",
+ ... operation="list_files",
+ ... library_name="Documents",
+ ... folder_path="Reports/2024"
+ ... )
  """

  def get_metadata(self) -> NodeMetadata:
@@ -471,19 +470,18 @@ class SharePointGraphWriter(Node):
  This node handles file uploads to SharePoint document libraries,
  supporting folder structures and metadata.

- Example::
-
- writer = SharePointGraphWriter()
- result = writer.execute(
- tenant_id="your-tenant-id",
- client_id="your-client-id",
- client_secret="your-secret",
- site_url="https://company.sharepoint.com/sites/project",
- local_path="report.pdf",
- library_name="Documents",
- folder_path="Reports/2024",
- sharepoint_name="Q4_Report_2024.pdf"
- )
+ Example:
+ >>> writer = SharePointGraphWriter()
+ >>> result = writer.execute(
+ ... tenant_id="your-tenant-id",
+ ... client_id="your-client-id",
+ ... client_secret="your-secret",
+ ... site_url="https://company.sharepoint.com/sites/project",
+ ... local_path="report.pdf",
+ ... library_name="Documents",
+ ... folder_path="Reports/2024",
+ ... sharepoint_name="Q4_Report_2024.pdf"
+ ... )
  """

  def get_metadata(self) -> NodeMetadata:
kailash/nodes/data/sql.py CHANGED
@@ -63,20 +63,19 @@ class SQLDatabaseNode(Node):
  - TimeoutError: Query execution timeout
  - PermissionError: Access denied

- Example::
-
- # Query customer data
- sql_node = SQLDatabaseNode(
- connection_string='postgresql://user:pass@host/db',
- query='SELECT * FROM customers WHERE active = ?',
- parameters=[True],
- result_format='dict'
- )
- result = sql_node.execute()
- # result['data'] = [
- # {'id': 1, 'name': 'John', 'active': True},
- # {'id': 2, 'name': 'Jane', 'active': True}
- # ]
+ Example:
+ >>> # Query customer data
+ >>> sql_node = SQLDatabaseNode(
+ ... connection_string='postgresql://user:pass@host/db',
+ ... query='SELECT * FROM customers WHERE active = ?',
+ ... parameters=[True],
+ ... result_format='dict'
+ ... )
+ >>> result = sql_node.execute()
+ >>> # result['data'] = [
+ >>> # {'id': 1, 'name': 'John', 'active': True},
+ >>> # {'id': 2, 'name': 'Jane', 'active': True}
+ >>> # ]
  """

  def get_parameters(self) -> Dict[str, NodeParameter]:
@@ -259,18 +258,17 @@ class SQLQueryBuilderNode(Node):
  3. Multi-table joins
  4. Aggregation queries

- Example::
-
- builder = SQLQueryBuilderNode(
- table='customers',
- select=['name', 'email'],
- where={'active': True, 'country': 'USA'},
- order_by=['name'],
- limit=100
- )
- result = builder.execute()
- # result['query'] = 'SELECT name, email FROM customers WHERE active = ? AND country = ? ORDER BY name LIMIT 100'
- # result['parameters'] = [True, 'USA']
+ Example:
+ >>> builder = SQLQueryBuilderNode(
+ ... table='customers',
+ ... select=['name', 'email'],
+ ... where={'active': True, 'country': 'USA'},
+ ... order_by=['name'],
+ ... limit=100
+ ... )
+ >>> result = builder.execute()
+ >>> # result['query'] = 'SELECT name, email FROM customers WHERE active = ? AND country = ? ORDER BY name LIMIT 100'
+ >>> # result['parameters'] = [True, 'USA']
  """

  def get_parameters(self) -> Dict[str, NodeParameter]:
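Taken together, the two examples above suggest a natural pairing: SQLQueryBuilderNode emits result['query'] (with ? placeholders) and result['parameters'], which match SQLDatabaseNode's query and parameters inputs. A hedged sketch of that hand-off — the import path is inferred from the file path in this diff, and the connection string is purely illustrative:

    from kailash.nodes.data.sql import SQLDatabaseNode, SQLQueryBuilderNode

    built = SQLQueryBuilderNode(
        table='customers',
        select=['name', 'email'],
        where={'active': True},
        limit=100,
    ).execute()  # {'query': 'SELECT name, email FROM customers WHERE active = ? LIMIT 100', 'parameters': [True]}

    rows = SQLDatabaseNode(
        connection_string='postgresql://user:pass@host/db',  # illustrative only
        query=built['query'],
        parameters=built['parameters'],
        result_format='dict',
    ).execute()['data']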
kailash/nodes/data/writers.py CHANGED
@@ -12,9 +12,9 @@ Design Philosophy:
  5. Progress tracking and feedback

  Node Categories:
- - CSVWriter: Tabular data to CSV files
- - JSONWriter: Structured data to JSON files
- - TextWriter: Raw text to any text file
+ - CSVWriterNode: Tabular data to CSV files
+ - JSONWriterNode: Structured data to JSON files
+ - TextWriterNode: Raw text to any text file

  Upstream Components:
  - Reader nodes: Provide data to transform
@@ -37,7 +37,7 @@ from kailash.nodes.base import Node, NodeParameter, register_node


  @register_node()
- class CSVWriter(Node):
+ class CSVWriterNode(Node):
  """Writes data to a CSV file.

  This node handles CSV file writing with support for both dictionary
@@ -81,19 +81,18 @@ class CSVWriter(Node):
  - TypeError: Invalid data structure
  - UnicodeEncodeError: Encoding issues

- Example::
-
- # Write customer data
- writer = CSVWriter(
- file_path='output.csv',
- data=[
- {'id': 1, 'name': 'John', 'age': 30},
- {'id': 2, 'name': 'Jane', 'age': 25}
- ],
- delimiter=','
- )
- result = writer.execute()
- # result = {'rows_written': 2, 'file_path': 'output.csv'}
+ Example:
+ >>> # Write customer data
+ >>> writer = CSVWriterNode(
+ ... file_path='output.csv',
+ ... data=[
+ ... {'id': 1, 'name': 'John', 'age': 30},
+ ... {'id': 2, 'name': 'Jane', 'age': 25}
+ ... ],
+ ... delimiter=','
+ ... )
+ >>> result = writer.execute()
+ >>> # result = {'rows_written': 2, 'file_path': 'output.csv'}
  """

  def get_parameters(self) -> Dict[str, NodeParameter]:
@@ -218,7 +217,7 @@ class CSVWriter(Node):


  @register_node()
- class JSONWriter(Node):
+ class JSONWriterNode(Node):
  """Writes data to a JSON file.

  This node handles JSON serialization with support for complex
@@ -262,20 +261,19 @@ class JSONWriter(Node):
  - OSError: Path or disk issues
  - JSONEncodeError: Encoding problems

- Example::
-
- # Write API response
- writer = JSONWriter(
- file_path='response.json',
- data={
- 'status': 'success',
- 'results': [1, 2, 3],
- 'metadata': {'version': '1.0'}
- },
- indent=2
- )
- result = writer.execute()
- # result = {'file_path': 'response.json'}
+ Example:
+ >>> # Write API response
+ >>> writer = JSONWriterNode(
+ ... file_path='response.json',
+ ... data={
+ ... 'status': 'success',
+ ... 'results': [1, 2, 3],
+ ... 'metadata': {'version': '1.0'}
+ ... },
+ ... indent=2
+ ... )
+ >>> result = writer.execute()
+ >>> # result = {'file_path': 'response.json'}
  """

  def get_parameters(self) -> Dict[str, NodeParameter]:
@@ -370,7 +368,7 @@ class JSONWriter(Node):


  @register_node()
- class TextWriter(Node):
+ class TextWriterNode(Node):
  """Writes text to a file.

  This node provides flexible text file writing with support for
@@ -414,17 +412,16 @@ class TextWriter(Node):
  - UnicodeEncodeError: Encoding mismatch
  - MemoryError: Text too large

- Example::
-
- # Append to log file
- writer = TextWriter(
- file_path='app.log',
- text='ERROR: Connection failed\\n',
- encoding='utf-8',
- append=True
- )
- result = writer.execute()
- # result = {'file_path': 'app.log', 'bytes_written': 25}
+ Example:
+ >>> # Append to log file
+ >>> writer = TextWriterNode(
+ ... file_path='app.log',
+ ... text='ERROR: Connection failed\\n',
+ ... encoding='utf-8',
+ ... append=True
+ ... )
+ >>> result = writer.execute()
+ >>> # result = {'file_path': 'app.log', 'bytes_written': 25}
  """

  def get_parameters(self) -> Dict[str, NodeParameter]:
kailash/nodes/logic/__init__.py CHANGED
@@ -1,7 +1,13 @@
  """Logic operation nodes for the Kailash SDK."""

- from kailash.nodes.logic.async_operations import AsyncMerge, AsyncSwitch
- from kailash.nodes.logic.operations import Merge, Switch
+ from kailash.nodes.logic.async_operations import AsyncMergeNode, AsyncSwitchNode
+ from kailash.nodes.logic.operations import MergeNode, SwitchNode
  from kailash.nodes.logic.workflow import WorkflowNode

- __all__ = ["Switch", "Merge", "AsyncSwitch", "AsyncMerge", "WorkflowNode"]
+ __all__ = [
+ "SwitchNode",
+ "MergeNode",
+ "AsyncSwitchNode",
+ "AsyncMergeNode",
+ "WorkflowNode",
+ ]
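With the new __all__ above, downstream code imports the renamed logic nodes from the package namespace; the old short names (Switch, Merge, AsyncSwitch, AsyncMerge) are no longer exported. A minimal sketch of the updated import, assuming the re-exports behave as this __init__ suggests:

    # kailash 0.1.3: from kailash.nodes.logic import Switch, Merge
    from kailash.nodes.logic import (
        AsyncMergeNode,
        AsyncSwitchNode,
        MergeNode,
        SwitchNode,
        WorkflowNode,
    )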