kailash 0.1.2__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. kailash/__init__.py +1 -1
  2. kailash/api/__init__.py +17 -0
  3. kailash/api/gateway.py +394 -0
  4. kailash/api/mcp_integration.py +478 -0
  5. kailash/api/workflow_api.py +399 -0
  6. kailash/nodes/ai/__init__.py +4 -4
  7. kailash/nodes/ai/agents.py +4 -4
  8. kailash/nodes/ai/ai_providers.py +18 -22
  9. kailash/nodes/ai/embedding_generator.py +34 -38
  10. kailash/nodes/ai/llm_agent.py +351 -356
  11. kailash/nodes/api/http.py +0 -4
  12. kailash/nodes/api/rest.py +1 -1
  13. kailash/nodes/base.py +60 -64
  14. kailash/nodes/code/python.py +61 -42
  15. kailash/nodes/data/__init__.py +10 -10
  16. kailash/nodes/data/readers.py +27 -29
  17. kailash/nodes/data/retrieval.py +1 -1
  18. kailash/nodes/data/sharepoint_graph.py +23 -25
  19. kailash/nodes/data/sql.py +27 -29
  20. kailash/nodes/data/vector_db.py +2 -2
  21. kailash/nodes/data/writers.py +41 -44
  22. kailash/nodes/logic/__init__.py +10 -3
  23. kailash/nodes/logic/async_operations.py +14 -14
  24. kailash/nodes/logic/operations.py +18 -22
  25. kailash/nodes/logic/workflow.py +439 -0
  26. kailash/nodes/mcp/client.py +29 -33
  27. kailash/nodes/mcp/resource.py +1 -1
  28. kailash/nodes/mcp/server.py +10 -4
  29. kailash/nodes/transform/formatters.py +1 -1
  30. kailash/nodes/transform/processors.py +5 -3
  31. kailash/runtime/docker.py +2 -0
  32. kailash/tracking/metrics_collector.py +6 -7
  33. kailash/tracking/models.py +0 -20
  34. kailash/tracking/storage/database.py +4 -4
  35. kailash/tracking/storage/filesystem.py +0 -1
  36. kailash/utils/export.py +2 -2
  37. kailash/utils/templates.py +16 -16
  38. kailash/visualization/performance.py +7 -7
  39. kailash/visualization/reports.py +1 -1
  40. kailash/workflow/graph.py +4 -4
  41. kailash/workflow/mock_registry.py +1 -1
  42. {kailash-0.1.2.dist-info → kailash-0.1.4.dist-info}/METADATA +198 -27
  43. kailash-0.1.4.dist-info/RECORD +85 -0
  44. kailash-0.1.2.dist-info/RECORD +0 -80
  45. {kailash-0.1.2.dist-info → kailash-0.1.4.dist-info}/WHEEL +0 -0
  46. {kailash-0.1.2.dist-info → kailash-0.1.4.dist-info}/entry_points.txt +0 -0
  47. {kailash-0.1.2.dist-info → kailash-0.1.4.dist-info}/licenses/LICENSE +0 -0
  48. {kailash-0.1.2.dist-info → kailash-0.1.4.dist-info}/top_level.txt +0 -0
kailash/nodes/api/http.py CHANGED
@@ -428,7 +428,6 @@ class HTTPRequestNode(Node):
         self.logger.info(f"Making {method} request to {url}")

         response = None
-        last_error = None

         for attempt in range(retry_count + 1):
             if attempt > 0:
@@ -453,7 +452,6 @@ class HTTPRequestNode(Node):
                     break

             except requests.RequestException as e:
-                last_error = e
                 self.logger.warning(f"Request failed: {str(e)}")

                 # Last attempt, no more retries
@@ -779,7 +777,6 @@ class AsyncHTTPRequestNode(AsyncNode):
         self.logger.info(f"Making async {method} request to {url}")

         response = None
-        last_error = None

         for attempt in range(retry_count + 1):
             if attempt > 0:
@@ -860,7 +857,6 @@ class AsyncHTTPRequestNode(AsyncNode):
                     return result

             except (aiohttp.ClientError, asyncio.TimeoutError) as e:
-                last_error = e
                 self.logger.warning(f"Async request failed: {str(e)}")

                 # Last attempt, no more retries
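Both hunks remove a last_error variable that was assigned on every failed attempt but never read afterwards: each attempt logs its own failure, and the final attempt's exception propagates on its own. A minimal standalone sketch of the same retry shape (hypothetical helper, not Kailash code) shows why the variable was dead:

    import requests

    def fetch_with_retry(url: str, retry_count: int = 3) -> requests.Response:
        """Retry loop shaped like HTTPRequestNode's, with no last_error bookkeeping."""
        for attempt in range(retry_count + 1):
            try:
                return requests.get(url, timeout=10)  # success: stop retrying
            except requests.RequestException as e:
                if attempt == retry_count:
                    raise  # last attempt: the exception itself carries the error
                print(f"Request failed: {e}; retrying ({attempt + 1}/{retry_count})")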
kailash/nodes/api/rest.py CHANGED
@@ -335,7 +335,7 @@ class RESTClientNode(Node):

         pagination_type = pagination_params.get("type", "page")
         items_path = pagination_params.get("items_path", "data")
-        max_pages = pagination_params.get("max_pages", 10)
+        # max_pages = pagination_params.get("max_pages", 10)  # TODO: Implement max pages limit

         # Extract items from initial response
         all_items = self._get_nested_value(initial_response, items_path, [])
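The max_pages cap is commented out rather than implemented, so this version's pagination runs until the API stops returning items. A hypothetical sketch of what honoring the deferred max_pages parameter would look like (illustrative names, not this node's internals):

    def paginate(fetch_page, items_path: str = "data", max_pages: int = 10) -> list:
        """Hypothetical bounded pagination; fetch_page(n) returns one page as a dict."""
        all_items = []
        for page in range(1, max_pages + 1):  # the cap the TODO would restore
            response = fetch_page(page)
            items = response.get(items_path, [])
            if not items:
                break  # no more results
            all_items.extend(items)
        return all_items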
kailash/nodes/base.py CHANGED
@@ -214,24 +214,23 @@ class Node(ABC):
         - During workflow creation: Used for connection validation
         - During export: Included in workflow manifests

-        Example::
-
-            def get_parameters(self):
-                return {
-                    'input_file': NodeParameter(
-                        name='input_file',
-                        type=str,
-                        required=True,
-                        description='Path to input CSV file'
-                    ),
-                    'delimiter': NodeParameter(
-                        name='delimiter',
-                        type=str,
-                        required=False,
-                        default=',',
-                        description='CSV delimiter character'
-                    )
-                }
+        Example:
+            >>> def get_parameters(self):
+            ...     return {
+            ...         'input_file': NodeParameter(
+            ...             name='input_file',
+            ...             type=str,
+            ...             required=True,
+            ...             description='Path to input CSV file'
+            ...         ),
+            ...         'delimiter': NodeParameter(
+            ...             name='delimiter',
+            ...             type=str,
+            ...             required=False,
+            ...             default=',',
+            ...             description='CSV delimiter character'
+            ...         )
+            ...     }

         Returns:
             Dictionary mapping parameter names to their definitions
@@ -265,29 +264,28 @@ class Node(ABC):
         3. Workflow connection validation
         4. Export manifest generation

-        Example::
-
-            def get_output_schema(self):
-                return {
-                    'dataframe': NodeParameter(
-                        name='dataframe',
-                        type=dict,
-                        required=True,
-                        description='Processed data as dictionary'
-                    ),
-                    'row_count': NodeParameter(
-                        name='row_count',
-                        type=int,
-                        required=True,
-                        description='Number of rows processed'
-                    ),
-                    'processing_time': NodeParameter(
-                        name='processing_time',
-                        type=float,
-                        required=False,
-                        description='Time taken to process in seconds'
-                    )
-                }
+        Example:
+            >>> def get_output_schema(self):
+            ...     return {
+            ...         'dataframe': NodeParameter(
+            ...             name='dataframe',
+            ...             type=dict,
+            ...             required=True,
+            ...             description='Processed data as dictionary'
+            ...         ),
+            ...         'row_count': NodeParameter(
+            ...             name='row_count',
+            ...             type=int,
+            ...             required=True,
+            ...             description='Number of rows processed'
+            ...         ),
+            ...         'processing_time': NodeParameter(
+            ...             name='processing_time',
+            ...             type=float,
+            ...             required=False,
+            ...             description='Time taken to process in seconds'
+            ...         )
+            ...     }

         Returns:
             Dictionary mapping output names to their parameter definitions
@@ -325,15 +323,14 @@ class Node(ABC):
         - Error wrapping and logging
         - Execution timing and metrics

-        Example::
-
-            def run(self, input_file, delimiter=','):
-                df = pd.read_csv(input_file, delimiter=delimiter)
-                return {
-                    'dataframe': df.to_dict(),
-                    'row_count': len(df),
-                    'columns': list(df.columns)
-                }
+        Example:
+            >>> def run(self, input_file, delimiter=','):
+            ...     df = pd.read_csv(input_file, delimiter=delimiter)
+            ...     return {
+            ...         'dataframe': df.to_dict(),
+            ...         'row_count': len(df),
+            ...         'columns': list(df.columns)
+            ...     }

         Args:
             **kwargs: Validated input parameters matching get_parameters()
@@ -1010,11 +1007,11 @@ class NodeRegistry:
         - Logs the clearing action
         - Existing node instances remain valid

-        Warning::
-
-            - Subsequent get() calls will fail
-            - Workflows may not deserialize
-            - Should re-register needed nodes
+        Warning:
+            >>> # Warning: This affects all future operations
+            >>> # - Subsequent get() calls will fail
+            >>> # - Workflows may not deserialize
+            >>> # - Should re-register needed nodes
         """
         cls._nodes.clear()
         logging.info("Cleared all registered nodes")
@@ -1057,15 +1054,14 @@ def register_node(alias: Optional[str] = None):
     - Returns the unmodified class
     - Handles registration errors

-    Example::
-
-        @register_node(alias='CSV')
-        class CSVReaderNode(Node):
-            def get_parameters(self):
-                return {'file': NodeParameter(...)}
-
-            def run(self, file):
-                return pd.read_csv(file)
+    Example:
+        >>> @register_node(alias='CSV')
+        ... class CSVReaderNode(Node):
+        ...     def get_parameters(self):
+        ...         return {'file': NodeParameter(...)}
+        ...
+        ...     def run(self, file):
+        ...         return pd.read_csv(file)
     """

    def decorator(node_class: Type[Node]):
kailash/nodes/code/python.py CHANGED
@@ -144,6 +144,7 @@ class CodeExecutor:
         "filter",
         "float",
         "int",
+        "isinstance",  # Common type checking
         "len",
         "list",
         "map",
@@ -546,48 +547,47 @@ class PythonCodeNode(Node):
     - State management for class-based nodes
     - AST-based security validation

-    Example::
-
-        # Function-based node
-        def custom_filter(data: pd.DataFrame, threshold: float) -> pd.DataFrame:
-            return data[data['value'] > threshold]
-
-        node = PythonCodeNode.from_function(
-            func=custom_filter,
-            name="threshold_filter"
-        )
-
-        # Class-based stateful node
-        class MovingAverage:
-            def __init__(self, window_size: int = 3):
-                self.window_size = window_size
-                self.values = []
-
-            def process(self, value: float) -> float:
-                self.values.append(value)
-                if len(self.values) > self.window_size:
-                    self.values.pop(0)
-                return sum(self.values) / len(self.values)
-
-        node = PythonCodeNode.from_class(
-            cls=MovingAverage,
-            name="moving_avg"
-        )
-
-        # Code string node
-        code = '''
-        result = []
-        for item in data:
-            if item > threshold:
-                result.append(item * 2)
-        '''
-
-        node = PythonCodeNode(
-            name="custom_processor",
-            code=code,
-            input_types={'data': list, 'threshold': float},
-            output_type=list
-        )
+    Example:
+        >>> # Function-based node
+        >>> def custom_filter(data: pd.DataFrame, threshold: float) -> pd.DataFrame:
+        ...     return data[data['value'] > threshold]
+
+        >>> node = PythonCodeNode.from_function(
+        ...     func=custom_filter,
+        ...     name="threshold_filter"
+        ... )
+
+        >>> # Class-based stateful node
+        >>> class MovingAverage:
+        ...     def __init__(self, window_size: int = 3):
+        ...         self.window_size = window_size
+        ...         self.values = []
+        ...
+        ...     def process(self, value: float) -> float:
+        ...         self.values.append(value)
+        ...         if len(self.values) > self.window_size:
+        ...             self.values.pop(0)
+        ...         return sum(self.values) / len(self.values)
+
+        >>> node = PythonCodeNode.from_class(
+        ...     cls=MovingAverage,
+        ...     name="moving_avg"
+        ... )
+
+        >>> # Code string node
+        >>> code = '''
+        ... result = []
+        ... for item in data:
+        ...     if item > threshold:
+        ...         result.append(item * 2)
+        ... '''
+
+        >>> node = PythonCodeNode(
+        ...     name="custom_processor",
+        ...     code=code,
+        ...     input_types={'data': list, 'threshold': float},
+        ...     output_type=list
+        ... )
     """

     def __init__(
@@ -727,6 +727,25 @@ class PythonCodeNode(Node):

         return parameters

+    def validate_inputs(self, **kwargs) -> Dict[str, Any]:
+        """Validate runtime inputs.
+
+        For code-based nodes, we accept any inputs since the code
+        can use whatever variables it needs.
+
+        Args:
+            **kwargs: Runtime inputs
+
+        Returns:
+            All inputs as-is for code nodes, validated inputs for function/class nodes
+        """
+        # If using code string, pass through all inputs
+        if self.code:
+            return kwargs
+
+        # Otherwise use standard validation for function/class nodes
+        return super().validate_inputs(**kwargs)
+
     def get_output_schema(self) -> Dict[str, "NodeParameter"]:
         """Define output parameters for this node.

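The override relaxes validation only when a code string is present; function- and class-based nodes still route through the base class. A hedged usage sketch of the difference (constructor arguments are illustrative):

    # Code-string node: validate_inputs() now passes every kwarg through,
    # so variables the code references need not be declared up front.
    node = PythonCodeNode(
        name="flexible",
        code="result = extra_value * 2",
        output_type=int,
    )
    out = node.execute(extra_value=21)  # 'extra_value' was never declared

    # A from_function() node, by contrast, still goes through
    # super().validate_inputs(), so undeclared inputs are rejected there.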
kailash/nodes/data/__init__.py CHANGED
@@ -57,9 +57,9 @@ All nodes provide detailed error messages for:
 Example Workflows:
     # Traditional ETL
     workflow = Workflow()
-    workflow.add_node('read', CSVReader(file_path='input.csv'))
+    workflow.add_node('read', CSVReaderNode(file_path='input.csv'))
     workflow.add_node('transform', DataTransform())
-    workflow.add_node('write', JSONWriter(file_path='output.json'))
+    workflow.add_node('write', JSONWriterNode(file_path='output.json'))
     workflow.connect('read', 'transform')
     workflow.connect('transform', 'write')

@@ -80,7 +80,7 @@ Example Workflows:
     workflow.connect('process', 'publish')
 """

-from kailash.nodes.data.readers import CSVReader, JSONReader, TextReader
+from kailash.nodes.data.readers import CSVReaderNode, JSONReaderNode, TextReaderNode
 from kailash.nodes.data.retrieval import RelevanceScorerNode
 from kailash.nodes.data.sharepoint_graph import (
     SharePointGraphReader,
@@ -99,18 +99,18 @@ from kailash.nodes.data.vector_db import (
     TextSplitterNode,
     VectorDatabaseNode,
 )
-from kailash.nodes.data.writers import CSVWriter, JSONWriter, TextWriter
+from kailash.nodes.data.writers import CSVWriterNode, JSONWriterNode, TextWriterNode

 __all__ = [
     # Readers
-    "CSVReader",
-    "JSONReader",
-    "TextReader",
+    "CSVReaderNode",
+    "JSONReaderNode",
+    "TextReaderNode",
     "SharePointGraphReader",
     # Writers
-    "CSVWriter",
-    "JSONWriter",
-    "TextWriter",
+    "CSVWriterNode",
+    "JSONWriterNode",
+    "TextWriterNode",
     "SharePointGraphWriter",
     # Sources
     "DocumentSourceNode",
kailash/nodes/data/readers.py CHANGED
@@ -12,9 +12,9 @@ Design Philosophy:
 5. Type-safe parameter validation

 Node Categories:
-- CSVReader: Tabular data from CSV files
-- JSONReader: Structured data from JSON files
-- TextReader: Raw text from any text file
+- CSVReaderNode: Tabular data from CSV files
+- JSONReaderNode: Structured data from JSON files
+- TextReaderNode: Raw text from any text file

 Upstream Components:
 - FileSystem: Provides files to read
@@ -36,7 +36,7 @@ from kailash.nodes.base import Node, NodeParameter, register_node


 @register_node()
-class CSVReader(Node):
+class CSVReaderNode(Node):
     """Reads data from a CSV file.

     This node provides robust CSV file reading capabilities with support for
@@ -78,19 +78,18 @@ class CSVReader(Node):
     - UnicodeDecodeError: Encoding mismatch
     - csv.Error: Malformed CSV data

-    Example::
-
-        # Read customer data with headers
-        reader = CSVReader(
-            file_path='customers.csv',
-            headers=True,
-            delimiter=','
-        )
-        result = reader.execute()
-        # result['data'] = [
-        #     {'id': '1', 'name': 'John', 'age': '30'},
-        #     {'id': '2', 'name': 'Jane', 'age': '25'}
-        # ]
+    Example:
+        >>> # Read customer data with headers
+        >>> reader = CSVReaderNode(
+        ...     file_path='customers.csv',
+        ...     headers=True,
+        ...     delimiter=','
+        ... )
+        >>> result = reader.execute()
+        >>> # result['data'] = [
+        >>> #     {'id': '1', 'name': 'John', 'age': '30'},
+        >>> #     {'id': '2', 'name': 'Jane', 'age': '25'}
+        >>> # ]
     """

     def get_parameters(self) -> Dict[str, NodeParameter]:
@@ -235,7 +234,7 @@ class CSVReader(Node):


 @register_node()
-class JSONReader(Node):
+class JSONReaderNode(Node):
     """Reads data from a JSON file.

     This node handles JSON file reading with support for complex nested
@@ -280,7 +279,7 @@ class JSONReader(Node):

     Example:
         # Read API response data
-        reader = JSONReader(file_path='api_response.json')
+        reader = JSONReaderNode(file_path='api_response.json')
         result = reader.execute()
         # result['data'] = {
         #     'status': 'success',
@@ -359,7 +358,7 @@ class JSONReader(Node):


 @register_node()
-class TextReader(Node):
+class TextReaderNode(Node):
     """Reads text from a file.

     This node provides simple text file reading with encoding support.
@@ -403,15 +402,14 @@ class TextReader(Node):
     - UnicodeDecodeError: Wrong encoding
     - MemoryError: File too large

-    Example::
-
-        # Read a log file
-        reader = TextReader(
-            file_path='application.log',
-            encoding='utf-8'
-        )
-        result = reader.execute()
-        # result['text'] = "2024-01-01 INFO: Application started\\n..."
+    Example:
+        >>> # Read a log file
+        >>> reader = TextReaderNode(
+        ...     file_path='application.log',
+        ...     encoding='utf-8'
+        ... )
+        >>> result = reader.execute()
+        >>> # result['text'] = "2024-01-01 INFO: Application started\\n..."
     """

     def get_parameters(self) -> Dict[str, NodeParameter]:
kailash/nodes/data/retrieval.py CHANGED
@@ -102,7 +102,7 @@ class RelevanceScorerNode(Node):
     ) -> List[Dict]:
         """Score chunks using cosine similarity."""
         # Extract actual embedding vectors from the embedding objects
-        # EmbeddingGenerator returns embeddings in format: {"embedding": [...], "text": "...", "dimensions": X}
+        # EmbeddingGeneratorNode returns embeddings in format: {"embedding": [...], "text": "...", "dimensions": X}

         # Handle query embedding - should be the first (and only) embedding in the list
         query_embedding_obj = query_embeddings[0] if query_embeddings else {}
kailash/nodes/data/sharepoint_graph.py CHANGED
@@ -56,18 +56,17 @@ class SharePointGraphReader(Node):
     3. Search for files by name
     4. Navigate folder structures

-    Example::
-
-        reader = SharePointGraphReader()
-        result = reader.execute(
-            tenant_id="your-tenant-id",
-            client_id="your-client-id",
-            client_secret="your-secret",
-            site_url="https://company.sharepoint.com/sites/project",
-            operation="list_files",
-            library_name="Documents",
-            folder_path="Reports/2024"
-        )
+    Example:
+        >>> reader = SharePointGraphReader()
+        >>> result = reader.execute(
+        ...     tenant_id="your-tenant-id",
+        ...     client_id="your-client-id",
+        ...     client_secret="your-secret",
+        ...     site_url="https://company.sharepoint.com/sites/project",
+        ...     operation="list_files",
+        ...     library_name="Documents",
+        ...     folder_path="Reports/2024"
+        ... )
     """

     def get_metadata(self) -> NodeMetadata:
@@ -471,19 +470,18 @@ class SharePointGraphWriter(Node):
     This node handles file uploads to SharePoint document libraries,
     supporting folder structures and metadata.

-    Example::
-
-        writer = SharePointGraphWriter()
-        result = writer.execute(
-            tenant_id="your-tenant-id",
-            client_id="your-client-id",
-            client_secret="your-secret",
-            site_url="https://company.sharepoint.com/sites/project",
-            local_path="report.pdf",
-            library_name="Documents",
-            folder_path="Reports/2024",
-            sharepoint_name="Q4_Report_2024.pdf"
-        )
+    Example:
+        >>> writer = SharePointGraphWriter()
+        >>> result = writer.execute(
+        ...     tenant_id="your-tenant-id",
+        ...     client_id="your-client-id",
+        ...     client_secret="your-secret",
+        ...     site_url="https://company.sharepoint.com/sites/project",
+        ...     local_path="report.pdf",
+        ...     library_name="Documents",
+        ...     folder_path="Reports/2024",
+        ...     sharepoint_name="Q4_Report_2024.pdf"
+        ... )
     """

     def get_metadata(self) -> NodeMetadata:
kailash/nodes/data/sql.py CHANGED
@@ -63,20 +63,19 @@ class SQLDatabaseNode(Node):
     - TimeoutError: Query execution timeout
     - PermissionError: Access denied

-    Example::
-
-        # Query customer data
-        sql_node = SQLDatabaseNode(
-            connection_string='postgresql://user:pass@host/db',
-            query='SELECT * FROM customers WHERE active = ?',
-            parameters=[True],
-            result_format='dict'
-        )
-        result = sql_node.execute()
-        # result['data'] = [
-        #     {'id': 1, 'name': 'John', 'active': True},
-        #     {'id': 2, 'name': 'Jane', 'active': True}
-        # ]
+    Example:
+        >>> # Query customer data
+        >>> sql_node = SQLDatabaseNode(
+        ...     connection_string='postgresql://user:pass@host/db',
+        ...     query='SELECT * FROM customers WHERE active = ?',
+        ...     parameters=[True],
+        ...     result_format='dict'
+        ... )
+        >>> result = sql_node.execute()
+        >>> # result['data'] = [
+        >>> #     {'id': 1, 'name': 'John', 'active': True},
+        >>> #     {'id': 2, 'name': 'Jane', 'active': True}
+        >>> # ]
     """

     def get_parameters(self) -> Dict[str, NodeParameter]:
@@ -192,10 +191,10 @@ class SQLDatabaseNode(Node):
         """
         connection_string = kwargs["connection_string"]
         query = kwargs["query"]
-        parameters = kwargs.get("parameters", [])
+        # parameters = kwargs.get("parameters", [])  # TODO: Implement parameterized queries
         result_format = kwargs.get("result_format", "dict")
-        timeout = kwargs.get("timeout", 30)
-        transaction_mode = kwargs.get("transaction_mode", "auto")
+        # timeout = kwargs.get("timeout", 30)  # TODO: Implement query timeout
+        # transaction_mode = kwargs.get("transaction_mode", "auto")  # TODO: Implement transaction handling

         # This is a placeholder implementation
         # In a real implementation, you would:
@@ -259,18 +258,17 @@ class SQLQueryBuilderNode(Node):
     3. Multi-table joins
     4. Aggregation queries

-    Example::
-
-        builder = SQLQueryBuilderNode(
-            table='customers',
-            select=['name', 'email'],
-            where={'active': True, 'country': 'USA'},
-            order_by=['name'],
-            limit=100
-        )
-        result = builder.execute()
-        # result['query'] = 'SELECT name, email FROM customers WHERE active = ? AND country = ? ORDER BY name LIMIT 100'
-        # result['parameters'] = [True, 'USA']
+    Example:
+        >>> builder = SQLQueryBuilderNode(
+        ...     table='customers',
+        ...     select=['name', 'email'],
+        ...     where={'active': True, 'country': 'USA'},
+        ...     order_by=['name'],
+        ...     limit=100
+        ... )
+        >>> result = builder.execute()
+        >>> # result['query'] = 'SELECT name, email FROM customers WHERE active = ? AND country = ? ORDER BY name LIMIT 100'
+        >>> # result['parameters'] = [True, 'USA']
     """

     def get_parameters(self) -> Dict[str, NodeParameter]:
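Parameterized execution is deferred to a TODO, so it is worth spelling out what the commented-out parameters input stands in for. A generic sketch using the standard library's sqlite3 (not this node's eventual implementation): placeholders keep user values out of the SQL string, and the driver binds them safely.

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE customers (id INTEGER, name TEXT, active BOOLEAN)")
    conn.execute("INSERT INTO customers VALUES (1, 'John', 1)")

    # The '?' placeholder is bound by the driver, never string-formatted in,
    # which is what makes parameterized queries injection-safe.
    rows = conn.execute(
        "SELECT * FROM customers WHERE active = ?", [True]
    ).fetchall()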
kailash/nodes/data/vector_db.py CHANGED
@@ -584,7 +584,7 @@ class VectorDatabaseNode(Node):
         """
         vectors = inputs.get("vectors", [])
         ids = inputs.get("ids", [])
-        metadata = inputs.get("metadata", [])
+        # metadata = inputs.get("metadata", [])  # TODO: Implement metadata storage

         if not vectors or not ids:
             raise ValueError("Vectors and IDs are required for upsert")
@@ -611,7 +611,7 @@ class VectorDatabaseNode(Node):
         """
         query_vector = inputs.get("query_vector")
         k = inputs.get("k", 10)
-        filter_dict = inputs.get("filter", {})
+        # filter_dict = inputs.get("filter", {})  # TODO: Implement filter-based queries

         if not query_vector:
             raise ValueError("Query vector is required")
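The deferred filter input would narrow search results by metadata. A generic sketch of what filter-based querying usually means for a vector store (illustrative helper, not this node's API):

    def apply_filter(results: list, filter_dict: dict) -> list:
        """Keep results whose metadata matches every key/value in filter_dict."""
        return [
            r for r in results
            if all(r.get("metadata", {}).get(k) == v for k, v in filter_dict.items())
        ]

    hits = apply_filter(
        [{"id": "a", "metadata": {"lang": "en"}},
         {"id": "b", "metadata": {"lang": "de"}}],
        {"lang": "en"},
    )  # -> only the 'a' result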