kailash 0.1.3__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in the public registry, and is provided for informational purposes only.
@@ -12,9 +12,9 @@ Design Philosophy:
  5. Progress tracking and feedback

 Node Categories:
- - CSVWriter: Tabular data to CSV files
- - JSONWriter: Structured data to JSON files
- - TextWriter: Raw text to any text file
+ - CSVWriterNode: Tabular data to CSV files
+ - JSONWriterNode: Structured data to JSON files
+ - TextWriterNode: Raw text to any text file

 Upstream Components:
 - Reader nodes: Provide data to transform
@@ -37,7 +37,7 @@ from kailash.nodes.base import Node, NodeParameter, register_node


 @register_node()
- class CSVWriter(Node):
+ class CSVWriterNode(Node):
 """Writes data to a CSV file.

 This node handles CSV file writing with support for both dictionary
@@ -81,19 +81,18 @@ class CSVWriter(Node):
 - TypeError: Invalid data structure
 - UnicodeEncodeError: Encoding issues

- Example::
-
- # Write customer data
- writer = CSVWriter(
- file_path='output.csv',
- data=[
- {'id': 1, 'name': 'John', 'age': 30},
- {'id': 2, 'name': 'Jane', 'age': 25}
- ],
- delimiter=','
- )
- result = writer.execute()
- # result = {'rows_written': 2, 'file_path': 'output.csv'}
+ Example:
+ >>> # Write customer data
+ >>> writer = CSVWriterNode(
+ ... file_path='output.csv',
+ ... data=[
+ ... {'id': 1, 'name': 'John', 'age': 30},
+ ... {'id': 2, 'name': 'Jane', 'age': 25}
+ ... ],
+ ... delimiter=','
+ ... )
+ >>> result = writer.execute()
+ >>> # result = {'rows_written': 2, 'file_path': 'output.csv'}
 """

 def get_parameters(self) -> Dict[str, NodeParameter]:
@@ -218,7 +217,7 @@ class CSVWriter(Node):


 @register_node()
- class JSONWriter(Node):
+ class JSONWriterNode(Node):
 """Writes data to a JSON file.

 This node handles JSON serialization with support for complex
@@ -262,20 +261,19 @@ class JSONWriter(Node):
 - OSError: Path or disk issues
 - JSONEncodeError: Encoding problems

- Example::
-
- # Write API response
- writer = JSONWriter(
- file_path='response.json',
- data={
- 'status': 'success',
- 'results': [1, 2, 3],
- 'metadata': {'version': '1.0'}
- },
- indent=2
- )
- result = writer.execute()
- # result = {'file_path': 'response.json'}
+ Example:
+ >>> # Write API response
+ >>> writer = JSONWriterNode(
+ ... file_path='response.json',
+ ... data={
+ ... 'status': 'success',
+ ... 'results': [1, 2, 3],
+ ... 'metadata': {'version': '1.0'}
+ ... },
+ ... indent=2
+ ... )
+ >>> result = writer.execute()
+ >>> # result = {'file_path': 'response.json'}
 """

 def get_parameters(self) -> Dict[str, NodeParameter]:
@@ -370,7 +368,7 @@ class JSONWriter(Node):


 @register_node()
- class TextWriter(Node):
+ class TextWriterNode(Node):
 """Writes text to a file.

 This node provides flexible text file writing with support for
@@ -414,17 +412,16 @@ class TextWriter(Node):
 - UnicodeEncodeError: Encoding mismatch
 - MemoryError: Text too large

- Example::
-
- # Append to log file
- writer = TextWriter(
- file_path='app.log',
- text='ERROR: Connection failed\\n',
- encoding='utf-8',
- append=True
- )
- result = writer.execute()
- # result = {'file_path': 'app.log', 'bytes_written': 25}
+ Example:
+ >>> # Append to log file
+ >>> writer = TextWriterNode(
+ ... file_path='app.log',
+ ... text='ERROR: Connection failed\\n',
+ ... encoding='utf-8',
+ ... append=True
+ ... )
+ >>> result = writer.execute()
+ >>> # result = {'file_path': 'app.log', 'bytes_written': 25}
 """

 def get_parameters(self) -> Dict[str, NodeParameter]:
@@ -1,7 +1,13 @@
 """Logic operation nodes for the Kailash SDK."""

- from kailash.nodes.logic.async_operations import AsyncMerge, AsyncSwitch
- from kailash.nodes.logic.operations import Merge, Switch
+ from kailash.nodes.logic.async_operations import AsyncMergeNode, AsyncSwitchNode
+ from kailash.nodes.logic.operations import MergeNode, SwitchNode
 from kailash.nodes.logic.workflow import WorkflowNode

- __all__ = ["Switch", "Merge", "AsyncSwitch", "AsyncMerge", "WorkflowNode"]
+ __all__ = [
+ "SwitchNode",
+ "MergeNode",
+ "AsyncSwitchNode",
+ "AsyncMergeNode",
+ "WorkflowNode",
+ ]
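Every public class in the logic package gains a `Node` suffix in 0.1.4, so 0.1.3 code that imports these names needs updating. A minimal sketch of the 0.1.4 imports, derived from the new `__all__` above; it assumes only the class names changed, since the module paths in this hunk are untouched:

```python
# 0.1.3 -> 0.1.4 import migration for the logic package (hedged sketch).
from kailash.nodes.logic import (
    SwitchNode,       # was Switch
    MergeNode,        # was Merge
    AsyncSwitchNode,  # was AsyncSwitch
    AsyncMergeNode,   # was AsyncMerge
    WorkflowNode,     # unchanged
)
```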
@@ -13,14 +13,14 @@ from kailash.nodes.base_async import AsyncNode


 @register_node()
- class AsyncMerge(AsyncNode):
+ class AsyncMergeNode(AsyncNode):
 """Asynchronously merges multiple data sources.

 Note: We implement run() to fulfill the Node abstract base class requirement,
 but it's just a pass-through to async_run().


- This node extends the standard Merge node with asynchronous execution capabilities,
+ This node extends the standard MergeNode with asynchronous execution capabilities,
 making it more efficient for:

 1. Combining large datasets from parallel branches
@@ -28,13 +28,13 @@ class AsyncMerge(AsyncNode):
 3. Processing streaming data in chunks
 4. Aggregating results from various API calls

- The merge operation supports the same types as the standard Merge node:
+ The merge operation supports the same types as the standard MergeNode:
 concat (list concatenation), zip (parallel iteration), and merge_dict
 (dictionary merging with optional key-based joining).

 Usage example:
- # Create an AsyncMerge node in a workflow
- async_merge = AsyncMerge(merge_type="merge_dict", key="id")
+ # Create an AsyncMergeNode in a workflow
+ async_merge = AsyncMergeNode(merge_type="merge_dict", key="id")
 workflow.add_node("data_combine", async_merge)

 # Connect multiple data sources
@@ -44,7 +44,7 @@ class AsyncMerge(AsyncNode):
 """

 def get_parameters(self) -> Dict[str, NodeParameter]:
- """Define parameters for the AsyncMerge node."""
+ """Define parameters for the AsyncMergeNode."""
 # Reuse parameters from SyncMerge
 return {
 "data1": NodeParameter(
@@ -107,7 +107,7 @@ class AsyncMerge(AsyncNode):
 }

 def get_output_schema(self) -> Dict[str, NodeParameter]:
- """Define the output schema for AsyncMerge."""
+ """Define the output schema for AsyncMergeNode."""
 return {
 "merged_data": NodeParameter(
 name="merged_data",
@@ -155,7 +155,7 @@ class AsyncMerge(AsyncNode):

 # Check if we have at least one valid input
 if not data_inputs:
- self.logger.warning("No valid data inputs provided to AsyncMerge node")
+ self.logger.warning("No valid data inputs provided to AsyncMergeNode")
 return {"merged_data": None}

 # If only one input was provided, return it directly
@@ -207,7 +207,7 @@ class AsyncMerge(AsyncNode):
 # This will be properly wrapped by the execute() method
 # which will call it in a sync context
 raise RuntimeError(
- "AsyncMerge.run() was called directly. Use execute() or execute_async() instead."
+ "AsyncMergeNode.run() was called directly. Use execute() or execute_async() instead."
 )

 async def _async_concat(self, data_inputs: List[Any], chunk_size: int) -> Any:
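The error message above directs callers to execute() or execute_async() rather than run(). A hedged sketch of driving the renamed AsyncMergeNode from asyncio; it assumes execute_async() is awaitable, accepts the keyword inputs declared by get_parameters() (data1, data2, ...), and returns a dict keyed by merged_data as in the output schema shown earlier:

```python
import asyncio

from kailash.nodes.logic import AsyncMergeNode


async def combine():
    # "concat" is one of the merge types listed in the AsyncMergeNode docstring.
    merger = AsyncMergeNode(merge_type="concat")
    # Assumed calling convention: keyword inputs matching get_parameters().
    result = await merger.execute_async(data1=[1, 2], data2=[3, 4])
    return result["merged_data"]  # output key from get_output_schema()


print(asyncio.run(combine()))  # expected: [1, 2, 3, 4] under these assumptions
```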
@@ -349,25 +349,25 @@ class AsyncMerge(AsyncNode):


 @register_node()
- class AsyncSwitch(AsyncNode):
+ class AsyncSwitchNode(AsyncNode):
 """Asynchronously routes data to different outputs based on conditions.

 Note: We implement run() to fulfill the Node abstract base class requirement,
 but it's just a pass-through to async_run().

- This node extends the standard Switch node with asynchronous execution capabilities,
+ This node extends the standard SwitchNode with asynchronous execution capabilities,
 making it more efficient for:

 1. Processing conditional routing with I/O-bound condition evaluation
 2. Handling large datasets that need to be routed based on complex criteria
 3. Integrating with other asynchronous nodes in a workflow

- The basic functionality is the same as the synchronous Switch node but optimized
+ The basic functionality is the same as the synchronous SwitchNode but optimized
 for asynchronous execution.
 """

 def get_parameters(self) -> Dict[str, NodeParameter]:
- """Define parameters for the AsyncSwitch node."""
+ """Define parameters for the AsyncSwitchNode."""
 return {
 "input_data": NodeParameter(
 name="input_data",
@@ -603,7 +603,7 @@ class AsyncSwitch(AsyncNode):
 # This will be properly wrapped by the execute() method
 # which will call it in a sync context
 raise RuntimeError(
- "AsyncSwitch.run() was called directly. Use execute() or execute_async() instead."
+ "AsyncSwitchNode.run() was called directly. Use execute() or execute_async() instead."
 )

 async def _evaluate_condition(
@@ -11,7 +11,7 @@ from kailash.nodes.base import Node, NodeParameter, register_node


 @register_node()
- class Switch(Node):
+ class SwitchNode(Node):
 """Routes data to different outputs based on conditions.

 The Switch node enables conditional branching in workflows by evaluating
@@ -23,25 +23,21 @@ class Switch(Node):
 3. Dynamic workflow paths based on data values

 The outputs of Switch nodes are typically connected to different processing
- nodes, and those branches can be rejoined later using a Merge node.
-
- Example usage::
-
- # Simple boolean condition
- switch_node = Switch(condition_field="status", operator="==", value="success")
- workflow.add_node("router", switch_node)
- workflow.connect("router", "success_handler", {"true_output": "input"})
- workflow.connect("router", "error_handler", {"false_output": "input"})
-
- # Multi-case switching
- switch_node = Switch(
- condition_field="status",
- cases=["success", "warning", "error"]
- )
- workflow.connect("router", "success_handler", {"case_success": "input"})
- workflow.connect("router", "warning_handler", {"case_warning": "input"})
- workflow.connect("router", "error_handler", {"case_error": "input"})
- workflow.connect("router", "default_handler", {"default": "input"})
+ nodes, and those branches can be rejoined later using a MergeNode.
+
+ Example usage:
+ >>> # Simple boolean condition
+ >>> switch_node = SwitchNode(condition_field="status", operator="==", value="success")
+ >>> switch_node.metadata.name
+ 'SwitchNode'
+
+ >>> # Multi-case switching
+ >>> switch_node = SwitchNode(
+ ... condition_field="status",
+ ... cases=["success", "warning", "error"]
+ ... )
+ >>> 'cases' in switch_node.get_parameters()
+ True
 """

 def get_parameters(self) -> Dict[str, NodeParameter]:
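The 0.1.4 docstring drops the old workflow-wiring example, but the rejoin pattern it still mentions (branches recombined by a MergeNode) follows the add_node/connect calls shown in the 0.1.3 text above. A hedged sketch under stated assumptions: the Workflow constructor argument, the handler nodes, and the data1/data2 port names (borrowed from the AsyncMergeNode parameters) are illustrative, not values from the diff.

```python
from kailash.workflow import Workflow
from kailash.nodes.logic import SwitchNode, MergeNode

workflow = Workflow("routing-demo")  # assumed constructor signature

# Route records on their "status" field, as in the docstring example.
workflow.add_node("router", SwitchNode(condition_field="status", operator="==", value="success"))

# "success_handler" / "error_handler" stand in for whatever processing nodes
# the branches need; their add_node calls are omitted here.
workflow.connect("router", "success_handler", {"true_output": "input"})
workflow.connect("router", "error_handler", {"false_output": "input"})

# Rejoin the two branches with a MergeNode, as the docstring suggests.
workflow.add_node("combine", MergeNode(merge_type="concat"))
workflow.connect("success_handler", "combine", {"result": "data1"})  # assumed port names
workflow.connect("error_handler", "combine", {"result": "data2"})
```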
@@ -360,7 +356,7 @@ class Switch(Node):


 @register_node()
- class Merge(Node):
+ class MergeNode(Node):
 """Merges multiple data sources.

 This node can combine data from multiple input sources in various ways,
@@ -368,7 +364,7 @@ class Merge(Node):

 1. Combining results from parallel branches in a workflow
 2. Joining related data sets
- 3. Combining outputs after conditional branching with the Switch node
+ 3. Combining outputs after conditional branching with the SwitchNode
 4. Aggregating collections of data

 The merge operation is determined by the merge_type parameter, which supports
@@ -22,7 +22,7 @@ class MCPClient(Node):
 - Input parameters for resource requests and tool calls

 Downstream Consumers:
- - LLMAgent nodes that need context from MCP servers
+ - LLMAgentNode nodes that need context from MCP servers
 - Workflow nodes that orchestrate multi-step MCP interactions
 - Data processing nodes that consume MCP resources

@@ -53,38 +53,34 @@ class MCPClient(Node):
 - Logs connection events and errors for debugging

 Examples:
-
- Connect to an MCP server and list resources::
-
- client = MCPClient()
- result = client.run(
- server_config={
- "name": "filesystem-server",
- "command": "python",
- "args": ["-m", "mcp_filesystem"]
- },
- operation="list_resources"
- )
-
- Fetch a specific resource:
-
- resource = client.run(
- server_config=server_config,
- operation="read_resource",
- resource_uri="file:///path/to/document.txt"
- )
-
- Call a tool on the server:
-
- tool_result = client.run(
- server_config=server_config,
- operation="call_tool",
- tool_name="create_file",
- tool_arguments={
- "path": "/path/to/new_file.txt",
- "content": "Hello, World!"
- }
- )
+ >>> # Connect to an MCP server and list resources
+ >>> client = MCPClient()
+ >>> result = client.run(
+ ... server_config={
+ ... "name": "filesystem-server",
+ ... "command": "python",
+ ... "args": ["-m", "mcp_filesystem"]
+ ... },
+ ... operation="list_resources"
+ ... )
+
+ >>> # Fetch a specific resource
+ >>> resource = client.run(
+ ... server_config=server_config,
+ ... operation="read_resource",
+ ... resource_uri="file:///path/to/document.txt"
+ ... )
+
+ >>> # Call a tool on the server
+ >>> tool_result = client.run(
+ ... server_config=server_config,
+ ... operation="call_tool",
+ ... tool_name="create_file",
+ ... tool_arguments={
+ ... "path": "/path/to/new_file.txt",
+ ... "content": "Hello, World!"
+ ... }
+ ... )
 """

 def get_parameters(self) -> Dict[str, NodeParameter]:
@@ -90,7 +90,7 @@ Context:

 Please provide a comprehensive answer based on the information provided above."""

- # Create messages list for LLMAgent
+ # Create messages list for LLMAgentNode
 messages = [{"role": "user", "content": prompt}]

 return {"formatted_prompt": prompt, "messages": messages, "context": context}
@@ -88,13 +88,12 @@ class MetricsCollector:
 metrics during node execution, with support for both process-level and
 system-level monitoring.

- Usage::
-
- collector = MetricsCollector()
- with collector.collect() as metrics:
- # Execute node code here
- pass
- performance_data = metrics.result()
+ Usage:
+ >>> collector = MetricsCollector()
+ >>> with collector.collect() as metrics:
+ ... # Execute node code here
+ ... pass
+ >>> performance_data = metrics.result()
 """

 def __init__(self, sampling_interval: float = 0.1):
kailash/utils/export.py CHANGED
@@ -88,8 +88,8 @@ class NodeMapper:
 resources=ResourceSpec(cpu="100m", memory="256Mi"),
 )

- self.mappings["CSVReader"] = ContainerMapping(
- python_node="CSVReader",
+ self.mappings["CSVReaderNode"] = ContainerMapping(
+ python_node="CSVReaderNode",
 container_image="kailash/csv-reader:latest",
 command=["python", "-m", "kailash.nodes.data.csv_reader"],
 resources=ResourceSpec(cpu="100m", memory="512Mi"),
@@ -352,7 +352,7 @@ See `workflows/example_workflow.py` for a basic workflow example.
 # Create example workflow
 workflow_content = '''"""Example workflow for data processing."""
 from kailash.workflow import Workflow
- from kailash.nodes.data import CSVReader, CSVWriter
+ from kailash.nodes.data import CSVReaderNode, CSVWriterNode
 from kailash.nodes.transform import Filter, Sort
 from kailash.nodes.logic import Aggregator

@@ -363,11 +363,11 @@ workflow = Workflow(
 )

 # Add nodes
- workflow.add_node("reader", CSVReader(), file_path="examples/examples/data/input.csv")
+ workflow.add_node("reader", CSVReaderNode(), file_path="examples/examples/data/input.csv")
 workflow.add_node("filter", Filter(), field="value", operator=">", value=100)
 workflow.add_node("sort", Sort(), field="value", reverse=True)
 workflow.add_node("aggregate", Aggregator(), group_by="category", operation="sum")
- workflow.add_node("writer", CSVWriter(), file_path="outputs/results.csv")
+ workflow.add_node("writer", CSVWriterNode(), file_path="outputs/results.csv")

 # Connect nodes
 workflow.connect("reader", "filter", {"data": "data"})
@@ -471,9 +471,9 @@ Thumbs.db
 # Add data processing workflow
 workflow_content = '''"""Data processing pipeline workflow."""
 from kailash.workflow import Workflow
- from kailash.nodes.data import CSVReader, JSONReader, JSONWriter
+ from kailash.nodes.data import CSVReaderNode, JSONReaderNode, JSONWriterNode
 from kailash.nodes.transform import Filter, Map, Sort
- from kailash.nodes.logic import Aggregator, Merge
+ from kailash.nodes.logic import Aggregator, MergeNode

 # Create workflow
 workflow = Workflow(
@@ -482,20 +482,20 @@ workflow = Workflow(
 )

 # Data ingestion
- workflow.add_node("csv_reader", CSVReader(), file_path="examples/examples/data/sales_data.csv")
- workflow.add_node("json_reader", JSONReader(), file_path="examples/examples/data/product_data.json")
+ workflow.add_node("csv_reader", CSVReaderNode(), file_path="examples/examples/data/sales_data.csv")
+ workflow.add_node("json_reader", JSONReaderNode(), file_path="examples/examples/data/product_data.json")

 # Transform data
 workflow.add_node("filter_sales", Filter(), field="amount", operator=">", value=1000)
 workflow.add_node("calculate_profit", Map(), field="amount", operation="multiply", value=0.2)
- workflow.add_node("merge_data", Merge(), merge_type="merge_dict", key="product_id")
+ workflow.add_node("merge_data", MergeNode(), merge_type="merge_dict", key="product_id")

 # Aggregate results
 workflow.add_node("group_by_category", Aggregator(), group_by="category", operation="sum")
 workflow.add_node("sort_results", Sort(), field="value", reverse=True)

 # Export results
- workflow.add_node("write_json", JSONWriter(), file_path="outputs/analysis_results.json")
+ workflow.add_node("write_json", JSONWriterNode(), file_path="outputs/analysis_results.json")

 # Connect pipeline
 workflow.connect("csv_reader", "filter_sales", {"data": "data"})
@@ -537,7 +537,7 @@ workflow.connect("sort_results", "write_json", {"sorted_data": "data"})
 # Add ML workflow
 workflow_content = '''"""Machine learning pipeline workflow."""
 from kailash.workflow import Workflow
- from kailash.nodes.data import CSVReader, JSONWriter
+ from kailash.nodes.data import CSVReaderNode, JSONWriterNode
 from kailash.nodes.transform import Filter, Map
 from kailash.nodes.ai import (
 TextClassifier,
@@ -553,7 +553,7 @@ workflow = Workflow(
 )

 # Data ingestion
- workflow.add_node("read_data", CSVReader(), file_path="examples/examples/data/text_data.csv")
+ workflow.add_node("read_data", CSVReaderNode(), file_path="examples/examples/data/text_data.csv")

 # Preprocessing
 workflow.add_node("extract_text", Map(), field="content")
@@ -567,10 +567,10 @@ workflow.add_node("extract_entities", NamedEntityRecognizer(),
 workflow.add_node("summarize", TextSummarizer(), max_length=100)

 # Combine results
- workflow.add_node("merge_results", Merge(), merge_type="merge_dict")
+ workflow.add_node("merge_results", MergeNode(), merge_type="merge_dict")

 # Export results
- workflow.add_node("save_results", JSONWriter(), file_path="outputs/ml_results.json")
+ workflow.add_node("save_results", JSONWriterNode(), file_path="outputs/ml_results.json")

 # Connect pipeline
 workflow.connect("read_data", "extract_text", {"data": "data"})
@@ -604,7 +604,7 @@ workflow.connect("merge_results", "save_results", {"merged_data": "data"})
 # Add API workflow
 workflow_content = '''"""API integration workflow."""
 from kailash.workflow import Workflow
- from kailash.nodes.data import JSONReader, JSONWriter
+ from kailash.nodes.data import JSONReaderNode, JSONWriterNode
 from kailash.nodes.transform import Map, Filter
 from kailash.nodes.logic import Conditional
 from kailash.nodes.ai import ChatAgent, FunctionCallingAgent
@@ -616,7 +616,7 @@ workflow = Workflow(
 )

 # Read configuration
- workflow.add_node("read_config", JSONReader(), file_path="examples/examples/data/api_config.json")
+ workflow.add_node("read_config", JSONReaderNode(), file_path="examples/examples/data/api_config.json")

 # Process with AI agent
 workflow.add_node("chat_agent", ChatAgent(),
@@ -644,7 +644,7 @@ workflow.add_node("process_success", Map(), operation="identity")
 workflow.add_node("handle_error", Map(), operation="identity")

 # Save results
- workflow.add_node("save_results", JSONWriter(), file_path="outputs/api_results.json")
+ workflow.add_node("save_results", JSONWriterNode(), file_path="outputs/api_results.json")

 # Connect workflow
 workflow.connect("read_config", "chat_agent", {"data": "messages"})