kailash 0.1.2__py3-none-any.whl → 0.1.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries. Note that some removed lines below were truncated by the diff rendering; they appear as bare `-` markers or as lines cut off mid-token, and their full original content is not reproduced here.
- kailash/__init__.py +1 -1
- kailash/api/__init__.py +17 -0
- kailash/api/gateway.py +394 -0
- kailash/api/mcp_integration.py +478 -0
- kailash/api/workflow_api.py +399 -0
- kailash/nodes/ai/__init__.py +4 -4
- kailash/nodes/ai/agents.py +4 -4
- kailash/nodes/ai/ai_providers.py +18 -22
- kailash/nodes/ai/embedding_generator.py +34 -38
- kailash/nodes/ai/llm_agent.py +351 -356
- kailash/nodes/api/http.py +0 -4
- kailash/nodes/api/rest.py +1 -1
- kailash/nodes/base.py +60 -64
- kailash/nodes/code/python.py +61 -42
- kailash/nodes/data/__init__.py +10 -10
- kailash/nodes/data/readers.py +27 -29
- kailash/nodes/data/retrieval.py +1 -1
- kailash/nodes/data/sharepoint_graph.py +23 -25
- kailash/nodes/data/sql.py +27 -29
- kailash/nodes/data/vector_db.py +2 -2
- kailash/nodes/data/writers.py +41 -44
- kailash/nodes/logic/__init__.py +10 -3
- kailash/nodes/logic/async_operations.py +14 -14
- kailash/nodes/logic/operations.py +18 -22
- kailash/nodes/logic/workflow.py +439 -0
- kailash/nodes/mcp/client.py +29 -33
- kailash/nodes/mcp/resource.py +1 -1
- kailash/nodes/mcp/server.py +10 -4
- kailash/nodes/transform/formatters.py +1 -1
- kailash/nodes/transform/processors.py +5 -3
- kailash/runtime/docker.py +2 -0
- kailash/tracking/metrics_collector.py +6 -7
- kailash/tracking/models.py +0 -20
- kailash/tracking/storage/database.py +4 -4
- kailash/tracking/storage/filesystem.py +0 -1
- kailash/utils/export.py +2 -2
- kailash/utils/templates.py +16 -16
- kailash/visualization/performance.py +7 -7
- kailash/visualization/reports.py +1 -1
- kailash/workflow/graph.py +4 -4
- kailash/workflow/mock_registry.py +1 -1
- {kailash-0.1.2.dist-info → kailash-0.1.4.dist-info}/METADATA +198 -27
- kailash-0.1.4.dist-info/RECORD +85 -0
- kailash-0.1.2.dist-info/RECORD +0 -80
- {kailash-0.1.2.dist-info → kailash-0.1.4.dist-info}/WHEEL +0 -0
- {kailash-0.1.2.dist-info → kailash-0.1.4.dist-info}/entry_points.txt +0 -0
- {kailash-0.1.2.dist-info → kailash-0.1.4.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.1.2.dist-info → kailash-0.1.4.dist-info}/top_level.txt +0 -0
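
The dominant theme across these files is a rename of the built-in node classes to `Node`-suffixed names (`CSVReaderNode`, `CSVWriterNode`, `JSONReaderNode`, `JSONWriterNode`, `LLMAgentNode`, `EmbeddingGeneratorNode`, `SwitchNode`, `MergeNode`), plus a new `kailash.api` package (workflow API, gateway, MCP integration) and a new `WorkflowNode` for composing workflows. As orientation before the per-file hunks, here is a minimal sketch of the 0.1.4 names in use; the class names and wiring mirror the examples in the updated METADATA below, while the file paths are illustrative placeholders, not anything shipped in the wheel:

```python
# Minimal sketch of the renamed 0.1.4 node classes (names taken from the
# hunks below); file paths are placeholders.
from kailash.workflow import Workflow
from kailash.nodes.data import CSVReaderNode, CSVWriterNode
from kailash.runtime.local import LocalRuntime

workflow = Workflow("rename_demo", name="rename_demo")
workflow.add_node("reader", CSVReaderNode(), file_path="input.csv")
workflow.add_node("writer", CSVWriterNode(), file_path="output.csv")
workflow.connect("reader", "writer", {"data": "data"})

runtime = LocalRuntime()
results, run_id = runtime.execute(workflow)
```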
kailash/utils/export.py
CHANGED
@@ -88,8 +88,8 @@ class NodeMapper:
             resources=ResourceSpec(cpu="100m", memory="256Mi"),
         )

-        self.mappings["
-            python_node="
+        self.mappings["CSVReaderNode"] = ContainerMapping(
+            python_node="CSVReaderNode",
             container_image="kailash/csv-reader:latest",
             command=["python", "-m", "kailash.nodes.data.csv_reader"],
             resources=ResourceSpec(cpu="100m", memory="512Mi"),

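For context on the hunk above: `NodeMapper` keeps a dict from Python node class names to container specs, and 0.1.4 re-keys it under the renamed `CSVReaderNode`. A hedged sketch of registering an additional mapping, assuming `ContainerMapping` and `ResourceSpec` are importable from `kailash.utils.export` and accept the keyword arguments visible in the hunk (the image name and module path below are hypothetical):

```python
# Hedged sketch: register a container mapping for another renamed node.
# Constructor signatures are inferred from the hunk above, not from docs.
from kailash.utils.export import ContainerMapping, NodeMapper, ResourceSpec

mapper = NodeMapper()
mapper.mappings["JSONReaderNode"] = ContainerMapping(
    python_node="JSONReaderNode",                  # renamed 0.1.4 class
    container_image="kailash/json-reader:latest",  # hypothetical image
    command=["python", "-m", "kailash.nodes.data.json_reader"],
    resources=ResourceSpec(cpu="100m", memory="256Mi"),
)
```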
kailash/utils/templates.py
CHANGED
@@ -352,7 +352,7 @@ See `workflows/example_workflow.py` for a basic workflow example.
 # Create example workflow
 workflow_content = '''"""Example workflow for data processing."""
 from kailash.workflow import Workflow
-from kailash.nodes.data import
+from kailash.nodes.data import CSVReaderNode, CSVWriterNode
 from kailash.nodes.transform import Filter, Sort
 from kailash.nodes.logic import Aggregator

@@ -363,11 +363,11 @@ workflow = Workflow(
 )

 # Add nodes
-workflow.add_node("reader",
+workflow.add_node("reader", CSVReaderNode(), file_path="examples/examples/data/input.csv")
 workflow.add_node("filter", Filter(), field="value", operator=">", value=100)
 workflow.add_node("sort", Sort(), field="value", reverse=True)
 workflow.add_node("aggregate", Aggregator(), group_by="category", operation="sum")
-workflow.add_node("writer",
+workflow.add_node("writer", CSVWriterNode(), file_path="outputs/results.csv")

 # Connect nodes
 workflow.connect("reader", "filter", {"data": "data"})

@@ -471,9 +471,9 @@ Thumbs.db
 # Add data processing workflow
 workflow_content = '''"""Data processing pipeline workflow."""
 from kailash.workflow import Workflow
-from kailash.nodes.data import
+from kailash.nodes.data import CSVReaderNode, JSONReaderNode, JSONWriterNode
 from kailash.nodes.transform import Filter, Map, Sort
-from kailash.nodes.logic import Aggregator,
+from kailash.nodes.logic import Aggregator, MergeNode

 # Create workflow
 workflow = Workflow(

@@ -482,20 +482,20 @@ workflow = Workflow(
 )

 # Data ingestion
-workflow.add_node("csv_reader",
-workflow.add_node("json_reader",
+workflow.add_node("csv_reader", CSVReaderNode(), file_path="examples/examples/data/sales_data.csv")
+workflow.add_node("json_reader", JSONReaderNode(), file_path="examples/examples/data/product_data.json")

 # Transform data
 workflow.add_node("filter_sales", Filter(), field="amount", operator=">", value=1000)
 workflow.add_node("calculate_profit", Map(), field="amount", operation="multiply", value=0.2)
-workflow.add_node("merge_data",
+workflow.add_node("merge_data", MergeNode(), merge_type="merge_dict", key="product_id")

 # Aggregate results
 workflow.add_node("group_by_category", Aggregator(), group_by="category", operation="sum")
 workflow.add_node("sort_results", Sort(), field="value", reverse=True)

 # Export results
-workflow.add_node("write_json",
+workflow.add_node("write_json", JSONWriterNode(), file_path="outputs/analysis_results.json")

 # Connect pipeline
 workflow.connect("csv_reader", "filter_sales", {"data": "data"})

@@ -537,7 +537,7 @@ workflow.connect("sort_results", "write_json", {"sorted_data": "data"})
 # Add ML workflow
 workflow_content = '''"""Machine learning pipeline workflow."""
 from kailash.workflow import Workflow
-from kailash.nodes.data import
+from kailash.nodes.data import CSVReaderNode, JSONWriterNode
 from kailash.nodes.transform import Filter, Map
 from kailash.nodes.ai import (
     TextClassifier,

@@ -553,7 +553,7 @@ workflow = Workflow(
 )

 # Data ingestion
-workflow.add_node("read_data",
+workflow.add_node("read_data", CSVReaderNode(), file_path="examples/examples/data/text_data.csv")

 # Preprocessing
 workflow.add_node("extract_text", Map(), field="content")

@@ -567,10 +567,10 @@ workflow.add_node("extract_entities", NamedEntityRecognizer(),
 workflow.add_node("summarize", TextSummarizer(), max_length=100)

 # Combine results
-workflow.add_node("merge_results",
+workflow.add_node("merge_results", MergeNode(), merge_type="merge_dict")

 # Export results
-workflow.add_node("save_results",
+workflow.add_node("save_results", JSONWriterNode(), file_path="outputs/ml_results.json")

 # Connect pipeline
 workflow.connect("read_data", "extract_text", {"data": "data"})

@@ -604,7 +604,7 @@ workflow.connect("merge_results", "save_results", {"merged_data": "data"})
 # Add API workflow
 workflow_content = '''"""API integration workflow."""
 from kailash.workflow import Workflow
-from kailash.nodes.data import
+from kailash.nodes.data import JSONReaderNode, JSONWriterNode
 from kailash.nodes.transform import Map, Filter
 from kailash.nodes.logic import Conditional
 from kailash.nodes.ai import ChatAgent, FunctionCallingAgent

@@ -616,7 +616,7 @@ workflow = Workflow(
 )

 # Read configuration
-workflow.add_node("read_config",
+workflow.add_node("read_config", JSONReaderNode(), file_path="examples/examples/data/api_config.json")

 # Process with AI agent
 workflow.add_node("chat_agent", ChatAgent(),

@@ -644,7 +644,7 @@ workflow.add_node("process_success", Map(), operation="identity")
 workflow.add_node("handle_error", Map(), operation="identity")

 # Save results
-workflow.add_node("save_results",
+workflow.add_node("save_results", JSONWriterNode(), file_path="outputs/api_results.json")

 # Connect workflow
 workflow.connect("read_config", "chat_agent", {"data": "messages"})

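All four template strings above now emit complete node constructions where 0.1.2 left the calls dangling after the rename. Extracted from the template string, the first generated pipeline runs like this minimal sketch (node names and the connection port names `data` / `sorted_data` are copied from the hunks; the input path is a placeholder):

```python
# The "example workflow" template's pipeline, pulled out and run directly.
from kailash.workflow import Workflow
from kailash.nodes.data import CSVReaderNode, CSVWriterNode
from kailash.nodes.transform import Sort
from kailash.runtime.local import LocalRuntime

workflow = Workflow("template_demo", name="template_demo")
workflow.add_node("reader", CSVReaderNode(), file_path="data/input.csv")
workflow.add_node("sort", Sort(), field="value", reverse=True)
workflow.add_node("writer", CSVWriterNode(), file_path="outputs/results.csv")

# Port mappings as shown in the generated templates above.
workflow.connect("reader", "sort", {"data": "data"})
workflow.connect("sort", "writer", {"sorted_data": "data"})

runtime = LocalRuntime()
results, run_id = runtime.execute(workflow)
```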
kailash/visualization/performance.py
CHANGED
@@ -136,7 +136,7 @@ class PerformanceVisualizer:

         # Calculate timeline bounds
         min_time = min(t.started_at for t in tasks_with_times)
-
+        max(t.ended_at for t in tasks_with_times)

         # Create timeline bars
         y_positions = []

@@ -266,8 +266,8 @@ class PerformanceVisualizer:
         )

         # Memory usage chart
-
-
+        ax2.bar(x, memory_usage, color="lightgreen", edgecolor="black")
+        ax2.bar(
             x,
             memory_delta,
             bottom=memory_usage,

@@ -482,7 +482,7 @@ class PerformanceVisualizer:
         width = 0.35

         # I/O bytes chart
-
+        ax1.bar(
             x - width / 2,
             io_read_bytes,
             width,

@@ -490,7 +490,7 @@ class PerformanceVisualizer:
             color="lightblue",
             edgecolor="black",
         )
-
+        ax1.bar(
             x + width / 2,
             io_write_bytes,
             width,

@@ -507,7 +507,7 @@ class PerformanceVisualizer:
         ax1.grid(True, axis="y", alpha=0.3)

         # I/O operations count chart
-
+        ax2.bar(
             x - width / 2,
             io_read_count,
             width,

@@ -515,7 +515,7 @@ class PerformanceVisualizer:
             color="lightblue",
             edgecolor="black",
         )
-
+        ax2.bar(
             x + width / 2,
             io_write_count,
             width,

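The restored `ax1.bar` / `ax2.bar` calls implement the standard matplotlib grouped-bar idiom: the read series is shifted to `x - width / 2` and the write series to `x + width / 2` so each pair sits side by side. A standalone illustration with synthetic data (only the plotting pattern matches the file; the write-series color is a placeholder, since the hunks do not show it):

```python
import matplotlib.pyplot as plt
import numpy as np

# Synthetic I/O figures standing in for the collected task metrics.
io_read_bytes = [1024, 4096, 2048]
io_write_bytes = [512, 1024, 8192]
x = np.arange(len(io_read_bytes))
width = 0.35  # same bar width as in the hunks above

fig, ax1 = plt.subplots()
ax1.bar(x - width / 2, io_read_bytes, width, color="lightblue", edgecolor="black")
ax1.bar(x + width / 2, io_write_bytes, width, color="lightcoral", edgecolor="black")
ax1.grid(True, axis="y", alpha=0.3)
plt.show()
```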
kailash/visualization/reports.py
CHANGED
@@ -639,7 +639,7 @@ class WorkflowPerformanceReporter:

         summary = analysis["summary"]
         bottlenecks = analysis["bottlenecks"]
-
+        analysis["resource_analysis"]
         error_analysis = analysis["error_analysis"]

         # Efficiency insights

kailash/workflow/graph.py
CHANGED
@@ -10,11 +10,11 @@ import networkx as nx
 import yaml
 from pydantic import BaseModel, Field, ValidationError

-from kailash.nodes import Node
+from kailash.nodes.base import Node

 try:
     # For normal runtime, use the actual registry
-    from kailash.nodes import NodeRegistry
+    from kailash.nodes.base import NodeRegistry
 except ImportError:
     # For tests, use the mock registry
     from kailash.workflow.mock_registry import MockRegistry as NodeRegistry

@@ -815,8 +815,8 @@ class Workflow:

         # Try to find another key with a BaseModel
         for key, value in last_node_results.items():
-            if isinstance(value, BaseModel) and
-                state_model
+            if isinstance(value, BaseModel) and isinstance(
+                value, type(state_model)
             ):
                 return value, results

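The rewritten condition replaces a truncated comparison with `isinstance(value, type(state_model))`: a result is accepted only when it is an instance of the state object's own class (or a subclass), not merely any pydantic `BaseModel`. A standalone illustration of the difference:

```python
from pydantic import BaseModel

class PipelineState(BaseModel):
    count: int = 0

class OtherResult(BaseModel):
    name: str = ""

state_model = PipelineState()
last_node_results = {"a": OtherResult(), "b": PipelineState(count=3)}

for key, value in last_node_results.items():
    # Both values are BaseModel instances, but only "b" matches the
    # state's concrete class, so only it is treated as workflow state.
    if isinstance(value, BaseModel) and isinstance(value, type(state_model)):
        print(key, value)  # -> b count=3
```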
{kailash-0.1.2.dist-info → kailash-0.1.4.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: kailash
-Version: 0.1.2
+Version: 0.1.4
 Summary: Python SDK for the Kailash container-node architecture
 Home-page: https://github.com/integrum/kailash-python-sdk
 Author: Integrum

@@ -41,7 +41,8 @@ Requires-Dist: autodoc>=0.5.0
 Requires-Dist: myst-parser>=4.0.1
 Requires-Dist: black>=25.1.0
 Requires-Dist: psutil>=7.0.0
-Requires-Dist: fastapi
+Requires-Dist: fastapi>=0.115.12
+Requires-Dist: uvicorn[standard]>=0.31.0
 Requires-Dist: pytest-asyncio>=1.0.0
 Requires-Dist: pre-commit>=4.2.0
 Requires-Dist: twine>=6.1.0

@@ -65,7 +66,7 @@ Dynamic: requires-python
 <a href="https://pepy.tech/project/kailash"><img src="https://static.pepy.tech/badge/kailash" alt="Downloads"></a>
 <img src="https://img.shields.io/badge/license-MIT-green.svg" alt="MIT License">
 <img src="https://img.shields.io/badge/code%20style-black-000000.svg" alt="Code style: black">
-<img src="https://img.shields.io/badge/tests-
+<img src="https://img.shields.io/badge/tests-753%20passing-brightgreen.svg" alt="Tests: 753 passing">
 <img src="https://img.shields.io/badge/coverage-100%25-brightgreen.svg" alt="Coverage: 100%">
 </p>

@@ -89,6 +90,8 @@ Dynamic: requires-python
 - ⚡ **Fast Installation**: Uses `uv` for lightning-fast Python package management
 - 🤖 **AI-Powered**: Complete LLM agents, embeddings, and hierarchical RAG architecture
 - 🧠 **Retrieval-Augmented Generation**: Full RAG pipeline with intelligent document processing
+- 🌐 **REST API Wrapper**: Expose any workflow as a production-ready API in 3 lines
+- 🚪 **Multi-Workflow Gateway**: Manage multiple workflows through a unified API with MCP integration

 ## 🎯 Who Is This For?

@@ -122,7 +125,7 @@ uv sync

 ```python
 from kailash.workflow import Workflow
-from kailash.nodes.data import
+from kailash.nodes.data import CSVReaderNode
 from kailash.nodes.code import PythonCodeNode
 from kailash.runtime.local import LocalRuntime
 import pandas as pd

@@ -131,7 +134,7 @@ import pandas as pd
 workflow = Workflow("customer_analysis", name="customer_analysis")

 # Add data reader
-reader =
+reader = CSVReaderNode(file_path="customers.csv")
 workflow.add_node("read_customers", reader)

 # Add custom processing using Python code

@@ -169,7 +172,7 @@ workflow.save("customer_analysis.yaml", format="yaml")

 ```python
 from kailash.workflow import Workflow
-from kailash.nodes.data import SharePointGraphReader,
+from kailash.nodes.data import SharePointGraphReader, CSVWriterNode
 import os

 # Create workflow for SharePoint file processing

@@ -180,7 +183,7 @@ sharepoint = SharePointGraphReader()
 workflow.add_node("read_sharepoint", sharepoint)

 # Process downloaded files
-csv_writer =
+csv_writer = CSVWriterNode(file_path="sharepoint_output.csv")
 workflow.add_node("save_locally", csv_writer)

 # Connect nodes

@@ -208,8 +211,8 @@ results, run_id = runtime.execute(workflow, inputs=inputs)

 ```python
 from kailash.workflow import Workflow
-from kailash.nodes.ai.embedding_generator import
-from kailash.nodes.ai.llm_agent import
+from kailash.nodes.ai.embedding_generator import EmbeddingGeneratorNode
+from kailash.nodes.ai.llm_agent import LLMAgentNode
 from kailash.nodes.data.sources import DocumentSourceNode, QuerySourceNode
 from kailash.nodes.data.retrieval import RelevanceScorerNode
 from kailash.nodes.transform.chunkers import HierarchicalChunkerNode

@@ -230,17 +233,17 @@ chunk_text_extractor = ChunkTextExtractorNode()
 query_text_wrapper = QueryTextWrapperNode()

 # AI processing with Ollama
-chunk_embedder =
+chunk_embedder = EmbeddingGeneratorNode(
     provider="ollama", model="nomic-embed-text", operation="embed_batch"
 )
-query_embedder =
+query_embedder = EmbeddingGeneratorNode(
     provider="ollama", model="nomic-embed-text", operation="embed_batch"
 )

 # Retrieval and response generation
 relevance_scorer = RelevanceScorerNode()
 context_formatter = ContextFormatterNode()
-llm_agent =
+llm_agent = LLMAgentNode(provider="ollama", model="llama3.2", temperature=0.7)

 # Add all nodes to workflow
 for name, node in {

@@ -273,6 +276,140 @@ results, run_id = runtime.execute(workflow)
 print("RAG Response:", results["llm_agent"]["response"])
 ```

+### Workflow API Wrapper - Expose Workflows as REST APIs
+
+Transform any Kailash workflow into a production-ready REST API in just 3 lines of code:
+
+```python
+from kailash.api.workflow_api import WorkflowAPI
+
+# Take any workflow and expose it as an API
+api = WorkflowAPI(workflow)
+api.run(port=8000)  # That's it! Your workflow is now a REST API
+```
+
+#### Features
+
+- **Automatic REST Endpoints**:
+  - `POST /execute` - Execute workflow with inputs
+  - `GET /workflow/info` - Get workflow metadata
+  - `GET /health` - Health check endpoint
+  - Automatic OpenAPI docs at `/docs`
+
+- **Multiple Execution Modes**:
+  ```bash
+  # Synchronous execution (wait for results)
+  curl -X POST http://localhost:8000/execute \
+    -d '{"inputs": {...}, "mode": "sync"}'
+
+  # Asynchronous execution (get execution ID)
+  curl -X POST http://localhost:8000/execute \
+    -d '{"inputs": {...}, "mode": "async"}'
+
+  # Check async status
+  curl http://localhost:8000/status/{execution_id}
+  ```
+
+- **Specialized APIs** for specific domains:
+  ```python
+  from kailash.api.workflow_api import create_workflow_api
+
+  # Create a RAG-specific API with custom endpoints
+  api = create_workflow_api(rag_workflow, api_type="rag")
+  # Adds /documents and /query endpoints
+  ```
+
+- **Production Ready**:
+  ```python
+  # Development
+  api.run(reload=True, log_level="debug")
+
+  # Production with SSL
+  api.run(
+      host="0.0.0.0",
+      port=443,
+      ssl_keyfile="key.pem",
+      ssl_certfile="cert.pem",
+      workers=4
+  )
+  ```
+
+See the [API demo example](examples/integration_examples/integration_api_demo.py) for complete usage patterns.
+
+### Multi-Workflow API Gateway - Manage Multiple Workflows
+
+Run multiple workflows through a single unified API gateway with dynamic routing and MCP integration:
+
+```python
+from kailash.api.gateway import WorkflowAPIGateway
+from kailash.api.mcp_integration import MCPIntegration
+
+# Create gateway
+gateway = WorkflowAPIGateway(
+    title="Enterprise Platform",
+    description="Unified API for all workflows"
+)
+
+# Register multiple workflows
+gateway.register_workflow("sales", sales_workflow)
+gateway.register_workflow("analytics", analytics_workflow)
+gateway.register_workflow("reports", reporting_workflow)
+
+# Add AI-powered tools via MCP
+mcp = MCPIntegration("ai_tools")
+mcp.add_tool("analyze", analyze_function)
+mcp.add_tool("predict", predict_function)
+gateway.register_mcp_server("ai", mcp)
+
+# Run unified server
+gateway.run(port=8000)
+```
+
+#### Gateway Features
+
+- **Unified Access Point**: All workflows accessible through one server
+  - `/sales/execute` - Execute sales workflow
+  - `/analytics/execute` - Execute analytics workflow
+  - `/workflows` - List all available workflows
+  - `/health` - Check health of all services
+
+- **MCP Integration**: AI-powered tools available to all workflows
+  ```python
+  # Use MCP tools in workflows
+  from kailash.api.mcp_integration import MCPToolNode
+
+  tool_node = MCPToolNode(
+      mcp_server="ai_tools",
+      tool_name="analyze"
+  )
+  workflow.add_node("ai_analysis", tool_node)
+  ```
+
+- **Flexible Deployment Patterns**:
+  ```python
+  # Pattern 1: Single Gateway (most cases)
+  gateway.register_workflow("workflow1", wf1)
+  gateway.register_workflow("workflow2", wf2)
+
+  # Pattern 2: Hybrid (heavy workflows separate)
+  gateway.register_workflow("light", light_wf)
+  gateway.proxy_workflow("heavy", "http://gpu-service:8080")
+
+  # Pattern 3: High Availability
+  # Run multiple gateway instances behind a load balancer
+
+  # Pattern 4: Kubernetes
+  # Deploy with horizontal pod autoscaling
+  ```
+
+- **Production Features**:
+  - WebSocket support for real-time updates
+  - Health monitoring across all workflows
+  - Dynamic workflow registration/unregistration
+  - Built-in CORS and authentication support
+
+See the [Gateway examples](examples/integration_examples/gateway_comprehensive_demo.py) for complete implementation patterns.
+
 ## 📚 Documentation

 | Resource | Description |
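Since the endpoints added above are plain HTTP, a Python client needs nothing beyond `requests`. A hedged sketch using the request shape from the curl examples (the `execution_id` field name in the async response is an assumption, not confirmed by this diff):

```python
import requests

# Synchronous execution: block until the workflow finishes.
resp = requests.post(
    "http://localhost:8000/execute",
    json={"inputs": {"file_path": "customers.csv"}, "mode": "sync"},
)
resp.raise_for_status()
print(resp.json())

# Asynchronous execution: submit, then poll the status endpoint.
job = requests.post(
    "http://localhost:8000/execute",
    json={"inputs": {}, "mode": "async"},
).json()
status = requests.get(f"http://localhost:8000/status/{job['execution_id']}")
print(status.json())
```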
@@ -294,14 +431,14 @@ The SDK includes a rich set of pre-built nodes for common operations:
 <td width="50%">

 **Data Operations**
-- `
-- `
+- `CSVReaderNode` - Read CSV files
+- `JSONReaderNode` - Read JSON files
 - `DocumentSourceNode` - Sample document provider
 - `QuerySourceNode` - Sample query provider
 - `RelevanceScorerNode` - Multi-method similarity
 - `SQLDatabaseNode` - Query databases
-- `
-- `
+- `CSVWriterNode` - Write CSV files
+- `JSONWriterNode` - Write JSON files

 </td>
 <td width="50%">

@@ -316,14 +453,19 @@ The SDK includes a rich set of pre-built nodes for common operations:
 - `Filter` - Filter records
 - `Aggregator` - Aggregate data

+**Logic Nodes**
+- `SwitchNode` - Conditional routing
+- `MergeNode` - Combine multiple inputs
+- `WorkflowNode` - Wrap workflows as reusable nodes
+
 </td>
 </tr>
 <tr>
 <td width="50%">

 **AI/ML Nodes**
-- `
-- `
+- `LLMAgentNode` - Multi-provider LLM with memory & tools
+- `EmbeddingGeneratorNode` - Vector embeddings with caching
 - `MCPClient/MCPServer` - Model Context Protocol
 - `TextClassifier` - Text classification
 - `SentimentAnalyzer` - Sentiment analysis

@@ -363,14 +505,14 @@ The SDK includes a rich set of pre-built nodes for common operations:
 #### Workflow Management
 ```python
 from kailash.workflow import Workflow
-from kailash.nodes.logic import
+from kailash.nodes.logic import SwitchNode
 from kailash.nodes.transform import DataTransformer

 # Create complex workflows with branching logic
 workflow = Workflow("data_pipeline", name="data_pipeline")

-# Add conditional branching with
-switch =
+# Add conditional branching with SwitchNode
+switch = SwitchNode()
 workflow.add_node("route", switch)

 # Different paths based on validation

@@ -384,6 +526,35 @@ workflow.connect("route", "process_valid")
 workflow.connect("route", "handle_errors")
 ```

+#### Hierarchical Workflow Composition
+```python
+from kailash.workflow import Workflow
+from kailash.nodes.logic import WorkflowNode
+from kailash.runtime.local import LocalRuntime
+
+# Create a reusable data processing workflow
+inner_workflow = Workflow("data_processor", name="Data Processor")
+# ... add nodes to inner workflow ...
+
+# Wrap the workflow as a node
+processor_node = WorkflowNode(
+    workflow=inner_workflow,
+    name="data_processor"
+)
+
+# Use in a larger workflow
+main_workflow = Workflow("main", name="Main Pipeline")
+main_workflow.add_node("process", processor_node)
+main_workflow.add_node("analyze", analyzer_node)
+
+# Connect workflows
+main_workflow.connect("process", "analyze")
+
+# Execute - parameters automatically mapped to inner workflow
+runtime = LocalRuntime()
+results, _ = runtime.execute(main_workflow)
+```
+
 #### Immutable State Management
 ```python
 from kailash.workflow import Workflow

@@ -667,13 +838,13 @@ chunk_text_extractor = ChunkTextExtractorNode()
 query_text_wrapper = QueryTextWrapperNode()

 # Create embedding generators
-chunk_embedder =
+chunk_embedder = EmbeddingGeneratorNode(
     provider="ollama",
     model="nomic-embed-text",
     operation="embed_batch"
 )

-query_embedder =
+query_embedder = EmbeddingGeneratorNode(
     provider="ollama",
     model="nomic-embed-text",
     operation="embed_batch"

@@ -684,7 +855,7 @@ relevance_scorer = RelevanceScorerNode(similarity_method="cosine")
 context_formatter = ContextFormatterNode()

 # Create LLM agent for final answer generation
-llm_agent =
+llm_agent = LLMAgentNode(
     provider="ollama",
     model="llama3.2",
     temperature=0.7,

@@ -803,10 +974,10 @@ kailash/
 The SDK features a unified provider architecture for AI capabilities:

 ```python
-from kailash.nodes.ai import
+from kailash.nodes.ai import LLMAgentNode, EmbeddingGeneratorNode

 # Multi-provider LLM support
-agent =
+agent = LLMAgentNode()
 result = agent.run(
     provider="ollama",  # or "openai", "anthropic", "mock"
     model="llama3.1:8b-instruct-q8_0",

@@ -815,7 +986,7 @@ result = agent.run(
 )

 # Vector embeddings with the same providers
-embedder =
+embedder = EmbeddingGeneratorNode()
 embedding = embedder.run(
     provider="ollama",  # Same providers support embeddings
     model="snowflake-arctic-embed2",