kailash 0.1.3__tar.gz → 0.1.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (95)
  1. {kailash-0.1.3/src/kailash.egg-info → kailash-0.1.4}/PKG-INFO +103 -28
  2. {kailash-0.1.3 → kailash-0.1.4}/README.md +102 -27
  3. {kailash-0.1.3 → kailash-0.1.4}/pyproject.toml +1 -1
  4. {kailash-0.1.3 → kailash-0.1.4}/setup.py +1 -1
  5. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/__init__.py +1 -1
  6. kailash-0.1.4/src/kailash/api/__init__.py +17 -0
  7. kailash-0.1.4/src/kailash/api/gateway.py +394 -0
  8. kailash-0.1.4/src/kailash/api/mcp_integration.py +478 -0
  9. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/api/workflow_api.py +29 -13
  10. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/ai/__init__.py +4 -4
  11. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/ai/agents.py +4 -4
  12. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/ai/ai_providers.py +18 -22
  13. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/ai/embedding_generator.py +34 -38
  14. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/ai/llm_agent.py +351 -356
  15. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/base.py +60 -64
  16. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/code/python.py +61 -42
  17. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/data/__init__.py +10 -10
  18. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/data/readers.py +27 -29
  19. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/data/retrieval.py +1 -1
  20. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/data/sharepoint_graph.py +23 -25
  21. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/data/sql.py +24 -26
  22. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/data/writers.py +41 -44
  23. kailash-0.1.4/src/kailash/nodes/logic/__init__.py +13 -0
  24. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/logic/async_operations.py +14 -14
  25. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/logic/operations.py +18 -22
  26. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/mcp/client.py +29 -33
  27. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/transform/formatters.py +1 -1
  28. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/tracking/metrics_collector.py +6 -7
  29. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/utils/export.py +2 -2
  30. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/utils/templates.py +16 -16
  31. {kailash-0.1.3 → kailash-0.1.4/src/kailash.egg-info}/PKG-INFO +103 -28
  32. {kailash-0.1.3 → kailash-0.1.4}/src/kailash.egg-info/SOURCES.txt +2 -0
  33. kailash-0.1.3/src/kailash/api/__init__.py +0 -7
  34. kailash-0.1.3/src/kailash/nodes/logic/__init__.py +0 -7
  35. {kailash-0.1.3 → kailash-0.1.4}/LICENSE +0 -0
  36. {kailash-0.1.3 → kailash-0.1.4}/MANIFEST.in +0 -0
  37. {kailash-0.1.3 → kailash-0.1.4}/setup.cfg +0 -0
  38. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/__main__.py +0 -0
  39. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/cli/__init__.py +0 -0
  40. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/cli/commands.py +0 -0
  41. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/manifest.py +0 -0
  42. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/__init__.py +0 -0
  43. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/ai/models.py +0 -0
  44. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/api/__init__.py +0 -0
  45. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/api/auth.py +0 -0
  46. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/api/graphql.py +0 -0
  47. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/api/http.py +0 -0
  48. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/api/rate_limiting.py +0 -0
  49. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/api/rest.py +0 -0
  50. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/base_async.py +0 -0
  51. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/code/__init__.py +0 -0
  52. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/data/sources.py +0 -0
  53. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/data/streaming.py +0 -0
  54. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/data/vector_db.py +0 -0
  55. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/logic/workflow.py +0 -0
  56. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/mcp/__init__.py +0 -0
  57. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/mcp/resource.py +0 -0
  58. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/mcp/server.py +0 -0
  59. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/transform/__init__.py +0 -0
  60. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/transform/chunkers.py +0 -0
  61. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/nodes/transform/processors.py +0 -0
  62. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/runtime/__init__.py +0 -0
  63. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/runtime/async_local.py +0 -0
  64. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/runtime/docker.py +0 -0
  65. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/runtime/local.py +0 -0
  66. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/runtime/parallel.py +0 -0
  67. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/runtime/runner.py +0 -0
  68. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/runtime/testing.py +0 -0
  69. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/sdk_exceptions.py +0 -0
  70. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/tracking/__init__.py +0 -0
  71. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/tracking/manager.py +0 -0
  72. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/tracking/models.py +0 -0
  73. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/tracking/storage/__init__.py +0 -0
  74. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/tracking/storage/base.py +0 -0
  75. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/tracking/storage/database.py +0 -0
  76. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/tracking/storage/filesystem.py +0 -0
  77. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/utils/__init__.py +0 -0
  78. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/visualization/__init__.py +0 -0
  79. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/visualization/api.py +0 -0
  80. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/visualization/dashboard.py +0 -0
  81. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/visualization/performance.py +0 -0
  82. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/visualization/reports.py +0 -0
  83. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/workflow/__init__.py +0 -0
  84. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/workflow/builder.py +0 -0
  85. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/workflow/graph.py +0 -0
  86. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/workflow/mermaid_visualizer.py +0 -0
  87. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/workflow/mock_registry.py +0 -0
  88. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/workflow/runner.py +0 -0
  89. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/workflow/state.py +0 -0
  90. {kailash-0.1.3 → kailash-0.1.4}/src/kailash/workflow/visualization.py +0 -0
  91. {kailash-0.1.3 → kailash-0.1.4}/src/kailash.egg-info/dependency_links.txt +0 -0
  92. {kailash-0.1.3 → kailash-0.1.4}/src/kailash.egg-info/entry_points.txt +0 -0
  93. {kailash-0.1.3 → kailash-0.1.4}/src/kailash.egg-info/not-zip-safe +0 -0
  94. {kailash-0.1.3 → kailash-0.1.4}/src/kailash.egg-info/requires.txt +0 -0
  95. {kailash-0.1.3 → kailash-0.1.4}/src/kailash.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: kailash
3
- Version: 0.1.3
3
+ Version: 0.1.4
4
4
  Summary: Python SDK for the Kailash container-node architecture
5
5
  Home-page: https://github.com/integrum/kailash-python-sdk
6
6
  Author: Integrum
@@ -66,7 +66,7 @@ Dynamic: requires-python
66
66
  <a href="https://pepy.tech/project/kailash"><img src="https://static.pepy.tech/badge/kailash" alt="Downloads"></a>
67
67
  <img src="https://img.shields.io/badge/license-MIT-green.svg" alt="MIT License">
68
68
  <img src="https://img.shields.io/badge/code%20style-black-000000.svg" alt="Code style: black">
69
- <img src="https://img.shields.io/badge/tests-746%20passing-brightgreen.svg" alt="Tests: 746 passing">
69
+ <img src="https://img.shields.io/badge/tests-753%20passing-brightgreen.svg" alt="Tests: 753 passing">
70
70
  <img src="https://img.shields.io/badge/coverage-100%25-brightgreen.svg" alt="Coverage: 100%">
71
71
  </p>
72
72
 
@@ -91,6 +91,7 @@ Dynamic: requires-python
91
91
  - 🤖 **AI-Powered**: Complete LLM agents, embeddings, and hierarchical RAG architecture
92
92
  - 🧠 **Retrieval-Augmented Generation**: Full RAG pipeline with intelligent document processing
93
93
  - 🌐 **REST API Wrapper**: Expose any workflow as a production-ready API in 3 lines
94
+ - 🚪 **Multi-Workflow Gateway**: Manage multiple workflows through unified API with MCP integration
94
95
 
95
96
  ## 🎯 Who Is This For?
96
97
 
@@ -124,7 +125,7 @@ uv sync
124
125
 
125
126
  ```python
126
127
  from kailash.workflow import Workflow
127
- from kailash.nodes.data import CSVReader
128
+ from kailash.nodes.data import CSVReaderNode
128
129
  from kailash.nodes.code import PythonCodeNode
129
130
  from kailash.runtime.local import LocalRuntime
130
131
  import pandas as pd
@@ -133,7 +134,7 @@ import pandas as pd
133
134
  workflow = Workflow("customer_analysis", name="customer_analysis")
134
135
 
135
136
  # Add data reader
136
- reader = CSVReader(file_path="customers.csv")
137
+ reader = CSVReaderNode(file_path="customers.csv")
137
138
  workflow.add_node("read_customers", reader)
138
139
 
139
140
  # Add custom processing using Python code
@@ -171,7 +172,7 @@ workflow.save("customer_analysis.yaml", format="yaml")
171
172
 
172
173
  ```python
173
174
  from kailash.workflow import Workflow
174
- from kailash.nodes.data import SharePointGraphReader, CSVWriter
175
+ from kailash.nodes.data import SharePointGraphReader, CSVWriterNode
175
176
  import os
176
177
 
177
178
  # Create workflow for SharePoint file processing
@@ -182,7 +183,7 @@ sharepoint = SharePointGraphReader()
182
183
  workflow.add_node("read_sharepoint", sharepoint)
183
184
 
184
185
  # Process downloaded files
185
- csv_writer = CSVWriter(file_path="sharepoint_output.csv")
186
+ csv_writer = CSVWriterNode(file_path="sharepoint_output.csv")
186
187
  workflow.add_node("save_locally", csv_writer)
187
188
 
188
189
  # Connect nodes
@@ -210,8 +211,8 @@ results, run_id = runtime.execute(workflow, inputs=inputs)
210
211
 
211
212
  ```python
212
213
  from kailash.workflow import Workflow
213
- from kailash.nodes.ai.embedding_generator import EmbeddingGenerator
214
- from kailash.nodes.ai.llm_agent import LLMAgent
214
+ from kailash.nodes.ai.embedding_generator import EmbeddingGeneratorNode
215
+ from kailash.nodes.ai.llm_agent import LLMAgentNode
215
216
  from kailash.nodes.data.sources import DocumentSourceNode, QuerySourceNode
216
217
  from kailash.nodes.data.retrieval import RelevanceScorerNode
217
218
  from kailash.nodes.transform.chunkers import HierarchicalChunkerNode
@@ -232,17 +233,17 @@ chunk_text_extractor = ChunkTextExtractorNode()
232
233
  query_text_wrapper = QueryTextWrapperNode()
233
234
 
234
235
  # AI processing with Ollama
235
- chunk_embedder = EmbeddingGenerator(
236
+ chunk_embedder = EmbeddingGeneratorNode(
236
237
  provider="ollama", model="nomic-embed-text", operation="embed_batch"
237
238
  )
238
- query_embedder = EmbeddingGenerator(
239
+ query_embedder = EmbeddingGeneratorNode(
239
240
  provider="ollama", model="nomic-embed-text", operation="embed_batch"
240
241
  )
241
242
 
242
243
  # Retrieval and response generation
243
244
  relevance_scorer = RelevanceScorerNode()
244
245
  context_formatter = ContextFormatterNode()
245
- llm_agent = LLMAgent(provider="ollama", model="llama3.2", temperature=0.7)
246
+ llm_agent = LLMAgentNode(provider="ollama", model="llama3.2", temperature=0.7)
246
247
 
247
248
  # Add all nodes to workflow
248
249
  for name, node in {
@@ -335,6 +336,80 @@ api.run(port=8000) # That's it! Your workflow is now a REST API
335
336
 
336
337
  See the [API demo example](examples/integration_examples/integration_api_demo.py) for complete usage patterns.
337
338
 
339
+ ### Multi-Workflow API Gateway - Manage Multiple Workflows
340
+
341
+ Run multiple workflows through a single unified API gateway with dynamic routing and MCP integration:
342
+
343
+ ```python
344
+ from kailash.api.gateway import WorkflowAPIGateway
345
+ from kailash.api.mcp_integration import MCPIntegration
346
+
347
+ # Create gateway
348
+ gateway = WorkflowAPIGateway(
349
+ title="Enterprise Platform",
350
+ description="Unified API for all workflows"
351
+ )
352
+
353
+ # Register multiple workflows
354
+ gateway.register_workflow("sales", sales_workflow)
355
+ gateway.register_workflow("analytics", analytics_workflow)
356
+ gateway.register_workflow("reports", reporting_workflow)
357
+
358
+ # Add AI-powered tools via MCP
359
+ mcp = MCPIntegration("ai_tools")
360
+ mcp.add_tool("analyze", analyze_function)
361
+ mcp.add_tool("predict", predict_function)
362
+ gateway.register_mcp_server("ai", mcp)
363
+
364
+ # Run unified server
365
+ gateway.run(port=8000)
366
+ ```
367
+
368
+ #### Gateway Features
369
+
370
+ - **Unified Access Point**: All workflows accessible through one server
371
+ - `/sales/execute` - Execute sales workflow
372
+ - `/analytics/execute` - Execute analytics workflow
373
+ - `/workflows` - List all available workflows
374
+ - `/health` - Check health of all services
375
+
376
+ - **MCP Integration**: AI-powered tools available to all workflows
377
+ ```python
378
+ # Use MCP tools in workflows
379
+ from kailash.api.mcp_integration import MCPToolNode
380
+
381
+ tool_node = MCPToolNode(
382
+ mcp_server="ai_tools",
383
+ tool_name="analyze"
384
+ )
385
+ workflow.add_node("ai_analysis", tool_node)
386
+ ```
387
+
388
+ - **Flexible Deployment Patterns**:
389
+ ```python
390
+ # Pattern 1: Single Gateway (most cases)
391
+ gateway.register_workflow("workflow1", wf1)
392
+ gateway.register_workflow("workflow2", wf2)
393
+
394
+ # Pattern 2: Hybrid (heavy workflows separate)
395
+ gateway.register_workflow("light", light_wf)
396
+ gateway.proxy_workflow("heavy", "http://gpu-service:8080")
397
+
398
+ # Pattern 3: High Availability
399
+ # Run multiple gateway instances behind load balancer
400
+
401
+ # Pattern 4: Kubernetes
402
+ # Deploy with horizontal pod autoscaling
403
+ ```
404
+
405
+ - **Production Features**:
406
+ - WebSocket support for real-time updates
407
+ - Health monitoring across all workflows
408
+ - Dynamic workflow registration/unregistration
409
+ - Built-in CORS and authentication support
410
+
411
+ See the [Gateway examples](examples/integration_examples/gateway_comprehensive_demo.py) for complete implementation patterns.
412
+
338
413
  ## 📚 Documentation
339
414
 
340
415
  | Resource | Description |
@@ -356,14 +431,14 @@ The SDK includes a rich set of pre-built nodes for common operations:
356
431
  <td width="50%">
357
432
 
358
433
  **Data Operations**
359
- - `CSVReader` - Read CSV files
360
- - `JSONReader` - Read JSON files
434
+ - `CSVReaderNode` - Read CSV files
435
+ - `JSONReaderNode` - Read JSON files
361
436
  - `DocumentSourceNode` - Sample document provider
362
437
  - `QuerySourceNode` - Sample query provider
363
438
  - `RelevanceScorerNode` - Multi-method similarity
364
439
  - `SQLDatabaseNode` - Query databases
365
- - `CSVWriter` - Write CSV files
366
- - `JSONWriter` - Write JSON files
440
+ - `CSVWriterNode` - Write CSV files
441
+ - `JSONWriterNode` - Write JSON files
367
442
 
368
443
  </td>
369
444
  <td width="50%">
@@ -379,8 +454,8 @@ The SDK includes a rich set of pre-built nodes for common operations:
379
454
  - `Aggregator` - Aggregate data
380
455
 
381
456
  **Logic Nodes**
382
- - `Switch` - Conditional routing
383
- - `Merge` - Combine multiple inputs
457
+ - `SwitchNode` - Conditional routing
458
+ - `MergeNode` - Combine multiple inputs
384
459
  - `WorkflowNode` - Wrap workflows as reusable nodes
385
460
 
386
461
  </td>
@@ -389,8 +464,8 @@ The SDK includes a rich set of pre-built nodes for common operations:
389
464
  <td width="50%">
390
465
 
391
466
  **AI/ML Nodes**
392
- - `LLMAgent` - Multi-provider LLM with memory & tools
393
- - `EmbeddingGenerator` - Vector embeddings with caching
467
+ - `LLMAgentNode` - Multi-provider LLM with memory & tools
468
+ - `EmbeddingGeneratorNode` - Vector embeddings with caching
394
469
  - `MCPClient/MCPServer` - Model Context Protocol
395
470
  - `TextClassifier` - Text classification
396
471
  - `SentimentAnalyzer` - Sentiment analysis
@@ -430,14 +505,14 @@ The SDK includes a rich set of pre-built nodes for common operations:
430
505
  #### Workflow Management
431
506
  ```python
432
507
  from kailash.workflow import Workflow
433
- from kailash.nodes.logic import Switch
508
+ from kailash.nodes.logic import SwitchNode
434
509
  from kailash.nodes.transform import DataTransformer
435
510
 
436
511
  # Create complex workflows with branching logic
437
512
  workflow = Workflow("data_pipeline", name="data_pipeline")
438
513
 
439
- # Add conditional branching with Switch node
440
- switch = Switch()
514
+ # Add conditional branching with SwitchNode
515
+ switch = SwitchNode()
441
516
  workflow.add_node("route", switch)
442
517
 
443
518
  # Different paths based on validation
@@ -763,13 +838,13 @@ chunk_text_extractor = ChunkTextExtractorNode()
763
838
  query_text_wrapper = QueryTextWrapperNode()
764
839
 
765
840
  # Create embedding generators
766
- chunk_embedder = EmbeddingGenerator(
841
+ chunk_embedder = EmbeddingGeneratorNode(
767
842
  provider="ollama",
768
843
  model="nomic-embed-text",
769
844
  operation="embed_batch"
770
845
  )
771
846
 
772
- query_embedder = EmbeddingGenerator(
847
+ query_embedder = EmbeddingGeneratorNode(
773
848
  provider="ollama",
774
849
  model="nomic-embed-text",
775
850
  operation="embed_batch"
@@ -780,7 +855,7 @@ relevance_scorer = RelevanceScorerNode(similarity_method="cosine")
780
855
  context_formatter = ContextFormatterNode()
781
856
 
782
857
  # Create LLM agent for final answer generation
783
- llm_agent = LLMAgent(
858
+ llm_agent = LLMAgentNode(
784
859
  provider="ollama",
785
860
  model="llama3.2",
786
861
  temperature=0.7,
@@ -899,10 +974,10 @@ kailash/
899
974
  The SDK features a unified provider architecture for AI capabilities:
900
975
 
901
976
  ```python
902
- from kailash.nodes.ai import LLMAgent, EmbeddingGenerator
977
+ from kailash.nodes.ai import LLMAgentNode, EmbeddingGeneratorNode
903
978
 
904
979
  # Multi-provider LLM support
905
- agent = LLMAgent()
980
+ agent = LLMAgentNode()
906
981
  result = agent.run(
907
982
  provider="ollama", # or "openai", "anthropic", "mock"
908
983
  model="llama3.1:8b-instruct-q8_0",
@@ -911,7 +986,7 @@ result = agent.run(
911
986
  )
912
987
 
913
988
  # Vector embeddings with the same providers
914
- embedder = EmbeddingGenerator()
989
+ embedder = EmbeddingGeneratorNode()
915
990
  embedding = embedder.run(
916
991
  provider="ollama", # Same providers support embeddings
917
992
  model="snowflake-arctic-embed2",
@@ -6,7 +6,7 @@
6
6
  <a href="https://pepy.tech/project/kailash"><img src="https://static.pepy.tech/badge/kailash" alt="Downloads"></a>
7
7
  <img src="https://img.shields.io/badge/license-MIT-green.svg" alt="MIT License">
8
8
  <img src="https://img.shields.io/badge/code%20style-black-000000.svg" alt="Code style: black">
9
- <img src="https://img.shields.io/badge/tests-746%20passing-brightgreen.svg" alt="Tests: 746 passing">
9
+ <img src="https://img.shields.io/badge/tests-753%20passing-brightgreen.svg" alt="Tests: 753 passing">
10
10
  <img src="https://img.shields.io/badge/coverage-100%25-brightgreen.svg" alt="Coverage: 100%">
11
11
  </p>
12
12
 
@@ -31,6 +31,7 @@
31
31
  - 🤖 **AI-Powered**: Complete LLM agents, embeddings, and hierarchical RAG architecture
32
32
  - 🧠 **Retrieval-Augmented Generation**: Full RAG pipeline with intelligent document processing
33
33
  - 🌐 **REST API Wrapper**: Expose any workflow as a production-ready API in 3 lines
34
+ - 🚪 **Multi-Workflow Gateway**: Manage multiple workflows through unified API with MCP integration
34
35
 
35
36
  ## 🎯 Who Is This For?
36
37
 
@@ -64,7 +65,7 @@ uv sync
64
65
 
65
66
  ```python
66
67
  from kailash.workflow import Workflow
67
- from kailash.nodes.data import CSVReader
68
+ from kailash.nodes.data import CSVReaderNode
68
69
  from kailash.nodes.code import PythonCodeNode
69
70
  from kailash.runtime.local import LocalRuntime
70
71
  import pandas as pd
@@ -73,7 +74,7 @@ import pandas as pd
73
74
  workflow = Workflow("customer_analysis", name="customer_analysis")
74
75
 
75
76
  # Add data reader
76
- reader = CSVReader(file_path="customers.csv")
77
+ reader = CSVReaderNode(file_path="customers.csv")
77
78
  workflow.add_node("read_customers", reader)
78
79
 
79
80
  # Add custom processing using Python code
@@ -111,7 +112,7 @@ workflow.save("customer_analysis.yaml", format="yaml")
111
112
 
112
113
  ```python
113
114
  from kailash.workflow import Workflow
114
- from kailash.nodes.data import SharePointGraphReader, CSVWriter
115
+ from kailash.nodes.data import SharePointGraphReader, CSVWriterNode
115
116
  import os
116
117
 
117
118
  # Create workflow for SharePoint file processing
@@ -122,7 +123,7 @@ sharepoint = SharePointGraphReader()
122
123
  workflow.add_node("read_sharepoint", sharepoint)
123
124
 
124
125
  # Process downloaded files
125
- csv_writer = CSVWriter(file_path="sharepoint_output.csv")
126
+ csv_writer = CSVWriterNode(file_path="sharepoint_output.csv")
126
127
  workflow.add_node("save_locally", csv_writer)
127
128
 
128
129
  # Connect nodes
@@ -150,8 +151,8 @@ results, run_id = runtime.execute(workflow, inputs=inputs)
150
151
 
151
152
  ```python
152
153
  from kailash.workflow import Workflow
153
- from kailash.nodes.ai.embedding_generator import EmbeddingGenerator
154
- from kailash.nodes.ai.llm_agent import LLMAgent
154
+ from kailash.nodes.ai.embedding_generator import EmbeddingGeneratorNode
155
+ from kailash.nodes.ai.llm_agent import LLMAgentNode
155
156
  from kailash.nodes.data.sources import DocumentSourceNode, QuerySourceNode
156
157
  from kailash.nodes.data.retrieval import RelevanceScorerNode
157
158
  from kailash.nodes.transform.chunkers import HierarchicalChunkerNode
@@ -172,17 +173,17 @@ chunk_text_extractor = ChunkTextExtractorNode()
172
173
  query_text_wrapper = QueryTextWrapperNode()
173
174
 
174
175
  # AI processing with Ollama
175
- chunk_embedder = EmbeddingGenerator(
176
+ chunk_embedder = EmbeddingGeneratorNode(
176
177
  provider="ollama", model="nomic-embed-text", operation="embed_batch"
177
178
  )
178
- query_embedder = EmbeddingGenerator(
179
+ query_embedder = EmbeddingGeneratorNode(
179
180
  provider="ollama", model="nomic-embed-text", operation="embed_batch"
180
181
  )
181
182
 
182
183
  # Retrieval and response generation
183
184
  relevance_scorer = RelevanceScorerNode()
184
185
  context_formatter = ContextFormatterNode()
185
- llm_agent = LLMAgent(provider="ollama", model="llama3.2", temperature=0.7)
186
+ llm_agent = LLMAgentNode(provider="ollama", model="llama3.2", temperature=0.7)
186
187
 
187
188
  # Add all nodes to workflow
188
189
  for name, node in {
@@ -275,6 +276,80 @@ api.run(port=8000) # That's it! Your workflow is now a REST API
275
276
 
276
277
  See the [API demo example](examples/integration_examples/integration_api_demo.py) for complete usage patterns.
277
278
 
279
+ ### Multi-Workflow API Gateway - Manage Multiple Workflows
280
+
281
+ Run multiple workflows through a single unified API gateway with dynamic routing and MCP integration:
282
+
283
+ ```python
284
+ from kailash.api.gateway import WorkflowAPIGateway
285
+ from kailash.api.mcp_integration import MCPIntegration
286
+
287
+ # Create gateway
288
+ gateway = WorkflowAPIGateway(
289
+ title="Enterprise Platform",
290
+ description="Unified API for all workflows"
291
+ )
292
+
293
+ # Register multiple workflows
294
+ gateway.register_workflow("sales", sales_workflow)
295
+ gateway.register_workflow("analytics", analytics_workflow)
296
+ gateway.register_workflow("reports", reporting_workflow)
297
+
298
+ # Add AI-powered tools via MCP
299
+ mcp = MCPIntegration("ai_tools")
300
+ mcp.add_tool("analyze", analyze_function)
301
+ mcp.add_tool("predict", predict_function)
302
+ gateway.register_mcp_server("ai", mcp)
303
+
304
+ # Run unified server
305
+ gateway.run(port=8000)
306
+ ```
307
+
308
+ #### Gateway Features
309
+
310
+ - **Unified Access Point**: All workflows accessible through one server
311
+ - `/sales/execute` - Execute sales workflow
312
+ - `/analytics/execute` - Execute analytics workflow
313
+ - `/workflows` - List all available workflows
314
+ - `/health` - Check health of all services
315
+
316
+ - **MCP Integration**: AI-powered tools available to all workflows
317
+ ```python
318
+ # Use MCP tools in workflows
319
+ from kailash.api.mcp_integration import MCPToolNode
320
+
321
+ tool_node = MCPToolNode(
322
+ mcp_server="ai_tools",
323
+ tool_name="analyze"
324
+ )
325
+ workflow.add_node("ai_analysis", tool_node)
326
+ ```
327
+
328
+ - **Flexible Deployment Patterns**:
329
+ ```python
330
+ # Pattern 1: Single Gateway (most cases)
331
+ gateway.register_workflow("workflow1", wf1)
332
+ gateway.register_workflow("workflow2", wf2)
333
+
334
+ # Pattern 2: Hybrid (heavy workflows separate)
335
+ gateway.register_workflow("light", light_wf)
336
+ gateway.proxy_workflow("heavy", "http://gpu-service:8080")
337
+
338
+ # Pattern 3: High Availability
339
+ # Run multiple gateway instances behind load balancer
340
+
341
+ # Pattern 4: Kubernetes
342
+ # Deploy with horizontal pod autoscaling
343
+ ```
344
+
345
+ - **Production Features**:
346
+ - WebSocket support for real-time updates
347
+ - Health monitoring across all workflows
348
+ - Dynamic workflow registration/unregistration
349
+ - Built-in CORS and authentication support
350
+
351
+ See the [Gateway examples](examples/integration_examples/gateway_comprehensive_demo.py) for complete implementation patterns.
352
+
278
353
  ## 📚 Documentation
279
354
 
280
355
  | Resource | Description |
@@ -296,14 +371,14 @@ The SDK includes a rich set of pre-built nodes for common operations:
296
371
  <td width="50%">
297
372
 
298
373
  **Data Operations**
299
- - `CSVReader` - Read CSV files
300
- - `JSONReader` - Read JSON files
374
+ - `CSVReaderNode` - Read CSV files
375
+ - `JSONReaderNode` - Read JSON files
301
376
  - `DocumentSourceNode` - Sample document provider
302
377
  - `QuerySourceNode` - Sample query provider
303
378
  - `RelevanceScorerNode` - Multi-method similarity
304
379
  - `SQLDatabaseNode` - Query databases
305
- - `CSVWriter` - Write CSV files
306
- - `JSONWriter` - Write JSON files
380
+ - `CSVWriterNode` - Write CSV files
381
+ - `JSONWriterNode` - Write JSON files
307
382
 
308
383
  </td>
309
384
  <td width="50%">
@@ -319,8 +394,8 @@ The SDK includes a rich set of pre-built nodes for common operations:
319
394
  - `Aggregator` - Aggregate data
320
395
 
321
396
  **Logic Nodes**
322
- - `Switch` - Conditional routing
323
- - `Merge` - Combine multiple inputs
397
+ - `SwitchNode` - Conditional routing
398
+ - `MergeNode` - Combine multiple inputs
324
399
  - `WorkflowNode` - Wrap workflows as reusable nodes
325
400
 
326
401
  </td>
@@ -329,8 +404,8 @@ The SDK includes a rich set of pre-built nodes for common operations:
329
404
  <td width="50%">
330
405
 
331
406
  **AI/ML Nodes**
332
- - `LLMAgent` - Multi-provider LLM with memory & tools
333
- - `EmbeddingGenerator` - Vector embeddings with caching
407
+ - `LLMAgentNode` - Multi-provider LLM with memory & tools
408
+ - `EmbeddingGeneratorNode` - Vector embeddings with caching
334
409
  - `MCPClient/MCPServer` - Model Context Protocol
335
410
  - `TextClassifier` - Text classification
336
411
  - `SentimentAnalyzer` - Sentiment analysis
@@ -370,14 +445,14 @@ The SDK includes a rich set of pre-built nodes for common operations:
370
445
  #### Workflow Management
371
446
  ```python
372
447
  from kailash.workflow import Workflow
373
- from kailash.nodes.logic import Switch
448
+ from kailash.nodes.logic import SwitchNode
374
449
  from kailash.nodes.transform import DataTransformer
375
450
 
376
451
  # Create complex workflows with branching logic
377
452
  workflow = Workflow("data_pipeline", name="data_pipeline")
378
453
 
379
- # Add conditional branching with Switch node
380
- switch = Switch()
454
+ # Add conditional branching with SwitchNode
455
+ switch = SwitchNode()
381
456
  workflow.add_node("route", switch)
382
457
 
383
458
  # Different paths based on validation
@@ -703,13 +778,13 @@ chunk_text_extractor = ChunkTextExtractorNode()
703
778
  query_text_wrapper = QueryTextWrapperNode()
704
779
 
705
780
  # Create embedding generators
706
- chunk_embedder = EmbeddingGenerator(
781
+ chunk_embedder = EmbeddingGeneratorNode(
707
782
  provider="ollama",
708
783
  model="nomic-embed-text",
709
784
  operation="embed_batch"
710
785
  )
711
786
 
712
- query_embedder = EmbeddingGenerator(
787
+ query_embedder = EmbeddingGeneratorNode(
713
788
  provider="ollama",
714
789
  model="nomic-embed-text",
715
790
  operation="embed_batch"
@@ -720,7 +795,7 @@ relevance_scorer = RelevanceScorerNode(similarity_method="cosine")
720
795
  context_formatter = ContextFormatterNode()
721
796
 
722
797
  # Create LLM agent for final answer generation
723
- llm_agent = LLMAgent(
798
+ llm_agent = LLMAgentNode(
724
799
  provider="ollama",
725
800
  model="llama3.2",
726
801
  temperature=0.7,
@@ -839,10 +914,10 @@ kailash/
839
914
  The SDK features a unified provider architecture for AI capabilities:
840
915
 
841
916
  ```python
842
- from kailash.nodes.ai import LLMAgent, EmbeddingGenerator
917
+ from kailash.nodes.ai import LLMAgentNode, EmbeddingGeneratorNode
843
918
 
844
919
  # Multi-provider LLM support
845
- agent = LLMAgent()
920
+ agent = LLMAgentNode()
846
921
  result = agent.run(
847
922
  provider="ollama", # or "openai", "anthropic", "mock"
848
923
  model="llama3.1:8b-instruct-q8_0",
@@ -851,7 +926,7 @@ result = agent.run(
851
926
  )
852
927
 
853
928
  # Vector embeddings with the same providers
854
- embedder = EmbeddingGenerator()
929
+ embedder = EmbeddingGeneratorNode()
855
930
  embedding = embedder.run(
856
931
  provider="ollama", # Same providers support embeddings
857
932
  model="snowflake-arctic-embed2",
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "kailash"
7
- version = "0.1.3"
7
+ version = "0.1.4"
8
8
  description = "Python SDK for the Kailash container-node architecture"
9
9
  authors = [
10
10
  {name = "Integrum", email = "info@integrum.com"}
@@ -13,7 +13,7 @@ with open(os.path.join(this_directory, "README.md"), encoding="utf-8") as f:
13
13
  # Package configuration
14
14
  setup(
15
15
  name="kailash",
16
- version="0.1.1",
16
+ version="0.1.4",
17
17
  author="Integrum",
18
18
  author_email="info@integrum.com",
19
19
  description="Python SDK for the Kailash container-node architecture",
@@ -15,7 +15,7 @@ from kailash.workflow.visualization import WorkflowVisualizer
15
15
  # For backward compatibility
16
16
  WorkflowGraph = Workflow
17
17
 
18
- __version__ = "0.1.1"
18
+ __version__ = "0.1.4"
19
19
 
20
20
  __all__ = [
21
21
  "Workflow",
@@ -0,0 +1,17 @@
1
+ """
2
+ Kailash API module for exposing workflows as REST APIs.
3
+ """
4
+
5
+ from .gateway import WorkflowAPIGateway, WorkflowOrchestrator
6
+ from .mcp_integration import MCPIntegration, MCPToolNode
7
+ from .workflow_api import HierarchicalRAGAPI, WorkflowAPI, create_workflow_api
8
+
9
+ __all__ = [
10
+ "WorkflowAPI",
11
+ "HierarchicalRAGAPI",
12
+ "create_workflow_api",
13
+ "WorkflowAPIGateway",
14
+ "WorkflowOrchestrator",
15
+ "MCPIntegration",
16
+ "MCPToolNode",
17
+ ]