kailash 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. kailash/__init__.py +31 -0
  2. kailash/__main__.py +11 -0
  3. kailash/cli/__init__.py +5 -0
  4. kailash/cli/commands.py +563 -0
  5. kailash/manifest.py +778 -0
  6. kailash/nodes/__init__.py +23 -0
  7. kailash/nodes/ai/__init__.py +26 -0
  8. kailash/nodes/ai/agents.py +417 -0
  9. kailash/nodes/ai/models.py +488 -0
  10. kailash/nodes/api/__init__.py +52 -0
  11. kailash/nodes/api/auth.py +567 -0
  12. kailash/nodes/api/graphql.py +480 -0
  13. kailash/nodes/api/http.py +598 -0
  14. kailash/nodes/api/rate_limiting.py +572 -0
  15. kailash/nodes/api/rest.py +665 -0
  16. kailash/nodes/base.py +1032 -0
  17. kailash/nodes/base_async.py +128 -0
  18. kailash/nodes/code/__init__.py +32 -0
  19. kailash/nodes/code/python.py +1021 -0
  20. kailash/nodes/data/__init__.py +125 -0
  21. kailash/nodes/data/readers.py +496 -0
  22. kailash/nodes/data/sharepoint_graph.py +623 -0
  23. kailash/nodes/data/sql.py +380 -0
  24. kailash/nodes/data/streaming.py +1168 -0
  25. kailash/nodes/data/vector_db.py +964 -0
  26. kailash/nodes/data/writers.py +529 -0
  27. kailash/nodes/logic/__init__.py +6 -0
  28. kailash/nodes/logic/async_operations.py +702 -0
  29. kailash/nodes/logic/operations.py +551 -0
  30. kailash/nodes/transform/__init__.py +5 -0
  31. kailash/nodes/transform/processors.py +379 -0
  32. kailash/runtime/__init__.py +6 -0
  33. kailash/runtime/async_local.py +356 -0
  34. kailash/runtime/docker.py +697 -0
  35. kailash/runtime/local.py +434 -0
  36. kailash/runtime/parallel.py +557 -0
  37. kailash/runtime/runner.py +110 -0
  38. kailash/runtime/testing.py +347 -0
  39. kailash/sdk_exceptions.py +307 -0
  40. kailash/tracking/__init__.py +7 -0
  41. kailash/tracking/manager.py +885 -0
  42. kailash/tracking/metrics_collector.py +342 -0
  43. kailash/tracking/models.py +535 -0
  44. kailash/tracking/storage/__init__.py +0 -0
  45. kailash/tracking/storage/base.py +113 -0
  46. kailash/tracking/storage/database.py +619 -0
  47. kailash/tracking/storage/filesystem.py +543 -0
  48. kailash/utils/__init__.py +0 -0
  49. kailash/utils/export.py +924 -0
  50. kailash/utils/templates.py +680 -0
  51. kailash/visualization/__init__.py +62 -0
  52. kailash/visualization/api.py +732 -0
  53. kailash/visualization/dashboard.py +951 -0
  54. kailash/visualization/performance.py +808 -0
  55. kailash/visualization/reports.py +1471 -0
  56. kailash/workflow/__init__.py +15 -0
  57. kailash/workflow/builder.py +245 -0
  58. kailash/workflow/graph.py +827 -0
  59. kailash/workflow/mermaid_visualizer.py +628 -0
  60. kailash/workflow/mock_registry.py +63 -0
  61. kailash/workflow/runner.py +302 -0
  62. kailash/workflow/state.py +238 -0
  63. kailash/workflow/visualization.py +588 -0
  64. kailash-0.1.0.dist-info/METADATA +710 -0
  65. kailash-0.1.0.dist-info/RECORD +69 -0
  66. kailash-0.1.0.dist-info/WHEEL +5 -0
  67. kailash-0.1.0.dist-info/entry_points.txt +2 -0
  68. kailash-0.1.0.dist-info/licenses/LICENSE +21 -0
  69. kailash-0.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,23 @@
1
"""Node system for the Kailash SDK."""

# Import all node modules to ensure registration
# NOTE(review): importing these submodules appears to have the side effect of
# registering their nodes (see register_node) — keep the imports even though
# the names are only re-exported. Confirm against kailash.nodes.base.
from kailash.nodes import ai, api, code, data, logic, transform
from kailash.nodes.base import Node, NodeParameter, NodeRegistry, register_node
from kailash.nodes.base_async import AsyncNode
from kailash.nodes.code import PythonCodeNode

# Public API: core base classes/utilities plus the node submodules.
__all__ = [
    "Node",
    "AsyncNode",
    "NodeParameter",
    "NodeRegistry",
    "register_node",
    "PythonCodeNode",
    # Node modules
    "ai",
    "api",
    "code",
    "data",
    "logic",
    "transform",
]
@@ -0,0 +1,26 @@
1
"""AI and ML nodes for the Kailash SDK."""

# Re-export the agent and model node classes so callers can import them
# directly from kailash.nodes.ai.
from .agents import ChatAgent, FunctionCallingAgent, PlanningAgent, RetrievalAgent
from .models import (
    ModelPredictor,
    NamedEntityRecognizer,
    SentimentAnalyzer,
    TextClassifier,
    TextEmbedder,
    TextSummarizer,
)

__all__ = [
    # Agents
    "ChatAgent",
    "RetrievalAgent",
    "FunctionCallingAgent",
    "PlanningAgent",
    # Models
    "TextClassifier",
    "TextEmbedder",
    "SentimentAnalyzer",
    "NamedEntityRecognizer",
    "ModelPredictor",
    "TextSummarizer",
]
@@ -0,0 +1,417 @@
1
+ """AI agent nodes for the Kailash SDK."""
2
+
3
+ from typing import Any, Dict
4
+
5
+ from kailash.nodes.base import Node, NodeParameter, register_node
6
+
7
+
8
@register_node()
class ChatAgent(Node):
    """Chat-based AI agent node.

    Produces a canned, pattern-matched mock reply to the last user message;
    no real language model is invoked.
    """

    def get_parameters(self) -> Dict[str, NodeParameter]:
        """Declare the inputs this node accepts."""
        specs: Dict[str, NodeParameter] = {}
        specs["messages"] = NodeParameter(
            name="messages",
            type=list,
            required=True,
            description="List of chat messages",
        )
        specs["model"] = NodeParameter(
            name="model",
            type=str,
            required=False,
            default="default",
            description="Model to use for chat",
        )
        specs["temperature"] = NodeParameter(
            name="temperature",
            type=float,
            required=False,
            default=0.7,
            description="Sampling temperature (0-1)",
        )
        specs["max_tokens"] = NodeParameter(
            name="max_tokens",
            type=int,
            required=False,
            default=500,
            description="Maximum tokens in response",
        )
        specs["system_prompt"] = NodeParameter(
            name="system_prompt",
            type=str,
            required=False,
            default="You are a helpful assistant.",
            description="System prompt for the agent",
        )
        return specs

    def run(self, **kwargs) -> Dict[str, Any]:
        """Generate a mock assistant response for the supplied chat history."""
        chat_history = kwargs["messages"]
        model_name = kwargs.get("model", "default")
        temp = kwargs.get("temperature", 0.7)
        token_limit = kwargs.get("max_tokens", 500)
        sys_prompt = kwargs.get("system_prompt", "You are a helpful assistant.")

        # The system prompt always leads the full conversation transcript.
        transcript = [{"role": "system", "content": sys_prompt}, *chat_history]

        replies = []
        last = chat_history[-1] if chat_history else None
        if isinstance(last, dict) and last.get("role") == "user":
            user_content = last.get("content", "")
            lowered = user_content.lower()

            # Canned replies keyed on simple patterns in the user input.
            if "hello" in lowered:
                reply_text = "Hello! How can I help you today?"
            elif "weather" in lowered:
                reply_text = "I don't have access to real-time weather data, but I can help you with other questions!"
            elif "?" in user_content:
                reply_text = f"That's an interesting question about '{user_content[:50]}...'. Based on the context, I would say..."
            else:
                reply_text = f"I understand you're saying '{user_content[:50]}...'. Let me help you with that."

            replies.append(
                {
                    "role": "assistant",
                    "content": reply_text,
                    "model": model_name,
                    "temperature": temp,
                }
            )

        return {
            "responses": replies,
            "full_conversation": transcript + replies,
            "model": model_name,
            "temperature": temp,
            "max_tokens": token_limit,
        }
96
+
97
+
98
@register_node()
class RetrievalAgent(Node):
    """Retrieval-augmented generation agent.

    Performs a mock word-overlap retrieval over the given documents and
    optionally fabricates an answer from snippets of the top matches.
    """

    def get_parameters(self) -> Dict[str, NodeParameter]:
        """Declare the inputs this node accepts."""
        specs: Dict[str, NodeParameter] = {}
        specs["query"] = NodeParameter(
            name="query", type=str, required=True, description="Query for retrieval"
        )
        specs["documents"] = NodeParameter(
            name="documents",
            type=list,
            required=True,
            description="Documents to search through",
        )
        specs["top_k"] = NodeParameter(
            name="top_k",
            type=int,
            required=False,
            default=5,
            description="Number of top documents to retrieve",
        )
        specs["similarity_threshold"] = NodeParameter(
            name="similarity_threshold",
            type=float,
            required=False,
            default=0.7,
            description="Minimum similarity threshold",
        )
        specs["generate_answer"] = NodeParameter(
            name="generate_answer",
            type=bool,
            required=False,
            default=True,
            description="Whether to generate an answer based on retrieved documents",
        )
        return specs

    def run(self, **kwargs) -> Dict[str, Any]:
        """Retrieve documents by mock keyword-overlap similarity."""
        search_query = kwargs["query"]
        corpus = kwargs["documents"]
        k = kwargs.get("top_k", 5)
        threshold = kwargs.get("similarity_threshold", 0.7)
        want_answer = kwargs.get("generate_answer", True)

        query_tokens = set(search_query.lower().split())

        # Score every document by the fraction of query words it contains.
        matches = []
        for entry in corpus:
            text = entry.get("content", "") if isinstance(entry, dict) else str(entry)
            shared = query_tokens & set(text.lower().split())
            score = len(shared) / max(len(query_tokens), 1)
            if score >= threshold:
                matches.append(
                    {"document": entry, "content": text, "similarity": score}
                )

        # Keep only the k best matches, highest similarity first.
        matches = sorted(matches, key=lambda m: m["similarity"], reverse=True)[:k]

        # Optionally fabricate an answer from the first 200 chars of each hit.
        generated = None
        if want_answer and matches:
            context = " ".join(m["content"][:200] for m in matches)
            generated = f"Based on the retrieved documents about '{search_query}', the relevant information is: {context[:300]}..."

        return {
            "query": search_query,
            "retrieved_documents": matches,
            "answer": generated,
            "num_retrieved": len(matches),
            "top_k": k,
            "similarity_threshold": threshold,
        }
184
+
185
+
186
@register_node()
class FunctionCallingAgent(Node):
    """Agent that can call functions based on input.

    Matches the query against the supplied function definitions and emits
    mock call records; no real functions are executed.
    """

    def get_parameters(self) -> Dict[str, NodeParameter]:
        """Declare the inputs this node accepts."""
        specs: Dict[str, NodeParameter] = {}
        specs["query"] = NodeParameter(
            name="query", type=str, required=True, description="User query"
        )
        specs["available_functions"] = NodeParameter(
            name="available_functions",
            type=list,
            required=True,
            description="List of available function definitions",
        )
        specs["context"] = NodeParameter(
            name="context",
            type=dict,
            required=False,
            default={},
            description="Additional context for the agent",
        )
        specs["max_calls"] = NodeParameter(
            name="max_calls",
            type=int,
            required=False,
            default=3,
            description="Maximum number of function calls",
        )
        return specs

    @staticmethod
    def _mock_argument(param_name: str, param_type: str) -> Any:
        """Fabricate a placeholder value for a single typed parameter."""
        if param_type == "string":
            return f"mock_{param_name}_value"
        if param_type == "number":
            return 42
        if param_type == "boolean":
            return True
        if param_type == "array":
            return ["item1", "item2"]
        # Any other declared type (e.g. "object") gets a generic mapping.
        return {"key": "value"}

    def run(self, **kwargs) -> Dict[str, Any]:
        """Select matching functions and record mock invocations."""
        user_query = kwargs["query"]
        catalog = kwargs["available_functions"]
        ctx = kwargs.get("context", {})
        call_budget = kwargs.get("max_calls", 3)

        lowered_query = user_query.lower()
        calls_made = []

        for candidate in catalog:
            if not isinstance(candidate, dict):
                continue
            fn_name = candidate.get("name", "")
            fn_description = candidate.get("description", "")

            # A function "matches" when its name appears in the query, or
            # when any word of its description does.
            matched = fn_name.lower() in lowered_query or any(
                token in lowered_query for token in fn_description.lower().split()
            )
            if matched:
                mock_args = {
                    pname: self._mock_argument(pname, pinfo.get("type", "string"))
                    for pname, pinfo in candidate.get("parameters", {}).items()
                }
                calls_made.append(
                    {
                        "function": fn_name,
                        "arguments": mock_args,
                        "result": f"Mock result from {fn_name}",
                    }
                )

            if len(calls_made) >= call_budget:
                break

        # Summarize the mock invocations into a single response string.
        if calls_made:
            response = f"Based on your query '{user_query}', I executed {len(calls_made)} function(s). "
            response += "Here are the results: " + ", ".join(
                f"{call['function']}() returned {call['result']}"
                for call in calls_made
            )
        else:
            response = f"I couldn't find any relevant functions to help with '{user_query}'."

        return {
            "query": user_query,
            "function_calls": calls_made,
            "response": response,
            "context": ctx,
            "num_calls": len(calls_made),
        }
286
+
287
+
288
@register_node()
class PlanningAgent(Node):
    """Agent that creates execution plans.

    Picks a template workflow via keyword heuristics on the goal, then
    filters it down to the tools actually available and applies constraints.
    """

    def get_parameters(self) -> Dict[str, NodeParameter]:
        """Declare the inputs this node accepts."""
        specs: Dict[str, NodeParameter] = {}
        specs["goal"] = NodeParameter(
            name="goal", type=str, required=True, description="Goal to achieve"
        )
        specs["available_tools"] = NodeParameter(
            name="available_tools",
            type=list,
            required=True,
            description="List of available tools/nodes",
        )
        specs["constraints"] = NodeParameter(
            name="constraints",
            type=dict,
            required=False,
            default={},
            description="Constraints for the plan",
        )
        specs["max_steps"] = NodeParameter(
            name="max_steps",
            type=int,
            required=False,
            default=10,
            description="Maximum number of steps in the plan",
        )
        return specs

    @staticmethod
    def _candidate_steps(goal_lower: str) -> list:
        """Return the template step list matching the goal's keywords."""
        if "process" in goal_lower and "data" in goal_lower:
            # Data processing workflow
            return [
                {
                    "tool": "CSVReader",
                    "description": "Read input data",
                    "parameters": {"file_path": "input.csv"},
                },
                {
                    "tool": "Filter",
                    "description": "Filter data based on criteria",
                    "parameters": {"field": "value", "operator": ">", "value": 100},
                },
                {
                    "tool": "Aggregator",
                    "description": "Aggregate filtered data",
                    "parameters": {"group_by": "category", "operation": "sum"},
                },
                {
                    "tool": "CSVWriter",
                    "description": "Write results",
                    "parameters": {"file_path": "output.csv"},
                },
            ]
        if "analyze" in goal_lower and "text" in goal_lower:
            # Text analysis workflow
            return [
                {
                    "tool": "TextReader",
                    "description": "Read text data",
                    "parameters": {"file_path": "text.txt"},
                },
                {
                    "tool": "SentimentAnalyzer",
                    "description": "Analyze sentiment",
                    "parameters": {"language": "en"},
                },
                {
                    "tool": "TextSummarizer",
                    "description": "Summarize key points",
                    "parameters": {"max_length": 200},
                },
                {
                    "tool": "JSONWriter",
                    "description": "Save analysis results",
                    "parameters": {"file_path": "analysis.json"},
                },
            ]
        # Generic fallback workflow
        return [
            {
                "tool": "DataReader",
                "description": "Read input data",
                "parameters": {},
            },
            {
                "tool": "Transform",
                "description": "Transform data",
                "parameters": {},
            },
            {"tool": "Analyze", "description": "Analyze results", "parameters": {}},
            {"tool": "Export", "description": "Export results", "parameters": {}},
        ]

    def run(self, **kwargs) -> Dict[str, Any]:
        """Build a mock execution plan toward the given goal."""
        objective = kwargs["goal"]
        toolbox = kwargs["available_tools"]
        limits = kwargs.get("constraints", {})
        step_cap = kwargs.get("max_steps", 10)

        # Keep only template steps whose tool name appears among the
        # available tools (substring match on their string forms).
        selected = [
            step
            for step in self._candidate_steps(objective.lower())[:step_cap]
            if any(step["tool"] in str(tool) for tool in toolbox)
        ]

        # Apply constraints: mock estimate of 10 seconds per step; trim the
        # plan so the estimated time fits within the limit.
        if "time_limit" in limits:
            if len(selected) * 10 > limits["time_limit"]:
                selected = selected[: limits["time_limit"] // 10]

        return {
            "goal": objective,
            "plan": selected,
            "estimated_steps": len(selected),
            "constraints": limits,
            "feasibility": "high" if selected else "low",
            "reasoning": f"Created a {len(selected)}-step plan to achieve: {objective}",
        }