kailash 0.3.0__py3-none-any.whl → 0.3.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/access_control.py +40 -39
- kailash/api/auth.py +26 -32
- kailash/api/custom_nodes.py +29 -29
- kailash/api/custom_nodes_secure.py +35 -35
- kailash/api/database.py +17 -17
- kailash/api/gateway.py +19 -19
- kailash/api/mcp_integration.py +24 -23
- kailash/api/studio.py +45 -45
- kailash/api/workflow_api.py +8 -8
- kailash/cli/commands.py +5 -8
- kailash/manifest.py +42 -42
- kailash/mcp/__init__.py +1 -1
- kailash/mcp/ai_registry_server.py +20 -20
- kailash/mcp/client.py +9 -11
- kailash/mcp/client_new.py +10 -10
- kailash/mcp/server.py +1 -2
- kailash/mcp/server_enhanced.py +449 -0
- kailash/mcp/servers/ai_registry.py +6 -6
- kailash/mcp/utils/__init__.py +31 -0
- kailash/mcp/utils/cache.py +267 -0
- kailash/mcp/utils/config.py +263 -0
- kailash/mcp/utils/formatters.py +293 -0
- kailash/mcp/utils/metrics.py +418 -0
- kailash/nodes/ai/agents.py +9 -9
- kailash/nodes/ai/ai_providers.py +33 -34
- kailash/nodes/ai/embedding_generator.py +31 -32
- kailash/nodes/ai/intelligent_agent_orchestrator.py +62 -66
- kailash/nodes/ai/iterative_llm_agent.py +48 -48
- kailash/nodes/ai/llm_agent.py +32 -33
- kailash/nodes/ai/models.py +13 -13
- kailash/nodes/ai/self_organizing.py +44 -44
- kailash/nodes/api/auth.py +11 -11
- kailash/nodes/api/graphql.py +13 -13
- kailash/nodes/api/http.py +19 -19
- kailash/nodes/api/monitoring.py +20 -20
- kailash/nodes/api/rate_limiting.py +9 -13
- kailash/nodes/api/rest.py +29 -29
- kailash/nodes/api/security.py +44 -47
- kailash/nodes/base.py +21 -23
- kailash/nodes/base_async.py +7 -7
- kailash/nodes/base_cycle_aware.py +12 -12
- kailash/nodes/base_with_acl.py +5 -5
- kailash/nodes/code/python.py +66 -57
- kailash/nodes/data/directory.py +6 -6
- kailash/nodes/data/event_generation.py +10 -10
- kailash/nodes/data/file_discovery.py +28 -31
- kailash/nodes/data/readers.py +8 -8
- kailash/nodes/data/retrieval.py +10 -10
- kailash/nodes/data/sharepoint_graph.py +17 -17
- kailash/nodes/data/sources.py +5 -5
- kailash/nodes/data/sql.py +13 -13
- kailash/nodes/data/streaming.py +25 -25
- kailash/nodes/data/vector_db.py +22 -22
- kailash/nodes/data/writers.py +7 -7
- kailash/nodes/logic/async_operations.py +17 -17
- kailash/nodes/logic/convergence.py +11 -11
- kailash/nodes/logic/loop.py +4 -4
- kailash/nodes/logic/operations.py +11 -11
- kailash/nodes/logic/workflow.py +8 -9
- kailash/nodes/mixins/mcp.py +17 -17
- kailash/nodes/mixins.py +8 -10
- kailash/nodes/transform/chunkers.py +3 -3
- kailash/nodes/transform/formatters.py +7 -7
- kailash/nodes/transform/processors.py +10 -10
- kailash/runtime/access_controlled.py +18 -18
- kailash/runtime/async_local.py +17 -19
- kailash/runtime/docker.py +20 -22
- kailash/runtime/local.py +16 -16
- kailash/runtime/parallel.py +23 -23
- kailash/runtime/parallel_cyclic.py +27 -27
- kailash/runtime/runner.py +6 -6
- kailash/runtime/testing.py +20 -20
- kailash/sdk_exceptions.py +0 -58
- kailash/security.py +14 -26
- kailash/tracking/manager.py +38 -38
- kailash/tracking/metrics_collector.py +15 -14
- kailash/tracking/models.py +53 -53
- kailash/tracking/storage/base.py +7 -17
- kailash/tracking/storage/database.py +22 -23
- kailash/tracking/storage/filesystem.py +38 -40
- kailash/utils/export.py +21 -21
- kailash/utils/templates.py +2 -3
- kailash/visualization/api.py +30 -34
- kailash/visualization/dashboard.py +17 -17
- kailash/visualization/performance.py +16 -16
- kailash/visualization/reports.py +25 -27
- kailash/workflow/builder.py +8 -8
- kailash/workflow/convergence.py +13 -12
- kailash/workflow/cycle_analyzer.py +30 -32
- kailash/workflow/cycle_builder.py +12 -12
- kailash/workflow/cycle_config.py +16 -15
- kailash/workflow/cycle_debugger.py +40 -40
- kailash/workflow/cycle_exceptions.py +29 -29
- kailash/workflow/cycle_profiler.py +21 -21
- kailash/workflow/cycle_state.py +20 -22
- kailash/workflow/cyclic_runner.py +44 -44
- kailash/workflow/graph.py +40 -40
- kailash/workflow/mermaid_visualizer.py +9 -11
- kailash/workflow/migration.py +22 -22
- kailash/workflow/mock_registry.py +6 -6
- kailash/workflow/runner.py +9 -9
- kailash/workflow/safety.py +12 -13
- kailash/workflow/state.py +8 -11
- kailash/workflow/templates.py +19 -19
- kailash/workflow/validation.py +14 -14
- kailash/workflow/visualization.py +22 -22
- {kailash-0.3.0.dist-info → kailash-0.3.2.dist-info}/METADATA +53 -5
- kailash-0.3.2.dist-info/RECORD +136 -0
- kailash-0.3.0.dist-info/RECORD +0 -130
- {kailash-0.3.0.dist-info → kailash-0.3.2.dist-info}/WHEEL +0 -0
- {kailash-0.3.0.dist-info → kailash-0.3.2.dist-info}/entry_points.txt +0 -0
- {kailash-0.3.0.dist-info → kailash-0.3.2.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.3.0.dist-info → kailash-0.3.2.dist-info}/top_level.txt +0 -0
kailash/nodes/ai/llm_agent.py
CHANGED
@@ -1,7 +1,7 @@
 """Advanced LLM Agent node with LangChain integration and MCP support."""

 import json
-from typing import Any
+from typing import Any

 from kailash.nodes.base import Node, NodeParameter, register_node

@@ -119,7 +119,7 @@ class LLMAgentNode(Node):
         ... )
     """

-    def get_parameters(self) ->
+    def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "provider": NodeParameter(
                 name="provider",
@@ -226,7 +226,7 @@ class LLMAgentNode(Node):
             ),
         }

-    def run(self, **kwargs) ->
+    def run(self, **kwargs) -> dict[str, Any]:
         """
         Execute the LLM agent with the specified configuration.

@@ -584,8 +584,8 @@ class LLMAgentNode(Node):
         return False

     def _load_conversation_memory(
-        self, conversation_id:
-    ) ->
+        self, conversation_id: str | None, memory_config: dict
+    ) -> dict[str, Any]:
         """
         Load conversation memory for persistent conversations.

@@ -675,8 +675,8 @@ class LLMAgentNode(Node):
         }

     def _retrieve_mcp_context(
-        self, mcp_servers:
-    ) ->
+        self, mcp_servers: list[dict], mcp_context: list[str]
+    ) -> list[dict[str, Any]]:
         """
         Retrieve context from Model Context Protocol (MCP) servers.

@@ -893,7 +893,7 @@ class LLMAgentNode(Node):

         return os.environ.get("KAILASH_USE_REAL_MCP", "false").lower() == "true"

-    def _discover_mcp_tools(self, mcp_servers:
+    def _discover_mcp_tools(self, mcp_servers: list[dict]) -> list[dict[str, Any]]:
         """
         Discover available tools from MCP servers.

@@ -995,8 +995,8 @@ class LLMAgentNode(Node):
         return discovered_tools

     def _merge_tools(
-        self, existing_tools:
-    ) ->
+        self, existing_tools: list[dict], mcp_tools: list[dict]
+    ) -> list[dict]:
         """
         Merge MCP discovered tools with existing tools, avoiding duplicates.

@@ -1027,8 +1027,8 @@ class LLMAgentNode(Node):
         return merged_tools

     def _perform_rag_retrieval(
-        self, messages:
-    ) ->
+        self, messages: list[dict], rag_config: dict, mcp_context: list[dict]
+    ) -> dict[str, Any]:
         """
         Perform Retrieval Augmented Generation (RAG) to find relevant documents.

@@ -1188,12 +1188,12 @@ class LLMAgentNode(Node):

     def _prepare_conversation(
         self,
-        messages:
-        system_prompt:
+        messages: list[dict],
+        system_prompt: str | None,
         memory: dict,
-        mcp_context:
+        mcp_context: list[dict],
         rag_context: dict,
-    ) ->
+    ) -> list[dict]:
         """Prepare enriched conversation with all context."""
         enriched_messages = []

@@ -1229,8 +1229,8 @@ class LLMAgentNode(Node):
         return enriched_messages

     def _mock_llm_response(
-        self, messages:
-    ) ->
+        self, messages: list[dict], tools: list[dict], generation_config: dict
+    ) -> dict[str, Any]:
         """Generate mock LLM response for testing."""
         last_user_message = ""
         for msg in reversed(messages):
@@ -1291,13 +1291,13 @@ class LLMAgentNode(Node):
         self,
         provider: str,
         model: str,
-        messages:
-        tools:
+        messages: list[dict],
+        tools: list[dict],
         generation_config: dict,
         streaming: bool,
         timeout: int,
         max_retries: int,
-    ) ->
+    ) -> dict[str, Any]:
         """Generate LLM response using LangChain (mock implementation)."""
         # This would be the real LangChain integration
         return {
@@ -1320,10 +1320,10 @@ class LLMAgentNode(Node):
         self,
         provider: str,
         model: str,
-        messages:
-        tools:
+        messages: list[dict],
+        tools: list[dict],
         generation_config: dict,
-    ) ->
+    ) -> dict[str, Any]:
         """Generate LLM response using provider architecture."""
         try:
             from .ai_providers import get_provider
@@ -1368,10 +1368,10 @@ class LLMAgentNode(Node):
         self,
         provider: str,
         model: str,
-        messages:
-        tools:
+        messages: list[dict],
+        tools: list[dict],
         generation_config: dict,
-    ) ->
+    ) -> dict[str, Any]:
         """Generate LLM response using direct API calls (mock implementation)."""
         return {
             "id": "fallback_response_456",
@@ -1392,17 +1392,16 @@ class LLMAgentNode(Node):
     def _update_conversation_memory(
         self,
         conversation_id: str,
-        messages:
+        messages: list[dict],
         response: dict,
         memory_config: dict,
     ) -> None:
         """Update conversation memory with new exchange."""
         # Mock memory update (in real implementation, persist to storage)
-        pass

     def _calculate_usage_metrics(
-        self, messages:
-    ) ->
+        self, messages: list[dict], response: dict, model: str, provider: str
+    ) -> dict[str, Any]:
         """Calculate token usage and cost metrics."""
         usage = response.get("usage", {})
         prompt_tokens = usage.get("prompt_tokens", 0)
@@ -1433,8 +1432,8 @@ class LLMAgentNode(Node):
         }

     async def _execute_mcp_tool_call(
-        self, tool_call: dict, mcp_tools:
-    ) ->
+        self, tool_call: dict, mcp_tools: list[dict]
+    ) -> dict[str, Any]:
         """Execute an MCP tool call.

         Args:
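Nearly every hunk in this file is the same mechanical change: the added lines move the annotations to built-in generics (PEP 585, e.g. dict[str, Any], list[dict]) and to the PEP 604 union syntax (str | None); the removed halves of those lines are truncated in this view. Subscripting built-in types requires Python 3.9 or later at runtime, and X | Y in annotations requires Python 3.10 or later unless "from __future__ import annotations" defers evaluation. A minimal sketch of the new annotation style, using an illustrative class that is not part of the package:

from typing import Any


class AnnotationStyleDemo:
    """Illustrative only: shows the dict[str, Any] / list[dict] / str | None style."""

    def run(self, conversation_id: str | None = None, **kwargs) -> dict[str, Any]:
        # Built-in generics replace the typing-module spellings; "|" replaces Optional.
        messages: list[dict] = kwargs.get("messages", [])
        return {"conversation_id": conversation_id, "message_count": len(messages)}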
kailash/nodes/ai/models.py
CHANGED
@@ -1,6 +1,6 @@
 """AI/ML model nodes for the Kailash SDK."""

-from typing import Any
+from typing import Any

 from kailash.nodes.base import Node, NodeParameter, register_node

@@ -9,7 +9,7 @@ from kailash.nodes.base import Node, NodeParameter, register_node
 class TextClassifier(Node):
     """Generic text classification node."""

-    def get_parameters(self) ->
+    def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "texts": NodeParameter(
                 name="texts",
@@ -40,7 +40,7 @@ class TextClassifier(Node):
             ),
         }

-    def run(self, **kwargs) ->
+    def run(self, **kwargs) -> dict[str, Any]:
         texts = kwargs["texts"]
         model_name = kwargs.get("model_name", "simple")
         categories = kwargs.get("categories", ["positive", "negative", "neutral"])
@@ -82,7 +82,7 @@ class TextClassifier(Node):
 class TextEmbedder(Node):
     """Generate text embeddings."""

-    def get_parameters(self) ->
+    def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "texts": NodeParameter(
                 name="texts",
@@ -106,7 +106,7 @@ class TextEmbedder(Node):
             ),
         }

-    def run(self, **kwargs) ->
+    def run(self, **kwargs) -> dict[str, Any]:
         texts = kwargs["texts"]
         model_name = kwargs.get("model_name", "simple")
         dimensions = kwargs.get("dimensions", 384)
@@ -140,7 +140,7 @@ class TextEmbedder(Node):
 class SentimentAnalyzer(Node):
     """Analyze sentiment of text."""

-    def get_parameters(self) ->
+    def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "texts": NodeParameter(
                 name="texts",
@@ -164,7 +164,7 @@ class SentimentAnalyzer(Node):
             ),
         }

-    def run(self, **kwargs) ->
+    def run(self, **kwargs) -> dict[str, Any]:
         texts = kwargs["texts"]
         language = kwargs.get("language", "en")
         granularity = kwargs.get("granularity", "document")
@@ -225,7 +225,7 @@ class SentimentAnalyzer(Node):
 class NamedEntityRecognizer(Node):
     """Extract named entities from text."""

-    def get_parameters(self) ->
+    def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "texts": NodeParameter(
                 name="texts",
@@ -249,7 +249,7 @@ class NamedEntityRecognizer(Node):
             ),
         }

-    def run(self, **kwargs) ->
+    def run(self, **kwargs) -> dict[str, Any]:
         texts = kwargs["texts"]
         entity_types = kwargs.get(
             "entity_types", ["PERSON", "ORGANIZATION", "LOCATION"]
@@ -318,7 +318,7 @@ class NamedEntityRecognizer(Node):
 class ModelPredictor(Node):
     """Generic model prediction node."""

-    def get_parameters(self) ->
+    def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "data": NodeParameter(
                 name="data",
@@ -349,7 +349,7 @@ class ModelPredictor(Node):
             ),
         }

-    def run(self, **kwargs) ->
+    def run(self, **kwargs) -> dict[str, Any]:
         data = kwargs["data"]
         model_path = kwargs.get("model_path", "default_model")
         prediction_type = kwargs.get("prediction_type", "classification")
@@ -401,7 +401,7 @@ class ModelPredictor(Node):
 class TextSummarizer(Node):
     """Summarize text content."""

-    def get_parameters(self) ->
+    def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "texts": NodeParameter(
                 name="texts",
@@ -432,7 +432,7 @@ class TextSummarizer(Node):
             ),
         }

-    def run(self, **kwargs) ->
+    def run(self, **kwargs) -> dict[str, Any]:
         texts = kwargs["texts"]
         max_length = kwargs.get("max_length", 150)
         min_length = kwargs.get("min_length", 50)
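All of the models.py nodes above share the same skeleton: get_parameters() returns a dict[str, NodeParameter] describing the inputs, and run(**kwargs) returns a dict[str, Any] of outputs. Below is a minimal sketch of a custom node following that shape; the @register_node() call form and the NodeParameter keyword arguments beyond name= (type, required, description) are assumptions, since this diff only shows the import and name=.

from typing import Any

from kailash.nodes.base import Node, NodeParameter, register_node


@register_node()  # decorator call form assumed; only the import is visible in this diff
class WordCountNode(Node):
    """Hypothetical node following the get_parameters/run pattern shown above."""

    def get_parameters(self) -> dict[str, NodeParameter]:
        return {
            "texts": NodeParameter(
                name="texts",
                type=list,        # assumed keyword, not shown in this diff
                required=True,    # assumed keyword, not shown in this diff
                description="Texts to count words in",
            ),
        }

    def run(self, **kwargs) -> dict[str, Any]:
        texts = kwargs["texts"]
        return {"word_counts": [len(t.split()) for t in texts]}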
kailash/nodes/ai/self_organizing.py
CHANGED
@@ -10,7 +10,7 @@ import time
 import uuid
 from collections import defaultdict, deque
 from enum import Enum
-from typing import Any
+from typing import Any

 from kailash.nodes.ai.a2a import A2AAgentNode
 from kailash.nodes.base import Node, NodeParameter, register_node
@@ -125,7 +125,7 @@ class AgentPoolManagerNode(Node):
         self.capability_index = defaultdict(set)
         self.team_history = deque(maxlen=100)

-    def get_parameters(self) ->
+    def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "action": NodeParameter(
                 name="action",
@@ -179,7 +179,7 @@ class AgentPoolManagerNode(Node):
             ),
         }

-    def run(self, **kwargs) ->
+    def run(self, **kwargs) -> dict[str, Any]:
         """Execute pool management action."""
         action = kwargs.get("action", "list")

@@ -198,7 +198,7 @@ class AgentPoolManagerNode(Node):
         else:
             return {"success": False, "error": f"Unknown action: {action}"}

-    def _register_agent(self, kwargs:
+    def _register_agent(self, kwargs: dict[str, Any]) -> dict[str, Any]:
         """Register a new agent in the pool."""
         agent_id = kwargs.get("agent_id")
         if not agent_id:
@@ -235,7 +235,7 @@ class AgentPoolManagerNode(Node):
             "pool_size": len(self.agent_registry),
         }

-    def _unregister_agent(self, kwargs:
+    def _unregister_agent(self, kwargs: dict[str, Any]) -> dict[str, Any]:
         """Remove an agent from the pool."""
         agent_id = kwargs.get("agent_id")

@@ -258,7 +258,7 @@ class AgentPoolManagerNode(Node):
             "pool_size": len(self.agent_registry),
         }

-    def _find_by_capability(self, kwargs:
+    def _find_by_capability(self, kwargs: dict[str, Any]) -> dict[str, Any]:
         """Find agents matching required capabilities."""
         required_capabilities = set(kwargs.get("required_capabilities", []))
         min_performance = kwargs.get("min_performance", 0.7)
@@ -305,7 +305,7 @@ class AgentPoolManagerNode(Node):
             "total_pool_size": len(self.agent_registry),
         }

-    def _update_status(self, kwargs:
+    def _update_status(self, kwargs: dict[str, Any]) -> dict[str, Any]:
         """Update agent status."""
         agent_id = kwargs.get("agent_id")
         new_status = kwargs.get("status")
@@ -357,7 +357,7 @@ class AgentPoolManagerNode(Node):
             "last_active": self.agent_registry[agent_id]["last_active"],
         }

-    def _get_metrics(self, kwargs:
+    def _get_metrics(self, kwargs: dict[str, Any]) -> dict[str, Any]:
         """Get performance metrics for an agent or all agents."""
         agent_id = kwargs.get("agent_id")

@@ -409,7 +409,7 @@ class AgentPoolManagerNode(Node):
             },
         }

-    def _list_agents(self) ->
+    def _list_agents(self) -> dict[str, Any]:
         """List all agents in the pool."""
         agents = []
         for agent_id, agent_data in self.agent_registry.items():
@@ -499,7 +499,7 @@ class ProblemAnalyzerNode(Node):
             "domain": ["domain_expertise", "validation", "interpretation"],
         }

-    def get_parameters(self) ->
+    def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "problem_description": NodeParameter(
                 name="problem_description",
@@ -530,7 +530,7 @@ class ProblemAnalyzerNode(Node):
             ),
         }

-    def run(self, **kwargs) ->
+    def run(self, **kwargs) -> dict[str, Any]:
         """Analyze the problem to determine requirements."""
         problem_description = kwargs["problem_description"]
         context = kwargs.get("context", {})
@@ -627,8 +627,8 @@ class ProblemAnalyzerNode(Node):
         }

     def _hierarchical_decomposition(
-        self, problem: str, capabilities:
-    ) ->
+        self, problem: str, capabilities: set[str]
+    ) -> list[dict]:
         """Decompose problem hierarchically."""
         # Simple heuristic decomposition
         phases = []
@@ -708,7 +708,7 @@ class ProblemAnalyzerNode(Node):

         return phases

-    def _simple_decomposition(self, problem: str, capabilities:
+    def _simple_decomposition(self, problem: str, capabilities: set[str]) -> list[dict]:
         """Simple task decomposition."""
         tasks = []
         for i, cap in enumerate(capabilities):
@@ -731,8 +731,8 @@ class ProblemAnalyzerNode(Node):
         return int(base_time * complexity_factor * parallelization_factor)

     def _prioritize_capabilities(
-        self, capabilities:
-    ) ->
+        self, capabilities: set[str], problem: str
+    ) -> list[str]:
         """Prioritize capabilities based on problem."""
         # Simple prioritization based on problem keywords
         priority_map = {
@@ -789,7 +789,7 @@ class TeamFormationNode(Node):
         self.formation_history = deque(maxlen=50)
         self.team_performance_cache = {}

-    def get_parameters(self) ->
+    def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "problem_analysis": NodeParameter(
                 name="problem_analysis",
@@ -835,7 +835,7 @@ class TeamFormationNode(Node):
             ),
         }

-    def run(self, **kwargs) ->
+    def run(self, **kwargs) -> dict[str, Any]:
         """Form an optimal team."""
         problem_analysis = kwargs.get("problem_analysis", {})
         available_agents = kwargs.get("available_agents", [])
@@ -901,8 +901,8 @@ class TeamFormationNode(Node):
         }

     def _capability_matching_formation(
-        self, problem:
-    ) ->
+        self, problem: dict, agents: list[dict], constraints: dict
+    ) -> list[dict]:
         """Form team by matching capabilities to requirements."""
         required_capabilities = set(problem.get("required_capabilities", []))
         selected_agents = []
@@ -947,8 +947,8 @@ class TeamFormationNode(Node):
         return selected_agents

     def _swarm_based_formation(
-        self, problem:
-    ) ->
+        self, problem: dict, agents: list[dict], constraints: dict
+    ) -> list[dict]:
         """Form team using swarm intelligence principles."""
         required_capabilities = set(problem.get("required_capabilities", []))
         problem.get("complexity_score", 0.5)
@@ -1024,8 +1024,8 @@ class TeamFormationNode(Node):
         return best_cluster

     def _market_based_formation(
-        self, problem:
-    ) ->
+        self, problem: dict, agents: list[dict], constraints: dict
+    ) -> list[dict]:
         """Form team using market-based auction mechanism."""
         required_capabilities = problem.get("required_capabilities", [])
         budget = constraints.get("budget", 100)
@@ -1069,8 +1069,8 @@ class TeamFormationNode(Node):
         return selected_agents

     def _hierarchical_formation(
-        self, problem:
-    ) ->
+        self, problem: dict, agents: list[dict], constraints: dict
+    ) -> list[dict]:
         """Form team with hierarchical structure."""
         required_capabilities = problem.get("required_capabilities", [])

@@ -1108,8 +1108,8 @@ class TeamFormationNode(Node):
         return team

     def _random_formation(
-        self, problem:
-    ) ->
+        self, problem: dict, agents: list[dict], constraints: dict
+    ) -> list[dict]:
         """Random team formation for baseline comparison."""
         team_size = min(
             problem.get("estimated_agents", 5),
@@ -1120,8 +1120,8 @@ class TeamFormationNode(Node):
         return random.sample(agents, team_size)

     def _optimize_team(
-        self, team:
-    ) ->
+        self, team: list[dict], problem: dict, all_agents: list[dict]
+    ) -> dict[str, Any]:
         """Optimize team composition."""
         current_fitness = self._calculate_team_fitness(team, problem)

@@ -1154,7 +1154,7 @@ class TeamFormationNode(Node):
             "fitness_improvement": best_fitness - current_fitness,
         }

-    def _calculate_team_fitness(self, team:
+    def _calculate_team_fitness(self, team: list[dict], problem: dict) -> float:
         """Calculate how well a team matches problem requirements."""
         required_capabilities = set(problem.get("required_capabilities", []))

@@ -1187,8 +1187,8 @@ class TeamFormationNode(Node):
         return max(0, min(1, fitness))

     def _calculate_team_metrics(
-        self, team:
-    ) ->
+        self, team: list[dict], problem: dict
+    ) -> dict[str, Any]:
         """Calculate comprehensive team metrics."""
         required_capabilities = set(problem.get("required_capabilities", []))
         team_capabilities = set()
@@ -1231,7 +1231,7 @@ class SelfOrganizingAgentNode(A2AAgentNode):
         self.collaboration_history = deque(maxlen=50)
         self.skill_adaptations = defaultdict(float)

-    def get_parameters(self) ->
+    def get_parameters(self) -> dict[str, NodeParameter]:
         params = super().get_parameters()

         # Add self-organization specific parameters
@@ -1283,7 +1283,7 @@ class SelfOrganizingAgentNode(A2AAgentNode):

         return params

-    def run(self, **kwargs) ->
+    def run(self, **kwargs) -> dict[str, Any]:
         """Execute self-organizing agent behavior."""
         agent_id = kwargs.get("agent_id")
         capabilities = kwargs.get("capabilities", [])
@@ -1339,7 +1339,7 @@ Guidelines:

         return result

-    def _adapt_to_team(self, agent_id: str, team_context:
+    def _adapt_to_team(self, agent_id: str, team_context: dict, mode: str):
         """Adapt behavior to team dynamics."""
         team_id = team_context.get("team_id")
         if not team_id:
@@ -1368,7 +1368,7 @@ Guidelines:
                 self.skill_adaptations[cap] *= 1.1  # Enhance

     def _enhance_task_with_context(
-        self, task: str, team_context:
+        self, task: str, team_context: dict, capabilities: list[str]
     ) -> str:
         """Enhance task description with team context."""
         enhanced = task
@@ -1389,7 +1389,7 @@ Guidelines:
         return enhanced

     def _track_collaboration(
-        self, agent_id: str, team_context:
+        self, agent_id: str, team_context: dict, task: str, result: dict
     ):
         """Track collaboration history and performance."""
         team_id = team_context.get("team_id", "unknown")
@@ -1442,7 +1442,7 @@ class SolutionEvaluatorNode(Node):
         super().__init__()
         self.evaluation_history = deque(maxlen=100)

-    def get_parameters(self) ->
+    def get_parameters(self) -> dict[str, NodeParameter]:
         return {
             "solution": NodeParameter(
                 name="solution",
@@ -1481,7 +1481,7 @@ class SolutionEvaluatorNode(Node):
             ),
         }

-    def run(self, **kwargs) ->
+    def run(self, **kwargs) -> dict[str, Any]:
         """Evaluate solution quality."""
         solution = kwargs.get("solution", {})
         requirements = kwargs.get("problem_requirements", {})
@@ -1569,8 +1569,8 @@ class SolutionEvaluatorNode(Node):
         }

     def _generate_feedback(
-        self, scores:
-    ) ->
+        self, scores: dict[str, float], requirements: dict, overall: float
+    ) -> dict[str, Any]:
         """Generate specific feedback for improvement."""
         feedback = {"strengths": [], "weaknesses": [], "suggestions": []}

@@ -1596,8 +1596,8 @@ class SolutionEvaluatorNode(Node):
         return feedback

     def _recommend_actions(
-        self, scores:
-    ) ->
+        self, scores: dict[str, float], feedback: dict, iteration: int
+    ) -> list[str]:
         """Recommend specific actions for improvement."""
         actions = []

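The TeamFormationNode hunks above all feed into _calculate_team_fitness(self, team: list[dict], problem: dict) -> float, which scores a candidate team against the problem's required_capabilities and clamps the result with max(0, min(1, fitness)). Below is a standalone sketch of that kind of capability-coverage score; the agent dictionary keys and the coverage-only formula are assumptions, since the full scoring logic is not shown in this diff.

def capability_coverage(team: list[dict], problem: dict) -> float:
    """Illustrative capability-coverage score in [0, 1], not the package's implementation."""
    required = set(problem.get("required_capabilities", []))
    if not required:
        return 1.0  # nothing required, so trivially covered (assumption)
    covered: set[str] = set()
    for agent in team:
        covered.update(agent.get("capabilities", []))  # "capabilities" key assumed
    fitness = len(required & covered) / len(required)
    return max(0.0, min(1.0, fitness))


# Two agents covering two of three required capabilities -> about 0.67
team = [
    {"id": "a1", "capabilities": ["research", "analysis"]},
    {"id": "a2", "capabilities": ["analysis"]},
]
problem = {"required_capabilities": ["research", "analysis", "domain_expertise"]}
print(capability_coverage(team, problem))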