loom_agent-0.3.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- loom/__init__.py +1 -0
- loom/adapters/converters.py +77 -0
- loom/adapters/registry.py +43 -0
- loom/api/factory.py +77 -0
- loom/api/main.py +201 -0
- loom/builtin/__init__.py +3 -0
- loom/builtin/memory/__init__.py +3 -0
- loom/builtin/memory/metabolic.py +96 -0
- loom/builtin/memory/pso.py +41 -0
- loom/builtin/memory/sanitizers.py +39 -0
- loom/builtin/memory/validators.py +55 -0
- loom/config/tool.py +63 -0
- loom/infra/__init__.py +0 -0
- loom/infra/llm.py +44 -0
- loom/infra/logging.py +42 -0
- loom/infra/store.py +39 -0
- loom/infra/transport/memory.py +112 -0
- loom/infra/transport/nats.py +170 -0
- loom/infra/transport/redis.py +161 -0
- loom/interfaces/llm.py +45 -0
- loom/interfaces/memory.py +50 -0
- loom/interfaces/store.py +29 -0
- loom/interfaces/transport.py +35 -0
- loom/kernel/__init__.py +0 -0
- loom/kernel/base_interceptor.py +97 -0
- loom/kernel/bus.py +85 -0
- loom/kernel/dispatcher.py +58 -0
- loom/kernel/interceptors/__init__.py +14 -0
- loom/kernel/interceptors/adaptive.py +567 -0
- loom/kernel/interceptors/budget.py +60 -0
- loom/kernel/interceptors/depth.py +45 -0
- loom/kernel/interceptors/hitl.py +51 -0
- loom/kernel/interceptors/studio.py +129 -0
- loom/kernel/interceptors/timeout.py +27 -0
- loom/kernel/state.py +71 -0
- loom/memory/hierarchical.py +124 -0
- loom/node/__init__.py +0 -0
- loom/node/agent.py +252 -0
- loom/node/base.py +121 -0
- loom/node/crew.py +105 -0
- loom/node/router.py +77 -0
- loom/node/tool.py +50 -0
- loom/protocol/__init__.py +0 -0
- loom/protocol/cloudevents.py +73 -0
- loom/protocol/interfaces.py +164 -0
- loom/protocol/mcp.py +97 -0
- loom/protocol/memory_operations.py +51 -0
- loom/protocol/patch.py +93 -0
- loom_agent-0.3.3.dist-info/LICENSE +204 -0
- loom_agent-0.3.3.dist-info/METADATA +139 -0
- loom_agent-0.3.3.dist-info/RECORD +52 -0
- loom_agent-0.3.3.dist-info/WHEEL +4 -0
loom/node/base.py
ADDED
@@ -0,0 +1,121 @@
"""
Base Node Abstraction (Fractal System)
"""

import asyncio
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional
from uuid import uuid4

from loom.protocol.cloudevents import CloudEvent
from loom.kernel.dispatcher import Dispatcher

class Node(ABC):
    """
    Abstract Base Class for all Fractal Nodes (Agent, Tool, Crew).
    Implements standard event subscription and request handling.
    """

    def __init__(self, node_id: str, dispatcher: Dispatcher):
        self.node_id = node_id
        self.dispatcher = dispatcher
        self.source_uri = f"/node/{node_id}"  # Standard URI

        # Auto-subscribe to my requests
        asyncio.create_task(self._subscribe_to_events())

    async def _subscribe_to_events(self):
        """Subscribe to 'node.request' targeting this node."""
        topic = f"node.request/{self.source_uri.strip('/')}"
        await self.dispatcher.bus.subscribe(topic, self._handle_request)

    async def _handle_request(self, event: CloudEvent):
        """
        Standard request handler.
        1. Calls node-specific process()
        2. Dispatches response/result/error
        """
        try:
            # 1. Process
            result = await self.process(event)

            # 2. Respond
            response_event = CloudEvent.create(
                source=self.source_uri,
                type="node.response",
                data={
                    "request_id": event.id,
                    "result": result
                },
                traceparent=event.traceparent
            )
            # Response topic usually goes to whoever asked, or open bus
            # In request-reply pattern, typically we might just publish it
            # and the caller subscribes to node.response/originator

            # For now, just generic publish
            await self.dispatcher.dispatch(response_event)

        except Exception as e:
            error_event = CloudEvent.create(
                source=self.source_uri,
                type="node.error",
                data={
                    "request_id": event.id,
                    "error": str(e)
                },
                traceparent=event.traceparent
            )
            await self.dispatcher.dispatch(error_event)

    async def call(self, target_node: str, data: Dict[str, Any]) -> Any:
        """
        Call another node and wait for response.

        FIXED: Now properly cleans up subscription using unsubscribe()
        to prevent memory leaks from accumulated handlers.
        """
        request_id = str(uuid4())
        request_event = CloudEvent.create(
            source=self.source_uri,
            type="node.request",
            data=data,
            subject=target_node,
        )
        request_event.id = request_id

        # Subscribe to response
        # Using Broadcast Reply pattern: listen to target's responses
        response_future = asyncio.Future()

        async def handle_response(event: CloudEvent):
            if event.data and event.data.get("request_id") == request_id:
                if not response_future.done():
                    if event.type == "node.error":
                        response_future.set_exception(Exception(event.data.get("error", "Unknown Error")))
                    else:
                        response_future.set_result(event.data.get("result"))

        # Topic: node.response/{target_node}
        # Note: clean URI
        target_topic = f"node.response/{target_node.strip('/')}"

        # We need access to bus directly or via dispatcher
        # Dispatcher has .bus
        await self.dispatcher.bus.subscribe(target_topic, handle_response)

        try:
            # Dispatch request
            await self.dispatcher.dispatch(request_event)

            # Wait for response
            return await asyncio.wait_for(response_future, timeout=30.0)
        finally:
            # FIXED: Cleanup subscription to prevent memory leaks
            await self.dispatcher.bus.unsubscribe(target_topic, handle_response)


    @abstractmethod
    async def process(self, event: CloudEvent) -> Any:
        """Core logic."""
        pass
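Usage sketch (illustrative, not part of the wheel): a concrete subclass only implements process(); _handle_request() wraps its return value in a node.response event, and call() resolves that on the caller's side, assuming the dispatcher routes response events onto the node.response/{source} topic that call() listens on. The dispatcher and caller below are placeholders for objects built elsewhere in the package (loom/kernel, loom/api), and construction must happen inside a running event loop because __init__ schedules the subscription with asyncio.create_task.

import asyncio
from typing import Any

from loom.node.base import Node
from loom.protocol.cloudevents import CloudEvent

class EchoNode(Node):
    """Hypothetical example node: echoes the request payload back."""

    async def process(self, event: CloudEvent) -> Any:
        # The returned value becomes data["result"] of the node.response event.
        return {"echo": event.data}

async def demo(dispatcher, caller: Node) -> None:
    # `dispatcher` and `caller` are assumed to already exist.
    echo = EchoNode("echo", dispatcher)   # auto-subscribes to node.request/node/echo
    await asyncio.sleep(0)                # let the auto-subscribe task run
    result = await caller.call("/node/echo", {"task": "ping"})
    print(result)                         # expected: {"echo": {"task": "ping"}}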
loom/node/crew.py
ADDED
@@ -0,0 +1,105 @@
"""
Crew Node (Orchestrator)
"""

from typing import Any, List, Literal

from loom.protocol.cloudevents import CloudEvent
from loom.protocol.interfaces import NodeProtocol
from loom.node.base import Node
from loom.kernel.dispatcher import Dispatcher
from loom.protocol.memory_operations import ContextSanitizer
from loom.builtin.memory.sanitizers import BubbleUpSanitizer

class CrewNode(Node):
    """
    A Node that orchestrates other Nodes (recursive composition).

    FIXED: Now accepts List[NodeProtocol] instead of List[AgentNode].
    This enables TRUE fractal recursion:
    - CrewNode can contain AgentNode
    - CrewNode can contain other CrewNode (nested crews)
    - CrewNode can contain RouterNode
    - Any Node type that implements NodeProtocol

    This adheres to "Protocol-First" and "Fractal Uniformity" principles.
    """

    def __init__(
        self,
        node_id: str,
        dispatcher: Dispatcher,
        agents: List[NodeProtocol],
        pattern: Literal["sequential", "parallel"] = "sequential",
        sanitizer: ContextSanitizer = None
    ):
        super().__init__(node_id, dispatcher)
        self.agents = agents
        self.pattern = pattern
        self.sanitizer = sanitizer or BubbleUpSanitizer()

    async def process(self, event: CloudEvent) -> Any:
        """
        Execute the crew pattern.
        """
        task = event.data.get("task", "")

        if self.pattern == "sequential":
            return await self._execute_sequential(task, event.traceparent)

        return {"error": "Unsupported pattern"}

    async def _execute_sequential(self, task: str, traceparent: str = None) -> Any:
        """
        Chain agents sequentially. A -> B -> C

        FIXED: Now uses self.call() to ensure all calls go through event bus.
        This ensures:
        - Interceptor hooks are triggered
        - Studio can capture events
        - Distributed deployment is supported
        - Fractal uniformity is maintained
        """
        current_input = task
        chain_results = []

        for agent in self.agents:
            # Use self.call() to invoke through event bus
            # This ensures proper event flow: request -> dispatch -> interceptors -> agent -> response
            try:
                result = await self.call(
                    target_node=agent.source_uri,
                    data={"task": current_input}
                )
            except Exception as e:
                # Error already propagated through event bus
                return {
                    "error": f"Agent {agent.node_id} failed: {str(e)}",
                    "trace": chain_results
                }

            # Extract response
            # self.call() returns the result data from node.response event
            if isinstance(result, dict):
                response = result.get("response", str(result))
            else:
                response = str(result)

            # Sanitization (Fractal Metabolism)
            # Limit the bubble-up context to prevent context pollution in long chains
            sanitized_response = await self.sanitizer.sanitize(str(response), target_token_limit=100)

            chain_results.append({
                "agent": agent.node_id,
                "output": response,  # Full output in trace
                "sanitized": sanitized_response
            })

            # Pass to next agent
            # Design choice: pass full output (agent's memory will metabolize if needed)
            current_input = response

        return {
            "final_output": current_input,
            "trace": chain_results
        }
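Illustrative wiring sketch (not from the wheel); dispatcher, researcher, and writer are placeholder arguments for objects constructed elsewhere in the package. Any members satisfying NodeProtocol can be composed, including nested CrewNodes.

from loom.kernel.dispatcher import Dispatcher
from loom.node.crew import CrewNode
from loom.protocol.interfaces import NodeProtocol

def build_content_crew(dispatcher: Dispatcher,
                       researcher: NodeProtocol,
                       writer: NodeProtocol) -> CrewNode:
    """Hypothetical helper: compose two existing nodes into a sequential crew."""
    return CrewNode(
        node_id="content_crew",
        dispatcher=dispatcher,
        agents=[researcher, writer],   # any NodeProtocol members, e.g. another CrewNode
        pattern="sequential",          # the only pattern process() handles above
    )

# A parent node triggers the crew over the bus like any other node:
#   out = await parent.call(crew.source_uri, {"task": "Draft a release note"})
#   out["final_output"]  -> the last agent's response
#   out["trace"]         -> per-agent {"agent", "output", "sanitized"} records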
loom/node/router.py
ADDED
@@ -0,0 +1,77 @@
"""
Router Node (Attention Mechanism)
"""

from typing import Any, List, Dict
from loom.protocol.cloudevents import CloudEvent
from loom.protocol.interfaces import NodeProtocol
from loom.node.base import Node
from loom.kernel.dispatcher import Dispatcher
from loom.interfaces.llm import LLMProvider

class AttentionRouter(Node):
    """
    Intelligent Router that routes tasks to the best suited Node based on description.

    FIXED: Now accepts List[NodeProtocol] instead of List[AgentNode].
    This enables fractal routing:
    - Can route to AgentNode
    - Can route to CrewNode (sub-teams)
    - Can route to other RouterNode (nested routing)
    - Any Node type that implements NodeProtocol

    This adheres to "Protocol-First" and "Fractal Uniformity" principles.
    """

    def __init__(
        self,
        node_id: str,
        dispatcher: Dispatcher,
        agents: List[NodeProtocol],
        provider: LLMProvider
    ):
        super().__init__(node_id, dispatcher)
        self.agents = {agent.node_id: agent for agent in agents}
        self.provider = provider
        # Agent descriptions map
        self.registry = {agent.node_id: agent.role for agent in agents}

    async def process(self, event: CloudEvent) -> Any:
        task = event.data.get("task", "")
        if not task:
            return {"error": "No task provided"}

        # 1. Construct Prompt
        options = "\n".join([f"- {aid}: {role}" for aid, role in self.registry.items()])
        prompt = f"""
        You are a routing system. Given the task, select the best agent ID to handle it.
        Return ONLY the agent ID.

        Agents:
        {options}

        Task: {task}
        """

        # 2. LLM Select
        # Simple chat call
        response = await self.provider.chat([{"role": "user", "content": prompt}])
        selected_id = response.content.strip()

        # Clean up potential extra chars/whitespace
        # Iterate keys to find match if fuzzy
        target_agent = None
        for aid in self.agents:
            if aid in selected_id:
                target_agent = self.agents[aid]
                break

        if not target_agent:
            return {"error": f"Could not route task. Selected: {selected_id}"}

        # 3. Dispatch to Target
        # Request-Reply: We wait for the agent and return its result.
        # Use our Node.call mechanism!

        result = await self.call(target_agent.source_uri, {"task": task})
        return {"result": result, "routed_to": target_agent.node_id}
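Illustrative sketch (not from the wheel); the arguments are placeholders. Note that besides NodeProtocol, each routed node must also expose a role description, which the router copies into its prompt registry but which NodeProtocol itself does not declare.

from typing import List

from loom.interfaces.llm import LLMProvider
from loom.kernel.dispatcher import Dispatcher
from loom.node.router import AttentionRouter
from loom.protocol.interfaces import NodeProtocol

def build_router(dispatcher: Dispatcher,
                 provider: LLMProvider,
                 agents: List[NodeProtocol]) -> AttentionRouter:
    """Hypothetical helper: put an LLM-based router in front of a set of nodes."""
    # provider.chat() must return an object with a .content string (see process() above).
    return AttentionRouter(
        node_id="front_door",
        dispatcher=dispatcher,
        agents=agents,        # each agent also needs a .role attribute for the registry
        provider=provider,
    )

# Routing then goes through the normal request/response flow:
#   out = await parent.call(router.source_uri, {"task": "Profile the slow query"})
#   out["routed_to"]  -> node_id selected by the LLM
#   out["result"]     -> that node's response payload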
loom/node/tool.py
ADDED
@@ -0,0 +1,50 @@
"""
Tool Node (Fractal System)
"""

from typing import Any, Callable, Dict

from loom.protocol.cloudevents import CloudEvent
from loom.protocol.mcp import MCPToolDefinition
from loom.node.base import Node
from loom.kernel.dispatcher import Dispatcher

class ToolNode(Node):
    """
    A Node that acts as an MCP Server for a single tool.
    Reference Implementation.
    """

    def __init__(
        self,
        node_id: str,
        dispatcher: Dispatcher,
        tool_def: MCPToolDefinition,
        func: Callable[[Dict[str, Any]], Any]
    ):
        super().__init__(node_id, dispatcher)
        self.tool_def = tool_def
        self.func = func

    async def process(self, event: CloudEvent) -> Any:
        """
        Execute the tool.
        Expects event.data to contain 'arguments'.
        """
        args = event.data.get("arguments", {})

        # In a real system, validate against self.tool_def.input_schema

        # Execute
        try:
            # Check if func is async
            import inspect
            if inspect.iscoroutinefunction(self.func):
                result = await self.func(args)
            else:
                result = self.func(args)

            return {"result": result}
        except Exception as e:
            # Re-raise to trigger node.error in Base Node
            raise RuntimeError(f"Tool execution failed: {e}")
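Usage sketch (illustrative): exposing a plain function as a ToolNode with an MCPToolDefinition (defined in loom/protocol/mcp.py below); the dispatcher argument is a placeholder for an object built elsewhere in the package.

from loom.kernel.dispatcher import Dispatcher
from loom.node.tool import ToolNode
from loom.protocol.mcp import MCPToolDefinition

def build_adder(dispatcher: Dispatcher) -> ToolNode:
    """Hypothetical helper: wrap a synchronous function as a tool node."""
    add_def = MCPToolDefinition(
        name="add",
        description="Add two integers.",
        inputSchema={  # alias spelling; input_schema=... also works (populate_by_name=True)
            "type": "object",
            "properties": {"a": {"type": "integer"}, "b": {"type": "integer"}},
            "required": ["a", "b"],
        },
    )

    def add(args: dict) -> int:
        # Sync callables are fine: process() awaits coroutines and calls plain functions directly.
        return args["a"] + args["b"]

    return ToolNode("add_tool", dispatcher, add_def, add)

# Callers reach the tool over the bus with an 'arguments' payload:
#   await caller.call(adder.source_uri, {"arguments": {"a": 2, "b": 3}})   # -> {"result": 5}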
loom/protocol/__init__.py
File without changes
loom/protocol/cloudevents.py
ADDED
@@ -0,0 +1,73 @@
"""
CloudEvents v1.0 Implementation for Loom
"""

from __future__ import annotations

from datetime import datetime, timezone
from typing import Any, Dict, Optional
from uuid import uuid4

from pydantic import BaseModel, Field, ConfigDict

class CloudEvent(BaseModel):
    """
    CloudEvents 1.0 Specification Implementation.

    Attributes:
        specversion: The version of the CloudEvents specification which the event uses.
        id: Identifies the event.
        source: Identifies the context in which an event happened.
        type: Describes the type of event related to the originating occurrence.
        datacontenttype: Content type of data value.
        dataschema: Identifies the schema that data adheres to.
        subject: Describes the subject of the event in the context of the event producer (identified by source).
        time: Timestamp of when the occurrence happened.
        data: The event payload.
        traceparent: W3C Trace Context (Extension)
    """

    # Required Attributes
    specversion: str = "1.0"
    id: str = Field(default_factory=lambda: str(uuid4()))
    source: str
    type: str  # e.g., "node.call", "agent.thought"

    # Optional Attributes
    datacontenttype: Optional[str] = "application/json"
    dataschema: Optional[str] = None
    subject: Optional[str] = None
    time: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    data: Optional[Any] = None

    # Extensions
    traceparent: Optional[str] = None
    extensions: Dict[str, Any] = Field(default_factory=dict)

    model_config = ConfigDict(
        populate_by_name=True,
        json_encoders={datetime: lambda v: v.isoformat()},
        extra='allow'
    )

    def to_dict(self) -> Dict[str, Any]:
        """Convert to standard CloudEvents dictionary structure."""
        return self.model_dump(exclude_none=True, by_alias=True)

    @classmethod
    def create(
        cls,
        source: str,
        type: str,
        data: Optional[Any] = None,
        subject: Optional[str] = None,
        traceparent: Optional[str] = None
    ) -> "CloudEvent":
        """Factory method to create a CloudEvent."""
        return cls(
            source=source,
            type=type,
            data=data,
            subject=subject,
            traceparent=traceparent
        )
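A short usage sketch of the factory and to_dict() above (illustrative values):

from loom.protocol.cloudevents import CloudEvent

# id and time are filled in by the field defaults.
event = CloudEvent.create(
    source="/node/example",
    type="node.request",
    data={"task": "summarize"},
    subject="/node/summarizer",
)

payload = event.to_dict()
# exclude_none drops unset optionals (dataschema, traceparent); the rest is a
# standard CloudEvents mapping: specversion, id, source, type, datacontenttype,
# subject, time, data, extensions.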
loom/protocol/interfaces.py
ADDED
@@ -0,0 +1,164 @@
"""
Core Protocols for Loom Framework.
Adhering to the "Protocol-First" design principle using typing.Protocol.
"""

from typing import Any, Dict, List, Optional, Protocol, runtime_checkable, AsyncIterator, Union

from loom.protocol.cloudevents import CloudEvent

# ----------------------------------------------------------------------
# Node Protocol
# ----------------------------------------------------------------------

@runtime_checkable
class NodeProtocol(Protocol):
    """
    Protocol for any Node in the Loom Fractal System.
    """
    node_id: str
    source_uri: str

    async def process(self, event: CloudEvent) -> Any:
        """
        Process an incoming event and return a result.
        """
        ...

    async def call(self, target_node: str, data: Dict[str, Any]) -> Any:
        """
        Send a request to another node and await the response.
        """
        ...

# ----------------------------------------------------------------------
# Memory Protocol
# ----------------------------------------------------------------------

@runtime_checkable
class MemoryStrategy(Protocol):
    """
    Protocol for Memory interactions.
    """
    async def add(self, role: str, content: str, metadata: Optional[Dict[str, Any]] = None) -> None:
        """Add a memory entry."""
        ...

    async def get_context(self, task: str = "") -> str:
        """Get full context formatted for the LLM."""
        ...

    async def get_recent(self, limit: int = 10) -> List[Dict[str, Any]]:
        """Get recent memory entries."""
        ...

    async def clear(self) -> None:
        """Clear memory."""
        ...


@runtime_checkable
class ReflectiveMemoryStrategy(MemoryStrategy, Protocol):
    """
    Extended Protocol for Memory with Reflection capabilities.

    ADDED: Reflection methods for metabolic memory management.
    Implementations that support reflection should implement this protocol.

    This follows Protocol-First design - not all memories need reflection,
    but those that do should implement these methods consistently.
    """

    def should_reflect(self, threshold: int = 20) -> bool:
        """
        Check if memory should be reflected/consolidated.

        Args:
            threshold: Number of entries that trigger reflection

        Returns:
            True if reflection should be performed
        """
        ...

    def get_reflection_candidates(self, count: int = 10) -> List[Any]:
        """
        Get memory entries to be reflected/summarized.

        Args:
            count: Number of entries to retrieve for reflection

        Returns:
            List of memory entries (implementation-specific format)
        """
        ...

    async def consolidate(self, summary: str, remove_count: int = 10) -> None:
        """
        Consolidate memories by replacing old entries with summary.

        Args:
            summary: The summarized/reflected knowledge
            remove_count: Number of old entries to remove

        This is the "metabolic" operation - converting detailed memories
        into compact knowledge representations.
        """
        ...

# ----------------------------------------------------------------------
# LLM Protocol
# ----------------------------------------------------------------------

# We need the LLMResponse type, but we can't easily import it if it's in the interface file
# without creating circular deps if that interface file imports this protocol file.
# For now, we will use Any or assume the structure matches.
# Ideally, data models should be in `loom.protocol.types` or similar,
# but we'll stick to `Any` or Dict for the strict Protocol definition to avoid tight coupling,
# OR we rely on structural subtyping.
# But let's try to be precise if possible.

@runtime_checkable
class LLMProviderProtocol(Protocol):
    """
    Protocol for LLM Providers.
    """
    async def chat(
        self,
        messages: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None
    ) -> Any:  # Returns LLMResponse compatible object
        ...

    async def stream_chat(
        self,
        messages: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None
    ) -> AsyncIterator[str]:
        ...

# ----------------------------------------------------------------------
# Infra Protocols
# ----------------------------------------------------------------------

@runtime_checkable
class TransportProtocol(Protocol):
    """
    Protocol for Event Transport (Pub/Sub).

    FIXED: Added unsubscribe() to prevent memory leaks.
    Handlers must be unsubscribed when no longer needed.
    """
    async def connect(self) -> None: ...
    async def disconnect(self) -> None: ...
    async def publish(self, topic: str, event: CloudEvent) -> None: ...
    async def subscribe(self, topic: str, handler: Any) -> None: ...
    async def unsubscribe(self, topic: str, handler: Any) -> None: ...

@runtime_checkable
class EventBusProtocol(Protocol):
    """
    Protocol for the Universal Event Bus.
    """
    async def publish(self, event: CloudEvent) -> None: ...
    async def subscribe(self, topic: str, handler: Any) -> None: ...
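Because these protocols are runtime_checkable, conformance is structural: a class never has to inherit from them. A minimal sketch against MemoryStrategy (the ListMemory class is hypothetical, not part of the package); note that isinstance() only verifies that the methods exist, not their signatures.

from typing import Any, Dict, List, Optional

from loom.protocol.interfaces import MemoryStrategy

class ListMemory:
    """Hypothetical in-process memory; matches MemoryStrategy by shape only."""

    def __init__(self) -> None:
        self._entries: List[Dict[str, Any]] = []

    async def add(self, role: str, content: str, metadata: Optional[Dict[str, Any]] = None) -> None:
        self._entries.append({"role": role, "content": content, "metadata": metadata or {}})

    async def get_context(self, task: str = "") -> str:
        return "\n".join(f"{e['role']}: {e['content']}" for e in self._entries)

    async def get_recent(self, limit: int = 10) -> List[Dict[str, Any]]:
        return self._entries[-limit:]

    async def clear(self) -> None:
        self._entries.clear()

assert isinstance(ListMemory(), MemoryStrategy)   # structural check, methods only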
loom/protocol/mcp.py
ADDED
@@ -0,0 +1,97 @@
"""
Model Context Protocol (MCP) Implementation for Loom
"""

from __future__ import annotations

from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional
from dataclasses import dataclass, field
from pydantic import BaseModel, Field, ConfigDict

# --- MCP Data Models ---

class MCPToolDefinition(BaseModel):
    """Definition of an MCP Tool."""
    name: str
    description: str
    input_schema: Dict[str, Any] = Field(..., alias="inputSchema")

    model_config = ConfigDict(populate_by_name=True)

class MCPResource(BaseModel):
    """Definition of an MCP Resource."""
    uri: str
    name: str
    mime_type: str = Field(..., alias="mimeType")
    description: Optional[str] = None

    model_config = ConfigDict(populate_by_name=True)

class MCPPrompt(BaseModel):
    """Definition of an MCP Prompt."""
    name: str
    description: str
    arguments: List[Dict[str, Any]] = Field(default_factory=list)

class MCPToolCall(BaseModel):
    """A request to call a tool."""
    name: str
    arguments: Dict[str, Any]

class MCPToolResult(BaseModel):
    """Result of a tool call."""
    content: List[Dict[str, Any]]  # Text or Image content
    is_error: bool = False

# --- MCP Interfaces ---

class MCPServer(ABC):
    """
    Abstract Interface for an MCP Server (provider of tools/resources).
    """

    @abstractmethod
    async def list_tools(self) -> List[MCPToolDefinition]:
        """List available tools."""
        pass

    @abstractmethod
    async def call_tool(self, name: str, arguments: Dict[str, Any]) -> MCPToolResult:
        """Call a specific tool."""
        pass

    @abstractmethod
    async def list_resources(self) -> List[MCPResource]:
        """List available resources."""
        pass

    @abstractmethod
    async def read_resource(self, uri: str) -> str:
        """Read a resource content."""
        pass

    @abstractmethod
    async def list_prompts(self) -> List[MCPPrompt]:
        """List available prompts."""
        pass

    @abstractmethod
    async def get_prompt(self, name: str, arguments: Dict[str, Any]) -> str:
        """Get a prompt context."""
        pass

class MCPClient(ABC):
    """
    Abstract Interface for an MCP Client (consumer of tools/resources).
    """

    @abstractmethod
    async def discover_capabilities(self):
        """Discover tools and resources from connected servers."""
        pass

    @abstractmethod
    async def call_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Any:
        """Execute a tool via the protocol."""
        pass
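A small sketch of the alias handling on the data models above (illustrative values): the MCP wire format uses camelCase keys (inputSchema, mimeType) while populate_by_name=True keeps the snake_case attribute names usable from Python.

from loom.protocol.mcp import MCPToolDefinition

schema = {"type": "object", "properties": {"query": {"type": "string"}}}

# Wire-format input (camelCase key) ...
wire = MCPToolDefinition.model_validate(
    {"name": "search", "description": "Full-text search.", "inputSchema": schema}
)
# ... and local construction (snake_case field name) produce the same model.
local = MCPToolDefinition(name="search", description="Full-text search.", input_schema=schema)
assert wire.input_schema == local.input_schema

# Dumping with by_alias=True restores the camelCase key for the wire format.
assert "inputSchema" in wire.model_dump(by_alias=True)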