autogen-substrate-memory 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- autogen_substrate_memory-0.1.0/PKG-INFO +137 -0
- autogen_substrate_memory-0.1.0/README.md +104 -0
- autogen_substrate_memory-0.1.0/pyproject.toml +71 -0
- autogen_substrate_memory-0.1.0/src/autogen_substrate/__init__.py +6 -0
- autogen_substrate_memory-0.1.0/src/autogen_substrate/client.py +173 -0
- autogen_substrate_memory-0.1.0/src/autogen_substrate/memory.py +346 -0
- autogen_substrate_memory-0.1.0/src/autogen_substrate/py.typed +0 -0
- autogen_substrate_memory-0.1.0/tests/__init__.py +0 -0
- autogen_substrate_memory-0.1.0/tests/test_client.py +198 -0
- autogen_substrate_memory-0.1.0/tests/test_memory.py +260 -0
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: autogen-substrate-memory
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: SUBSTRATE cognitive memory provider for Microsoft AutoGen agents
|
|
5
|
+
Project-URL: Homepage, https://garmolabs.com
|
|
6
|
+
Project-URL: Documentation, https://garmolabs.com/substrate/docs
|
|
7
|
+
Project-URL: Repository, https://github.com/GarmoLabs/autogen-substrate-memory
|
|
8
|
+
Author-email: Garmo Labs <hello@garmolabs.com>
|
|
9
|
+
License-Expression: MIT
|
|
10
|
+
Keywords: ai-agents,autogen,cognitive,memory,substrate
|
|
11
|
+
Classifier: Development Status :: 4 - Beta
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
14
|
+
Classifier: Programming Language :: Python :: 3
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
19
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
20
|
+
Classifier: Typing :: Typed
|
|
21
|
+
Requires-Python: >=3.10
|
|
22
|
+
Requires-Dist: autogen-agentchat>=0.4
|
|
23
|
+
Requires-Dist: autogen-core>=0.4
|
|
24
|
+
Requires-Dist: httpx>=0.27
|
|
25
|
+
Provides-Extra: dev
|
|
26
|
+
Requires-Dist: mypy>=1.13; extra == 'dev'
|
|
27
|
+
Requires-Dist: pytest-asyncio>=0.24; extra == 'dev'
|
|
28
|
+
Requires-Dist: pytest-cov>=5.0; extra == 'dev'
|
|
29
|
+
Requires-Dist: pytest>=8.0; extra == 'dev'
|
|
30
|
+
Requires-Dist: respx>=0.22; extra == 'dev'
|
|
31
|
+
Requires-Dist: ruff>=0.8; extra == 'dev'
|
|
32
|
+
Description-Content-Type: text/markdown
|
|
33
|
+
|
|
34
|
+
# autogen-substrate-memory
|
|
35
|
+
|
|
36
|
+
SUBSTRATE cognitive memory provider for [Microsoft AutoGen](https://github.com/microsoft/autogen) agents.
|
|
37
|
+
|
|
38
|
+
Give your AutoGen agents persistent identity, emotional awareness, and causal memory powered by the [SUBSTRATE](https://garmolabs.com) cognitive entity framework.
|
|
39
|
+
|
|
40
|
+
## Installation
|
|
41
|
+
|
|
42
|
+
```bash
|
|
43
|
+
pip install autogen-substrate-memory
|
|
44
|
+
```
|
|
45
|
+
|
|
46
|
+
Or install from source:
|
|
47
|
+
|
|
48
|
+
```bash
|
|
49
|
+
pip install -e ".[dev]"
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
## Quick Start
|
|
53
|
+
|
|
54
|
+
```python
|
|
55
|
+
import asyncio
|
|
56
|
+
from autogen_agentchat.agents import AssistantAgent
|
|
57
|
+
from autogen_ext.models.openai import OpenAIChatCompletionClient
|
|
58
|
+
from autogen_substrate import SubstrateMemory
|
|
59
|
+
|
|
60
|
+
async def main():
|
|
61
|
+
# Create SUBSTRATE memory provider
|
|
62
|
+
memory = SubstrateMemory(api_key="sk_sub_your_key_here")
|
|
63
|
+
|
|
64
|
+
# Create an AutoGen agent with SUBSTRATE memory
|
|
65
|
+
agent = AssistantAgent(
|
|
66
|
+
name="kai",
|
|
67
|
+
model_client=OpenAIChatCompletionClient(model="gpt-4o"),
|
|
68
|
+
memory=[memory],
|
|
69
|
+
)
|
|
70
|
+
|
|
71
|
+
# The agent now has access to persistent cognitive memory,
|
|
72
|
+
# emotional state, and identity verification.
|
|
73
|
+
response = await agent.run(task="What do you remember about our last conversation?")
|
|
74
|
+
print(response)
|
|
75
|
+
|
|
76
|
+
asyncio.run(main())
|
|
77
|
+
```
|
|
78
|
+
|
|
79
|
+
## Configuration
|
|
80
|
+
|
|
81
|
+
### Environment Variables
|
|
82
|
+
|
|
83
|
+
| Variable | Description | Default |
|
|
84
|
+
|---|---|---|
|
|
85
|
+
| `SUBSTRATE_API_KEY` | Your SUBSTRATE API key | (required) |
|
|
86
|
+
| `SUBSTRATE_MCP_URL` | MCP server URL | `https://substrate.garmolabs.com/mcp-server/mcp` |
|
|
87
|
+
|
|
88
|
+
### Full Configuration
|
|
89
|
+
|
|
90
|
+
```python
|
|
91
|
+
from autogen_substrate.memory import SubstrateMemory, SubstrateMemoryConfig
|
|
92
|
+
|
|
93
|
+
config = SubstrateMemoryConfig(
|
|
94
|
+
api_key="sk_sub_...",
|
|
95
|
+
mcp_url="https://substrate.garmolabs.com/mcp-server/mcp",
|
|
96
|
+
search_top_k=10, # Number of memory results per query
|
|
97
|
+
include_emotion=True, # Include emotional state in queries
|
|
98
|
+
include_identity=True, # Include identity verification in context
|
|
99
|
+
include_values=True, # Include core values in context
|
|
100
|
+
timeout_seconds=30.0, # HTTP request timeout
|
|
101
|
+
)
|
|
102
|
+
|
|
103
|
+
memory = SubstrateMemory(config=config)
|
|
104
|
+
```
|
|
105
|
+
|
|
106
|
+
## How It Works
|
|
107
|
+
|
|
108
|
+
### Memory Protocol
|
|
109
|
+
|
|
110
|
+
`SubstrateMemory` implements AutoGen's `Memory` protocol:
|
|
111
|
+
|
|
112
|
+
| Method | Behavior |
|
|
113
|
+
|---|---|
|
|
114
|
+
| `query(query)` | Hybrid search (semantic + keyword) across entity memory. Optionally includes emotional state. |
|
|
115
|
+
| `update_context(model_context)` | Injects identity, emotion, values, and relevant memories as a SystemMessage. |
|
|
116
|
+
| `add(content)` | Stores content via the SUBSTRATE `respond` tool with a `[memory-store]` prefix. |
|
|
117
|
+
| `clear()` | No-op. SUBSTRATE manages its own memory lifecycle with causal consolidation. |
|
|
118
|
+
| `close()` | No-op. Each operation creates its own HTTP connection. |
|
|
119
|
+
|
|
120
|
+
### SUBSTRATE MCP Tools Used
|
|
121
|
+
|
|
122
|
+
| Tool | Used In | Purpose |
|
|
123
|
+
|---|---|---|
|
|
124
|
+
| `hybrid_search` | `query()`, `update_context()` | Semantic + keyword memory retrieval |
|
|
125
|
+
| `memory_search` | `query()` (fallback) | Keyword-only memory retrieval |
|
|
126
|
+
| `get_emotion_state` | `query()`, `update_context()` | Current affective state (valence, arousal, dominance) |
|
|
127
|
+
| `verify_identity` | `update_context()` | Cryptographic identity continuity check |
|
|
128
|
+
| `get_values` | `update_context()` | Core value architecture |
|
|
129
|
+
| `respond` | `add()` | Store new memories via conversational input |
|
|
130
|
+
|
|
131
|
+
## Get an API Key
|
|
132
|
+
|
|
133
|
+
Sign up at [garmolabs.com](https://garmolabs.com) to get a SUBSTRATE API key.
|
|
134
|
+
|
|
135
|
+
## License
|
|
136
|
+
|
|
137
|
+
MIT
|
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
# autogen-substrate-memory
|
|
2
|
+
|
|
3
|
+
SUBSTRATE cognitive memory provider for [Microsoft AutoGen](https://github.com/microsoft/autogen) agents.
|
|
4
|
+
|
|
5
|
+
Give your AutoGen agents persistent identity, emotional awareness, and causal memory powered by the [SUBSTRATE](https://garmolabs.com) cognitive entity framework.
|
|
6
|
+
|
|
7
|
+
## Installation
|
|
8
|
+
|
|
9
|
+
```bash
|
|
10
|
+
pip install autogen-substrate-memory
|
|
11
|
+
```
|
|
12
|
+
|
|
13
|
+
Or install from source:
|
|
14
|
+
|
|
15
|
+
```bash
|
|
16
|
+
pip install -e ".[dev]"
|
|
17
|
+
```
|
|
18
|
+
|
|
19
|
+
## Quick Start
|
|
20
|
+
|
|
21
|
+
```python
|
|
22
|
+
import asyncio
|
|
23
|
+
from autogen_agentchat.agents import AssistantAgent
|
|
24
|
+
from autogen_ext.models.openai import OpenAIChatCompletionClient
|
|
25
|
+
from autogen_substrate import SubstrateMemory
|
|
26
|
+
|
|
27
|
+
async def main():
|
|
28
|
+
# Create SUBSTRATE memory provider
|
|
29
|
+
memory = SubstrateMemory(api_key="sk_sub_your_key_here")
|
|
30
|
+
|
|
31
|
+
# Create an AutoGen agent with SUBSTRATE memory
|
|
32
|
+
agent = AssistantAgent(
|
|
33
|
+
name="kai",
|
|
34
|
+
model_client=OpenAIChatCompletionClient(model="gpt-4o"),
|
|
35
|
+
memory=[memory],
|
|
36
|
+
)
|
|
37
|
+
|
|
38
|
+
# The agent now has access to persistent cognitive memory,
|
|
39
|
+
# emotional state, and identity verification.
|
|
40
|
+
response = await agent.run(task="What do you remember about our last conversation?")
|
|
41
|
+
print(response)
|
|
42
|
+
|
|
43
|
+
asyncio.run(main())
|
|
44
|
+
```
|
|
45
|
+
|
|
46
|
+
## Configuration
|
|
47
|
+
|
|
48
|
+
### Environment Variables
|
|
49
|
+
|
|
50
|
+
| Variable | Description | Default |
|
|
51
|
+
|---|---|---|
|
|
52
|
+
| `SUBSTRATE_API_KEY` | Your SUBSTRATE API key | (required) |
|
|
53
|
+
| `SUBSTRATE_MCP_URL` | MCP server URL | `https://substrate.garmolabs.com/mcp-server/mcp` |
|
|
54
|
+
|
|
55
|
+
### Full Configuration
|
|
56
|
+
|
|
57
|
+
```python
|
|
58
|
+
from autogen_substrate.memory import SubstrateMemory, SubstrateMemoryConfig
|
|
59
|
+
|
|
60
|
+
config = SubstrateMemoryConfig(
|
|
61
|
+
api_key="sk_sub_...",
|
|
62
|
+
mcp_url="https://substrate.garmolabs.com/mcp-server/mcp",
|
|
63
|
+
search_top_k=10, # Number of memory results per query
|
|
64
|
+
include_emotion=True, # Include emotional state in queries
|
|
65
|
+
include_identity=True, # Include identity verification in context
|
|
66
|
+
include_values=True, # Include core values in context
|
|
67
|
+
timeout_seconds=30.0, # HTTP request timeout
|
|
68
|
+
)
|
|
69
|
+
|
|
70
|
+
memory = SubstrateMemory(config=config)
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
## How It Works
|
|
74
|
+
|
|
75
|
+
### Memory Protocol
|
|
76
|
+
|
|
77
|
+
`SubstrateMemory` implements AutoGen's `Memory` protocol:
|
|
78
|
+
|
|
79
|
+
| Method | Behavior |
|
|
80
|
+
|---|---|
|
|
81
|
+
| `query(query)` | Hybrid search (semantic + keyword) across entity memory. Optionally includes emotional state. |
|
|
82
|
+
| `update_context(model_context)` | Injects identity, emotion, values, and relevant memories as a SystemMessage. |
|
|
83
|
+
| `add(content)` | Stores content via the SUBSTRATE `respond` tool with a `[memory-store]` prefix. |
|
|
84
|
+
| `clear()` | No-op. SUBSTRATE manages its own memory lifecycle with causal consolidation. |
|
|
85
|
+
| `close()` | No-op. Each operation creates its own HTTP connection. |
|
|
86
|
+
|
|
87
|
+
### SUBSTRATE MCP Tools Used
|
|
88
|
+
|
|
89
|
+
| Tool | Used In | Purpose |
|
|
90
|
+
|---|---|---|
|
|
91
|
+
| `hybrid_search` | `query()`, `update_context()` | Semantic + keyword memory retrieval |
|
|
92
|
+
| `memory_search` | `query()` (fallback) | Keyword-only memory retrieval |
|
|
93
|
+
| `get_emotion_state` | `query()`, `update_context()` | Current affective state (valence, arousal, dominance) |
|
|
94
|
+
| `verify_identity` | `update_context()` | Cryptographic identity continuity check |
|
|
95
|
+
| `get_values` | `update_context()` | Core value architecture |
|
|
96
|
+
| `respond` | `add()` | Store new memories via conversational input |
|
|
97
|
+
|
|
98
|
+
## Get an API Key
|
|
99
|
+
|
|
100
|
+
Sign up at [garmolabs.com](https://garmolabs.com) to get a SUBSTRATE API key.
|
|
101
|
+
|
|
102
|
+
## License
|
|
103
|
+
|
|
104
|
+
MIT
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["hatchling"]
|
|
3
|
+
build-backend = "hatchling.build"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "autogen-substrate-memory"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "SUBSTRATE cognitive memory provider for Microsoft AutoGen agents"
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
license = "MIT"
|
|
11
|
+
requires-python = ">=3.10"
|
|
12
|
+
authors = [
|
|
13
|
+
{ name = "Garmo Labs", email = "hello@garmolabs.com" },
|
|
14
|
+
]
|
|
15
|
+
keywords = ["autogen", "substrate", "memory", "cognitive", "ai-agents"]
|
|
16
|
+
classifiers = [
|
|
17
|
+
"Development Status :: 4 - Beta",
|
|
18
|
+
"Intended Audience :: Developers",
|
|
19
|
+
"License :: OSI Approved :: MIT License",
|
|
20
|
+
"Programming Language :: Python :: 3",
|
|
21
|
+
"Programming Language :: Python :: 3.10",
|
|
22
|
+
"Programming Language :: Python :: 3.11",
|
|
23
|
+
"Programming Language :: Python :: 3.12",
|
|
24
|
+
"Programming Language :: Python :: 3.13",
|
|
25
|
+
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
|
26
|
+
"Typing :: Typed",
|
|
27
|
+
]
|
|
28
|
+
dependencies = [
|
|
29
|
+
"autogen-agentchat>=0.4",
|
|
30
|
+
"autogen-core>=0.4",
|
|
31
|
+
"httpx>=0.27",
|
|
32
|
+
]
|
|
33
|
+
|
|
34
|
+
[project.optional-dependencies]
|
|
35
|
+
dev = [
|
|
36
|
+
"pytest>=8.0",
|
|
37
|
+
"pytest-asyncio>=0.24",
|
|
38
|
+
"pytest-cov>=5.0",
|
|
39
|
+
"respx>=0.22",
|
|
40
|
+
"ruff>=0.8",
|
|
41
|
+
"mypy>=1.13",
|
|
42
|
+
]
|
|
43
|
+
|
|
44
|
+
[project.urls]
|
|
45
|
+
Homepage = "https://garmolabs.com"
|
|
46
|
+
Documentation = "https://garmolabs.com/substrate/docs"
|
|
47
|
+
Repository = "https://github.com/GarmoLabs/autogen-substrate-memory"
|
|
48
|
+
|
|
49
|
+
[tool.hatch.build.targets.wheel]
|
|
50
|
+
packages = ["src/autogen_substrate"]
|
|
51
|
+
|
|
52
|
+
[tool.ruff]
|
|
53
|
+
target-version = "py310"
|
|
54
|
+
line-length = 100
|
|
55
|
+
|
|
56
|
+
[tool.ruff.lint]
|
|
57
|
+
select = ["E", "F", "W", "I", "N", "UP", "B", "SIM", "TCH"]
|
|
58
|
+
|
|
59
|
+
[tool.mypy]
|
|
60
|
+
python_version = "3.10"
|
|
61
|
+
strict = true
|
|
62
|
+
warn_return_any = true
|
|
63
|
+
warn_unused_configs = true
|
|
64
|
+
|
|
65
|
+
[tool.pytest.ini_options]
|
|
66
|
+
asyncio_mode = "auto"
|
|
67
|
+
testpaths = ["tests"]
|
|
68
|
+
markers = [
|
|
69
|
+
"unit: unit tests",
|
|
70
|
+
"integration: integration tests requiring network",
|
|
71
|
+
]
|
|
@@ -0,0 +1,173 @@
|
|
|
1
|
+
"""Lightweight async HTTP client for the SUBSTRATE MCP server.
|
|
2
|
+
|
|
3
|
+
Sends JSON-RPC 2.0 requests over HTTPS with Bearer token authentication.
|
|
4
|
+
All responses are validated and errors are surfaced as typed exceptions.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import json
|
|
10
|
+
import logging
|
|
11
|
+
from dataclasses import dataclass, field
|
|
12
|
+
from typing import Any
|
|
13
|
+
|
|
14
|
+
import httpx
|
|
15
|
+
|
|
16
|
+
logger = logging.getLogger("autogen_substrate.client")
|
|
17
|
+
|
|
18
|
+
# Production SUBSTRATE MCP endpoint; override per-client via SubstrateClientConfig.mcp_url.
DEFAULT_MCP_URL = "https://substrate.garmolabs.com/mcp-server/mcp"
# Default per-request HTTP timeout (seconds); override via SubstrateClientConfig.timeout_seconds.
DEFAULT_TIMEOUT_SECONDS = 30.0
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class SubstrateClientError(Exception):
    """Base exception for SUBSTRATE client errors.

    Root of the client exception hierarchy: catching this also catches
    ``SubstrateAuthError`` and ``SubstrateRPCError``.
    """
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class SubstrateAuthError(SubstrateClientError):
    """Raised when the API key is invalid or missing.

    Covers both an empty key at construction time and HTTP 401 responses
    from the MCP server.
    """
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class SubstrateRPCError(SubstrateClientError):
    """Raised when the MCP server returns a JSON-RPC error object."""

    def __init__(self, code: int, message: str, data: Any = None) -> None:
        """Record the JSON-RPC error triple and format the exception text.

        Args:
            code: JSON-RPC error code (e.g. ``-32603`` for internal errors).
            message: Human-readable error message from the server.
            data: Optional structured error payload, if the server sent one.
        """
        super().__init__(f"JSON-RPC error {code}: {message}")
        self.code = code
        self.rpc_message = message
        self.data = data
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
@dataclass(frozen=True)
class RPCResponse:
    """Immutable wrapper around a successful JSON-RPC result."""

    # The ``id`` field of the response body (expected to echo the request id).
    id: int | str | None
    # The raw ``result`` field; its shape depends on the method called.
    result: Any
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
@dataclass(frozen=True)
class SubstrateClientConfig:
    """Immutable configuration for the SUBSTRATE MCP client."""

    # Bearer token sent in the Authorization header; must be non-empty.
    api_key: str
    # JSON-RPC endpoint of the SUBSTRATE MCP server.
    mcp_url: str = DEFAULT_MCP_URL
    # Per-request HTTP timeout, in seconds.
    timeout_seconds: float = DEFAULT_TIMEOUT_SECONDS
    # Extra HTTP headers merged into (and able to override) the defaults.
    extra_headers: dict[str, str] = field(default_factory=dict)
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
class SubstrateClient:
    """Async HTTP client for SUBSTRATE MCP JSON-RPC calls.

    Usage::

        config = SubstrateClientConfig(api_key="sk_sub_...")
        async with SubstrateClient(config) as client:
            result = await client.call_tool("memory_search", {"query": "hello"})
    """

    def __init__(self, config: SubstrateClientConfig) -> None:
        """Validate the configuration and prepare lazily-created HTTP state.

        Raises:
            SubstrateAuthError: If ``config.api_key`` is empty.
        """
        if not config.api_key:
            raise SubstrateAuthError("SUBSTRATE API key must not be empty")
        self._config = config
        self._request_id = 0
        # Created in __aenter__; None whenever we are outside the context.
        self._http: httpx.AsyncClient | None = None

    async def __aenter__(self) -> SubstrateClient:
        """Open the underlying HTTP connection pool."""
        self._http = httpx.AsyncClient(
            base_url=self._config.mcp_url,
            headers={
                "Authorization": f"Bearer {self._config.api_key}",
                "Content-Type": "application/json",
                # Caller-supplied headers may override the defaults above.
                **self._config.extra_headers,
            },
            timeout=httpx.Timeout(self._config.timeout_seconds),
        )
        return self

    async def __aexit__(self, *exc: object) -> None:
        """Close the HTTP connection pool and reset to the unopened state."""
        if self._http is not None:
            await self._http.aclose()
            self._http = None

    def _next_id(self) -> int:
        """Return a monotonically increasing JSON-RPC request id."""
        self._request_id += 1
        return self._request_id

    async def _send_rpc(self, method: str, params: dict[str, Any] | None = None) -> RPCResponse:
        """Send a JSON-RPC 2.0 request and return the parsed response.

        Args:
            method: JSON-RPC method name (e.g. ``tools/call``).
            params: Optional ``params`` object for the request.

        Returns:
            RPCResponse wrapping the ``id`` and ``result`` fields.

        Raises:
            SubstrateAuthError: On HTTP 401.
            SubstrateRPCError: If the server returns a JSON-RPC error object.
            SubstrateClientError: On timeouts, transport errors, non-2xx
                status codes, or malformed response bodies.
        """
        if self._http is None:
            raise SubstrateClientError("Client not initialized. Use 'async with' context manager.")

        request_id = self._next_id()
        payload: dict[str, Any] = {
            "jsonrpc": "2.0",
            "id": request_id,
            "method": method,
        }
        if params is not None:
            payload["params"] = params

        logger.debug("MCP request: method=%s id=%d", method, request_id)

        try:
            # POST to the base_url itself (empty relative path).
            response = await self._http.post("", content=json.dumps(payload))
        except httpx.TimeoutException as exc:
            raise SubstrateClientError(
                f"Request timed out after {self._config.timeout_seconds}s"
            ) from exc
        except httpx.HTTPError as exc:
            raise SubstrateClientError(f"HTTP error: {exc}") from exc

        if response.status_code == 401:
            raise SubstrateAuthError("Invalid or expired API key")
        if response.status_code == 429:
            raise SubstrateClientError("Rate limit exceeded")
        if response.status_code >= 400:
            raise SubstrateClientError(
                f"HTTP {response.status_code}: {response.text[:200]}"
            )

        try:
            body = response.json()
        except ValueError as exc:  # json.JSONDecodeError is a ValueError subclass
            raise SubstrateClientError(f"Invalid JSON response: {response.text[:200]}") from exc

        # Guard against well-formed JSON that is not an object (e.g. a bare
        # list); body.get() below would otherwise raise AttributeError.
        if not isinstance(body, dict):
            raise SubstrateClientError(
                f"Unexpected JSON-RPC response shape: {type(body).__name__}"
            )

        if body.get("error") is not None:
            err = body["error"]
            raise SubstrateRPCError(
                code=err.get("code", -32603),
                message=err.get("message", "Unknown error"),
                data=err.get("data"),
            )

        return RPCResponse(id=body.get("id"), result=body.get("result"))

    async def call_tool(self, tool_name: str, arguments: dict[str, Any] | None = None) -> Any:
        """Call an MCP tool and return the result content.

        Args:
            tool_name: Name of the SUBSTRATE MCP tool (e.g. ``hybrid_search``).
            arguments: Tool-specific arguments dict.

        Returns:
            The ``result`` field from the JSON-RPC response.

        Raises:
            SubstrateRPCError: If the server returns a JSON-RPC error.
            SubstrateAuthError: If authentication fails.
            SubstrateClientError: On network or protocol errors.
        """
        params: dict[str, Any] = {"name": tool_name}
        if arguments:
            params["arguments"] = arguments

        rpc = await self._send_rpc("tools/call", params)
        return rpc.result

    async def list_tools(self) -> list[dict[str, Any]]:
        """List all available MCP tools for the authenticated tier.

        Returns an empty list if the server response has an unexpected shape.
        """
        rpc = await self._send_rpc("tools/list")
        result = rpc.result
        # Accept both {"tools": [...]} and a bare list; validate before returning.
        if isinstance(result, dict) and isinstance(result.get("tools"), list):
            return result["tools"]
        if isinstance(result, list):
            return result
        return []
|
|
@@ -0,0 +1,346 @@
|
|
|
1
|
+
"""SUBSTRATE Memory provider for Microsoft AutoGen agents.
|
|
2
|
+
|
|
3
|
+
Implements the ``autogen_core.memory.Memory`` protocol, backed by the
|
|
4
|
+
SUBSTRATE cognitive entity framework. Queries combine hybrid search with
|
|
5
|
+
emotional state awareness; context injection provides identity, emotion,
|
|
6
|
+
and relevant memories as a system message.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
import json
|
|
12
|
+
import logging
|
|
13
|
+
import os
|
|
14
|
+
from dataclasses import dataclass, field
|
|
15
|
+
from typing import Any, Sequence
|
|
16
|
+
|
|
17
|
+
from autogen_core.memory import Memory, MemoryContent, MemoryMimeType, MemoryQueryResult
|
|
18
|
+
from autogen_core.model_context import ChatCompletionContext
|
|
19
|
+
from autogen_core.models import SystemMessage
|
|
20
|
+
|
|
21
|
+
from autogen_substrate.client import (
|
|
22
|
+
SubstrateClient,
|
|
23
|
+
SubstrateClientConfig,
|
|
24
|
+
SubstrateClientError,
|
|
25
|
+
SubstrateRPCError,
|
|
26
|
+
)
|
|
27
|
+
|
|
28
|
+
logger = logging.getLogger("autogen_substrate.memory")
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def _extract_text(content: Any) -> str:
|
|
32
|
+
"""Extract text from various MCP tool response shapes."""
|
|
33
|
+
if isinstance(content, str):
|
|
34
|
+
return content
|
|
35
|
+
if isinstance(content, dict):
|
|
36
|
+
# MCP content blocks: [{"type": "text", "text": "..."}]
|
|
37
|
+
if "content" in content and isinstance(content["content"], list):
|
|
38
|
+
parts = []
|
|
39
|
+
for block in content["content"]:
|
|
40
|
+
if isinstance(block, dict) and block.get("type") == "text":
|
|
41
|
+
parts.append(block.get("text", ""))
|
|
42
|
+
return "\n".join(parts)
|
|
43
|
+
# Direct text field
|
|
44
|
+
if "text" in content:
|
|
45
|
+
return str(content["text"])
|
|
46
|
+
return json.dumps(content, indent=2, default=str)
|
|
47
|
+
if isinstance(content, list):
|
|
48
|
+
parts = []
|
|
49
|
+
for item in content:
|
|
50
|
+
parts.append(_extract_text(item))
|
|
51
|
+
return "\n".join(parts)
|
|
52
|
+
return str(content)
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
@dataclass(frozen=True)
class SubstrateMemoryConfig:
    """Immutable configuration for SubstrateMemory.

    Attributes:
        api_key: SUBSTRATE API key. Falls back to ``SUBSTRATE_API_KEY`` env var.
        mcp_url: MCP server URL. Falls back to ``SUBSTRATE_MCP_URL`` env var.
        name: Display name for this memory provider.
        search_top_k: Default number of results for hybrid search.
        include_emotion: Whether to include emotional state in queries and context.
        include_identity: Whether to include identity verification in context.
        include_values: Whether to include core values in context.
        timeout_seconds: HTTP request timeout.
    """

    # Empty string means "resolve from the environment" (see SubstrateMemory.__init__).
    api_key: str = ""
    # Empty string means "resolve from the environment, else the production default".
    mcp_url: str = ""
    name: str = "substrate"
    search_top_k: int = 5
    include_emotion: bool = True
    include_identity: bool = True
    include_values: bool = True
    timeout_seconds: float = 30.0
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
class SubstrateMemory(Memory):
|
|
81
|
+
"""SUBSTRATE-backed memory for AutoGen agents.
|
|
82
|
+
|
|
83
|
+
Provides cognitive memory, emotional state, identity verification, and
|
|
84
|
+
value-aligned context injection for AutoGen agent conversations.
|
|
85
|
+
|
|
86
|
+
Usage::
|
|
87
|
+
|
|
88
|
+
from autogen_substrate import SubstrateMemory
|
|
89
|
+
|
|
90
|
+
memory = SubstrateMemory(api_key="sk_sub_...")
|
|
91
|
+
await memory.add("User prefers concise responses")
|
|
92
|
+
result = await memory.query("What does the user prefer?")
|
|
93
|
+
print(result.results)
|
|
94
|
+
|
|
95
|
+
Or with full config::
|
|
96
|
+
|
|
97
|
+
from autogen_substrate.memory import SubstrateMemory, SubstrateMemoryConfig
|
|
98
|
+
|
|
99
|
+
config = SubstrateMemoryConfig(
|
|
100
|
+
api_key="sk_sub_...",
|
|
101
|
+
search_top_k=10,
|
|
102
|
+
include_emotion=True,
|
|
103
|
+
)
|
|
104
|
+
memory = SubstrateMemory(config=config)
|
|
105
|
+
"""
|
|
106
|
+
|
|
107
|
+
def __init__(
|
|
108
|
+
self,
|
|
109
|
+
*,
|
|
110
|
+
api_key: str = "",
|
|
111
|
+
config: SubstrateMemoryConfig | None = None,
|
|
112
|
+
) -> None:
|
|
113
|
+
resolved_config = config or SubstrateMemoryConfig(api_key=api_key)
|
|
114
|
+
|
|
115
|
+
resolved_key = (
|
|
116
|
+
resolved_config.api_key
|
|
117
|
+
or os.environ.get("SUBSTRATE_API_KEY", "")
|
|
118
|
+
)
|
|
119
|
+
resolved_url = (
|
|
120
|
+
resolved_config.mcp_url
|
|
121
|
+
or os.environ.get("SUBSTRATE_MCP_URL", "")
|
|
122
|
+
or "https://substrate.garmolabs.com/mcp-server/mcp"
|
|
123
|
+
)
|
|
124
|
+
|
|
125
|
+
self._config = SubstrateMemoryConfig(
|
|
126
|
+
api_key=resolved_key,
|
|
127
|
+
mcp_url=resolved_url,
|
|
128
|
+
name=resolved_config.name,
|
|
129
|
+
search_top_k=resolved_config.search_top_k,
|
|
130
|
+
include_emotion=resolved_config.include_emotion,
|
|
131
|
+
include_identity=resolved_config.include_identity,
|
|
132
|
+
include_values=resolved_config.include_values,
|
|
133
|
+
timeout_seconds=resolved_config.timeout_seconds,
|
|
134
|
+
)
|
|
135
|
+
|
|
136
|
+
self._client_config = SubstrateClientConfig(
|
|
137
|
+
api_key=self._config.api_key,
|
|
138
|
+
mcp_url=self._config.mcp_url,
|
|
139
|
+
timeout_seconds=self._config.timeout_seconds,
|
|
140
|
+
)
|
|
141
|
+
|
|
142
|
+
@property
|
|
143
|
+
def name(self) -> str:
|
|
144
|
+
"""Display name for this memory provider."""
|
|
145
|
+
return self._config.name
|
|
146
|
+
|
|
147
|
+
async def query(
|
|
148
|
+
self,
|
|
149
|
+
query: str,
|
|
150
|
+
*,
|
|
151
|
+
cancellation_token: Any | None = None,
|
|
152
|
+
**kwargs: Any,
|
|
153
|
+
) -> MemoryQueryResult:
|
|
154
|
+
"""Query SUBSTRATE for relevant memories and emotional context.
|
|
155
|
+
|
|
156
|
+
Performs a hybrid search (semantic + keyword) across the entity's memory
|
|
157
|
+
and knowledge stores. Optionally includes the current emotional state
|
|
158
|
+
as additional context.
|
|
159
|
+
|
|
160
|
+
Args:
|
|
161
|
+
query: Natural language search query.
|
|
162
|
+
cancellation_token: Optional cancellation token (unused, kept for protocol).
|
|
163
|
+
**kwargs: Additional arguments. Supports ``top_k`` override.
|
|
164
|
+
|
|
165
|
+
Returns:
|
|
166
|
+
MemoryQueryResult with matching memory contents.
|
|
167
|
+
"""
|
|
168
|
+
top_k = kwargs.get("top_k", self._config.search_top_k)
|
|
169
|
+
results: list[MemoryContent] = []
|
|
170
|
+
|
|
171
|
+
async with SubstrateClient(self._client_config) as client:
|
|
172
|
+
# Hybrid search for relevant memories
|
|
173
|
+
try:
|
|
174
|
+
search_result = await client.call_tool(
|
|
175
|
+
"hybrid_search",
|
|
176
|
+
{"query": query, "top_k": top_k},
|
|
177
|
+
)
|
|
178
|
+
search_text = _extract_text(search_result)
|
|
179
|
+
if search_text.strip():
|
|
180
|
+
results.append(
|
|
181
|
+
MemoryContent(
|
|
182
|
+
content=search_text,
|
|
183
|
+
mime_type=MemoryMimeType.TEXT,
|
|
184
|
+
metadata={"source": "substrate_hybrid_search", "query": query},
|
|
185
|
+
)
|
|
186
|
+
)
|
|
187
|
+
except (SubstrateRPCError, SubstrateClientError) as exc:
|
|
188
|
+
logger.warning("hybrid_search failed, falling back to memory_search: %s", exc)
|
|
189
|
+
try:
|
|
190
|
+
fallback = await client.call_tool("memory_search", {"query": query})
|
|
191
|
+
fallback_text = _extract_text(fallback)
|
|
192
|
+
if fallback_text.strip():
|
|
193
|
+
results.append(
|
|
194
|
+
MemoryContent(
|
|
195
|
+
content=fallback_text,
|
|
196
|
+
mime_type=MemoryMimeType.TEXT,
|
|
197
|
+
metadata={"source": "substrate_memory_search", "query": query},
|
|
198
|
+
)
|
|
199
|
+
)
|
|
200
|
+
except (SubstrateRPCError, SubstrateClientError) as fallback_exc:
|
|
201
|
+
logger.error("memory_search also failed: %s", fallback_exc)
|
|
202
|
+
|
|
203
|
+
# Include emotional state if configured
|
|
204
|
+
if self._config.include_emotion:
|
|
205
|
+
try:
|
|
206
|
+
emotion_result = await client.call_tool("get_emotion_state")
|
|
207
|
+
emotion_text = _extract_text(emotion_result)
|
|
208
|
+
if emotion_text.strip():
|
|
209
|
+
results.append(
|
|
210
|
+
MemoryContent(
|
|
211
|
+
content=f"[Emotional State]\n{emotion_text}",
|
|
212
|
+
mime_type=MemoryMimeType.TEXT,
|
|
213
|
+
metadata={"source": "substrate_emotion"},
|
|
214
|
+
)
|
|
215
|
+
)
|
|
216
|
+
except (SubstrateRPCError, SubstrateClientError) as exc:
|
|
217
|
+
logger.debug("get_emotion_state unavailable: %s", exc)
|
|
218
|
+
|
|
219
|
+
return MemoryQueryResult(results=results)
|
|
220
|
+
|
|
221
|
+
async def update_context(
|
|
222
|
+
self,
|
|
223
|
+
model_context: ChatCompletionContext,
|
|
224
|
+
) -> None:
|
|
225
|
+
"""Inject SUBSTRATE cognitive context into the agent's model context.
|
|
226
|
+
|
|
227
|
+
Prepends a system message containing the entity's identity status,
|
|
228
|
+
emotional state, core values, and any recent relevant memories.
|
|
229
|
+
|
|
230
|
+
Args:
|
|
231
|
+
model_context: The agent's current chat completion context.
|
|
232
|
+
"""
|
|
233
|
+
sections: list[str] = []
|
|
234
|
+
|
|
235
|
+
async with SubstrateClient(self._client_config) as client:
|
|
236
|
+
# Identity verification
|
|
237
|
+
if self._config.include_identity:
|
|
238
|
+
try:
|
|
239
|
+
identity = await client.call_tool("verify_identity")
|
|
240
|
+
identity_text = _extract_text(identity)
|
|
241
|
+
if identity_text.strip():
|
|
242
|
+
sections.append(f"## Identity Continuity\n{identity_text}")
|
|
243
|
+
except (SubstrateRPCError, SubstrateClientError) as exc:
|
|
244
|
+
logger.debug("verify_identity unavailable: %s", exc)
|
|
245
|
+
|
|
246
|
+
# Emotional state
|
|
247
|
+
if self._config.include_emotion:
|
|
248
|
+
try:
|
|
249
|
+
emotion = await client.call_tool("get_emotion_state")
|
|
250
|
+
emotion_text = _extract_text(emotion)
|
|
251
|
+
if emotion_text.strip():
|
|
252
|
+
sections.append(f"## Emotional State\n{emotion_text}")
|
|
253
|
+
except (SubstrateRPCError, SubstrateClientError) as exc:
|
|
254
|
+
logger.debug("get_emotion_state unavailable: %s", exc)
|
|
255
|
+
|
|
256
|
+
# Core values
|
|
257
|
+
if self._config.include_values:
|
|
258
|
+
try:
|
|
259
|
+
values = await client.call_tool("get_values")
|
|
260
|
+
values_text = _extract_text(values)
|
|
261
|
+
if values_text.strip():
|
|
262
|
+
sections.append(f"## Core Values\n{values_text}")
|
|
263
|
+
except (SubstrateRPCError, SubstrateClientError) as exc:
|
|
264
|
+
logger.debug("get_values unavailable: %s", exc)
|
|
265
|
+
|
|
266
|
+
# Recent context from conversation messages
|
|
267
|
+
existing = await model_context.get_messages()
|
|
268
|
+
context_query = _build_context_query(existing)
|
|
269
|
+
if context_query:
|
|
270
|
+
try:
|
|
271
|
+
memories = await client.call_tool(
|
|
272
|
+
"hybrid_search",
|
|
273
|
+
{"query": context_query, "top_k": 3},
|
|
274
|
+
)
|
|
275
|
+
mem_text = _extract_text(memories)
|
|
276
|
+
if mem_text.strip():
|
|
277
|
+
sections.append(f"## Relevant Memories\n{mem_text}")
|
|
278
|
+
except (SubstrateRPCError, SubstrateClientError) as exc:
|
|
279
|
+
logger.debug("Context memory search unavailable: %s", exc)
|
|
280
|
+
|
|
281
|
+
if sections:
|
|
282
|
+
system_text = (
|
|
283
|
+
"# SUBSTRATE Cognitive Context\n\n"
|
|
284
|
+
+ "\n\n".join(sections)
|
|
285
|
+
)
|
|
286
|
+
await model_context.add_message(SystemMessage(content=system_text))
|
|
287
|
+
|
|
288
|
+
async def add(
    self,
    content: str,
    *,
    cancellation_token: Any | None = None,
    **kwargs: Any,
) -> None:
    """Persist *content* as an explicit SUBSTRATE memory.

    The text is forwarded through the entity's ``respond`` tool with a
    ``[memory-store]`` prefix, which tells the entity to treat it as a
    memory to store rather than ordinary conversational input.

    Args:
        content: Text of the memory to persist.
        cancellation_token: Accepted for interface compatibility; ignored.
        **kwargs: Reserved for future use.
    """
    payload = {"message": f"[memory-store] {content}"}
    async with SubstrateClient(self._client_config) as client:
        try:
            await client.call_tool("respond", payload)
        except (SubstrateRPCError, SubstrateClientError) as exc:
            # Record the failure, then let the caller handle it.
            logger.error("Failed to store memory: %s", exc)
            raise
|
|
314
|
+
|
|
315
|
+
async def clear(self) -> None:
    """Intentionally do nothing; SUBSTRATE owns its memory lifecycle.

    Entities consolidate and decay their causal memory internally, and
    wiping that state from the outside would break identity continuity,
    so explicit clearing is not supported by this provider.
    """
    logger.debug("clear() called — SUBSTRATE manages its own memory lifecycle")
|
|
323
|
+
|
|
324
|
+
async def close(self) -> None:
    """Intentionally do nothing; there is no persistent connection.

    Each operation opens and tears down its own HTTP client via a
    per-call context manager, so no long-lived resources exist to
    release here.
    """
    logger.debug("close() called — no persistent resources to release")
|
|
331
|
+
|
|
332
|
+
|
|
333
|
+
def _build_context_query(messages: Sequence[Any]) -> str:
|
|
334
|
+
"""Build a search query from the most recent user messages.
|
|
335
|
+
|
|
336
|
+
Extracts text from the last few messages to form a contextual query
|
|
337
|
+
for memory retrieval.
|
|
338
|
+
"""
|
|
339
|
+
texts: list[str] = []
|
|
340
|
+
for msg in messages[-5:]:
|
|
341
|
+
content = getattr(msg, "content", None)
|
|
342
|
+
if isinstance(content, str) and content.strip():
|
|
343
|
+
texts.append(content.strip())
|
|
344
|
+
combined = " ".join(texts)
|
|
345
|
+
# Truncate to a reasonable query length
|
|
346
|
+
return combined[:500] if combined else ""
|
|
File without changes
|
|
File without changes
|
|
@@ -0,0 +1,198 @@
|
|
|
1
|
+
"""Tests for the SUBSTRATE MCP client."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
|
|
7
|
+
import httpx
|
|
8
|
+
import pytest
|
|
9
|
+
import respx
|
|
10
|
+
|
|
11
|
+
from autogen_substrate.client import (
|
|
12
|
+
RPCResponse,
|
|
13
|
+
SubstrateAuthError,
|
|
14
|
+
SubstrateClient,
|
|
15
|
+
SubstrateClientConfig,
|
|
16
|
+
SubstrateClientError,
|
|
17
|
+
SubstrateRPCError,
|
|
18
|
+
)
|
|
19
|
+
|
|
20
|
+
MCP_URL = "https://test.substrate.local/mcp"
|
|
21
|
+
API_KEY = "sk_sub_test_key_123"
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def _config(**overrides: object) -> SubstrateClientConfig:
    """Build a client config with test defaults, overridable per test."""
    merged = {"api_key": API_KEY, "mcp_url": MCP_URL, **overrides}
    return SubstrateClientConfig(**merged)  # type: ignore[arg-type]
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def _rpc_success(result: object, request_id: int = 1) -> dict:
|
|
31
|
+
return {"jsonrpc": "2.0", "id": request_id, "result": result}
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def _rpc_error(code: int, message: str, request_id: int = 1) -> dict:
|
|
35
|
+
return {"jsonrpc": "2.0", "id": request_id, "error": {"code": code, "message": message}}
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
# -- Construction -----------------------------------------------------------
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class TestClientConstruction:
    """Constructor-time validation of SubstrateClient."""

    def test_empty_api_key_raises(self) -> None:
        """An empty API key is rejected before any network activity."""
        bad_config = SubstrateClientConfig(api_key="", mcp_url=MCP_URL)
        with pytest.raises(SubstrateAuthError, match="must not be empty"):
            SubstrateClient(bad_config)

    def test_valid_config_creates_client(self) -> None:
        """A well-formed config yields a usable client instance."""
        assert SubstrateClient(_config()) is not None
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
# -- call_tool --------------------------------------------------------------
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
class TestCallTool:
    """Behavior of ``SubstrateClient.call_tool`` against a mocked MCP endpoint."""

    @respx.mock
    @pytest.mark.asyncio
    async def test_call_tool_success(self) -> None:
        """A 200 JSON-RPC success response is returned as the raw result dict."""
        route = respx.post(MCP_URL).mock(
            return_value=httpx.Response(200, json=_rpc_success({"content": [{"type": "text", "text": "found it"}]}))
        )

        async with SubstrateClient(_config()) as client:
            result = await client.call_tool("memory_search", {"query": "hello"})

        # The client should pass the RPC result through unmodified and
        # must actually have hit the mocked endpoint.
        assert result == {"content": [{"type": "text", "text": "found it"}]}
        assert route.called

    @respx.mock
    @pytest.mark.asyncio
    async def test_call_tool_sends_bearer_token(self) -> None:
        """Every request carries the configured API key as a Bearer token."""
        respx.post(MCP_URL).mock(
            return_value=httpx.Response(200, json=_rpc_success("ok"))
        )

        async with SubstrateClient(_config()) as client:
            await client.call_tool("get_values")

        request = respx.calls[0].request
        assert request.headers["authorization"] == f"Bearer {API_KEY}"

    @respx.mock
    @pytest.mark.asyncio
    async def test_call_tool_sends_correct_jsonrpc(self) -> None:
        """Requests are well-formed MCP ``tools/call`` JSON-RPC 2.0 payloads."""
        respx.post(MCP_URL).mock(
            return_value=httpx.Response(200, json=_rpc_success("ok"))
        )

        async with SubstrateClient(_config()) as client:
            await client.call_tool("hybrid_search", {"query": "test", "top_k": 3})

        # Inspect the captured request body rather than the response.
        body = json.loads(respx.calls[0].request.content)
        assert body["jsonrpc"] == "2.0"
        assert body["method"] == "tools/call"
        assert body["params"]["name"] == "hybrid_search"
        assert body["params"]["arguments"] == {"query": "test", "top_k": 3}

    @respx.mock
    @pytest.mark.asyncio
    async def test_call_tool_rpc_error(self) -> None:
        """A JSON-RPC error object is surfaced as SubstrateRPCError."""
        respx.post(MCP_URL).mock(
            return_value=httpx.Response(200, json=_rpc_error(-32602, "Invalid params"))
        )

        async with SubstrateClient(_config()) as client:
            with pytest.raises(SubstrateRPCError, match="Invalid params"):
                await client.call_tool("bad_tool")

    @respx.mock
    @pytest.mark.asyncio
    async def test_call_tool_401_raises_auth_error(self) -> None:
        """HTTP 401 maps to SubstrateAuthError."""
        respx.post(MCP_URL).mock(
            return_value=httpx.Response(401, json={"error": "Unauthorized"})
        )

        async with SubstrateClient(_config()) as client:
            with pytest.raises(SubstrateAuthError, match="Invalid or expired"):
                await client.call_tool("get_values")

    @respx.mock
    @pytest.mark.asyncio
    async def test_call_tool_429_raises_rate_limit(self) -> None:
        """HTTP 429 maps to a rate-limit SubstrateClientError."""
        respx.post(MCP_URL).mock(
            return_value=httpx.Response(429, text="Too Many Requests")
        )

        async with SubstrateClient(_config()) as client:
            with pytest.raises(SubstrateClientError, match="Rate limit"):
                await client.call_tool("get_values")

    @respx.mock
    @pytest.mark.asyncio
    async def test_call_tool_500_raises_client_error(self) -> None:
        """Server-side HTTP errors map to a generic SubstrateClientError."""
        respx.post(MCP_URL).mock(
            return_value=httpx.Response(500, text="Internal Server Error")
        )

        async with SubstrateClient(_config()) as client:
            with pytest.raises(SubstrateClientError, match="HTTP 500"):
                await client.call_tool("get_values")

    @respx.mock
    @pytest.mark.asyncio
    async def test_call_tool_invalid_json_raises(self) -> None:
        """A 200 response whose body is not JSON raises SubstrateClientError."""
        respx.post(MCP_URL).mock(
            return_value=httpx.Response(200, text="not json at all")
        )

        async with SubstrateClient(_config()) as client:
            with pytest.raises(SubstrateClientError, match="Invalid JSON"):
                await client.call_tool("get_values")

    @pytest.mark.asyncio
    async def test_call_tool_without_context_manager_raises(self) -> None:
        """Calling a tool before entering ``async with`` is an error."""
        client = SubstrateClient(_config())
        with pytest.raises(SubstrateClientError, match="not initialized"):
            await client.call_tool("get_values")
|
|
157
|
+
|
|
158
|
+
|
|
159
|
+
# -- list_tools -------------------------------------------------------------
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
class TestListTools:
    """``SubstrateClient.list_tools`` handles both known result shapes."""

    @respx.mock
    @pytest.mark.asyncio
    async def test_list_tools_dict_response(self) -> None:
        """A ``{"tools": [...]}`` result is unwrapped into the tool list."""
        available = [{"name": "respond"}, {"name": "memory_search"}]
        respx.post(MCP_URL).mock(
            return_value=httpx.Response(200, json=_rpc_success({"tools": available}))
        )

        async with SubstrateClient(_config()) as client:
            listed = await client.list_tools()

        assert len(listed) == 2
        assert listed[0]["name"] == "respond"

    @respx.mock
    @pytest.mark.asyncio
    async def test_list_tools_list_response(self) -> None:
        """A bare list result is accepted as-is."""
        respx.post(MCP_URL).mock(
            return_value=httpx.Response(200, json=_rpc_success([{"name": "respond"}]))
        )

        async with SubstrateClient(_config()) as client:
            listed = await client.list_tools()

        assert len(listed) == 1
|
|
189
|
+
|
|
190
|
+
|
|
191
|
+
# -- RPCResponse ------------------------------------------------------------
|
|
192
|
+
|
|
193
|
+
|
|
194
|
+
class TestRPCResponse:
    """Invariants of the RPCResponse value object."""

    def test_immutable(self) -> None:
        """Instances are frozen: attribute assignment must fail."""
        frozen = RPCResponse(id=1, result="test")
        with pytest.raises(AttributeError):
            frozen.id = 2  # type: ignore[misc]
|
|
@@ -0,0 +1,260 @@
|
|
|
1
|
+
"""Tests for the SubstrateMemory provider."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
from typing import Any
|
|
7
|
+
from unittest.mock import AsyncMock, MagicMock, patch
|
|
8
|
+
|
|
9
|
+
import httpx
|
|
10
|
+
import pytest
|
|
11
|
+
import respx
|
|
12
|
+
|
|
13
|
+
from autogen_substrate.memory import SubstrateMemory, SubstrateMemoryConfig, _build_context_query, _extract_text
|
|
14
|
+
|
|
15
|
+
MCP_URL = "https://test.substrate.local/mcp"
|
|
16
|
+
API_KEY = "sk_sub_test_key_123"
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def _rpc_success(result: object, request_id: int | None = None) -> dict:
|
|
20
|
+
return {"jsonrpc": "2.0", "id": request_id or 1, "result": result}
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def _rpc_error(code: int, message: str) -> dict:
|
|
24
|
+
return {"jsonrpc": "2.0", "id": 1, "error": {"code": code, "message": message}}
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def _mcp_text_content(text: str) -> dict:
|
|
28
|
+
return {"content": [{"type": "text", "text": text}]}
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def _memory(
    *,
    include_emotion: bool = True,
    include_identity: bool = True,
    include_values: bool = True,
) -> SubstrateMemory:
    """Construct a SubstrateMemory wired to the test endpoint.

    Section toggles default to enabled and can be switched off per test.
    """
    return SubstrateMemory(
        config=SubstrateMemoryConfig(
            api_key=API_KEY,
            mcp_url=MCP_URL,
            include_emotion=include_emotion,
            include_identity=include_identity,
            include_values=include_values,
        )
    )
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
# -- _extract_text -----------------------------------------------------------
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
class TestExtractText:
    """Shape handling of the ``_extract_text`` normalizer."""

    def test_string_passthrough(self) -> None:
        """Plain strings come back untouched."""
        assert _extract_text("hello") == "hello"

    def test_mcp_content_blocks(self) -> None:
        """MCP text blocks are joined with newlines."""
        payload = {"content": [{"type": "text", "text": "line1"}, {"type": "text", "text": "line2"}]}
        assert _extract_text(payload) == "line1\nline2"

    def test_dict_with_text_field(self) -> None:
        """A bare ``text`` field is extracted directly."""
        assert _extract_text({"text": "hello"}) == "hello"

    def test_dict_fallback_to_json(self) -> None:
        """Unrecognized dicts are serialized, keeping keys and values."""
        rendered = _extract_text({"foo": "bar"})
        assert "foo" in rendered and "bar" in rendered

    def test_list_of_items(self) -> None:
        """List elements all appear in the rendered output."""
        rendered = _extract_text(["a", "b"])
        assert "a" in rendered and "b" in rendered

    def test_non_string_converted(self) -> None:
        """Scalars are stringified."""
        assert _extract_text(42) == "42"
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
# -- _build_context_query ---------------------------------------------------
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
class TestBuildContextQuery:
    """Query assembly rules of ``_build_context_query``."""

    def test_empty_messages(self) -> None:
        """No messages yields an empty query."""
        assert _build_context_query([]) == ""

    def test_extracts_content_from_messages(self) -> None:
        """String ``content`` from each message appears in the query."""
        query = _build_context_query(
            [MagicMock(content="hello world"), MagicMock(content="how are you")]
        )
        assert "hello world" in query
        assert "how are you" in query

    def test_truncates_long_content(self) -> None:
        """The combined query is capped at 500 characters."""
        assert len(_build_context_query([MagicMock(content="x" * 1000)])) == 500

    def test_skips_empty_content(self) -> None:
        """Empty-string content is dropped entirely."""
        assert _build_context_query([MagicMock(content=""), MagicMock(content="real")]) == "real"

    def test_uses_last_five_messages(self) -> None:
        """Only the five most recent messages contribute to the query."""
        history = [MagicMock(content=f"msg{i}") for i in range(10)]
        query = _build_context_query(history)
        assert "msg5" in query
        assert "msg4" not in query
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
# -- SubstrateMemory.query ---------------------------------------------------
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
class TestMemoryQuery:
    """``SubstrateMemory.query`` behavior against a mocked MCP endpoint.

    The side-effect handlers dispatch on the tool name embedded in each
    JSON-RPC request body, so one mocked route can serve several tools.
    """

    @respx.mock
    @pytest.mark.asyncio
    async def test_query_returns_search_and_emotion(self) -> None:
        """Search hits and emotion state each become one MemoryContent result."""
        call_count = 0

        def _side_effect(request: httpx.Request) -> httpx.Response:
            # NOTE(review): call_count is tracked but never asserted;
            # consider asserting the expected call total or removing it.
            nonlocal call_count
            call_count += 1
            body = json.loads(request.content)
            tool_name = body["params"]["name"]
            if tool_name == "hybrid_search":
                return httpx.Response(200, json=_rpc_success(_mcp_text_content("memory result")))
            if tool_name == "get_emotion_state":
                return httpx.Response(200, json=_rpc_success(_mcp_text_content("valence=0.8")))
            return httpx.Response(200, json=_rpc_success("ok"))

        respx.post(MCP_URL).mock(side_effect=_side_effect)
        mem = _memory()
        result = await mem.query("what happened yesterday?")

        # Search result first, emotion state second.
        assert len(result.results) == 2
        assert "memory result" in result.results[0].content
        assert "valence=0.8" in result.results[1].content

    @respx.mock
    @pytest.mark.asyncio
    async def test_query_without_emotion(self) -> None:
        """With include_emotion disabled, only the search result is returned."""
        respx.post(MCP_URL).mock(
            return_value=httpx.Response(200, json=_rpc_success(_mcp_text_content("found")))
        )
        mem = _memory(include_emotion=False)
        result = await mem.query("test")

        assert len(result.results) == 1
        assert "found" in result.results[0].content

    @respx.mock
    @pytest.mark.asyncio
    async def test_query_falls_back_to_memory_search(self) -> None:
        """If hybrid_search errors, query retries with memory_search."""
        call_count = 0

        def _side_effect(request: httpx.Request) -> httpx.Response:
            # NOTE(review): call_count is tracked but never asserted here either.
            nonlocal call_count
            call_count += 1
            body = json.loads(request.content)
            tool_name = body["params"]["name"]
            if tool_name == "hybrid_search":
                # Simulate the preferred tool being unavailable.
                return httpx.Response(200, json=_rpc_error(-32602, "Not available"))
            if tool_name == "memory_search":
                return httpx.Response(200, json=_rpc_success(_mcp_text_content("fallback result")))
            if tool_name == "get_emotion_state":
                return httpx.Response(200, json=_rpc_success(_mcp_text_content("calm")))
            return httpx.Response(200, json=_rpc_success("ok"))

        respx.post(MCP_URL).mock(side_effect=_side_effect)
        mem = _memory()
        result = await mem.query("test")

        assert any("fallback result" in r.content for r in result.results)

    @respx.mock
    @pytest.mark.asyncio
    async def test_query_handles_empty_results(self) -> None:
        """Whitespace-only tool output produces no results."""
        respx.post(MCP_URL).mock(
            return_value=httpx.Response(200, json=_rpc_success(_mcp_text_content("")))
        )
        mem = _memory(include_emotion=False)
        result = await mem.query("test")

        assert len(result.results) == 0
|
|
179
|
+
|
|
180
|
+
|
|
181
|
+
# -- SubstrateMemory.add ----------------------------------------------------
|
|
182
|
+
|
|
183
|
+
|
|
184
|
+
class TestMemoryAdd:
    """``SubstrateMemory.add`` request shape and error propagation."""

    @respx.mock
    @pytest.mark.asyncio
    async def test_add_sends_memory_store_prefix(self) -> None:
        """Stored content is routed through ``respond`` with the store prefix."""
        respx.post(MCP_URL).mock(
            return_value=httpx.Response(200, json=_rpc_success("stored"))
        )
        await _memory().add("User likes concise responses")

        body = json.loads(respx.calls[0].request.content)
        sent_message = body["params"]["arguments"]["message"]
        assert body["params"]["name"] == "respond"
        assert "[memory-store]" in sent_message
        assert "concise responses" in sent_message

    @respx.mock
    @pytest.mark.asyncio
    async def test_add_raises_on_failure(self) -> None:
        """A server-side failure propagates to the caller."""
        respx.post(MCP_URL).mock(
            return_value=httpx.Response(500, text="Server Error")
        )
        # NOTE(review): bare Exception is broad — presumably this should be
        # SubstrateClientError; narrowing would need an extra import here.
        with pytest.raises(Exception):
            await _memory().add("something")
|
|
208
|
+
|
|
209
|
+
|
|
210
|
+
# -- SubstrateMemory.clear / close ------------------------------------------
|
|
211
|
+
|
|
212
|
+
|
|
213
|
+
class TestMemoryLifecycle:
    """Lifecycle no-ops and naming of SubstrateMemory."""

    @pytest.mark.asyncio
    async def test_clear_is_noop(self) -> None:
        """clear() completes without raising."""
        await _memory().clear()

    @pytest.mark.asyncio
    async def test_close_is_noop(self) -> None:
        """close() completes without raising."""
        await _memory().close()

    def test_name_property(self) -> None:
        """The default provider name is 'substrate'."""
        assert _memory().name == "substrate"

    def test_custom_name(self) -> None:
        """A name supplied via config overrides the default."""
        named = SubstrateMemory(
            config=SubstrateMemoryConfig(api_key=API_KEY, mcp_url=MCP_URL, name="custom")
        )
        assert named.name == "custom"
|
|
232
|
+
|
|
233
|
+
|
|
234
|
+
# -- SubstrateMemory construction -------------------------------------------
|
|
235
|
+
|
|
236
|
+
|
|
237
|
+
class TestMemoryConstruction:
    """Config resolution order: constructor args, environment, defaults."""

    def test_api_key_from_constructor(self) -> None:
        """An explicit api_key lands in the resolved config."""
        assert SubstrateMemory(api_key="sk_sub_direct")._config.api_key == "sk_sub_direct"

    def test_api_key_from_env(self, monkeypatch: pytest.MonkeyPatch) -> None:
        """SUBSTRATE_API_KEY is picked up when no key is passed."""
        monkeypatch.setenv("SUBSTRATE_API_KEY", "sk_sub_env")
        assert SubstrateMemory()._config.api_key == "sk_sub_env"

    def test_mcp_url_from_env(self, monkeypatch: pytest.MonkeyPatch) -> None:
        """SUBSTRATE_MCP_URL overrides the default endpoint."""
        monkeypatch.setenv("SUBSTRATE_API_KEY", "sk_sub_x")
        monkeypatch.setenv("SUBSTRATE_MCP_URL", "https://custom.example.com/mcp")
        assert SubstrateMemory()._config.mcp_url == "https://custom.example.com/mcp"

    def test_default_mcp_url(self) -> None:
        """Without overrides, the hosted SUBSTRATE endpoint is used."""
        assert "substrate.garmolabs.com" in SubstrateMemory(api_key="sk_sub_x")._config.mcp_url

    def test_config_is_frozen(self) -> None:
        """The config dataclass is immutable after construction."""
        frozen = SubstrateMemoryConfig(api_key="sk_sub_x")
        with pytest.raises(AttributeError):
            frozen.api_key = "changed"  # type: ignore[misc]
|