universal-agent-context 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- uacs/__init__.py +12 -0
- uacs/adapters/__init__.py +19 -0
- uacs/adapters/agent_skill_adapter.py +202 -0
- uacs/adapters/agents_md_adapter.py +330 -0
- uacs/adapters/base.py +261 -0
- uacs/adapters/clinerules_adapter.py +39 -0
- uacs/adapters/cursorrules_adapter.py +39 -0
- uacs/api.py +262 -0
- uacs/cli/__init__.py +6 -0
- uacs/cli/context.py +349 -0
- uacs/cli/main.py +195 -0
- uacs/cli/mcp.py +115 -0
- uacs/cli/memory.py +142 -0
- uacs/cli/packages.py +309 -0
- uacs/cli/skills.py +144 -0
- uacs/cli/utils.py +24 -0
- uacs/config/repositories.yaml +26 -0
- uacs/context/__init__.py +0 -0
- uacs/context/agent_context.py +406 -0
- uacs/context/shared_context.py +661 -0
- uacs/context/unified_context.py +332 -0
- uacs/mcp_server_entry.py +80 -0
- uacs/memory/__init__.py +5 -0
- uacs/memory/simple_memory.py +255 -0
- uacs/packages/__init__.py +26 -0
- uacs/packages/manager.py +413 -0
- uacs/packages/models.py +60 -0
- uacs/packages/sources.py +270 -0
- uacs/protocols/__init__.py +5 -0
- uacs/protocols/mcp/__init__.py +8 -0
- uacs/protocols/mcp/manager.py +77 -0
- uacs/protocols/mcp/skills_server.py +700 -0
- uacs/skills_validator.py +367 -0
- uacs/utils/__init__.py +5 -0
- uacs/utils/paths.py +24 -0
- uacs/visualization/README.md +132 -0
- uacs/visualization/__init__.py +36 -0
- uacs/visualization/models.py +195 -0
- uacs/visualization/static/index.html +857 -0
- uacs/visualization/storage.py +402 -0
- uacs/visualization/visualization.py +328 -0
- uacs/visualization/web_server.py +364 -0
- universal_agent_context-0.2.0.dist-info/METADATA +873 -0
- universal_agent_context-0.2.0.dist-info/RECORD +47 -0
- universal_agent_context-0.2.0.dist-info/WHEEL +4 -0
- universal_agent_context-0.2.0.dist-info/entry_points.txt +2 -0
- universal_agent_context-0.2.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,332 @@
|
|
|
1
|
+
"""Unified context adapter combining agent skills, AGENTS.md, and shared context.
|
|
2
|
+
|
|
3
|
+
This module creates a single unified interface for all agent context sources:
|
|
4
|
+
- SKILL.md: Individual skill files (Agent Skills format, recommended)
|
|
5
|
+
- AGENTS.md: Project-specific instructions
|
|
6
|
+
- Shared Context: Runtime agent communication with compression
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import json
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
from typing import Any
|
|
12
|
+
|
|
13
|
+
from uacs.adapters.agent_skill_adapter import AgentSkillAdapter
|
|
14
|
+
from uacs.adapters.agents_md_adapter import AgentsMDAdapter
|
|
15
|
+
from uacs.context.shared_context import SharedContextManager
|
|
16
|
+
from uacs.utils.paths import get_project_root
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class UnifiedContextAdapter:
|
|
20
|
+
"""Unified adapter for all agent context sources."""
|
|
21
|
+
|
|
22
|
+
def __init__(
|
|
23
|
+
self,
|
|
24
|
+
agents_md_path: Path | None = None,
|
|
25
|
+
context_storage: Path | None = None,
|
|
26
|
+
):
|
|
27
|
+
"""Initialize unified adapter.
|
|
28
|
+
|
|
29
|
+
Args:
|
|
30
|
+
agents_md_path: Path to AGENTS.md or project root
|
|
31
|
+
context_storage: Path for shared context storage
|
|
32
|
+
"""
|
|
33
|
+
# Initialize all adapters
|
|
34
|
+
self.agents_md = AgentsMDAdapter(agents_md_path)
|
|
35
|
+
self.shared_context = SharedContextManager(context_storage)
|
|
36
|
+
self.agent_skills = AgentSkillAdapter.discover_skills(get_project_root())
|
|
37
|
+
|
|
38
|
+
def build_agent_prompt(
|
|
39
|
+
self,
|
|
40
|
+
user_query: str,
|
|
41
|
+
agent_name: str,
|
|
42
|
+
include_history: bool = True,
|
|
43
|
+
max_context_tokens: int = 4000,
|
|
44
|
+
) -> str:
|
|
45
|
+
"""Build complete agent prompt with all context sources.
|
|
46
|
+
|
|
47
|
+
Args:
|
|
48
|
+
user_query: User's query
|
|
49
|
+
agent_name: Name of agent receiving prompt
|
|
50
|
+
include_history: Include shared context history
|
|
51
|
+
max_context_tokens: Max tokens for context
|
|
52
|
+
|
|
53
|
+
Returns:
|
|
54
|
+
Complete prompt string
|
|
55
|
+
"""
|
|
56
|
+
prompt_parts = []
|
|
57
|
+
|
|
58
|
+
# 1. AGENTS.md project context (if available)
|
|
59
|
+
agents_md_prompt = self.agents_md.to_system_prompt()
|
|
60
|
+
if agents_md_prompt:
|
|
61
|
+
prompt_parts.append("# PROJECT CONTEXT (from AGENTS.md)")
|
|
62
|
+
prompt_parts.append(agents_md_prompt)
|
|
63
|
+
prompt_parts.append("")
|
|
64
|
+
|
|
65
|
+
# 2. Skills capabilities (search Agent Skills format)
|
|
66
|
+
skill_prompt = None
|
|
67
|
+
|
|
68
|
+
# Try Agent Skills format (recommended)
|
|
69
|
+
for adapter in self.agent_skills:
|
|
70
|
+
if adapter.parsed and hasattr(adapter.parsed, "triggers"):
|
|
71
|
+
query_lower = user_query.lower()
|
|
72
|
+
for trigger in adapter.parsed.triggers:
|
|
73
|
+
if trigger.lower() in query_lower or query_lower in trigger.lower():
|
|
74
|
+
skill_prompt = adapter.to_system_prompt()
|
|
75
|
+
break
|
|
76
|
+
if skill_prompt:
|
|
77
|
+
break
|
|
78
|
+
|
|
79
|
+
if skill_prompt:
|
|
80
|
+
prompt_parts.append("# ACTIVE SKILL")
|
|
81
|
+
prompt_parts.append(skill_prompt)
|
|
82
|
+
prompt_parts.append("")
|
|
83
|
+
|
|
84
|
+
# 3. Shared context from other agents (if enabled)
|
|
85
|
+
if include_history:
|
|
86
|
+
# Reserve tokens for history
|
|
87
|
+
reserved_tokens = max_context_tokens // 2
|
|
88
|
+
context_history = self.shared_context.get_compressed_context(
|
|
89
|
+
agent=None, # Include all agents
|
|
90
|
+
max_tokens=reserved_tokens,
|
|
91
|
+
)
|
|
92
|
+
|
|
93
|
+
if context_history:
|
|
94
|
+
prompt_parts.append("# SHARED CONTEXT (from other agents)")
|
|
95
|
+
prompt_parts.append(context_history)
|
|
96
|
+
prompt_parts.append("")
|
|
97
|
+
|
|
98
|
+
# 4. User query
|
|
99
|
+
prompt_parts.append("# USER REQUEST")
|
|
100
|
+
prompt_parts.append(user_query)
|
|
101
|
+
|
|
102
|
+
full_prompt = "\n".join(prompt_parts)
|
|
103
|
+
|
|
104
|
+
# Store this interaction in shared context
|
|
105
|
+
self.shared_context.add_entry(
|
|
106
|
+
content=f"Query: {user_query[:200]}...", agent=agent_name, references=[]
|
|
107
|
+
)
|
|
108
|
+
|
|
109
|
+
return full_prompt
|
|
110
|
+
|
|
111
|
+
def export_config(self, output_path: Path) -> None:
|
|
112
|
+
"""Export unified context configuration.
|
|
113
|
+
|
|
114
|
+
Args:
|
|
115
|
+
output_path: Path to save configuration
|
|
116
|
+
"""
|
|
117
|
+
config = {
|
|
118
|
+
"skills_path": ".agent/skills/",
|
|
119
|
+
"agents_md_path": str(self.agents_md.file_path)
|
|
120
|
+
if self.agents_md.file_path
|
|
121
|
+
else None,
|
|
122
|
+
"context_storage": str(self.shared_context.storage_path)
|
|
123
|
+
if self.shared_context.storage_path
|
|
124
|
+
else None,
|
|
125
|
+
"capabilities": self.get_unified_capabilities(),
|
|
126
|
+
}
|
|
127
|
+
|
|
128
|
+
output_path.write_text(json.dumps(config, indent=2))
|
|
129
|
+
|
|
130
|
+
def record_agent_response(
|
|
131
|
+
self, agent_name: str, response: str, references: list | None = None
|
|
132
|
+
) -> str:
|
|
133
|
+
"""Record agent response in shared context.
|
|
134
|
+
|
|
135
|
+
Args:
|
|
136
|
+
agent_name: Name of responding agent
|
|
137
|
+
response: Agent's response
|
|
138
|
+
references: IDs of context entries referenced
|
|
139
|
+
|
|
140
|
+
Returns:
|
|
141
|
+
Entry ID
|
|
142
|
+
"""
|
|
143
|
+
return self.shared_context.add_entry(
|
|
144
|
+
content=response, agent=agent_name, references=references or []
|
|
145
|
+
)
|
|
146
|
+
|
|
147
|
+
def get_unified_capabilities(self) -> dict[str, Any]:
|
|
148
|
+
"""Get all capabilities from all sources.
|
|
149
|
+
|
|
150
|
+
Returns:
|
|
151
|
+
Combined capabilities dictionary
|
|
152
|
+
"""
|
|
153
|
+
skills_capabilities = []
|
|
154
|
+
available_skills = []
|
|
155
|
+
|
|
156
|
+
for adapter in self.agent_skills:
|
|
157
|
+
if adapter.parsed:
|
|
158
|
+
skills_capabilities.append(adapter.to_adk_capabilities())
|
|
159
|
+
available_skills.append(adapter.parsed.name)
|
|
160
|
+
|
|
161
|
+
return {
|
|
162
|
+
"skills": skills_capabilities,
|
|
163
|
+
"project_context": self.agents_md.to_adk_capabilities(),
|
|
164
|
+
"shared_context_stats": self.shared_context.get_stats(),
|
|
165
|
+
"available_skills": available_skills,
|
|
166
|
+
"agents_md_loaded": self.agents_md.config is not None,
|
|
167
|
+
}
|
|
168
|
+
|
|
169
|
+
def get_token_stats(self) -> dict[str, Any]:
|
|
170
|
+
"""Get token usage statistics across all sources.
|
|
171
|
+
|
|
172
|
+
Returns:
|
|
173
|
+
Token statistics
|
|
174
|
+
"""
|
|
175
|
+
context_stats = self.shared_context.get_stats()
|
|
176
|
+
|
|
177
|
+
# Estimate tokens from AGENTS.md
|
|
178
|
+
agents_md_tokens = 0
|
|
179
|
+
if self.agents_md.config:
|
|
180
|
+
agents_md_prompt = self.agents_md.to_system_prompt()
|
|
181
|
+
agents_md_tokens = len(agents_md_prompt) // 4
|
|
182
|
+
|
|
183
|
+
# Estimate tokens from Agent Skills
|
|
184
|
+
skills_tokens = 0
|
|
185
|
+
for adapter in self.agent_skills:
|
|
186
|
+
skill_prompt = adapter.to_system_prompt()
|
|
187
|
+
skills_tokens += len(skill_prompt) // 4
|
|
188
|
+
|
|
189
|
+
return {
|
|
190
|
+
"agents_md_tokens": agents_md_tokens,
|
|
191
|
+
"skills_tokens": skills_tokens,
|
|
192
|
+
"shared_context_tokens": context_stats["total_tokens"],
|
|
193
|
+
"tokens_saved_by_compression": context_stats["tokens_saved"],
|
|
194
|
+
"total_potential_tokens": agents_md_tokens
|
|
195
|
+
+ skills_tokens
|
|
196
|
+
+ context_stats["total_tokens"],
|
|
197
|
+
"compression_ratio": context_stats["compression_ratio"],
|
|
198
|
+
}
|
|
199
|
+
|
|
200
|
+
def optimize_context(self):
|
|
201
|
+
"""Trigger context optimization (compression, summarization).
|
|
202
|
+
|
|
203
|
+
This is called automatically but can be triggered manually.
|
|
204
|
+
"""
|
|
205
|
+
# The shared context manager handles this internally
|
|
206
|
+
self.shared_context._auto_compress()
|
|
207
|
+
|
|
208
|
+
def export_unified_config(self, output_path: Path):
|
|
209
|
+
"""Export unified configuration to JSON.
|
|
210
|
+
|
|
211
|
+
Args:
|
|
212
|
+
output_path: Path to save configuration
|
|
213
|
+
"""
|
|
214
|
+
|
|
215
|
+
config = {
|
|
216
|
+
"capabilities": self.get_unified_capabilities(),
|
|
217
|
+
"token_stats": self.get_token_stats(),
|
|
218
|
+
"context_graph": self.shared_context.get_context_graph(),
|
|
219
|
+
}
|
|
220
|
+
|
|
221
|
+
output_path.write_text(json.dumps(config, indent=2))
|
|
222
|
+
|
|
223
|
+
def visualize_context(self):
|
|
224
|
+
"""Launch interactive context visualization."""
|
|
225
|
+
|
|
226
|
+
from uacs.visualization import ContextVisualizer
|
|
227
|
+
|
|
228
|
+
def create_snapshot(self, name: str) -> dict[str, Any]:
|
|
229
|
+
"""Create snapshot of current context state.
|
|
230
|
+
|
|
231
|
+
Args:
|
|
232
|
+
name: Snapshot name
|
|
233
|
+
|
|
234
|
+
Returns:
|
|
235
|
+
Snapshot data
|
|
236
|
+
"""
|
|
237
|
+
snapshot = {
|
|
238
|
+
"name": name,
|
|
239
|
+
"timestamp": __import__("datetime").datetime.now().isoformat(),
|
|
240
|
+
"capabilities": self.get_unified_capabilities(),
|
|
241
|
+
"token_stats": self.get_token_stats(),
|
|
242
|
+
"context_entries": len(self.shared_context.entries),
|
|
243
|
+
"summaries": len(self.shared_context.summaries),
|
|
244
|
+
}
|
|
245
|
+
|
|
246
|
+
# Save snapshot
|
|
247
|
+
snapshot_path = self.shared_context.storage_path / f"snapshot_{name}.json"
|
|
248
|
+
|
|
249
|
+
snapshot_path.write_text(json.dumps(snapshot, indent=2))
|
|
250
|
+
|
|
251
|
+
return snapshot
|
|
252
|
+
|
|
253
|
+
def get_compression_report(self) -> str:
|
|
254
|
+
"""Get detailed compression report.
|
|
255
|
+
|
|
256
|
+
Returns:
|
|
257
|
+
Formatted report string
|
|
258
|
+
"""
|
|
259
|
+
stats = self.shared_context.get_stats()
|
|
260
|
+
token_stats = self.get_token_stats()
|
|
261
|
+
|
|
262
|
+
report = f"""
|
|
263
|
+
# Context Compression Report
|
|
264
|
+
|
|
265
|
+
## Overall Statistics
|
|
266
|
+
- Total Entries: {stats["entry_count"]}
|
|
267
|
+
- Summaries Created: {stats["summary_count"]}
|
|
268
|
+
- Compression Ratio: {stats["compression_ratio"]}
|
|
269
|
+
- Storage Size: {stats["storage_size_mb"]:.2f} MB
|
|
270
|
+
|
|
271
|
+
## Token Savings
|
|
272
|
+
- Original Tokens: {stats["total_tokens"] + stats["tokens_saved"]:,}
|
|
273
|
+
- Current Tokens: {stats["total_tokens"]:,}
|
|
274
|
+
- Saved by Compression: {stats["tokens_saved"]:,}
|
|
275
|
+
- Effective Reduction: {stats["compression_ratio"]}
|
|
276
|
+
|
|
277
|
+
## Source Breakdown
|
|
278
|
+
- AGENTS.md: {token_stats["agents_md_tokens"]:,} tokens
|
|
279
|
+
- Agent Skills: {token_stats["skills_tokens"]:,} tokens (across {len(self.skills.skills)} skills)
|
|
280
|
+
- Shared Context: {token_stats["shared_context_tokens"]:,} tokens
|
|
281
|
+
|
|
282
|
+
## Recommendations
|
|
283
|
+
"""
|
|
284
|
+
|
|
285
|
+
# Add recommendations based on stats
|
|
286
|
+
if stats["entry_count"] > 20:
|
|
287
|
+
report += "- Consider creating more summaries to reduce token usage\n"
|
|
288
|
+
|
|
289
|
+
if stats["compression_ratio"] == "0%":
|
|
290
|
+
report += (
|
|
291
|
+
"- No compression active yet - will auto-compress after 10+ entries\n"
|
|
292
|
+
)
|
|
293
|
+
|
|
294
|
+
if token_stats["shared_context_tokens"] > 4000:
|
|
295
|
+
report += "- Shared context is large - consider reviewing old entries\n"
|
|
296
|
+
|
|
297
|
+
return report
|
|
298
|
+
|
|
299
|
+
def get_capabilities(self, agent_name: str | None = None) -> dict[str, Any]:
|
|
300
|
+
"""Get available capabilities for an agent.
|
|
301
|
+
|
|
302
|
+
Args:
|
|
303
|
+
agent_name: Optional agent name to filter capabilities
|
|
304
|
+
|
|
305
|
+
Returns:
|
|
306
|
+
Dictionary of capabilities
|
|
307
|
+
"""
|
|
308
|
+
skill_names = [s.parsed.name for s in self.agent_skills if s.parsed]
|
|
309
|
+
return {
|
|
310
|
+
"skills": skill_names,
|
|
311
|
+
"agents_md_loaded": self.agents_md.exists(),
|
|
312
|
+
"context_entries": len(self.shared_context.entries),
|
|
313
|
+
}
|
|
314
|
+
|
|
315
|
+
def build_context(
|
|
316
|
+
self, query: str, agent_name: str, max_tokens: int | None = None
|
|
317
|
+
) -> str:
|
|
318
|
+
"""Build context for an agent query.
|
|
319
|
+
|
|
320
|
+
Args:
|
|
321
|
+
query: The query or task
|
|
322
|
+
agent_name: Agent name
|
|
323
|
+
max_tokens: Optional token limit
|
|
324
|
+
|
|
325
|
+
Returns:
|
|
326
|
+
Formatted context string
|
|
327
|
+
"""
|
|
328
|
+
return self.build_agent_prompt(
|
|
329
|
+
user_query=query,
|
|
330
|
+
agent_name=agent_name,
|
|
331
|
+
max_context_tokens=max_tokens or 4000,
|
|
332
|
+
)
|
uacs/mcp_server_entry.py
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
"""Entry point for UACS MCP Server."""
|
|
2
|
+
import argparse
|
|
3
|
+
import asyncio
|
|
4
|
+
import sys
|
|
5
|
+
import os
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
async def run_sse(port: int = 3000):
    """Run the server using SSE transport.

    Args:
        port: TCP port the HTTP/SSE server listens on (default 3000).

    Serves three routes:
        /sse       - SSE stream the MCP client connects to
        /messages  - POST endpoint the client sends messages to
        /health    - simple liveness probe returning {"status": "ok"}
    """
    # Imports are deferred so uvicorn/starlette are only required when the
    # SSE transport is actually selected (stdio mode has no web deps).
    import uvicorn
    from starlette.applications import Starlette
    from starlette.responses import JSONResponse, Response
    from starlette.routing import Route, Mount
    from mcp.server.sse import SseServerTransport
    from uacs.protocols.mcp.skills_server import server

    # Initialize SSE transport
    # Note: '/messages/' must be a relative path for the client to post to.
    sse = SseServerTransport("/messages/")

    async def handle_sse(request):
        """Handle SSE connection."""
        # NOTE(review): request._send is a private Starlette attribute; this
        # mirrors the MCP SDK's SSE example but may break on Starlette
        # upgrades - confirm against the mcp SDK docs.
        async with sse.connect_sse(
            request.scope, request.receive, request._send
        ) as streams:
            # Run the MCP protocol over the (read, write) stream pair until
            # the client disconnects.
            await server.run(
                streams[0], streams[1], server.create_initialization_options()
            )
        return Response()

    async def health_check(request):
        """Health check endpoint."""
        return JSONResponse({"status": "ok"})

    routes = [
        Route("/sse", endpoint=handle_sse),
        Mount("/messages", app=sse.handle_post_message),
        Route("/health", endpoint=health_check),
    ]

    app = Starlette(routes=routes)

    # Configure uvicorn
    # NOTE(review): binds on all interfaces (0.0.0.0) with no auth; confirm
    # this is intended for non-local deployments.
    config = uvicorn.Config(app, host="0.0.0.0", port=port, log_level="info")
    server_instance = uvicorn.Server(config)
    await server_instance.serve()
|
|
47
|
+
|
|
48
|
+
async def run_stdio():
    """Run the server using stdio transport.

    Bridges the MCP server to this process's stdin/stdout - the transport
    MCP clients use when they spawn the server as a subprocess. Blocks
    until stdin closes (client disconnect).
    """
    from uacs.protocols.mcp.skills_server import server
    from mcp.server.stdio import stdio_server

    async with stdio_server() as (read_stream, write_stream):
        await server.run(
            read_stream, write_stream, server.create_initialization_options()
        )
|
|
57
|
+
|
|
58
|
+
def main():
    """Parse CLI arguments and run the MCP server on the chosen transport.

    The default transport may be supplied via the UACS_TRANSPORT environment
    variable; command-line --transport overrides it.
    """
    parser = argparse.ArgumentParser(description="UACS MCP Server")
    default_transport = os.environ.get("UACS_TRANSPORT", "stdio")
    if default_transport not in ("stdio", "sse"):
        # argparse does not validate `default` against `choices`, so an
        # invalid env value would silently bypass validation; fall back.
        default_transport = "stdio"
    parser.add_argument(
        "--transport",
        choices=["stdio", "sse"],
        default=default_transport,
        help="Transport mode",
    )
    parser.add_argument("--port", type=int, default=3000, help="Port for SSE server")

    # Parse args (this will handle --help and exit automatically)
    args = parser.parse_args()

    # If we get here, args are valid
    try:
        if args.transport == "sse":
            asyncio.run(run_sse(args.port))
        else:
            asyncio.run(run_stdio())
    except KeyboardInterrupt:
        # Clean shutdown on Ctrl-C
        sys.exit(0)
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
|
uacs/memory/__init__.py
ADDED
|
@@ -0,0 +1,255 @@
|
|
|
1
|
+
"""Simple JSON-based memory store with project/global hierarchy."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
import re
|
|
7
|
+
from collections.abc import Mapping
|
|
8
|
+
from dataclasses import dataclass
|
|
9
|
+
from datetime import UTC, datetime, timedelta
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
from typing import Any
|
|
12
|
+
|
|
13
|
+
VALID_SCOPES = {"project", "global"}
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def _utcnow_iso() -> str:
|
|
17
|
+
"""Return current UTC timestamp in ISO format."""
|
|
18
|
+
return datetime.now(UTC).isoformat()
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def _sanitize_key(key: str) -> str:
|
|
22
|
+
"""Convert arbitrary keys into safe filenames."""
|
|
23
|
+
cleaned = re.sub(r"[^A-Za-z0-9._-]+", "-", key.strip())
|
|
24
|
+
return cleaned or "memory-entry"
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
@dataclass
class MemoryEntry:
    """Structured representation of a memory entry backed by a JSON file."""

    key: str  # logical (unsanitized) key supplied by the caller
    scope: str  # "project" or "global"
    data: dict[str, Any]  # user payload
    created_at: str  # ISO-8601 timestamp
    updated_at: str  # ISO-8601 timestamp
    path: Path  # backing JSON file on disk

    @classmethod
    def from_file(cls, path: Path, scope: str) -> MemoryEntry | None:
        """Load a memory entry from a JSON file.

        Returns None when the file is unreadable, holds invalid JSON, or
        holds a non-object top-level value. (Previously a valid-JSON
        non-dict payload - e.g. a list - raised AttributeError on raw.get.)

        Args:
            path: JSON file to read.
            scope: Scope to assume when the file carries no _scope field.
        """
        try:
            raw = json.loads(path.read_text())
        except (OSError, json.JSONDecodeError):
            # Treat disappeared/unreadable files the same as corrupt JSON.
            return None
        if not isinstance(raw, dict):
            # Valid JSON but not an object - not a memory entry.
            return None

        data_field = raw.get("data")
        if data_field is None:
            # Legacy entries stored the payload at top level; recover it by
            # stripping the underscore-prefixed metadata keys.
            data_field = {k: v for k, v in raw.items() if not k.startswith("_")}

        now = _utcnow_iso()
        return cls(
            key=raw.get("_key", path.stem),
            scope=raw.get("_scope", scope),
            data=data_field if isinstance(data_field, dict) else {},
            created_at=raw.get("_created", raw.get("_timestamp", now)),
            updated_at=raw.get("_updated", raw.get("_timestamp", now)),
            path=path,
        )

    def to_dict(self) -> dict[str, Any]:
        """Serialize entry to dictionary for JSON storage."""
        return {
            "_key": self.key,
            "_scope": self.scope,
            "_created": self.created_at,
            "_updated": self.updated_at,
            "data": self.data,
        }
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
class SimpleMemoryStore:
    """Simple JSON-based key-value memory with scoped lookup.

    Entries are stored one-per-file as <root>/knowledge/<sanitized-key>.json,
    where <root> is either the project store (<project>/.state/memory) or the
    global store (~/.multi-agent/memory by default). Lookups with
    scope="both" search the project scope first, then fall back to global.
    """

    def __init__(
        self,
        project_path: Path,
        global_path: Path | None = None,
    ):
        """Initialize store roots.

        Args:
            project_path: Project directory; entries go under .state/memory.
            global_path: Optional override for the global store root.
        """
        self.project_root = project_path / ".state" / "memory"
        self.global_root = global_path or Path.home() / ".multi-agent" / "memory"

    def init_storage(self, scope: str = "project") -> Path:
        """Initialize storage directories and config for a scope.

        Idempotent: directories are created with exist_ok and the config.json
        marker is only written once.

        Returns:
            The knowledge/ directory for the scope.
        """
        resolved_scope = self._validate_scope(scope)
        scope_dir = self._scope_dir(resolved_scope)
        scope_dir.mkdir(parents=True, exist_ok=True)

        # One-time marker describing the store format.
        config_path = scope_dir.parent / "config.json"
        if not config_path.exists():
            payload = {
                "storage": "simple",
                "version": "1.0",
                "initialized_at": _utcnow_iso(),
                "scope": resolved_scope,
            }
            config_path.write_text(json.dumps(payload, indent=2))

        return scope_dir

    def store(
        self,
        key: str,
        value: Mapping[str, Any],
        scope: str = "project",
    ) -> MemoryEntry:
        """Store a memory entry and return the created entry.

        Overwriting an existing key preserves its original creation time.

        Raises:
            ValueError: If value is not a mapping or scope is invalid.
        """
        # Validate inputs BEFORE touching the filesystem (the original
        # created directories first and only then rejected bad values).
        if not isinstance(value, Mapping):
            raise ValueError("Memory value must be a mapping")
        resolved_scope = self._validate_scope(scope)
        sanitized_key = _sanitize_key(key)
        scope_dir = self.init_storage(resolved_scope)

        file_path = scope_dir / f"{sanitized_key}.json"
        now = _utcnow_iso()

        # Keep the original creation timestamp when updating an entry.
        existing_entry = self._load_entry(file_path, resolved_scope)
        created_at = existing_entry.created_at if existing_entry else now

        entry = MemoryEntry(
            key=key,
            scope=resolved_scope,
            data=dict(value),
            created_at=created_at,
            updated_at=now,
            path=file_path,
        )

        file_path.write_text(json.dumps(entry.to_dict(), indent=2))
        return entry

    def retrieve(self, key: str, scope: str = "both") -> MemoryEntry | None:
        """Retrieve entry by key honoring project→global lookup.

        Args:
            key: Entry key (sanitized the same way as in store()).
            scope: "project", "global", or "both" (project wins).
        """
        for current_scope in self._resolve_scopes(scope):
            file_path = self._scope_dir(current_scope) / f"{_sanitize_key(key)}.json"
            entry = self._load_entry(file_path, current_scope)
            if entry:
                return entry

        return None

    def delete(self, key: str, scope: str = "project") -> bool:
        """Delete entry for a specific scope. Returns True if a file was removed."""
        resolved_scope = self._validate_scope(scope)
        file_path = self._scope_dir(resolved_scope) / f"{_sanitize_key(key)}.json"
        if file_path.exists():
            file_path.unlink()
            return True
        return False

    def list_entries(self, scope: str = "both") -> list[MemoryEntry]:
        """List all entries for the provided scope ("both" concatenates)."""
        entries: list[MemoryEntry] = []
        for current_scope in self._resolve_scopes(scope):
            scope_dir = self._scope_dir(current_scope)
            if not scope_dir.exists():
                continue
            for file_path in scope_dir.glob("*.json"):
                entry = self._load_entry(file_path, current_scope)
                if entry:
                    entries.append(entry)
        return entries

    def search(self, query: str, scope: str = "both") -> list[MemoryEntry]:
        """Search entries by case-insensitive substring match on key or data."""
        needle = query.lower()
        results = []
        for entry in self.list_entries(scope):
            # Serialize the payload so nested values are searchable too.
            haystack = json.dumps(entry.data).lower()
            if needle in entry.key.lower() or needle in haystack:
                results.append(entry)
        return results

    def clean(self, older_than_days: int = 30, scope: str = "project") -> int:
        """Remove entries older than provided days. Returns deleted count."""
        resolved_scope = self._validate_scope(scope)
        cutoff = datetime.now(UTC) - timedelta(days=older_than_days)
        deleted = 0

        for entry in self.list_entries(resolved_scope):
            created_time = self._parse_timestamp(entry.created_at, entry.path)
            if created_time < cutoff and entry.path.exists():
                entry.path.unlink()
                deleted += 1

        return deleted

    def get_stats(self) -> dict[str, dict[str, Any]]:
        """Return statistics for project and global scopes."""
        stats = {}
        for scope in ("project", "global"):
            scope_dir = self._scope_dir(scope)
            entry_files = list(scope_dir.glob("*.json")) if scope_dir.exists() else []
            size_bytes = sum(f.stat().st_size for f in entry_files)

            latest_updated = None
            entry_count = 0
            for entry in (self._load_entry(f, scope) for f in entry_files):
                if not entry:
                    continue
                entry_count += 1
                updated_time = self._parse_timestamp(entry.updated_at, entry.path)
                if latest_updated is None or updated_time > latest_updated:
                    latest_updated = updated_time

            stats[scope] = {
                "entries": entry_count,
                "size_bytes": size_bytes,
                "last_updated": latest_updated.isoformat() if latest_updated else None,
                "path": str(scope_dir),
            }

        return stats

    def _resolve_scopes(self, scope: str) -> tuple[str, ...]:
        """Expand "both" into (project, global); validate anything else."""
        if scope == "both":
            return ("project", "global")
        return (self._validate_scope(scope),)

    @staticmethod
    def _parse_timestamp(value: str, fallback_path: Path) -> datetime:
        """Parse a stored ISO timestamp as a timezone-aware datetime.

        Falls back to the backing file's mtime when the value is not valid
        ISO-8601; naive results are assumed to be UTC. Uses
        datetime.fromtimestamp(..., tz=UTC) instead of the deprecated,
        naive datetime.utcfromtimestamp().
        """
        try:
            parsed = datetime.fromisoformat(value)
        except ValueError:
            parsed = datetime.fromtimestamp(fallback_path.stat().st_mtime, tz=UTC)
        if parsed.tzinfo is None:
            parsed = parsed.replace(tzinfo=UTC)
        return parsed

    def _scope_dir(self, scope: str) -> Path:
        """Return the directory for a scope (always within knowledge/)."""
        base = self.project_root if scope == "project" else self.global_root
        return base / "knowledge"

    def _load_entry(self, file_path: Path, scope: str) -> MemoryEntry | None:
        """Load entry from file; returns None for missing or invalid files."""
        if not file_path.exists():
            return None
        return MemoryEntry.from_file(file_path, scope)

    def _validate_scope(self, scope: str) -> str:
        """Ensure scope is recognized; raise ValueError otherwise."""
        if scope not in VALID_SCOPES:
            allowed = ", ".join(sorted(VALID_SCOPES))
            raise ValueError(f"Invalid scope: {scope}. Use one of: {allowed}")
        return scope
|
|
253
|
+
|
|
254
|
+
|
|
255
|
+
__all__ = ["MemoryEntry", "SimpleMemoryStore"]
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
"""Minimal local package manager for UACS.
|
|
2
|
+
|
|
3
|
+
Provides simple package management (install, list, validate, remove) without
|
|
4
|
+
remote discovery features. Inspired by GitHub CLI extensions pattern.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from uacs.packages.manager import PackageManager
|
|
8
|
+
from uacs.packages.models import InstalledPackage, PackageSource
|
|
9
|
+
from uacs.packages.sources import (
|
|
10
|
+
GitCloneError,
|
|
11
|
+
InvalidSourceError,
|
|
12
|
+
LocalCopyError,
|
|
13
|
+
PackageSourceError,
|
|
14
|
+
PackageSourceHandler,
|
|
15
|
+
)
|
|
16
|
+
|
|
17
|
+
__all__ = [
|
|
18
|
+
"GitCloneError",
|
|
19
|
+
"InstalledPackage",
|
|
20
|
+
"InvalidSourceError",
|
|
21
|
+
"LocalCopyError",
|
|
22
|
+
"PackageManager",
|
|
23
|
+
"PackageSource",
|
|
24
|
+
"PackageSourceError",
|
|
25
|
+
"PackageSourceHandler",
|
|
26
|
+
]
|