universal_agent_context-0.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- uacs/__init__.py +12 -0
- uacs/adapters/__init__.py +19 -0
- uacs/adapters/agent_skill_adapter.py +202 -0
- uacs/adapters/agents_md_adapter.py +330 -0
- uacs/adapters/base.py +261 -0
- uacs/adapters/clinerules_adapter.py +39 -0
- uacs/adapters/cursorrules_adapter.py +39 -0
- uacs/api.py +262 -0
- uacs/cli/__init__.py +6 -0
- uacs/cli/context.py +349 -0
- uacs/cli/main.py +195 -0
- uacs/cli/mcp.py +115 -0
- uacs/cli/memory.py +142 -0
- uacs/cli/packages.py +309 -0
- uacs/cli/skills.py +144 -0
- uacs/cli/utils.py +24 -0
- uacs/config/repositories.yaml +26 -0
- uacs/context/__init__.py +0 -0
- uacs/context/agent_context.py +406 -0
- uacs/context/shared_context.py +661 -0
- uacs/context/unified_context.py +332 -0
- uacs/mcp_server_entry.py +80 -0
- uacs/memory/__init__.py +5 -0
- uacs/memory/simple_memory.py +255 -0
- uacs/packages/__init__.py +26 -0
- uacs/packages/manager.py +413 -0
- uacs/packages/models.py +60 -0
- uacs/packages/sources.py +270 -0
- uacs/protocols/__init__.py +5 -0
- uacs/protocols/mcp/__init__.py +8 -0
- uacs/protocols/mcp/manager.py +77 -0
- uacs/protocols/mcp/skills_server.py +700 -0
- uacs/skills_validator.py +367 -0
- uacs/utils/__init__.py +5 -0
- uacs/utils/paths.py +24 -0
- uacs/visualization/README.md +132 -0
- uacs/visualization/__init__.py +36 -0
- uacs/visualization/models.py +195 -0
- uacs/visualization/static/index.html +857 -0
- uacs/visualization/storage.py +402 -0
- uacs/visualization/visualization.py +328 -0
- uacs/visualization/web_server.py +364 -0
- universal_agent_context-0.2.0.dist-info/METADATA +873 -0
- universal_agent_context-0.2.0.dist-info/RECORD +47 -0
- universal_agent_context-0.2.0.dist-info/WHEEL +4 -0
- universal_agent_context-0.2.0.dist-info/entry_points.txt +2 -0
- universal_agent_context-0.2.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,406 @@
"""Agent-specific context adaptation for chat orchestration.

This module provides agent-specific context profiles with customized token allocation
and priority-based content selection.
"""

from pathlib import Path
from typing import Any

from uacs.adapters.agent_skill_adapter import AgentSkillAdapter
from uacs.adapters.agents_md_adapter import AgentsMDAdapter
from uacs.context.shared_context import SharedContextManager
from uacs.utils.paths import get_project_root

# Optional MAOS integration
try:
    from multi_agent_cli.maos.conversation import ConversationManager

    MAOS_AVAILABLE = True
except ImportError:
    MAOS_AVAILABLE = False
    ConversationManager = None  # type: ignore[assignment,misc]

# Default context profiles for each agent
AGENT_CONTEXT_PROFILES = {
    "claude": {
        "priority": ["reasoning", "analysis", "architecture"],
        "token_allocation": {
            "agents_md": 0.30,  # 30% - High project context
            "skills": 0.20,  # 20% - Relevant skills
            "conversation": 0.30,  # 30% - Full conversation history
            "shared_context": 0.20,  # 20% - Shared memory
        },
        "history_depth": 10,  # More history for analysis
        "compress_threshold": 0.5,  # Less aggressive compression
    },
    "gemini": {
        "priority": ["code", "implementation", "examples"],
        "token_allocation": {
            "agents_md": 0.15,  # 15% - Less project context
            "skills": 0.35,  # 35% - More skills/capabilities
            "conversation": 0.25,  # 25% - Recent conversation
            "shared_context": 0.25,  # 25% - Shared memory
        },
        "history_depth": 5,  # Recent context only
        "compress_threshold": 0.7,  # More aggressive compression
    },
    "copilot": {
        "priority": ["code", "shell", "quick-answers"],
        "token_allocation": {
            "agents_md": 0.10,  # 10% - Minimal project context
            "skills": 0.40,  # 40% - Heavy skills focus
            "conversation": 0.30,  # 30% - Recent turns
            "shared_context": 0.20,  # 20% - Shared memory
        },
        "history_depth": 3,  # Very recent only
        "compress_threshold": 0.8,  # Aggressive compression
    },
    "openai": {
        "priority": ["general", "creative", "structured"],
        "token_allocation": {
            "agents_md": 0.25,  # 25% - Balanced
            "skills": 0.25,  # 25% - Balanced
            "conversation": 0.25,  # 25% - Balanced
            "shared_context": 0.25,  # 25% - Balanced
        },
        "history_depth": 7,  # Moderate history
        "compress_threshold": 0.6,  # Moderate compression
    },
}

# Pre-defined context strategies
CONTEXT_STRATEGIES = {
    "code_heavy": {
        "token_allocation": {
            "agents_md": 0.10,
            "skills": 0.50,
            "conversation": 0.25,
            "shared_context": 0.15,
        },
        "priority": ["code", "implementation", "examples"],
    },
    "analysis_heavy": {
        "token_allocation": {
            "agents_md": 0.35,
            "skills": 0.15,
            "conversation": 0.35,
            "shared_context": 0.15,
        },
        "priority": ["reasoning", "analysis", "architecture"],
    },
    "balanced": {
        "token_allocation": {
            "agents_md": 0.25,
            "skills": 0.25,
            "conversation": 0.25,
            "shared_context": 0.25,
        },
        "priority": ["general", "balanced"],
    },
}


class AgentContextAdapter:
    """Builds agent-specific context based on profiles."""

    def __init__(
        self,
        skills_path: Path | None = None,
        agents_md_path: Path | None = None,
        context_storage: Path | None = None,
    ):
        """Initialize agent context adapter.

        Args:
            skills_path: Path to agent skills directory (ignored, using project root for discovery)
            agents_md_path: Path to AGENTS.md or project root
            context_storage: Path for shared context storage
        """
        self.project_root = get_project_root()
        self.agents_md = AgentsMDAdapter(agents_md_path)
        self.shared_context = SharedContextManager(context_storage)
        self.custom_profiles: dict[str, dict[str, Any]] = {}

    def get_profile(self, agent_name: str) -> dict[str, Any]:
        """Get context profile for an agent.

        Args:
            agent_name: Name of the agent

        Returns:
            Profile dictionary with token allocation and priorities
        """
        # Check for custom profile first
        if agent_name in self.custom_profiles:
            return self.custom_profiles[agent_name]

        # Fall back to default profile
        return AGENT_CONTEXT_PROFILES.get(agent_name, AGENT_CONTEXT_PROFILES["openai"])

    def set_custom_profile(self, agent_name: str, profile: dict[str, Any]):
        """Set a custom profile for an agent.

        Args:
            agent_name: Name of the agent
            profile: Custom profile dictionary
        """
        self.custom_profiles[agent_name] = profile

    def apply_strategy(self, agent_name: str, strategy_name: str):
        """Apply a pre-defined context strategy to an agent.

        Args:
            agent_name: Name of the agent
            strategy_name: Name of the strategy
        """
        if strategy_name not in CONTEXT_STRATEGIES:
            raise ValueError(f"Unknown strategy: {strategy_name}")

        strategy = CONTEXT_STRATEGIES[strategy_name]
        base_profile = self.get_profile(agent_name).copy()

        # Update profile with strategy
        base_profile.update(strategy)
        self.custom_profiles[agent_name] = base_profile

    def build_context(
        self,
        agent_name: str,
        user_query: str,
        conversation: ConversationManager,
        max_tokens: int = 4000,
        adjustments: dict[str, Any] | None = None,
    ) -> str:
        """Build agent-specific context.

        Args:
            agent_name: Name of agent receiving context
            user_query: User's current query
            conversation: Conversation manager
            max_tokens: Maximum tokens for context
            adjustments: Optional runtime adjustments

        Returns:
            Formatted context string
        """
        profile = self.get_profile(agent_name).copy()

        # Apply conversation-specific adjustments
        conv_adjustments = conversation.get_agent_context_adjustments(agent_name)
        if conv_adjustments:
            self._apply_adjustments(profile, conv_adjustments)

        # Apply runtime adjustments
        if adjustments:
            self._apply_adjustments(profile, adjustments)

        # Allocate tokens based on profile
        tokens = self._allocate_tokens(profile, max_tokens)

        # Build layered context
        context_parts = []

        # Layer 1: AGENTS.md project context
        if tokens["agents_md"] > 0:
            agents_context = self._get_project_context(max_tokens=tokens["agents_md"])
            if agents_context:
                context_parts.append(("PROJECT CONTEXT", agents_context))

        # Layer 2: Skills (prioritize by agent profile)
        if tokens["skills"] > 0:
            skills_context = self._get_skills_context(
                query=user_query,
                priorities=profile.get("priority", []),
                max_tokens=tokens["skills"],
            )
            if skills_context:
                context_parts.append(("CAPABILITIES", skills_context))

        # Layer 3: Conversation history (agent-specific depth)
        if tokens["conversation"] > 0:
            conv_context = conversation.get_context_for_agent(
                agent_name=agent_name,
                max_turns=profile.get("history_depth", 5),
                max_tokens=tokens["conversation"],
                compress_threshold=profile.get("compress_threshold", 0.6),
            )
            if conv_context:
                context_parts.append(("CONVERSATION HISTORY", conv_context))

        # Layer 4: Shared context (filtered by relevance)
        if tokens["shared_context"] > 0:
            shared = self.shared_context.get_compressed_context(
                agent=agent_name, max_tokens=tokens["shared_context"]
            )
            if shared:
                context_parts.append(("SHARED CONTEXT", shared))

        return self._format_context(context_parts)

    def _allocate_tokens(
        self, profile: dict[str, Any], max_tokens: int
    ) -> dict[str, int]:
        """Allocate tokens based on profile.

        Args:
            profile: Agent profile
            max_tokens: Maximum tokens available

        Returns:
            Dictionary with token allocation per layer
        """
        allocation = profile.get(
            "token_allocation",
            {
                "agents_md": 0.25,
                "skills": 0.25,
                "conversation": 0.25,
                "shared_context": 0.25,
            },
        )

        return {
            "agents_md": int(max_tokens * allocation.get("agents_md", 0.25)),
            "skills": int(max_tokens * allocation.get("skills", 0.25)),
            "conversation": int(max_tokens * allocation.get("conversation", 0.25)),
            "shared_context": int(max_tokens * allocation.get("shared_context", 0.25)),
        }

    def _get_project_context(self, max_tokens: int) -> str:
        """Get project context from AGENTS.md.

        Args:
            max_tokens: Maximum tokens for project context

        Returns:
            Formatted project context
        """
        if not self.agents_md.config:
            return ""

        prompt = self.agents_md.to_system_prompt()

        # Truncate if over token limit
        max_chars = max_tokens * 4  # Rough: 4 chars per token
        if len(prompt) > max_chars:
            prompt = prompt[:max_chars] + "..."

        return prompt

    def _get_skills_context(
        self, query: str, priorities: list[str], max_tokens: int
    ) -> str:
        """Get skills context prioritized by agent needs.

        Args:
            query: User query
            priorities: Priority keywords for skill selection
            max_tokens: Maximum tokens for skills

        Returns:
            Formatted skills context
        """
        # Discover skills
        skill_adapters = AgentSkillAdapter.discover_skills(self.project_root)

        matching_adapter = None

        # Find matching skill by trigger
        query_lower = query.lower()
        for adapter in skill_adapters:
            if adapter.parsed and hasattr(adapter.parsed, "triggers"):
                for trigger in adapter.parsed.triggers:
                    if trigger.lower() in query_lower or query_lower in trigger.lower():
                        matching_adapter = adapter
                        break
            if matching_adapter:
                break

        if not matching_adapter:
            # Try to find skill matching priorities
            for adapter in skill_adapters:
                if adapter.parsed:
                    description = adapter.parsed.description.lower()
                    if any(p.lower() in description for p in priorities):
                        matching_adapter = adapter
                        break

        if not matching_adapter:
            return ""

        skill_prompt = matching_adapter.to_system_prompt()

        # Truncate if over token limit
        max_chars = max_tokens * 4
        if len(skill_prompt) > max_chars:
            skill_prompt = skill_prompt[:max_chars] + "..."

        return skill_prompt

    def _format_context(self, context_parts: list[tuple[str, str]]) -> str:
        """Format context parts into final context string.

        Args:
            context_parts: List of (section_name, content) tuples

        Returns:
            Formatted context string
        """
        if not context_parts:
            return ""

        sections = []
        for section_name, content in context_parts:
            sections.append(f"# {section_name}")
            sections.append(content)
            sections.append("")  # Empty line between sections

        return "\n".join(sections)

    def _apply_adjustments(self, profile: dict[str, Any], adjustments: dict[str, Any]):
        """Apply adjustments to profile in-place.

        Args:
            profile: Profile to adjust
            adjustments: Adjustments to apply
        """
        # Handle token allocation adjustments
        if "token_allocation" in adjustments:
            if "token_allocation" not in profile:
                profile["token_allocation"] = {}
            profile["token_allocation"].update(adjustments["token_allocation"])

        # Handle other direct updates
        for key, value in adjustments.items():
            if key != "token_allocation":
                profile[key] = value

    def get_profile_summary(self, agent_name: str) -> dict[str, Any]:
        """Get summary of agent's context profile.

        Args:
            agent_name: Name of agent

        Returns:
            Profile summary dictionary
        """
        profile = self.get_profile(agent_name)

        return {
            "agent": agent_name,
            "token_allocation": profile.get("token_allocation", {}),
            "history_depth": profile.get("history_depth", 5),
            "compress_threshold": profile.get("compress_threshold", 0.6),
            "priorities": profile.get("priority", []),
            "is_custom": agent_name in self.custom_profiles,
        }

    def reset_profile(self, agent_name: str):
        """Reset agent to default profile.

        Args:
            agent_name: Name of agent
        """
        if agent_name in self.custom_profiles:
            del self.custom_profiles[agent_name]