adaptive-memory-multi-model-router 1.2.2 → 1.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +146 -66
- package/dist/index.d.ts +1 -1
- package/dist/index.js +1 -1
- package/dist/integrations/airtable.js +20 -0
- package/dist/integrations/discord.js +18 -0
- package/dist/integrations/github.js +23 -0
- package/dist/integrations/gmail.js +19 -0
- package/dist/integrations/google-calendar.js +18 -0
- package/dist/integrations/index.js +61 -0
- package/dist/integrations/jira.js +21 -0
- package/dist/integrations/linear.js +19 -0
- package/dist/integrations/notion.js +19 -0
- package/dist/integrations/slack.js +18 -0
- package/dist/integrations/telegram.js +19 -0
- package/dist/providers/registry.js +7 -3
- package/docs/ARCHITECTURAL-IMPROVEMENTS-2025.md +1391 -0
- package/docs/ARCHITECTURAL-IMPROVEMENTS-REVISED-2025.md +1051 -0
- package/docs/CONFIGURATION.md +476 -0
- package/docs/COUNCIL_DECISION.json +308 -0
- package/docs/COUNCIL_SUMMARY.md +265 -0
- package/docs/COUNCIL_V2.2_DECISION.md +416 -0
- package/docs/IMPROVEMENT_ROADMAP.md +515 -0
- package/docs/LLM_COUNCIL_DECISION.md +508 -0
- package/docs/QUICK_START_VISIBILITY.md +782 -0
- package/docs/REDDIT_GAP_ANALYSIS.md +299 -0
- package/docs/RESEARCH_BACKED_IMPROVEMENTS.md +1180 -0
- package/docs/TMLPD_QNA.md +751 -0
- package/docs/TMLPD_V2.1_COMPLETE.md +763 -0
- package/docs/TMLPD_V2.2_RESEARCH_ROADMAP.md +754 -0
- package/docs/V2.2_IMPLEMENTATION_COMPLETE.md +446 -0
- package/docs/V2_IMPLEMENTATION_GUIDE.md +388 -0
- package/docs/VISIBILITY_ADOPTION_PLAN.md +1005 -0
- package/docs/launch-content/LAUNCH_EXECUTION_CHECKLIST.md +421 -0
- package/docs/launch-content/README.md +457 -0
- package/docs/launch-content/assets/cost_comparison_100_tasks.png +0 -0
- package/docs/launch-content/assets/cumulative_savings.png +0 -0
- package/docs/launch-content/assets/parallel_speedup.png +0 -0
- package/docs/launch-content/assets/provider_pricing_comparison.png +0 -0
- package/docs/launch-content/assets/task_breakdown_comparison.png +0 -0
- package/docs/launch-content/generate_charts.py +313 -0
- package/docs/launch-content/hn_show_post.md +139 -0
- package/docs/launch-content/partner_outreach_templates.md +745 -0
- package/docs/launch-content/reddit_posts.md +467 -0
- package/docs/launch-content/twitter_thread.txt +460 -0
- package/examples/QUICKSTART.md +1 -1
- package/openclaw-alexa-bridge/ALL_REMAINING_FIXES_PLAN.md +313 -0
- package/openclaw-alexa-bridge/REMAINING_FIXES_SUMMARY.md +277 -0
- package/openclaw-alexa-bridge/src/alexa_handler_no_tmlpd.js +1234 -0
- package/openclaw-alexa-bridge/test_fixes.js +77 -0
- package/package.json +120 -29
- package/package.json.tmp +0 -0
- package/qna/TMLPD_QNA.md +3 -3
- package/skill/SKILL.md +2 -2
- package/src/__tests__/integration/tmpld_integration.test.py +540 -0
- package/src/agents/skill_enhanced_agent.py +318 -0
- package/src/memory/__init__.py +15 -0
- package/src/memory/agentic_memory.py +353 -0
- package/src/memory/semantic_memory.py +444 -0
- package/src/memory/simple_memory.py +466 -0
- package/src/memory/working_memory.py +447 -0
- package/src/orchestration/__init__.py +52 -0
- package/src/orchestration/execution_engine.py +353 -0
- package/src/orchestration/halo_orchestrator.py +367 -0
- package/src/orchestration/mcts_workflow.py +498 -0
- package/src/orchestration/role_assigner.py +473 -0
- package/src/orchestration/task_planner.py +522 -0
- package/src/providers/__init__.py +67 -0
- package/src/providers/anthropic.py +304 -0
- package/src/providers/base.py +241 -0
- package/src/providers/cerebras.py +373 -0
- package/src/providers/registry.py +476 -0
- package/src/routing/__init__.py +30 -0
- package/src/routing/universal_router.py +621 -0
- package/src/skills/TMLPD-QUICKREF.md +210 -0
- package/src/skills/TMLPD-SETUP-SUMMARY.md +157 -0
- package/src/skills/TMLPD.md +540 -0
- package/src/skills/__tests__/skill_manager.test.ts +328 -0
- package/src/skills/skill_manager.py +385 -0
- package/src/skills/test-tmlpd.sh +108 -0
- package/src/skills/tmlpd-category.yaml +67 -0
- package/src/skills/tmlpd-monitoring.yaml +188 -0
- package/src/skills/tmlpd-phase.yaml +132 -0
- package/src/state/__init__.py +17 -0
- package/src/state/simple_checkpoint.py +508 -0
- package/src/tmlpd_agent.py +464 -0
- package/src/tmpld_v2.py +427 -0
- package/src/workflows/__init__.py +18 -0
- package/src/workflows/advanced_difficulty_classifier.py +377 -0
- package/src/workflows/chaining_executor.py +417 -0
- package/src/workflows/difficulty_integration.py +209 -0
- package/src/workflows/orchestrator.py +469 -0
- package/src/workflows/orchestrator_executor.py +456 -0
- package/src/workflows/parallelization_executor.py +382 -0
- package/src/workflows/router.py +311 -0
- package/test_integration_simple.py +86 -0
- package/test_mcts_workflow.py +150 -0
- package/test_templd_integration.py +262 -0
- package/test_universal_router.py +275 -0
- package/tmlpd-pi-extension/README.md +36 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.d.ts +114 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.js +285 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.js.map +1 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.d.ts +58 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.js +153 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.js.map +1 -0
- package/tmlpd-pi-extension/dist/cli.js +59 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.d.ts +95 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.js +240 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.js.map +1 -0
- package/tmlpd-pi-extension/dist/index.d.ts +723 -0
- package/tmlpd-pi-extension/dist/index.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/index.js +239 -0
- package/tmlpd-pi-extension/dist/index.js.map +1 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.d.ts +82 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.js +145 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.js.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.d.ts +102 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.js +207 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.js.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.d.ts +85 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.js +210 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.js.map +1 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.d.ts +102 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.js +338 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.js.map +1 -0
- package/tmlpd-pi-extension/dist/providers/registry.d.ts +55 -0
- package/tmlpd-pi-extension/dist/providers/registry.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/providers/registry.js +138 -0
- package/tmlpd-pi-extension/dist/providers/registry.js.map +1 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.d.ts +68 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.js +332 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.js.map +1 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.d.ts +101 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.js +368 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.d.ts +96 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.js +170 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/compression.d.ts +61 -0
- package/tmlpd-pi-extension/dist/utils/compression.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/compression.js +281 -0
- package/tmlpd-pi-extension/dist/utils/compression.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/reliability.d.ts +74 -0
- package/tmlpd-pi-extension/dist/utils/reliability.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/reliability.js +177 -0
- package/tmlpd-pi-extension/dist/utils/reliability.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.d.ts +117 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.js +246 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.d.ts +50 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.js +124 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.js.map +1 -0
- package/tmlpd-pi-extension/examples/QUICKSTART.md +183 -0
- package/tmlpd-pi-extension/package-lock.json +75 -0
- package/tmlpd-pi-extension/package.json +172 -0
- package/tmlpd-pi-extension/python/examples.py +53 -0
- package/tmlpd-pi-extension/python/integrations.py +330 -0
- package/tmlpd-pi-extension/python/setup.py +28 -0
- package/tmlpd-pi-extension/python/tmlpd.py +369 -0
- package/tmlpd-pi-extension/qna/REDDIT_GAP_ANALYSIS.md +299 -0
- package/tmlpd-pi-extension/qna/TMLPD_QNA.md +751 -0
- package/tmlpd-pi-extension/skill/SKILL.md +238 -0
- package/{src → tmlpd-pi-extension/src}/index.ts +1 -1
- package/tmlpd-pi-extension/tsconfig.json +18 -0
- package/demo/research-demo.js +0 -266
- package/notebooks/quickstart.ipynb +0 -157
- package/rust/tmlpd.h +0 -268
- package/src/cache/prefixCache.ts +0 -365
- package/src/routing/advancedRouter.ts +0 -406
- package/src/utils/speculativeDecoding.ts +0 -344
- /package/{src → tmlpd-pi-extension/src}/cache/responseCache.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/cost/costTracker.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/memory/episodicMemory.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/orchestration/haloOrchestrator.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/orchestration/mctsWorkflow.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/providers/localProvider.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/providers/registry.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/tools/tmlpdTools.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/batchProcessor.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/compression.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/reliability.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/tokenUtils.ts +0 -0

package/src/agents/skill_enhanced_agent.py
@@ -0,0 +1,318 @@
"""
TML Enhanced Agent - Agent with Skill capabilities

Integrates SkillManager to provide agents with domain-specific expertise
following Anthropic's Agent Skills specification.
"""

from typing import Dict, List, Optional, Any
from pathlib import Path
import json
from datetime import datetime

from .skill_manager import SkillManager, Skill


class TMLEnhancedAgent:
    """
    Agent enhanced with Skill capabilities.

    Uses progressive disclosure to load relevant skills only when needed,
    following Anthropic's specification for Agent Skills.
    """

    def __init__(
        self,
        agent_id: str,
        provider: str,
        model: str,
        skills_dir: str = "tmlpd-skills",
        assigned_skills: Optional[List[str]] = None
    ):
        """
        Initialize TML Enhanced Agent

        Args:
            agent_id: Unique identifier for this agent
            provider: LLM provider (e.g., 'anthropic', 'openai')
            model: Model name (e.g., 'claude-sonnet-4', 'gpt-4-turbo')
            skills_dir: Directory containing skill definitions
            assigned_skills: List of skill names assigned to this agent
        """
        self.agent_id = agent_id
        self.provider = provider
        self.model = model
        self.assigned_skills = assigned_skills or []

        # Initialize skill manager
        self.skill_manager = SkillManager(skills_dir)

        # Load metadata for assigned skills
        for skill_name in self.assigned_skills:
            if skill_name in self.skill_manager.skills:
                # Metadata already loaded by SkillManager
                pass

    def execute_task(self, task: Dict[str, Any]) -> Dict[str, Any]:
        """
        Execute a task using relevant skills for context.

        Args:
            task: Task dictionary with 'description' and other metadata

        Returns:
            Result dictionary with task output
        """
        task_description = task.get("description", "")

        # Step 1: Find relevant skills
        relevant_skills = self._get_relevant_skills(task_description)

        # Step 2: Build enhanced prompt with skill context
        enhanced_prompt = self._build_prompt_with_skills(
            task_description,
            relevant_skills,
            task
        )

        # Step 3: Execute LLM call with enhanced context
        result = self._execute_llm_call(enhanced_prompt)

        # Step 4: Store successful pattern in memory (if enabled)
        if result.get("success"):
            self._remember_success_pattern(task, result)

        return result

    def _get_relevant_skills(
        self,
        task_description: str,
        top_k: int = 3
    ) -> List[Skill]:
        """
        Get relevant skills for this task.

        Args:
            task_description: Task to find skills for
            top_k: Maximum number of skills to load

        Returns:
            List of loaded Skill objects
        """
        # If agent has assigned skills, only consider those
        if self.assigned_skills:
            skill_names = self.assigned_skills[:top_k]
        else:
            # Otherwise, use skill discovery
            skill_names = self.skill_manager.get_relevant_skills(
                task_description,
                top_k=top_k
            )

        # Load full skill content (Level 2: SKILL.md)
        loaded_skills = []

        for skill_name in skill_names:
            try:
                skill = self.skill_manager.load_skill(skill_name)
                loaded_skills.append(skill)
            except Exception as e:
                print(f"Warning: Failed to load skill '{skill_name}': {e}")
                continue

        return loaded_skills

    def _build_prompt_with_skills(
        self,
        task_description: str,
        skills: List[Skill],
        task: Dict[str, Any]
    ) -> str:
        """
        Build enhanced prompt with skill context.

        Args:
            task_description: Original task description
            skills: List of relevant loaded skills
            task: Original task metadata

        Returns:
            Enhanced prompt with skill context
        """
        parts = []

        # Add task context
        parts.append(f"# Task\n{task_description}\n")

        # Add additional task metadata
        if "context" in task:
            parts.append(f"## Context\n{task['context']}\n")

        if "requirements" in task:
            parts.append(f"## Requirements\n{task['requirements']}\n")

        # Add skill contexts
        if skills:
            parts.append("## Relevant Skills\n")
            parts.append("The following skills provide expert guidance for this task:\n")

            for skill in skills:
                parts.append(f"\n### {skill.name}\n")
                parts.append(f"{skill.content}\n")

        # Add agent information
        parts.append("\n## Agent Configuration\n")
        parts.append(f"- Agent: {self.agent_id}\n")
        parts.append(f"- Provider: {self.provider}\n")
        parts.append(f"- Model: {self.model}\n")

        # Add execution instruction
        parts.append("\n## Instructions\n")
        parts.append("Please complete the task following the guidance from the relevant skills above. ")
        parts.append("Use best practices and patterns recommended by the skills.")

        return "\n".join(parts)

    def _execute_llm_call(self, prompt: str) -> Dict[str, Any]:
        """
        Execute LLM call with the enhanced prompt.

        Args:
            prompt: Enhanced prompt with skill context

        Returns:
            Result dictionary with response, tokens, cost, etc.
        """
        # This is a placeholder - in production, you would call
        # the actual LLM API based on self.provider and self.model

        # Simulated response
        return {
            "success": True,
            "output": f"Simulated response from {self.provider}:{self.model}",
            "tokens_used": 100,
            "cost": 0.01,
            "execution_time": 2.5,
            "timestamp": datetime.now().isoformat()
        }

    def _remember_success_pattern(self, task: Dict, result: Dict):
        """
        Remember a successful execution pattern.

        In production, this would save to:
        - SimpleProjectMemory for project-level learning
        - Or episodic memory for pattern discovery

        Args:
            task: Task that was executed
            result: Successful result
        """
        pattern = {
            "task_description": task.get("description"),
            "agent_id": self.agent_id,
            "model": self.model,
            "tokens": result.get("tokens_used"),
            "cost": result.get("cost"),
            "execution_time": result.get("execution_time"),
            "timestamp": datetime.now().isoformat()
        }

        # Store pattern (implementation depends on memory system)
        # For now, just log it
        print(f"Pattern learned: {task.get('description')[:50]}...")

    def get_assigned_skills(self) -> List[str]:
        """Get list of skills assigned to this agent"""
        return self.assigned_skills.copy()

    def add_skill(self, skill_name: str):
        """
        Assign a skill to this agent.

        Args:
            skill_name: Name of the skill to assign
        """
        if skill_name not in self.assigned_skills:
            self.assigned_skills.append(skill_name)

    def remove_skill(self, skill_name: str):
        """
        Remove a skill from this agent.

        Args:
            skill_name: Name of the skill to remove
        """
        if skill_name in self.assigned_skills:
            self.assigned_skills.remove(skill_name)

    def list_available_skills(self) -> List[str]:
        """List all available skills from skill manager"""
        return self.skill_manager.list_skills()

    def get_skill_info(self, skill_name: str) -> Optional[Dict]:
        """Get information about a specific skill"""
        return self.skill_manager.get_skill_info(skill_name)

    def to_dict(self) -> Dict[str, Any]:
        """
        Convert agent to dictionary representation.

        Returns:
            Dictionary with agent configuration
        """
        return {
            "agent_id": self.agent_id,
            "provider": self.provider,
            "model": self.model,
            "assigned_skills": self.assigned_skills,
            "available_skills": self.list_available_skills()
        }


class TMLEnhancedAgentFactory:
    """
    Factory for creating TML Enhanced Agents with proper configuration.
    """

    @staticmethod
    def create_from_config(config: Dict[str, Any]) -> TMLEnhancedAgent:
        """
        Create agent from configuration dictionary.

        Args:
            config: Configuration dictionary with keys:
                - id: Agent ID
                - provider: LLM provider
                - model: Model name
                - skills_dir: Skills directory
                - skills: List of assigned skill names

        Returns:
            Configured TMLEnhancedAgent instance
        """
        return TMLEnhancedAgent(
            agent_id=config["id"],
            provider=config["provider"],
            model=config["model"],
            skills_dir=config.get("skills_dir", "tmlpd-skills"),
            assigned_skills=config.get("skills", [])
        )

    @staticmethod
    def create_multiple_from_config(
        agents_config: List[Dict[str, Any]]
    ) -> List[TMLEnhancedAgent]:
        """
        Create multiple agents from configuration list.

        Args:
            agents_config: List of agent configuration dictionaries

        Returns:
            List of configured TMLEnhancedAgent instances
        """
        return [
            TMLEnhancedAgentFactory.create_from_config(agent_config)
            for agent_config in agents_config
        ]
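
For orientation, the sketch below shows one way the new agent class could be driven through its factory. It is not part of the package diff: the import path, agent id, skill name, and task fields are illustrative assumptions, and _execute_llm_call is the simulated placeholder shown above, so the result is canned rather than a real model response.

    from src.agents.skill_enhanced_agent import TMLEnhancedAgentFactory  # assumed import path

    # Illustrative config; the "code-review" skill is assumed to exist under tmlpd-skills/
    agent = TMLEnhancedAgentFactory.create_from_config({
        "id": "reviewer-1",
        "provider": "anthropic",
        "model": "claude-sonnet-4",
        "skills": ["code-review"],
    })

    # execute_task loads the assigned skills, builds a skill-augmented prompt,
    # calls the (currently simulated) LLM backend, and logs a success pattern.
    result = agent.execute_task({
        "description": "Review the retry logic in the provider registry",
        "requirements": "Flag unbounded retries and missing backoff",
    })
    print(result["output"], result["cost"])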

package/src/memory/__init__.py
@@ -0,0 +1,15 @@
"""
TMLPD Memory Module

This module provides lightweight JSON-based memory for pattern learning.
"""

from .simple_memory import (
    SimpleProjectMemory,
    remember_success
)

__all__ = [
    "SimpleProjectMemory",
    "remember_success"
]

package/src/memory/agentic_memory.py
@@ -0,0 +1,353 @@
"""
Phase 3a: Episodic Memory Store

JSON-based episodic memory following Memoria framework (arXiv:2512.12686)
and A-Mem pattern (arXiv:2502.12110).

Episodic memory stores specific task executions with full context.
"""

import json
import uuid
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, List, Any, Optional
from collections import defaultdict
import re


class EpisodicMemoryStore:
    """
    Episodic memory: stores specific task executions.

    Based on Memoria framework (arXiv:2512.12686)
    and A-Mem (arXiv:2502.12110)

    Features:
    - Full context storage
    - Keyword indexing for fast retrieval
    - Importance scoring
    - Time-based decay
    """

    def __init__(self, base_dir: str = ".taskmaster/memory/episodic"):
        """
        Initialize episodic memory store.

        Args:
            base_dir: Directory to store episodic memories
        """
        self.base_dir = Path(base_dir)
        self.base_dir.mkdir(parents=True, exist_ok=True)

        # Index: keywords -> episode IDs
        self.index_file = self.base_dir / "index.json"
        self.keyword_index = self._load_index()

    def _load_index(self) -> Dict[str, List[str]]:
        """Load keyword index"""
        if self.index_file.exists():
            with open(self.index_file, 'r') as f:
                return json.load(f)
        return {}

    def _save_index(self):
        """Save keyword index"""
        with open(self.index_file, 'w') as f:
            json.dump(self.keyword_index, f, indent=2)

    def store(
        self,
        task: Dict[str, Any],
        result: Dict[str, Any],
        agent_id: str,
        skills: List[str],
        provider: str,
        model: str,
        importance: float = 0.5,
        metadata: Optional[Dict] = None
    ) -> str:
        """
        Store an episodic memory.

        Args:
            task: Task that was executed
            result: Execution result
            agent_id: Agent that executed
            skills: Skills used
            provider: LLM provider used
            model: Model used
            importance: Importance score (0-1)
            metadata: Additional metadata

        Returns:
            Episode ID
        """
        episode_id = f"episode_{uuid.uuid4().hex[:12]}"

        # Extract keywords from task description
        keywords = self._extract_keywords(task.get("description", ""))

        # Create episode
        episode = {
            "id": episode_id,
            "timestamp": datetime.now().isoformat(),
            "task": {
                "description": task.get("description", ""),
                "keywords": list(keywords),  # stored as a list so the episode stays JSON-serializable
                "requirements": task.get("requirements", ""),
                "context": task.get("context", "")
            },
            "execution": {
                "agent_id": agent_id,
                "provider": provider,
                "model": model,
                "skills": skills
            },
            "result": {
                "success": result.get("success", False),
                "tokens_used": result.get("tokens_used", 0),
                "cost": result.get("cost", 0.0),
                "execution_time": result.get("execution_time", 0.0),
                "latency_ms": result.get("latency_ms", 0.0)
            },
            "importance": importance,
            "metadata": metadata or {},
            "access_count": 0,
            "last_accessed": datetime.now().isoformat()
        }

        # Save episode to file
        episode_file = self.base_dir / f"{episode_id}.json"
        with open(episode_file, 'w') as f:
            json.dump(episode, f, indent=2)

        # Update keyword index
        for keyword in keywords:
            if keyword not in self.keyword_index:
                self.keyword_index[keyword] = []
            self.keyword_index[keyword].append(episode_id)

        self._save_index()

        return episode_id

    def recall(
        self,
        task: Dict[str, Any],
        top_k: int = 5,
        min_importance: float = 0.0,
        max_age_days: int = 30
    ) -> List[Dict[str, Any]]:
        """
        Recall relevant episodes based on task similarity.

        Args:
            task: Current task
            top_k: Maximum number of episodes to return
            min_importance: Minimum importance threshold
            max_age_days: Maximum age of episodes (days)

        Returns:
            List of relevant episodes with similarity scores
        """
        # Extract keywords from current task
        task_keywords = self._extract_keywords(task.get("description", ""))

        # Find episodes with keyword overlap
        episode_scores = defaultdict(float)

        for keyword in task_keywords:
            if keyword in self.keyword_index:
                for episode_id in self.keyword_index[keyword]:
                    episode_scores[episode_id] += 1.0

        # Load and score episodes
        scored_episodes = []
        cutoff_date = datetime.now() - timedelta(days=max_age_days)

        for episode_id, score in episode_scores.items():
            # Load episode
            episode_file = self.base_dir / f"{episode_id}.json"

            if not episode_file.exists():
                continue

            with open(episode_file, 'r') as f:
                episode = json.load(f)

            # Check age
            episode_date = datetime.fromisoformat(episode["timestamp"])
            if episode_date < cutoff_date:
                continue

            # Check importance
            if episode["importance"] < min_importance:
                continue

            # Calculate final score
            # Keyword similarity (40%)
            keyword_score = score / max(len(task_keywords), 1)

            # Recency boost (20%) - more recent = higher score
            days_old = (datetime.now() - episode_date).days
            recency_score = max(0, 1 - days_old / 365)  # Decays over 1 year

            # Access frequency boost (20%)
            access_score = min(episode["access_count"] / 100, 1.0)

            # Importance boost (20%)
            importance_score = episode["importance"]

            total_score = (
                keyword_score * 0.4 +
                recency_score * 0.2 +
                access_score * 0.2 +
                importance_score * 0.2
            )

            scored_episodes.append({
                "episode": episode,
                "similarity": keyword_score,
                "total_score": total_score
            })

            # Update access count
            episode["access_count"] += 1
            episode["last_accessed"] = datetime.now().isoformat()

            # Save updated episode
            with open(episode_file, 'w') as f:
                json.dump(episode, f, indent=2)

        # Sort by total score
        scored_episodes.sort(key=lambda x: x["total_score"], reverse=True)

        return scored_episodes[:top_k]

    def get_episode(self, episode_id: str) -> Optional[Dict[str, Any]]:
        """Get specific episode by ID"""
        episode_file = self.base_dir / f"{episode_id}.json"

        if not episode_file.exists():
            return None

        with open(episode_file, 'r') as f:
            return json.load(f)

    def _extract_keywords(self, text: str) -> set:
        """
        Extract keywords from text.

        Removes common stop words and short words.
        """
        # Stop words
        stop_words = {
            "the", "a", "an", "and", "or", "but", "in", "on", "at", "to",
            "for", "of", "with", "by", "from", "as", "is", "was", "are",
            "been", "be", "have", "has", "had", "do", "does", "did", "will",
            "would", "should", "could", "may", "might", "can", "this", "that"
        }

        # Extract words
        words = re.findall(r'\w+', text.lower())

        # Filter
        keywords = {
            w for w in words
            if w not in stop_words and len(w) > 2
        }

        return keywords

    def get_stats(self) -> Dict[str, Any]:
        """Get episodic memory statistics"""
        # Only episode files (the keyword index also lives in this directory)
        total_episodes = len(list(self.base_dir.glob("episode_*.json")))

        # Calculate stats
        total_importance = 0.0
        total_tokens = 0
        total_cost = 0.0

        for episode_file in self.base_dir.glob("episode_*.json"):
            with open(episode_file, 'r') as f:
                episode = json.load(f)

            total_importance += episode.get("importance", 0)
            total_tokens += episode["result"]["tokens_used"]
            total_cost += episode["result"]["cost"]

        return {
            "total_episodes": total_episodes,
            "total_keywords": len(self.keyword_index),
            "avg_importance": total_importance / total_episodes if total_episodes > 0 else 0,
            "total_tokens": total_tokens,
            "total_cost": total_cost,
            "avg_cost_per_episode": total_cost / total_episodes if total_episodes > 0 else 0
        }

    def cleanup_old_episodes(self, days_old: int = 90):
        """
        Remove episodes older than specified days.

        Args:
            days_old: Age threshold in days
        """
        cutoff_date = datetime.now() - timedelta(days=days_old)
        removed_count = 0

        for episode_file in self.base_dir.glob("episode_*.json"):
            with open(episode_file, 'r') as f:
                episode = json.load(f)

            episode_date = datetime.fromisoformat(episode["timestamp"])

            if episode_date < cutoff_date:
                # Remove from keyword index
                for keyword, episode_list in self.keyword_index.items():
                    if episode["id"] in episode_list:
                        episode_list.remove(episode["id"])

                # Delete file
                episode_file.unlink()
                removed_count += 1

        # Save updated index
        self._save_index()

        print(f"Removed {removed_count} episodes older than {days_old} days")

    def export_episodes(self, output_path: str, criteria: Optional[Dict] = None):
        """
        Export episodes to JSON file.

        Args:
            output_path: Path to output file
            criteria: Optional filtering criteria
        """
        episodes = []

        for episode_file in self.base_dir.glob("episode_*.json"):
            with open(episode_file, 'r') as f:
                episode = json.load(f)

            # Apply filters if provided
            if criteria:
                if "min_importance" in criteria:
                    if episode["importance"] < criteria["min_importance"]:
                        continue

                if "provider" in criteria:
                    if episode["execution"]["provider"] != criteria["provider"]:
                        continue

            episodes.append(episode)

        # Write to output
        output_path = Path(output_path)
        output_path.parent.mkdir(parents=True, exist_ok=True)

        with open(output_path, 'w') as f:
            json.dump(episodes, f, indent=2)

        print(f"Exported {len(episodes)} episodes to {output_path}")
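
To make the recall weighting concrete, here is a small usage sketch against the store defined above. It is not part of the diff: the import path, task text, provider, model, and importance values are illustrative assumptions. Scores follow the formula in recall(): 0.4 * keyword overlap + 0.2 * recency + 0.2 * access frequency + 0.2 * importance.

    from src.memory.agentic_memory import EpisodicMemoryStore  # assumed import path

    store = EpisodicMemoryStore()  # persists under .taskmaster/memory/episodic/ by default

    # Record one execution; field values are illustrative
    episode_id = store.store(
        task={"description": "Summarize weekly Jira tickets for the routing team"},
        result={"success": True, "tokens_used": 850, "cost": 0.004, "execution_time": 1.8},
        agent_id="summarizer-1",
        skills=["tmlpd-category"],
        provider="cerebras",
        model="llama-3.3-70b",
        importance=0.8,
    )

    # Recall ranks stored episodes by the weighted score described above
    for match in store.recall({"description": "Summarize Jira tickets"}, top_k=3):
        print(match["total_score"], match["episode"]["id"])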