adaptive-memory-multi-model-router 1.2.2 → 1.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (195)
  1. package/LICENSE +21 -0
  2. package/README.md +146 -66
  3. package/dist/index.d.ts +1 -1
  4. package/dist/index.js +1 -1
  5. package/dist/integrations/airtable.js +20 -0
  6. package/dist/integrations/discord.js +18 -0
  7. package/dist/integrations/github.js +23 -0
  8. package/dist/integrations/gmail.js +19 -0
  9. package/dist/integrations/google-calendar.js +18 -0
  10. package/dist/integrations/index.js +61 -0
  11. package/dist/integrations/jira.js +21 -0
  12. package/dist/integrations/linear.js +19 -0
  13. package/dist/integrations/notion.js +19 -0
  14. package/dist/integrations/slack.js +18 -0
  15. package/dist/integrations/telegram.js +19 -0
  16. package/dist/providers/registry.js +7 -3
  17. package/docs/ARCHITECTURAL-IMPROVEMENTS-2025.md +1391 -0
  18. package/docs/ARCHITECTURAL-IMPROVEMENTS-REVISED-2025.md +1051 -0
  19. package/docs/CONFIGURATION.md +476 -0
  20. package/docs/COUNCIL_DECISION.json +308 -0
  21. package/docs/COUNCIL_SUMMARY.md +265 -0
  22. package/docs/COUNCIL_V2.2_DECISION.md +416 -0
  23. package/docs/IMPROVEMENT_ROADMAP.md +515 -0
  24. package/docs/LLM_COUNCIL_DECISION.md +508 -0
  25. package/docs/QUICK_START_VISIBILITY.md +782 -0
  26. package/docs/REDDIT_GAP_ANALYSIS.md +299 -0
  27. package/docs/RESEARCH_BACKED_IMPROVEMENTS.md +1180 -0
  28. package/docs/TMLPD_QNA.md +751 -0
  29. package/docs/TMLPD_V2.1_COMPLETE.md +763 -0
  30. package/docs/TMLPD_V2.2_RESEARCH_ROADMAP.md +754 -0
  31. package/docs/V2.2_IMPLEMENTATION_COMPLETE.md +446 -0
  32. package/docs/V2_IMPLEMENTATION_GUIDE.md +388 -0
  33. package/docs/VISIBILITY_ADOPTION_PLAN.md +1005 -0
  34. package/docs/launch-content/LAUNCH_EXECUTION_CHECKLIST.md +421 -0
  35. package/docs/launch-content/README.md +457 -0
  36. package/docs/launch-content/assets/cost_comparison_100_tasks.png +0 -0
  37. package/docs/launch-content/assets/cumulative_savings.png +0 -0
  38. package/docs/launch-content/assets/parallel_speedup.png +0 -0
  39. package/docs/launch-content/assets/provider_pricing_comparison.png +0 -0
  40. package/docs/launch-content/assets/task_breakdown_comparison.png +0 -0
  41. package/docs/launch-content/generate_charts.py +313 -0
  42. package/docs/launch-content/hn_show_post.md +139 -0
  43. package/docs/launch-content/partner_outreach_templates.md +745 -0
  44. package/docs/launch-content/reddit_posts.md +467 -0
  45. package/docs/launch-content/twitter_thread.txt +460 -0
  46. package/examples/QUICKSTART.md +1 -1
  47. package/openclaw-alexa-bridge/ALL_REMAINING_FIXES_PLAN.md +313 -0
  48. package/openclaw-alexa-bridge/REMAINING_FIXES_SUMMARY.md +277 -0
  49. package/openclaw-alexa-bridge/src/alexa_handler_no_tmlpd.js +1234 -0
  50. package/openclaw-alexa-bridge/test_fixes.js +77 -0
  51. package/package.json +120 -29
  52. package/package.json.tmp +0 -0
  53. package/qna/TMLPD_QNA.md +3 -3
  54. package/skill/SKILL.md +2 -2
  55. package/src/__tests__/integration/tmpld_integration.test.py +540 -0
  56. package/src/agents/skill_enhanced_agent.py +318 -0
  57. package/src/memory/__init__.py +15 -0
  58. package/src/memory/agentic_memory.py +353 -0
  59. package/src/memory/semantic_memory.py +444 -0
  60. package/src/memory/simple_memory.py +466 -0
  61. package/src/memory/working_memory.py +447 -0
  62. package/src/orchestration/__init__.py +52 -0
  63. package/src/orchestration/execution_engine.py +353 -0
  64. package/src/orchestration/halo_orchestrator.py +367 -0
  65. package/src/orchestration/mcts_workflow.py +498 -0
  66. package/src/orchestration/role_assigner.py +473 -0
  67. package/src/orchestration/task_planner.py +522 -0
  68. package/src/providers/__init__.py +67 -0
  69. package/src/providers/anthropic.py +304 -0
  70. package/src/providers/base.py +241 -0
  71. package/src/providers/cerebras.py +373 -0
  72. package/src/providers/registry.py +476 -0
  73. package/src/routing/__init__.py +30 -0
  74. package/src/routing/universal_router.py +621 -0
  75. package/src/skills/TMLPD-QUICKREF.md +210 -0
  76. package/src/skills/TMLPD-SETUP-SUMMARY.md +157 -0
  77. package/src/skills/TMLPD.md +540 -0
  78. package/src/skills/__tests__/skill_manager.test.ts +328 -0
  79. package/src/skills/skill_manager.py +385 -0
  80. package/src/skills/test-tmlpd.sh +108 -0
  81. package/src/skills/tmlpd-category.yaml +67 -0
  82. package/src/skills/tmlpd-monitoring.yaml +188 -0
  83. package/src/skills/tmlpd-phase.yaml +132 -0
  84. package/src/state/__init__.py +17 -0
  85. package/src/state/simple_checkpoint.py +508 -0
  86. package/src/tmlpd_agent.py +464 -0
  87. package/src/tmpld_v2.py +427 -0
  88. package/src/workflows/__init__.py +18 -0
  89. package/src/workflows/advanced_difficulty_classifier.py +377 -0
  90. package/src/workflows/chaining_executor.py +417 -0
  91. package/src/workflows/difficulty_integration.py +209 -0
  92. package/src/workflows/orchestrator.py +469 -0
  93. package/src/workflows/orchestrator_executor.py +456 -0
  94. package/src/workflows/parallelization_executor.py +382 -0
  95. package/src/workflows/router.py +311 -0
  96. package/test_integration_simple.py +86 -0
  97. package/test_mcts_workflow.py +150 -0
  98. package/test_templd_integration.py +262 -0
  99. package/test_universal_router.py +275 -0
  100. package/tmlpd-pi-extension/README.md +36 -0
  101. package/tmlpd-pi-extension/dist/cache/prefixCache.d.ts +114 -0
  102. package/tmlpd-pi-extension/dist/cache/prefixCache.d.ts.map +1 -0
  103. package/tmlpd-pi-extension/dist/cache/prefixCache.js +285 -0
  104. package/tmlpd-pi-extension/dist/cache/prefixCache.js.map +1 -0
  105. package/tmlpd-pi-extension/dist/cache/responseCache.d.ts +58 -0
  106. package/tmlpd-pi-extension/dist/cache/responseCache.d.ts.map +1 -0
  107. package/tmlpd-pi-extension/dist/cache/responseCache.js +153 -0
  108. package/tmlpd-pi-extension/dist/cache/responseCache.js.map +1 -0
  109. package/tmlpd-pi-extension/dist/cli.js +59 -0
  110. package/tmlpd-pi-extension/dist/cost/costTracker.d.ts +95 -0
  111. package/tmlpd-pi-extension/dist/cost/costTracker.d.ts.map +1 -0
  112. package/tmlpd-pi-extension/dist/cost/costTracker.js +240 -0
  113. package/tmlpd-pi-extension/dist/cost/costTracker.js.map +1 -0
  114. package/tmlpd-pi-extension/dist/index.d.ts +723 -0
  115. package/tmlpd-pi-extension/dist/index.d.ts.map +1 -0
  116. package/tmlpd-pi-extension/dist/index.js +239 -0
  117. package/tmlpd-pi-extension/dist/index.js.map +1 -0
  118. package/tmlpd-pi-extension/dist/memory/episodicMemory.d.ts +82 -0
  119. package/tmlpd-pi-extension/dist/memory/episodicMemory.d.ts.map +1 -0
  120. package/tmlpd-pi-extension/dist/memory/episodicMemory.js +145 -0
  121. package/tmlpd-pi-extension/dist/memory/episodicMemory.js.map +1 -0
  122. package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.d.ts +102 -0
  123. package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.d.ts.map +1 -0
  124. package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.js +207 -0
  125. package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.js.map +1 -0
  126. package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.d.ts +85 -0
  127. package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.d.ts.map +1 -0
  128. package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.js +210 -0
  129. package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.js.map +1 -0
  130. package/tmlpd-pi-extension/dist/providers/localProvider.d.ts +102 -0
  131. package/tmlpd-pi-extension/dist/providers/localProvider.d.ts.map +1 -0
  132. package/tmlpd-pi-extension/dist/providers/localProvider.js +338 -0
  133. package/tmlpd-pi-extension/dist/providers/localProvider.js.map +1 -0
  134. package/tmlpd-pi-extension/dist/providers/registry.d.ts +55 -0
  135. package/tmlpd-pi-extension/dist/providers/registry.d.ts.map +1 -0
  136. package/tmlpd-pi-extension/dist/providers/registry.js +138 -0
  137. package/tmlpd-pi-extension/dist/providers/registry.js.map +1 -0
  138. package/tmlpd-pi-extension/dist/routing/advancedRouter.d.ts +68 -0
  139. package/tmlpd-pi-extension/dist/routing/advancedRouter.d.ts.map +1 -0
  140. package/tmlpd-pi-extension/dist/routing/advancedRouter.js +332 -0
  141. package/tmlpd-pi-extension/dist/routing/advancedRouter.js.map +1 -0
  142. package/tmlpd-pi-extension/dist/tools/tmlpdTools.d.ts +101 -0
  143. package/tmlpd-pi-extension/dist/tools/tmlpdTools.d.ts.map +1 -0
  144. package/tmlpd-pi-extension/dist/tools/tmlpdTools.js +368 -0
  145. package/tmlpd-pi-extension/dist/tools/tmlpdTools.js.map +1 -0
  146. package/tmlpd-pi-extension/dist/utils/batchProcessor.d.ts +96 -0
  147. package/tmlpd-pi-extension/dist/utils/batchProcessor.d.ts.map +1 -0
  148. package/tmlpd-pi-extension/dist/utils/batchProcessor.js +170 -0
  149. package/tmlpd-pi-extension/dist/utils/batchProcessor.js.map +1 -0
  150. package/tmlpd-pi-extension/dist/utils/compression.d.ts +61 -0
  151. package/tmlpd-pi-extension/dist/utils/compression.d.ts.map +1 -0
  152. package/tmlpd-pi-extension/dist/utils/compression.js +281 -0
  153. package/tmlpd-pi-extension/dist/utils/compression.js.map +1 -0
  154. package/tmlpd-pi-extension/dist/utils/reliability.d.ts +74 -0
  155. package/tmlpd-pi-extension/dist/utils/reliability.d.ts.map +1 -0
  156. package/tmlpd-pi-extension/dist/utils/reliability.js +177 -0
  157. package/tmlpd-pi-extension/dist/utils/reliability.js.map +1 -0
  158. package/tmlpd-pi-extension/dist/utils/speculativeDecoding.d.ts +117 -0
  159. package/tmlpd-pi-extension/dist/utils/speculativeDecoding.d.ts.map +1 -0
  160. package/tmlpd-pi-extension/dist/utils/speculativeDecoding.js +246 -0
  161. package/tmlpd-pi-extension/dist/utils/speculativeDecoding.js.map +1 -0
  162. package/tmlpd-pi-extension/dist/utils/tokenUtils.d.ts +50 -0
  163. package/tmlpd-pi-extension/dist/utils/tokenUtils.d.ts.map +1 -0
  164. package/tmlpd-pi-extension/dist/utils/tokenUtils.js +124 -0
  165. package/tmlpd-pi-extension/dist/utils/tokenUtils.js.map +1 -0
  166. package/tmlpd-pi-extension/examples/QUICKSTART.md +183 -0
  167. package/tmlpd-pi-extension/package-lock.json +75 -0
  168. package/tmlpd-pi-extension/package.json +172 -0
  169. package/tmlpd-pi-extension/python/examples.py +53 -0
  170. package/tmlpd-pi-extension/python/integrations.py +330 -0
  171. package/tmlpd-pi-extension/python/setup.py +28 -0
  172. package/tmlpd-pi-extension/python/tmlpd.py +369 -0
  173. package/tmlpd-pi-extension/qna/REDDIT_GAP_ANALYSIS.md +299 -0
  174. package/tmlpd-pi-extension/qna/TMLPD_QNA.md +751 -0
  175. package/tmlpd-pi-extension/skill/SKILL.md +238 -0
  176. package/{src → tmlpd-pi-extension/src}/index.ts +1 -1
  177. package/tmlpd-pi-extension/tsconfig.json +18 -0
  178. package/demo/research-demo.js +0 -266
  179. package/notebooks/quickstart.ipynb +0 -157
  180. package/rust/tmlpd.h +0 -268
  181. package/src/cache/prefixCache.ts +0 -365
  182. package/src/routing/advancedRouter.ts +0 -406
  183. package/src/utils/speculativeDecoding.ts +0 -344
  184. /package/{src → tmlpd-pi-extension/src}/cache/responseCache.ts +0 -0
  185. /package/{src → tmlpd-pi-extension/src}/cost/costTracker.ts +0 -0
  186. /package/{src → tmlpd-pi-extension/src}/memory/episodicMemory.ts +0 -0
  187. /package/{src → tmlpd-pi-extension/src}/orchestration/haloOrchestrator.ts +0 -0
  188. /package/{src → tmlpd-pi-extension/src}/orchestration/mctsWorkflow.ts +0 -0
  189. /package/{src → tmlpd-pi-extension/src}/providers/localProvider.ts +0 -0
  190. /package/{src → tmlpd-pi-extension/src}/providers/registry.ts +0 -0
  191. /package/{src → tmlpd-pi-extension/src}/tools/tmlpdTools.ts +0 -0
  192. /package/{src → tmlpd-pi-extension/src}/utils/batchProcessor.ts +0 -0
  193. /package/{src → tmlpd-pi-extension/src}/utils/compression.ts +0 -0
  194. /package/{src → tmlpd-pi-extension/src}/utils/reliability.ts +0 -0
  195. /package/{src → tmlpd-pi-extension/src}/utils/tokenUtils.ts +0 -0
@@ -0,0 +1,318 @@
1
+ """
2
+ TML Enhanced Agent - Agent with Skill capabilities
3
+
4
+ Integrates SkillManager to provide agents with domain-specific expertise
5
+ following Anthropic's Agent Skills specification.
6
+ """
7
+
8
+ from typing import Dict, List, Optional, Any
9
+ from pathlib import Path
10
+ import json
11
+ from datetime import datetime
12
+
13
+ from .skill_manager import SkillManager, Skill
14
+
15
+
16
class TMLEnhancedAgent:
    """
    Agent enhanced with Skill capabilities.

    Uses progressive disclosure to load relevant skills only when needed,
    following Anthropic's specification for Agent Skills.
    """

    def __init__(
        self,
        agent_id: str,
        provider: str,
        model: str,
        skills_dir: str = "tmlpd-skills",
        assigned_skills: Optional[List[str]] = None
    ):
        """
        Initialize TML Enhanced Agent.

        Args:
            agent_id: Unique identifier for this agent
            provider: LLM provider (e.g., 'anthropic', 'openai')
            model: Model name (e.g., 'claude-sonnet-4', 'gpt-4-turbo')
            skills_dir: Directory containing skill definitions
            assigned_skills: List of skill names assigned to this agent
        """
        self.agent_id = agent_id
        self.provider = provider
        self.model = model
        self.assigned_skills = assigned_skills or []

        # Skill metadata is loaded by SkillManager itself on construction,
        # so assigned skills need no additional initialization here.
        self.skill_manager = SkillManager(skills_dir)

    def execute_task(self, task: Dict[str, Any]) -> Dict[str, Any]:
        """
        Execute a task using relevant skills for context.

        Args:
            task: Task dictionary with 'description' and other metadata

        Returns:
            Result dictionary with task output
        """
        task_description = task.get("description", "")

        # Step 1: Find relevant skills for this task.
        relevant_skills = self._get_relevant_skills(task_description)

        # Step 2: Build enhanced prompt with skill context.
        enhanced_prompt = self._build_prompt_with_skills(
            task_description,
            relevant_skills,
            task
        )

        # Step 3: Execute LLM call with enhanced context.
        result = self._execute_llm_call(enhanced_prompt)

        # Step 4: Store successful pattern in memory (if enabled).
        if result.get("success"):
            self._remember_success_pattern(task, result)

        return result

    def _get_relevant_skills(
        self,
        task_description: str,
        top_k: int = 3
    ) -> List[Skill]:
        """
        Get relevant skills for this task.

        Args:
            task_description: Task to find skills for
            top_k: Maximum number of skills to load

        Returns:
            List of loaded Skill objects
        """
        # If the agent has explicitly assigned skills, only consider those;
        # otherwise fall back to skill discovery via the manager.
        if self.assigned_skills:
            skill_names = self.assigned_skills[:top_k]
        else:
            skill_names = self.skill_manager.get_relevant_skills(
                task_description,
                top_k=top_k
            )

        # Load full skill content (Level 2: SKILL.md).  A failure to load one
        # skill is non-fatal: warn and continue with the remaining skills.
        loaded_skills = []
        for skill_name in skill_names:
            try:
                loaded_skills.append(self.skill_manager.load_skill(skill_name))
            except Exception as e:
                print(f"Warning: Failed to load skill '{skill_name}': {e}")

        return loaded_skills

    def _build_prompt_with_skills(
        self,
        task_description: str,
        skills: List[Skill],
        task: Dict[str, Any]
    ) -> str:
        """
        Build enhanced prompt with skill context.

        Args:
            task_description: Original task description
            skills: List of relevant loaded skills
            task: Original task metadata

        Returns:
            Enhanced prompt with skill context
        """
        parts = []

        # Task context.
        parts.append(f"# Task\n{task_description}\n")

        # Optional task metadata.
        if "context" in task:
            parts.append(f"## Context\n{task['context']}\n")

        if "requirements" in task:
            parts.append(f"## Requirements\n{task['requirements']}\n")

        # Skill contexts: the full content of each loaded skill.
        if skills:
            parts.append("## Relevant Skills\n")
            parts.append("The following skills provide expert guidance for this task:\n")

            for skill in skills:
                parts.append(f"\n### {skill.name}\n")
                parts.append(f"{skill.content}\n")

        # Agent information.
        parts.append("\n## Agent Configuration\n")
        parts.append(f"- Agent: {self.agent_id}\n")
        parts.append(f"- Provider: {self.provider}\n")
        parts.append(f"- Model: {self.model}\n")

        # Execution instruction.
        parts.append("\n## Instructions\n")
        parts.append("Please complete the task following the guidance from the relevant skills above. ")
        parts.append("Use best practices and patterns recommended by the skills.")

        return "\n".join(parts)

    def _execute_llm_call(self, prompt: str) -> Dict[str, Any]:
        """
        Execute LLM call with the enhanced prompt.

        Args:
            prompt: Enhanced prompt with skill context

        Returns:
            Result dictionary with response, tokens, cost, etc.
        """
        # This is a placeholder - in production, you would call
        # the actual LLM API based on self.provider and self.model.

        # Simulated response
        return {
            "success": True,
            "output": f"Simulated response from {self.provider}:{self.model}",
            "tokens_used": 100,
            "cost": 0.01,
            "execution_time": 2.5,
            "timestamp": datetime.now().isoformat()
        }

    def _remember_success_pattern(self, task: Dict, result: Dict):
        """
        Remember a successful execution pattern.

        In production, this would save to:
        - SimpleProjectMemory for project-level learning
        - Or episodic memory for pattern discovery

        Args:
            task: Task that was executed
            result: Successful result
        """
        pattern = {
            "task_description": task.get("description"),
            "agent_id": self.agent_id,
            "model": self.model,
            "tokens": result.get("tokens_used"),
            "cost": result.get("cost"),
            "execution_time": result.get("execution_time"),
            "timestamp": datetime.now().isoformat()
        }

        # Store pattern (implementation depends on memory system).
        # For now, just log it; guard against a missing/None description,
        # which would otherwise make the slice below raise TypeError.
        print(f"Pattern learned: {(task.get('description') or '')[:50]}...")

    def get_assigned_skills(self) -> List[str]:
        """Get list of skills assigned to this agent (a defensive copy)."""
        return self.assigned_skills.copy()

    def add_skill(self, skill_name: str):
        """
        Assign a skill to this agent (idempotent).

        Args:
            skill_name: Name of the skill to assign
        """
        if skill_name not in self.assigned_skills:
            self.assigned_skills.append(skill_name)

    def remove_skill(self, skill_name: str):
        """
        Remove a skill from this agent (no-op if not assigned).

        Args:
            skill_name: Name of the skill to remove
        """
        if skill_name in self.assigned_skills:
            self.assigned_skills.remove(skill_name)

    def list_available_skills(self) -> List[str]:
        """List all available skills from skill manager."""
        return self.skill_manager.list_skills()

    def get_skill_info(self, skill_name: str) -> Optional[Dict]:
        """Get information about a specific skill."""
        return self.skill_manager.get_skill_info(skill_name)

    def to_dict(self) -> Dict[str, Any]:
        """
        Convert agent to dictionary representation.

        Returns:
            Dictionary with agent configuration
        """
        return {
            "agent_id": self.agent_id,
            "provider": self.provider,
            "model": self.model,
            "assigned_skills": self.assigned_skills,
            "available_skills": self.list_available_skills()
        }
271
+
272
+
273
class TMLEnhancedAgentFactory:
    """
    Factory for creating TML Enhanced Agents with proper configuration.
    """

    @staticmethod
    def create_from_config(config: Dict[str, Any]) -> TMLEnhancedAgent:
        """
        Create agent from configuration dictionary.

        Args:
            config: Configuration dictionary with keys:
                - id: Agent ID
                - provider: LLM provider
                - model: Model name
                - skills_dir: Skills directory
                - skills: List of assigned skill names

        Returns:
            Configured TMLEnhancedAgent instance
        """
        skills_dir = config.get("skills_dir", "tmlpd-skills")
        skills = config.get("skills", [])
        return TMLEnhancedAgent(
            agent_id=config["id"],
            provider=config["provider"],
            model=config["model"],
            skills_dir=skills_dir,
            assigned_skills=skills
        )

    @staticmethod
    def create_multiple_from_config(
        agents_config: List[Dict[str, Any]]
    ) -> List[TMLEnhancedAgent]:
        """
        Create multiple agents from configuration list.

        Args:
            agents_config: List of agent configuration dictionaries

        Returns:
            List of configured TMLEnhancedAgent instances
        """
        agents: List[TMLEnhancedAgent] = []
        for cfg in agents_config:
            agents.append(TMLEnhancedAgentFactory.create_from_config(cfg))
        return agents
@@ -0,0 +1,15 @@
1
+ """
2
+ TMLPD Memory Module
3
+
4
+ This module provides lightweight JSON-based memory for pattern learning.
5
+ """
6
+
7
+ from .simple_memory import (
8
+ SimpleProjectMemory,
9
+ remember_success
10
+ )
11
+
12
+ __all__ = [
13
+ "SimpleProjectMemory",
14
+ "remember_success"
15
+ ]
@@ -0,0 +1,353 @@
1
+ """
2
+ Phase 3a: Episodic Memory Store
3
+
4
+ JSON-based episodic memory following Memoria framework (arXiv:2512.12686)
5
+ and A-Mem pattern (arXiv:2502.12110).
6
+
7
+ Episodic memory stores specific task executions with full context.
8
+ """
9
+
10
+ import json
11
+ import uuid
12
+ from datetime import datetime, timedelta
13
+ from pathlib import Path
14
+ from typing import Dict, List, Any, Optional
15
+ from collections import defaultdict
16
+ import re
17
+
18
+
19
class EpisodicMemoryStore:
    """
    Episodic memory: stores specific task executions.

    Based on Memoria framework (arXiv:2512.12686)
    and A-Mem (arXiv:2502.12110)

    Features:
    - Full context storage (one JSON file per episode)
    - Keyword indexing for fast retrieval
    - Importance scoring
    - Time-based decay
    """

    def __init__(self, base_dir: str = ".taskmaster/memory/episodic"):
        """
        Initialize episodic memory store.

        Args:
            base_dir: Directory to store episodic memories
        """
        self.base_dir = Path(base_dir)
        self.base_dir.mkdir(parents=True, exist_ok=True)

        # Index: keyword -> list of episode IDs mentioning that keyword.
        self.index_file = self.base_dir / "index.json"
        self.keyword_index = self._load_index()

    def _episode_files(self):
        """
        Yield episode JSON files, excluding the keyword index file.

        Episodes are written as "episode_<hex>.json"; index.json lives in the
        same directory, so a bare "*.json" glob would wrongly treat the index
        as an episode (and crash on its different schema).
        """
        return self.base_dir.glob("episode_*.json")

    def _load_index(self) -> Dict[str, List[str]]:
        """Load keyword index from disk (empty dict if none exists)."""
        if self.index_file.exists():
            with open(self.index_file, 'r') as f:
                return json.load(f)
        return {}

    def _save_index(self):
        """Persist keyword index to disk."""
        with open(self.index_file, 'w') as f:
            json.dump(self.keyword_index, f, indent=2)

    def store(
        self,
        task: Dict[str, Any],
        result: Dict[str, Any],
        agent_id: str,
        skills: List[str],
        provider: str,
        model: str,
        importance: float = 0.5,
        metadata: Optional[Dict] = None
    ) -> str:
        """
        Store an episodic memory.

        Args:
            task: Task that was executed
            result: Execution result
            agent_id: Agent that executed
            skills: Skills used
            provider: LLM provider used
            model: Model used
            importance: Importance score (0-1)
            metadata: Additional metadata

        Returns:
            Episode ID
        """
        episode_id = f"episode_{uuid.uuid4().hex[:12]}"

        # Extract keywords from task description.
        keywords = self._extract_keywords(task.get("description", ""))

        # Create episode.  Keywords are stored as a sorted list: a raw set is
        # not JSON-serializable, and a stable order keeps the files diffable.
        episode = {
            "id": episode_id,
            "timestamp": datetime.now().isoformat(),
            "task": {
                "description": task.get("description", ""),
                "keywords": sorted(keywords),
                "requirements": task.get("requirements", ""),
                "context": task.get("context", "")
            },
            "execution": {
                "agent_id": agent_id,
                "provider": provider,
                "model": model,
                "skills": skills
            },
            "result": {
                "success": result.get("success", False),
                "tokens_used": result.get("tokens_used", 0),
                "cost": result.get("cost", 0.0),
                "execution_time": result.get("execution_time", 0.0),
                "latency_ms": result.get("latency_ms", 0.0)
            },
            "importance": importance,
            "metadata": metadata or {},
            "access_count": 0,
            "last_accessed": datetime.now().isoformat()
        }

        # Save episode to its own file.
        episode_file = self.base_dir / f"{episode_id}.json"
        with open(episode_file, 'w') as f:
            json.dump(episode, f, indent=2)

        # Update keyword index.
        for keyword in keywords:
            if keyword not in self.keyword_index:
                self.keyword_index[keyword] = []
            self.keyword_index[keyword].append(episode_id)

        self._save_index()

        return episode_id

    def recall(
        self,
        task: Dict[str, Any],
        top_k: int = 5,
        min_importance: float = 0.0,
        max_age_days: int = 30
    ) -> List[Dict[str, Any]]:
        """
        Recall relevant episodes based on task similarity.

        Args:
            task: Current task
            top_k: Maximum number of episodes to return
            min_importance: Minimum importance threshold
            max_age_days: Maximum age of episodes (days)

        Returns:
            List of relevant episodes with similarity scores
        """
        # Extract keywords from the current task.
        task_keywords = self._extract_keywords(task.get("description", ""))

        # Find candidate episodes via keyword overlap with the index.
        episode_scores = defaultdict(float)

        for keyword in task_keywords:
            if keyword in self.keyword_index:
                for episode_id in self.keyword_index[keyword]:
                    episode_scores[episode_id] += 1.0

        # Load and score candidates.
        scored_episodes = []
        cutoff_date = datetime.now() - timedelta(days=max_age_days)

        for episode_id, score in episode_scores.items():
            episode_file = self.base_dir / f"{episode_id}.json"

            # Index may reference episodes deleted by cleanup; skip them.
            if not episode_file.exists():
                continue

            with open(episode_file, 'r') as f:
                episode = json.load(f)

            # Filter by age.
            episode_date = datetime.fromisoformat(episode["timestamp"])
            if episode_date < cutoff_date:
                continue

            # Filter by importance.
            if episode["importance"] < min_importance:
                continue

            # Final score is a weighted blend:
            # keyword similarity (40%)
            keyword_score = score / max(len(task_keywords), 1)

            # recency (20%) - decays linearly over one year
            days_old = (datetime.now() - episode_date).days
            recency_score = max(0, 1 - days_old / 365)

            # access frequency (20%) - capped at 100 accesses
            access_score = min(episode["access_count"] / 100, 1.0)

            # importance (20%)
            importance_score = episode["importance"]

            total_score = (
                keyword_score * 0.4 +
                recency_score * 0.2 +
                access_score * 0.2 +
                importance_score * 0.2
            )

            scored_episodes.append({
                "episode": episode,
                "similarity": keyword_score,
                "total_score": total_score
            })

            # Record the access so frequently recalled episodes rank higher.
            episode["access_count"] += 1
            episode["last_accessed"] = datetime.now().isoformat()

            with open(episode_file, 'w') as f:
                json.dump(episode, f, indent=2)

        # Highest total score first.
        scored_episodes.sort(key=lambda x: x["total_score"], reverse=True)

        return scored_episodes[:top_k]

    def get_episode(self, episode_id: str) -> Optional[Dict[str, Any]]:
        """Get specific episode by ID, or None if it does not exist."""
        episode_file = self.base_dir / f"{episode_id}.json"

        if not episode_file.exists():
            return None

        with open(episode_file, 'r') as f:
            return json.load(f)

    def _extract_keywords(self, text: str) -> set:
        """
        Extract keywords from text.

        Removes common stop words and words of length <= 2.
        """
        stop_words = {
            "the", "a", "an", "and", "or", "but", "in", "on", "at", "to",
            "for", "of", "with", "by", "from", "as", "is", "was", "are",
            "been", "be", "have", "has", "had", "do", "does", "did", "will",
            "would", "should", "could", "may", "might", "can", "this", "that"
        }

        # Lowercase word tokens.
        words = re.findall(r'\w+', text.lower())

        # Filter stop words and very short tokens.
        # (Fixes original syntax error: "stop words" -> "stop_words".)
        keywords = {
            w for w in words
            if w not in stop_words and len(w) > 2
        }

        return keywords

    def get_stats(self) -> Dict[str, Any]:
        """Get episodic memory statistics."""
        total_episodes = 0
        total_importance = 0.0
        total_tokens = 0
        total_cost = 0.0

        # Count while iterating so index.json is never mistaken for an episode.
        for episode_file in self._episode_files():
            with open(episode_file, 'r') as f:
                episode = json.load(f)

            total_episodes += 1
            total_importance += episode.get("importance", 0)
            total_tokens += episode["result"]["tokens_used"]
            total_cost += episode["result"]["cost"]

        return {
            "total_episodes": total_episodes,
            "total_keywords": len(self.keyword_index),
            "avg_importance": total_importance / total_episodes if total_episodes > 0 else 0,
            "total_tokens": total_tokens,
            "total_cost": total_cost,
            "avg_cost_per_episode": total_cost / total_episodes if total_episodes > 0 else 0
        }

    def cleanup_old_episodes(self, days_old: int = 90):
        """
        Remove episodes older than specified days.

        Args:
            days_old: Age threshold in days
        """
        cutoff_date = datetime.now() - timedelta(days=days_old)
        removed_count = 0

        for episode_file in self._episode_files():
            with open(episode_file, 'r') as f:
                episode = json.load(f)

            episode_date = datetime.fromisoformat(episode["timestamp"])

            if episode_date < cutoff_date:
                # Remove from keyword index.
                for keyword, episode_list in self.keyword_index.items():
                    if episode["id"] in episode_list:
                        episode_list.remove(episode["id"])

                # Delete file.
                episode_file.unlink()
                removed_count += 1

        # Drop keywords whose episode lists are now empty, then persist.
        self.keyword_index = {
            kw: ids for kw, ids in self.keyword_index.items() if ids
        }
        self._save_index()

        print(f"Removed {removed_count} episodes older than {days_old} days")

    def export_episodes(self, output_path: str, criteria: Optional[Dict] = None):
        """
        Export episodes to JSON file.

        Args:
            output_path: Path to output file
            criteria: Optional filtering criteria
                (supports "min_importance" and "provider")
        """
        episodes = []

        for episode_file in self._episode_files():
            with open(episode_file, 'r') as f:
                episode = json.load(f)

            # Apply filters if provided.
            if criteria:
                if "min_importance" in criteria:
                    if episode["importance"] < criteria["min_importance"]:
                        continue

                if "provider" in criteria:
                    if episode["execution"]["provider"] != criteria["provider"]:
                        continue

            episodes.append(episode)

        # Write to output, creating parent directories as needed.
        output_path = Path(output_path)
        output_path.parent.mkdir(parents=True, exist_ok=True)

        with open(output_path, 'w') as f:
            json.dump(episodes, f, indent=2)

        print(f"Exported {len(episodes)} episodes to {output_path}")