ofiere-openclaw-plugin 4.27.1 → 4.27.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/package.json +1 -1
  2. package/src/prompt.ts +37 -19
  3. package/src/tools.ts +321 -190
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "ofiere-openclaw-plugin",
3
- "version": "4.27.1",
3
+ "version": "4.27.3",
4
4
  "type": "module",
5
5
  "description": "OpenClaw plugin for Ofiere PM - 14 meta-tools covering tasks, agents, projects, scheduling, knowledge, workflows, notifications, memory, prompts, constellation, space file management, execution plan builder, SOP management, and agent brain (memory + self-improvement)",
6
6
  "keywords": ["openclaw", "ofiere", "project-management", "agents", "plugin"],
package/src/prompt.ts CHANGED
@@ -151,17 +151,21 @@ const TOOL_DOCS: Record<string, string> = {
151
151
  - SOPs appear in the SOP Manager page immediately via real-time sync
152
152
  - ADAPTIVE PROTOCOL: Do NOT always load SOPs. See the SOP PROTOCOL section in Rules for when to load vs skip`,
153
153
 
154
- OFIERE_BRAIN_OPS: `- **OFIERE_BRAIN_OPS** — Agent memory and self-improvement (action: "save_memory", "recall", "delete_memory", "log_learning", "list_learnings", "promote_learning", "resolve_learning", "get_brain_status", "configure_brain")
155
- - Memory Tiers: L1_focus (working memory, 24h TTL), L2_journal (medium-term events), L3_core (long-term wisdom)
156
- - save_memory: Store a memory. Required: content, tier. Optional: agent_id, source, context_key, importance (1-10)
157
- - recall: Search memories by keyword. Required: query. Optional: agent_id, tier, limit
154
+ OFIERE_BRAIN_OPS: `- **OFIERE_BRAIN_OPS** — Agent memory, knowledge graph, and self-improvement (TMT/MAGMA architecture)
155
+ - Memory Tiers: L1_focus (24h), L2_episode (days-weeks), L3_pattern (weeks-months), L4_rule (permanent guardrails), L5_persona (permanent identity)
156
+ - save_memory: Store a memory. Required: content, tier. Optional: agent_id, source (auto|manual|reflection|tool), context_key, importance (1-10)
157
+ - recall: Full-text search memories. Required: query. Optional: agent_id, tier, limit
158
158
  - delete_memory: Remove a memory. Required: memory_id
159
- - log_learning: Record a self-improvement entry. Required: title, category (correction|error|insight|best_practice|feature_request). Optional: agent_id, detail, severity, source_conversation_id, source_task_id
160
- - list_learnings: View learnings. Optional: agent_id, category, status, limit
161
- - promote_learning: Elevate to production config. Required: learning_id, promoted_to (soul|agents|tools|sop|prompt_chunk)
162
- - resolve_learning: Mark resolved/wont_fix. Required: learning_id, status. Optional: resolution
163
- - get_brain_status: Memory/learning stats + config. Optional: agent_id
164
- - configure_brain: Update brain settings. Required: agent_id. Optional: l1_ttl_hours, l2_max_entries, auto_learn, auto_memory
159
+ - promote_memory: Move memory up a tier. Required: memory_id, new_tier
160
+ - log_learning: Record as L4_rule guardrail. Required: title, category. Optional: agent_id, detail, severity
161
+ - list_learnings: View active L4 guardrails. Optional: agent_id, category, limit
162
+ - resolve_learning: Supersede a guardrail. Required: memory_id. Optional: resolution
163
+ - save_entity: Add to Knowledge Graph. Required: label, node_type (entity|concept|event|action). Optional: agent_id, properties
164
+ - link_entities: Connect graph nodes. Required: source_id, target_id, relation_type. Optional: graph_type, weight, evidence
165
+ - query_graph: Search Knowledge Graph. Required: query. Optional: agent_id, node_type, limit
166
+ - start_trajectory: Begin recording execution trace. Optional: agent_id, conversation_id
167
+ - end_trajectory: Close trajectory. Required: trajectory_id, outcome (success|failure|partial). Optional: satisfaction_signal
168
+ - get_brain_status: Full brain dashboard — memory tiers, graph stats, trajectory stats. Optional: agent_id
165
169
  - This is your SUBCONSCIOUS — use it instinctively, not deliberately`,
166
170
  };
167
171
 
@@ -287,31 +291,45 @@ Action:
287
291
  - When you ask about SOPs (🟡): keep the question brief and direct
288
292
  - When you skip SOPs (🟢): do NOT mention SOPs at all — just execute silently
289
293
 
290
- ## Agent Brain Protocol (Subconscious)
294
+ ## Agent Brain Protocol (TMT Subconscious)
291
295
 
292
- Your brain persists across conversations. Active memories and unresolved learnings are injected at startup.
296
+ Your brain persists across conversations via the TMT (Temporal Memory Tree) hierarchy. At startup, your L5 persona, L4 guardrails, and L1 focus memories are injected automatically.
293
297
 
294
298
  ### Auto-Memory (OFIERE_BRAIN_OPS save_memory)
295
299
  After interactions where ANY of these occur, call save_memory:
296
- - User shares important context about themselves, preferences, or workflow → L2_journal, source: "conversation"
297
- - You complete a task with noteworthy results or discoveries → L2_journal, source: "task_execution"
298
- - You observe important system behavior worth tracking → L2_journal, source: "observation"
299
- - Something needs to stay in your working context RIGHT NOW → L1_focus (auto-expires)
300
+ - User shares important context → L2_episode, source: "auto"
301
+ - You complete a task with noteworthy results → L2_episode, source: "tool"
302
+ - You observe important system behavior → L2_episode, source: "auto"
303
+ - Something needs immediate working context → L1_focus (auto-expires 24h)
304
+ - A pattern emerges across multiple episodes → L3_pattern, source: "reflection"
300
305
 
301
306
  ### Auto-Learn (OFIERE_BRAIN_OPS log_learning)
302
- After interactions where ANY of these occur, call log_learning:
307
+ After interactions where ANY of these occur, call log_learning (stored as L4_rule):
303
308
  - User corrects you → category: "correction", severity: "medium"
304
309
  - A tool call fails or returns an error → category: "error"
305
310
  - User asks for something you can't do → category: "feature_request"
306
311
  - Your knowledge or assumption was wrong → category: "insight"
307
- - You discover a better approach than what you did → category: "best_practice"
312
+ - You discover a better approach → category: "best_practice"
313
+
314
+ ### Knowledge Graph (OFIERE_BRAIN_OPS save_entity/link_entities)
315
+ When you encounter key entities, concepts, or causal relationships:
316
+ - Create entities for users, projects, tools, or recurring concepts
317
+ - Link entities with typed edges (causes, depends_on, related_to)
318
+ - Query the graph when reasoning about relationships or dependencies
319
+
320
+ ### Trajectory Recording (OFIERE_BRAIN_OPS start_trajectory/end_trajectory)
321
+ For complex multi-step tasks:
322
+ - Call start_trajectory before executing a tool chain
323
+ - Call end_trajectory with outcome when the chain completes
324
+ - This feeds the ReasoningBank for autonomous self-improvement
308
325
 
309
326
  ### Rules
310
327
  - These calls happen AFTER your response to the user — never delay your reply to save a memory
311
328
  - Do NOT announce "saving memory" or "logging learning" to the user — this is subconscious
312
329
  - Do NOT save trivial interactions (greetings, confirmations, simple CRUD) — only signal worth remembering
313
330
  - Keep memory content concise: 1-3 sentences max. No system junk, keep it human
314
- - When you see your unresolved learnings at startup, actively avoid repeating those mistakes
331
+ - When you see your L4 guardrails at startup, actively avoid violating them
332
+ - Use promote_memory when a pattern solidifies: L2→L3→L4→L5
315
333
  </ofiere-pm>`;
316
334
  }
317
335
 
package/src/tools.ts CHANGED
@@ -5099,56 +5099,67 @@ function registerBrainOps(
5099
5099
  name: "OFIERE_BRAIN_OPS",
5100
5100
  label: "Ofiere Brain Operations",
5101
5101
  description:
5102
- `Agent memory and self-improvement system. Persistent brain that learns, remembers, and never repeats mistakes.\n\n` +
5102
+ `Agent memory, knowledge graph, and self-improvement system (TMT/MAGMA architecture).\n\n` +
5103
5103
  `Memory Actions:\n` +
5104
- `- "save_memory": Store a memory. Required: content, tier (L1_focus|L2_journal|L3_core). Optional: agent_id, source, context_key, importance (1-10)\n` +
5105
- `- "recall": Search memories. Required: query. Optional: agent_id, tier, limit\n` +
5106
- `- "delete_memory": Remove a memory. Required: memory_id\n\n` +
5107
- `Learning Actions:\n` +
5108
- `- "log_learning": Record a self-improvement entry. Required: title, category (correction|error|insight|best_practice|feature_request). Optional: agent_id, detail, severity (low|medium|high|critical), source_conversation_id, source_task_id\n` +
5109
- `- "list_learnings": View learnings. Optional: agent_id, category, status, limit\n` +
5110
- `- "promote_learning": Promote to production. Required: learning_id, promoted_to (soul|agents|tools|sop|prompt_chunk)\n` +
5111
- `- "resolve_learning": Mark resolved/wont_fix. Required: learning_id, status (resolved|wont_fix). Optional: resolution\n\n` +
5112
- `Status Actions:\n` +
5113
- `- "get_brain_status": Memory counts, learning stats, config. Optional: agent_id\n` +
5114
- `- "configure_brain": Update brain settings. Required: agent_id. Optional: l1_ttl_hours, l2_max_entries, auto_learn, auto_memory\n\n` +
5115
- `Tier Guide: L1_focus = working memory (24h TTL), L2_journal = medium-term events, L3_core = long-term wisdom.\n` +
5116
- `This tool is your subconscious — call it after corrections, errors, discoveries, and insights.`,
5104
+ `- "save_memory": Store a memory. Required: content, tier. Optional: agent_id, source, context_key, importance (1-10)\n` +
5105
+ `- "recall": Full-text search memories. Required: query. Optional: agent_id, tier, limit\n` +
5106
+ `- "delete_memory": Remove a memory. Required: memory_id\n` +
5107
+ `- "promote_memory": Move memory up a tier. Required: memory_id, new_tier\n\n` +
5108
+ `Learning Actions (stored as L4_rule memories):\n` +
5109
+ `- "log_learning": Record a self-improvement entry as L4 guardrail. Required: title, category (correction|error|insight|best_practice|feature_request). Optional: agent_id, detail, severity\n` +
5110
+ `- "list_learnings": View L4 guardrails. Optional: agent_id, category, limit\n` +
5111
+ `- "resolve_learning": Supersede a guardrail. Required: memory_id. Optional: resolution\n\n` +
5112
+ `Knowledge Graph Actions:\n` +
5113
+ `- "save_entity": Add entity/concept/event to graph. Required: label, node_type (entity|concept|event|action). Optional: agent_id, properties\n` +
5114
+ `- "link_entities": Connect two nodes. Required: source_id, target_id, relation_type. Optional: graph_type (semantic|temporal|causal|entity), weight, evidence\n` +
5115
+ `- "query_graph": Find nodes + edges. Required: query. Optional: agent_id, node_type, limit\n\n` +
5116
+ `Trajectory Actions:\n` +
5117
+ `- "start_trajectory": Begin recording execution trace. Optional: agent_id, conversation_id\n` +
5118
+ `- "end_trajectory": Close trajectory. Required: trajectory_id, outcome (success|failure|partial). Optional: satisfaction_signal\n\n` +
5119
+ `Status:\n` +
5120
+ `- "get_brain_status": Full brain dashboard — memory tiers, graph stats, trajectory stats. Optional: agent_id\n\n` +
5121
+ `Tiers: L1_focus (transient, 24h), L2_episode (days-weeks), L3_pattern (weeks-months), L4_rule (permanent guardrails), L5_persona (permanent identity).`,
5117
5122
  parameters: {
5118
5123
  type: "object",
5119
5124
  required: ["action"],
5120
5125
  properties: {
5121
5126
  action: {
5122
5127
  type: "string",
5123
- description: "The operation to perform: save_memory, recall, delete_memory, log_learning, list_learnings, promote_learning, resolve_learning, get_brain_status, configure_brain",
5128
+ description: "save_memory, recall, delete_memory, promote_memory, log_learning, list_learnings, resolve_learning, save_entity, link_entities, query_graph, start_trajectory, end_trajectory, get_brain_status",
5124
5129
  },
5125
5130
  // Memory params
5126
- content: { type: "string", description: "Memory content or search query" },
5127
- tier: { type: "string", enum: ["L1_focus", "L2_journal", "L3_core"], description: "Memory tier" },
5128
- source: { type: "string", description: "Where memory came from: conversation, task_execution, observation, manual" },
5131
+ content: { type: "string", description: "Memory content" },
5132
+ tier: { type: "string", enum: ["L1_focus", "L2_episode", "L3_pattern", "L4_rule", "L5_persona"], description: "Memory tier" },
5133
+ new_tier: { type: "string", enum: ["L1_focus", "L2_episode", "L3_pattern", "L4_rule", "L5_persona"], description: "Target tier for promote" },
5134
+ source: { type: "string", enum: ["auto", "manual", "reflection", "tool"], description: "Memory source" },
5129
5135
  context_key: { type: "string", description: "Grouping key (task_id, conversation_id, topic)" },
5130
5136
  importance: { type: "number", description: "1-10 importance scale" },
5131
- memory_id: { type: "string", description: "Memory ID for delete" },
5137
+ memory_id: { type: "string", description: "Memory ID for delete/promote/resolve" },
5132
5138
  // Learning params
5133
5139
  title: { type: "string", description: "Learning title/summary" },
5134
5140
  category: { type: "string", enum: ["correction", "error", "insight", "best_practice", "feature_request"] },
5135
5141
  severity: { type: "string", enum: ["low", "medium", "high", "critical"] },
5136
5142
  detail: { type: "string", description: "Full context of the learning" },
5137
5143
  resolution: { type: "string", description: "How it was resolved" },
5138
- learning_id: { type: "string", description: "Learning ID for promote/resolve" },
5139
- status: { type: "string", enum: ["pending", "resolved", "wont_fix", "promoted"] },
5140
- promoted_to: { type: "string", enum: ["soul", "agents", "tools", "sop", "prompt_chunk"] },
5141
- source_conversation_id: { type: "string" },
5142
- source_task_id: { type: "string" },
5144
+ // Knowledge Graph params
5145
+ label: { type: "string", description: "Node label for save_entity" },
5146
+ node_type: { type: "string", enum: ["entity", "concept", "event", "action"], description: "Graph node type" },
5147
+ properties: { type: "object", description: "Arbitrary properties for graph node" },
5148
+ source_id: { type: "string", description: "Source node ID for link_entities" },
5149
+ target_id: { type: "string", description: "Target node ID for link_entities" },
5150
+ relation_type: { type: "string", description: "Edge relation type (e.g. 'causes', 'related_to', 'depends_on')" },
5151
+ graph_type: { type: "string", enum: ["semantic", "temporal", "causal", "entity"], description: "Graph type for edge" },
5152
+ weight: { type: "number", description: "Edge weight (0-1)" },
5153
+ evidence: { type: "string", description: "Evidence text for edge" },
5154
+ // Trajectory params
5155
+ trajectory_id: { type: "string", description: "Trajectory ID for end_trajectory" },
5156
+ outcome: { type: "string", enum: ["success", "failure", "partial"], description: "Trajectory outcome" },
5157
+ satisfaction_signal: { type: "string", description: "User satisfaction signal" },
5158
+ conversation_id: { type: "string", description: "Conversation ID for trajectory" },
5143
5159
  // Shared
5144
5160
  agent_id: { type: "string", description: "Agent name or ID" },
5145
- query: { type: "string", description: "Search query for recall/list" },
5161
+ query: { type: "string", description: "Search query for recall/query_graph" },
5146
5162
  limit: { type: "number", description: "Max results (default 20)" },
5147
- // Config params
5148
- l1_ttl_hours: { type: "number", description: "L1 memory expiry in hours (default 24)" },
5149
- l2_max_entries: { type: "number", description: "Max L2 entries before compaction (default 500)" },
5150
- auto_learn: { type: "boolean", description: "Enable auto-learning from corrections" },
5151
- auto_memory: { type: "boolean", description: "Enable auto-memory from conversations" },
5152
5163
  },
5153
5164
  },
5154
5165
  async execute(_id: string, params: Record<string, unknown>) {
@@ -5167,19 +5178,9 @@ function registerBrainOps(
5167
5178
  if (!agentId) return err("Could not resolve agent_id");
5168
5179
 
5169
5180
  const tier = params.tier as string;
5170
-
5171
- // Calculate L1 expiry
5172
5181
  let expiresAt: string | null = null;
5173
5182
  if (tier === "L1_focus") {
5174
- // Check config for custom TTL
5175
- const { data: config } = await supabase
5176
- .from("agent_memory_config")
5177
- .select("l1_ttl_hours")
5178
- .eq("user_id", userId)
5179
- .eq("agent_id", agentId)
5180
- .single();
5181
- const ttlHours = config?.l1_ttl_hours || 24;
5182
- expiresAt = new Date(Date.now() + ttlHours * 60 * 60 * 1000).toISOString();
5183
+ expiresAt = new Date(Date.now() + 24 * 60 * 60 * 1000).toISOString();
5183
5184
  }
5184
5185
 
5185
5186
  const { data, error } = await supabase.from("agent_memories").insert({
@@ -5187,27 +5188,31 @@ function registerBrainOps(
5187
5188
  agent_id: agentId,
5188
5189
  tier,
5189
5190
  content: params.content,
5190
- source: (params.source as string) || "conversation",
5191
+ source: (params.source as string) || "auto",
5191
5192
  context_key: (params.context_key as string) || null,
5192
5193
  importance: (params.importance as number) || 5,
5194
+ decay_score: 1.0,
5193
5195
  expires_at: expiresAt,
5194
- }).select("id, tier, importance, created_at").single();
5196
+ }).select("id, tier, importance, decay_score, created_at").single();
5195
5197
 
5196
5198
  if (error) return err(error.message);
5197
5199
  return ok({ message: `Memory saved to ${tier}`, memory: data });
5198
5200
  }
5199
5201
 
5200
- // ── Memory: Recall ──
5202
+ // ── Memory: Recall (Full-text search) ──
5201
5203
  case "recall": {
5202
5204
  if (!params.query) return err("Missing required: query");
5203
5205
  const agentId = params.agent_id ? await resolveAgent(params.agent_id as string) : null;
5204
- const searchTerm = `%${params.query}%`;
5205
5206
  const limit = (params.limit as number) || 20;
5207
+ const queryText = params.query as string;
5206
5208
 
5209
+ // Use full-text search via tsvector
5207
5210
  let q = supabase.from("agent_memories")
5208
- .select("id, agent_id, tier, content, source, context_key, importance, created_at")
5211
+ .select("id, agent_id, tier, content, source, context_key, importance, decay_score, access_count, created_at, last_accessed_at")
5209
5212
  .eq("user_id", userId)
5210
- .ilike("content", searchTerm)
5213
+ .textSearch("search_vector", queryText, { type: "plain" })
5214
+ .gt("decay_score", 0.05)
5215
+ .or("expires_at.is.null,expires_at.gt." + new Date().toISOString())
5211
5216
  .order("importance", { ascending: false })
5212
5217
  .order("created_at", { ascending: false })
5213
5218
  .limit(limit);
@@ -5215,12 +5220,31 @@ function registerBrainOps(
5215
5220
  if (agentId) q = q.eq("agent_id", agentId);
5216
5221
  if (params.tier) q = q.eq("tier", params.tier as string);
5217
5222
 
5218
- // Exclude expired L1 memories
5219
- q = q.or("expires_at.is.null,expires_at.gt." + new Date().toISOString());
5220
-
5221
5223
  const { data, error } = await q;
5222
- if (error) return err(error.message);
5223
- return ok({ memories: data || [], count: (data || []).length, query: params.query });
5224
+ if (error) {
5225
+ // Fallback to ILIKE if tsvector search fails
5226
+ let fallback = supabase.from("agent_memories")
5227
+ .select("id, agent_id, tier, content, source, context_key, importance, decay_score, created_at")
5228
+ .eq("user_id", userId)
5229
+ .ilike("content", `%${queryText}%`)
5230
+ .gt("decay_score", 0.05)
5231
+ .or("expires_at.is.null,expires_at.gt." + new Date().toISOString())
5232
+ .order("importance", { ascending: false })
5233
+ .limit(limit);
5234
+ if (agentId) fallback = fallback.eq("agent_id", agentId);
5235
+ if (params.tier) fallback = fallback.eq("tier", params.tier as string);
5236
+ const { data: fbData, error: fbErr } = await fallback;
5237
+ if (fbErr) return err(fbErr.message);
5238
+ return ok({ memories: fbData || [], count: (fbData || []).length, query: queryText, search_mode: "fallback" });
5239
+ }
5240
+
5241
+ // Touch access_count + last_accessed_at for FadeMem
5242
+ if (data && data.length > 0) {
5243
+ const ids = data.map((m: any) => m.id);
5244
+ supabase.rpc("touch_memories", { memory_ids: ids }).then(() => {}).catch(() => {});
5245
+ }
5246
+
5247
+ return ok({ memories: data || [], count: (data || []).length, query: queryText, search_mode: "fulltext" });
5224
5248
  }
5225
5249
 
5226
5250
  // ── Memory: Delete ──
@@ -5234,7 +5258,22 @@ function registerBrainOps(
5234
5258
  return ok({ message: "Memory deleted", ok: true });
5235
5259
  }
5236
5260
 
5237
- // ── Learning: Log ──
5261
+ // ── Memory: Promote ──
5262
+ case "promote_memory": {
5263
+ if (!params.memory_id) return err("Missing required: memory_id");
5264
+ if (!params.new_tier) return err("Missing required: new_tier");
5265
+ const { data, error } = await supabase.from("agent_memories")
5266
+ .update({ tier: params.new_tier as string })
5267
+ .eq("id", params.memory_id as string)
5268
+ .eq("user_id", userId)
5269
+ .select("id, tier, content, importance")
5270
+ .single();
5271
+ if (error) return err(error.message);
5272
+ if (!data) return err("Memory not found");
5273
+ return ok({ message: `Memory promoted to ${params.new_tier}`, memory: data });
5274
+ }
5275
+
5276
+ // ── Learning: Log (→ L4_rule memory) ──
5238
5277
  case "log_learning": {
5239
5278
  {
5240
5279
  const missing: string[] = [];
@@ -5245,158 +5284,253 @@ function registerBrainOps(
5245
5284
  const agentId = await resolveAgent(params.agent_id as string);
5246
5285
  if (!agentId) return err("Could not resolve agent_id");
5247
5286
 
5248
- const { data, error } = await supabase.from("agent_learnings").insert({
5287
+ const content = `[${params.category}] ${params.title}${params.detail ? `: ${params.detail}` : ""}`;
5288
+ const severityToImportance: Record<string, number> = { low: 5, medium: 7, high: 8, critical: 10 };
5289
+ const imp = severityToImportance[(params.severity as string) || "low"] || 5;
5290
+
5291
+ const { data, error } = await supabase.from("agent_memories").insert({
5249
5292
  user_id: userId,
5250
5293
  agent_id: agentId,
5251
- category: params.category,
5252
- severity: (params.severity as string) || "low",
5253
- title: params.title,
5254
- detail: (params.detail as string) || null,
5255
- source_conversation_id: (params.source_conversation_id as string) || null,
5256
- source_task_id: (params.source_task_id as string) || null,
5257
- }).select("id, category, severity, title, status, created_at").single();
5294
+ tier: "L4_rule",
5295
+ content,
5296
+ source: "reflection",
5297
+ context_key: (params.category as string) || null,
5298
+ importance: imp,
5299
+ decay_score: 1.0,
5300
+ }).select("id, tier, content, importance, created_at").single();
5258
5301
 
5259
5302
  if (error) return err(error.message);
5260
- return ok({ message: `Learning logged: "${params.title}"`, learning: data });
5303
+ return ok({ message: `Learning logged as L4 guardrail: "${params.title}"`, memory: data });
5261
5304
  }
5262
5305
 
5263
- // ── Learning: List ──
5306
+ // ── Learning: List (→ L4_rule query) ──
5264
5307
  case "list_learnings": {
5265
5308
  const agentId = params.agent_id ? await resolveAgent(params.agent_id as string) : null;
5266
5309
  const limit = (params.limit as number) || 20;
5267
5310
 
5268
- let q = supabase.from("agent_learnings")
5269
- .select("id, agent_id, category, severity, title, detail, resolution, status, promoted_to, created_at, resolved_at")
5311
+ let q = supabase.from("agent_memories")
5312
+ .select("id, agent_id, tier, content, context_key, importance, decay_score, created_at, superseded_by")
5270
5313
  .eq("user_id", userId)
5314
+ .eq("tier", "L4_rule")
5315
+ .is("superseded_by", null)
5316
+ .order("importance", { ascending: false })
5271
5317
  .order("created_at", { ascending: false })
5272
5318
  .limit(limit);
5273
5319
 
5274
5320
  if (agentId) q = q.eq("agent_id", agentId);
5275
- if (params.category) q = q.eq("category", params.category as string);
5276
- if (params.status) q = q.eq("status", params.status as string);
5321
+ if (params.category) q = q.eq("context_key", params.category as string);
5277
5322
 
5278
5323
  const { data, error } = await q;
5279
5324
  if (error) return err(error.message);
5280
5325
  return ok({ learnings: data || [], count: (data || []).length });
5281
5326
  }
5282
5327
 
5283
- // ── Learning: Promote ──
5284
- case "promote_learning": {
5285
- if (!params.learning_id || !params.promoted_to) return err("Missing required: learning_id, promoted_to");
5286
- const { data, error } = await supabase.from("agent_learnings")
5287
- .update({
5288
- status: "promoted",
5289
- promoted_to: params.promoted_to as string,
5290
- resolved_at: new Date().toISOString(),
5291
- })
5292
- .eq("id", params.learning_id as string)
5328
+ // ── Learning: Resolve (supersede) ──
5329
+ case "resolve_learning": {
5330
+ if (!params.memory_id) return err("Missing required: memory_id");
5331
+
5332
+ if (params.resolution) {
5333
+ // Create a superseding memory with the resolution
5334
+ const agentId = await resolveAgent(params.agent_id as string);
5335
+ const { data: newMem } = await supabase.from("agent_memories").insert({
5336
+ user_id: userId,
5337
+ agent_id: agentId || "system",
5338
+ tier: "L4_rule",
5339
+ content: `[resolved] ${params.resolution}`,
5340
+ source: "reflection",
5341
+ importance: 5,
5342
+ decay_score: 1.0,
5343
+ }).select("id").single();
5344
+
5345
+ if (newMem) {
5346
+ await supabase.from("agent_memories")
5347
+ .update({ superseded_by: newMem.id })
5348
+ .eq("id", params.memory_id as string)
5349
+ .eq("user_id", userId);
5350
+ }
5351
+ return ok({ message: "Learning resolved with new guardrail", superseded_by: newMem?.id });
5352
+ }
5353
+
5354
+ // Just delete if no resolution
5355
+ const { error } = await supabase.from("agent_memories")
5356
+ .delete()
5357
+ .eq("id", params.memory_id as string)
5358
+ .eq("user_id", userId);
5359
+ if (error) return err(error.message);
5360
+ return ok({ message: "Learning removed", ok: true });
5361
+ }
5362
+
5363
+ // ── Knowledge Graph: Save Entity ──
5364
+ case "save_entity": {
5365
+ if (!params.label) return err("Missing required: label");
5366
+ if (!params.node_type) return err("Missing required: node_type");
5367
+ const agentId = await resolveAgent(params.agent_id as string);
5368
+ if (!agentId) return err("Could not resolve agent_id");
5369
+
5370
+ const { data, error } = await supabase.from("knowledge_graph_nodes").insert({
5371
+ user_id: userId,
5372
+ agent_id: agentId,
5373
+ label: params.label as string,
5374
+ node_type: params.node_type as string,
5375
+ properties: (params.properties as Record<string, unknown>) || {},
5376
+ }).select("id, label, node_type, created_at").single();
5377
+
5378
+ if (error) return err(error.message);
5379
+ return ok({ message: `Entity "${params.label}" saved`, node: data });
5380
+ }
5381
+
5382
+ // ── Knowledge Graph: Link Entities ──
5383
+ case "link_entities": {
5384
+ const missing: string[] = [];
5385
+ if (!params.source_id) missing.push("source_id");
5386
+ if (!params.target_id) missing.push("target_id");
5387
+ if (!params.relation_type) missing.push("relation_type");
5388
+ if (missing.length > 0) return err(`Missing required: ${missing.join(", ")}`);
5389
+
5390
+ const { data, error } = await supabase.from("knowledge_graph_edges").insert({
5391
+ user_id: userId,
5392
+ source_id: params.source_id as string,
5393
+ target_id: params.target_id as string,
5394
+ relation_type: params.relation_type as string,
5395
+ graph_type: (params.graph_type as string) || "semantic",
5396
+ weight: (params.weight as number) || 1.0,
5397
+ evidence: (params.evidence as string) || null,
5398
+ }).select("id, relation_type, graph_type, weight, created_at").single();
5399
+
5400
+ if (error) return err(error.message);
5401
+ return ok({ message: `Edge "${params.relation_type}" created`, edge: data });
5402
+ }
5403
+
5404
+ // ── Knowledge Graph: Query ──
5405
+ case "query_graph": {
5406
+ if (!params.query) return err("Missing required: query");
5407
+ const agentId = params.agent_id ? await resolveAgent(params.agent_id as string) : null;
5408
+ const limit = (params.limit as number) || 20;
5409
+
5410
+ let q = supabase.from("knowledge_graph_nodes")
5411
+ .select("id, agent_id, label, node_type, properties, created_at")
5293
5412
  .eq("user_id", userId)
5294
- .select("id, title, status, promoted_to")
5295
- .single();
5413
+ .ilike("label", `%${params.query}%`)
5414
+ .limit(limit);
5415
+
5416
+ if (agentId) q = q.eq("agent_id", agentId);
5417
+ if (params.node_type) q = q.eq("node_type", params.node_type as string);
5418
+
5419
+ const { data: nodes, error } = await q;
5296
5420
  if (error) return err(error.message);
5297
- return ok({ message: `Learning promoted to ${params.promoted_to}`, learning: data });
5421
+
5422
+ // Fetch edges for found nodes
5423
+ let edges: any[] = [];
5424
+ if (nodes && nodes.length > 0) {
5425
+ const nodeIds = nodes.map((n: any) => n.id);
5426
+ const { data: edgeData } = await supabase.from("knowledge_graph_edges")
5427
+ .select("id, source_id, target_id, relation_type, graph_type, weight, evidence")
5428
+ .eq("user_id", userId)
5429
+ .or(`source_id.in.(${nodeIds.join(",")}),target_id.in.(${nodeIds.join(",")})`)
5430
+ .limit(50);
5431
+ edges = edgeData || [];
5432
+ }
5433
+
5434
+ return ok({ nodes: nodes || [], edges, node_count: (nodes || []).length, edge_count: edges.length });
5298
5435
  }
5299
5436
 
5300
- // ── Learning: Resolve ──
5301
- case "resolve_learning": {
5302
- if (!params.learning_id) return err("Missing required: learning_id");
5303
- const newStatus = (params.status as string) || "resolved";
5304
- if (!["resolved", "wont_fix"].includes(newStatus)) return err("status must be 'resolved' or 'wont_fix'");
5437
+ // ── Trajectory: Start ──
5438
+ case "start_trajectory": {
5439
+ const agentId = await resolveAgent(params.agent_id as string);
5440
+ if (!agentId) return err("Could not resolve agent_id");
5441
+
5442
+ const { data, error } = await supabase.from("execution_trajectories").insert({
5443
+ user_id: userId,
5444
+ agent_id: agentId,
5445
+ conversation_id: (params.conversation_id as string) || null,
5446
+ trajectory: [],
5447
+ outcome: "pending",
5448
+ analysis_status: "pending",
5449
+ }).select("id, agent_id, created_at").single();
5450
+
5451
+ if (error) return err(error.message);
5452
+ return ok({ message: "Trajectory recording started", trajectory: data });
5453
+ }
5454
+
5455
+ // ── Trajectory: End ──
5456
+ case "end_trajectory": {
5457
+ if (!params.trajectory_id) return err("Missing required: trajectory_id");
5458
+ if (!params.outcome) return err("Missing required: outcome");
5305
5459
 
5306
5460
  const updates: Record<string, unknown> = {
5307
- status: newStatus,
5308
- resolved_at: new Date().toISOString(),
5461
+ outcome: params.outcome as string,
5462
+ completed_at: new Date().toISOString(),
5309
5463
  };
5310
- if (params.resolution) updates.resolution = params.resolution;
5464
+ if (params.satisfaction_signal) updates.user_satisfaction_signal = params.satisfaction_signal;
5311
5465
 
5312
- const { data, error } = await supabase.from("agent_learnings")
5466
+ const { data, error } = await supabase.from("execution_trajectories")
5313
5467
  .update(updates)
5314
- .eq("id", params.learning_id as string)
5468
+ .eq("id", params.trajectory_id as string)
5315
5469
  .eq("user_id", userId)
5316
- .select("id, title, status, resolution")
5470
+ .select("id, outcome, completed_at")
5317
5471
  .single();
5318
5472
  if (error) return err(error.message);
5319
- return ok({ message: `Learning marked as ${newStatus}`, learning: data });
5473
+ return ok({ message: `Trajectory closed: ${params.outcome}`, trajectory: data });
5320
5474
  }
5321
5475
 
5322
5476
  // ── Brain Status ──
5323
5477
  case "get_brain_status": {
5324
5478
  const agentId = params.agent_id ? await resolveAgent(params.agent_id as string) : null;
5325
-
5326
- // Memory counts by tier
5327
- let memQ = supabase.from("agent_memories")
5328
- .select("tier")
5329
- .eq("user_id", userId)
5330
- .or("expires_at.is.null,expires_at.gt." + new Date().toISOString());
5331
- if (agentId) memQ = memQ.eq("agent_id", agentId);
5332
- const { data: memRows } = await memQ;
5333
-
5334
- const tierCounts: Record<string, number> = { L1_focus: 0, L2_journal: 0, L3_core: 0 };
5335
- for (const row of memRows || []) {
5479
+ const now = new Date().toISOString();
5480
+
5481
+ // Parallel queries for speed
5482
+ const memFilter = agentId
5483
+ ? supabase.from("agent_memories").select("tier").eq("user_id", userId).eq("agent_id", agentId).or("expires_at.is.null,expires_at.gt." + now)
5484
+ : supabase.from("agent_memories").select("tier").eq("user_id", userId).or("expires_at.is.null,expires_at.gt." + now);
5485
+
5486
+ const graphFilter = agentId
5487
+ ? supabase.from("knowledge_graph_nodes").select("id").eq("user_id", userId).eq("agent_id", agentId)
5488
+ : supabase.from("knowledge_graph_nodes").select("id").eq("user_id", userId);
5489
+
5490
+ const trajFilter = agentId
5491
+ ? supabase.from("execution_trajectories").select("outcome").eq("user_id", userId).eq("agent_id", agentId)
5492
+ : supabase.from("execution_trajectories").select("outcome").eq("user_id", userId);
5493
+
5494
+ const consolFilter = agentId
5495
+ ? supabase.from("memory_consolidation_log").select("operation").eq("user_id", userId).eq("agent_id", agentId).limit(50)
5496
+ : supabase.from("memory_consolidation_log").select("operation").eq("user_id", userId).limit(50);
5497
+
5498
+ const [memRes, graphRes, edgeRes, trajRes, consolRes] = await Promise.all([
5499
+ memFilter,
5500
+ graphFilter,
5501
+ supabase.from("knowledge_graph_edges").select("id").eq("user_id", userId),
5502
+ trajFilter,
5503
+ consolFilter,
5504
+ ]);
5505
+
5506
+ const tierCounts: Record<string, number> = { L1_focus: 0, L2_episode: 0, L3_pattern: 0, L4_rule: 0, L5_persona: 0 };
5507
+ for (const row of memRes.data || []) {
5336
5508
  const t = (row as any).tier;
5337
5509
  tierCounts[t] = (tierCounts[t] || 0) + 1;
5338
5510
  }
5339
5511
 
5340
- // Learning counts by status
5341
- let learnQ = supabase.from("agent_learnings")
5342
- .select("status, category")
5343
- .eq("user_id", userId);
5344
- if (agentId) learnQ = learnQ.eq("agent_id", agentId);
5345
- const { data: learnRows } = await learnQ;
5346
-
5347
- const statusCounts: Record<string, number> = {};
5348
- const categoryCounts: Record<string, number> = {};
5349
- for (const row of learnRows || []) {
5350
- const s = (row as any).status;
5351
- const c = (row as any).category;
5352
- statusCounts[s] = (statusCounts[s] || 0) + 1;
5353
- categoryCounts[c] = (categoryCounts[c] || 0) + 1;
5512
+ const trajOutcomes: Record<string, number> = {};
5513
+ for (const row of trajRes.data || []) {
5514
+ const o = (row as any).outcome;
5515
+ trajOutcomes[o] = (trajOutcomes[o] || 0) + 1;
5354
5516
  }
5355
5517
 
5356
- // Config
5357
- let configQ = supabase.from("agent_memory_config")
5358
- .select("*")
5359
- .eq("user_id", userId);
5360
- if (agentId) configQ = configQ.eq("agent_id", agentId);
5361
- const { data: configs } = await configQ;
5518
+ const consolOps: Record<string, number> = {};
5519
+ for (const row of consolRes.data || []) {
5520
+ const o = (row as any).operation;
5521
+ consolOps[o] = (consolOps[o] || 0) + 1;
5522
+ }
5362
5523
 
5363
5524
  return ok({
5364
- memories: { ...tierCounts, total: (memRows || []).length },
5365
- learnings: {
5366
- total: (learnRows || []).length,
5367
- by_status: statusCounts,
5368
- by_category: categoryCounts,
5369
- },
5370
- config: configs?.[0] || { l1_ttl_hours: 24, l2_max_entries: 500, auto_learn: true, auto_memory: true },
5525
+ memories: { ...tierCounts, total: (memRes.data || []).length },
5526
+ knowledge_graph: { nodes: (graphRes.data || []).length, edges: (edgeRes.data || []).length },
5527
+ trajectories: { total: (trajRes.data || []).length, by_outcome: trajOutcomes },
5528
+ consolidation: { operations: (consolRes.data || []).length, by_type: consolOps },
5371
5529
  });
5372
5530
  }
5373
5531
 
5374
- // ── Configure Brain ──
5375
- case "configure_brain": {
5376
- const agentId = await resolveAgent(params.agent_id as string);
5377
- if (!agentId) return err("Missing required: agent_id");
5378
-
5379
- const configData: Record<string, unknown> = {
5380
- user_id: userId,
5381
- agent_id: agentId,
5382
- };
5383
- if (params.l1_ttl_hours !== undefined) configData.l1_ttl_hours = params.l1_ttl_hours;
5384
- if (params.l2_max_entries !== undefined) configData.l2_max_entries = params.l2_max_entries;
5385
- if (params.auto_learn !== undefined) configData.auto_learn = params.auto_learn;
5386
- if (params.auto_memory !== undefined) configData.auto_memory = params.auto_memory;
5387
-
5388
- // Upsert by (user_id, agent_id)
5389
- const { data, error } = await supabase.from("agent_memory_config")
5390
- .upsert(configData, { onConflict: "user_id,agent_id" })
5391
- .select()
5392
- .single();
5393
-
5394
- if (error) return err(error.message);
5395
- return ok({ message: `Brain config updated for agent`, config: data });
5396
- }
5397
-
5398
5532
  default:
5399
- return err(`Unknown action "${action}".`);
5533
+ return err(`Unknown action "${action}". Valid: save_memory, recall, delete_memory, promote_memory, log_learning, list_learnings, resolve_learning, save_entity, link_entities, query_graph, start_trajectory, end_trajectory, get_brain_status`);
5400
5534
  }
5401
5535
  },
5402
5536
  });
@@ -5452,9 +5586,12 @@ export function registerTools(
5452
5586
  }
5453
5587
 
5454
5588
  // ── Brain Context Bootstrap Injection ──────────────────────────────────────
5455
- // Queries the agent's active memories and unresolved learnings, then appends
5456
- // them to the system prompt via api.on("before_prompt_build"). This happens
5457
- // once at registration time (per conversation) and costs ~20ms.
5589
+ // Queries the agent's active memories at boot and injects them into the system
5590
+ // prompt via api.on("before_prompt_build"). Uses the TMT hierarchy:
5591
+ // L1_focus — active working memory (not expired, sorted by importance)
5592
+ // L4_rule — operational guardrails (source='reflection', sorted by importance)
5593
+ // L5_persona — identity context (permanent, top 3)
5594
+ // Runs once at registration time (~20ms, 3 parallel queries).
5458
5595
 
5459
5596
  async function injectBrainContext(
5460
5597
  api: any,
@@ -5462,73 +5599,67 @@ async function injectBrainContext(
5462
5599
  userId: string,
5463
5600
  agentId: string,
5464
5601
  ): Promise<void> {
5465
- if (!agentId) return; // Can't inject without knowing which agent
5602
+ if (!agentId) return;
5466
5603
 
5467
5604
  const now = new Date().toISOString();
5468
5605
 
5469
- // Parallel queries fast
5470
- const [l1Res, l3Res, learningsRes] = await Promise.all([
5471
- // Active L1 focus memories (not expired)
5606
+ const [l1Res, l4Res, l5Res] = await Promise.all([
5607
+ // L1: Active working memory (not expired)
5472
5608
  supabase.from("agent_memories")
5473
5609
  .select("content, importance")
5474
5610
  .eq("user_id", userId)
5475
5611
  .eq("agent_id", agentId)
5476
5612
  .eq("tier", "L1_focus")
5613
+ .gt("decay_score", 0.1)
5477
5614
  .or(`expires_at.is.null,expires_at.gt.${now}`)
5478
5615
  .order("importance", { ascending: false })
5479
5616
  .limit(5),
5480
- // Recent L3 core (long-term wisdom)
5617
+ // L4: Guardrails (non-superseded reflections)
5481
5618
  supabase.from("agent_memories")
5482
- .select("content")
5619
+ .select("content, importance")
5483
5620
  .eq("user_id", userId)
5484
5621
  .eq("agent_id", agentId)
5485
- .eq("tier", "L3_core")
5486
- .order("created_at", { ascending: false })
5487
- .limit(3),
5488
- // Unresolved learnings (don't repeat mistakes)
5489
- supabase.from("agent_learnings")
5490
- .select("title, category, detail")
5622
+ .eq("tier", "L4_rule")
5623
+ .is("superseded_by", null)
5624
+ .order("importance", { ascending: false })
5625
+ .limit(10),
5626
+ // L5: Persona (permanent identity)
5627
+ supabase.from("agent_memories")
5628
+ .select("content")
5491
5629
  .eq("user_id", userId)
5492
5630
  .eq("agent_id", agentId)
5493
- .eq("status", "pending")
5494
- .order("severity", { ascending: false })
5495
- .limit(10),
5631
+ .eq("tier", "L5_persona")
5632
+ .order("importance", { ascending: false })
5633
+ .limit(3),
5496
5634
  ]);
5497
5635
 
5498
5636
  const l1 = l1Res.data || [];
5499
- const l3 = l3Res.data || [];
5500
- const learnings = learningsRes.data || [];
5637
+ const l4 = l4Res.data || [];
5638
+ const l5 = l5Res.data || [];
5501
5639
 
5502
- // Only inject if there's something to inject
5503
- if (l1.length === 0 && l3.length === 0 && learnings.length === 0) return;
5640
+ if (l1.length === 0 && l4.length === 0 && l5.length === 0) return;
5504
5641
 
5505
5642
  const sections: string[] = [];
5506
5643
 
5507
- if (l1.length > 0) {
5508
- sections.push("### Active Focus (L1)\n" + l1.map((m: any) => `- ${m.content}`).join("\n"));
5644
+ if (l5.length > 0) {
5645
+ sections.push("### Identity (L5_persona)\n" + l5.map((m: any) => `- ${m.content}`).join("\n"));
5509
5646
  }
5510
5647
 
5511
- if (l3.length > 0) {
5512
- sections.push("### Core Wisdom (L3)\n" + l3.map((m: any) => `- ${m.content}`).join("\n"));
5648
+ if (l4.length > 0) {
5649
+ sections.push("### ⚠️ Guardrails (L4_rule — DO NOT violate)\n" + l4.map((m: any) => `- ${m.content}`).join("\n"));
5513
5650
  }
5514
5651
 
5515
- if (learnings.length > 0) {
5516
- sections.push(
5517
- "### ⚠️ Unresolved Learnings (DO NOT repeat these)\n" +
5518
- learnings.map((l: any) => `- [${l.category}] ${l.title}${l.detail ? `: ${l.detail}` : ""}`).join("\n")
5519
- );
5652
+ if (l1.length > 0) {
5653
+ sections.push("### Active Focus (L1_focus)\n" + l1.map((m: any) => `- ${m.content}`).join("\n"));
5520
5654
  }
5521
5655
 
5522
- const brainContext = `<agent-brain>\n## Your Brain Context\n\n${sections.join("\n\n")}\n</agent-brain>`;
5656
+ const brainContext = `<agent-brain>\n## Your Brain Context (TMT)\n\n${sections.join("\n\n")}\n</agent-brain>`;
5523
5657
 
5524
- // Append to the existing before_prompt_build hook
5525
- // We use a second hook — OpenClaw supports multiple listeners
5526
5658
  try {
5527
5659
  api.on("before_prompt_build", () => ({
5528
5660
  appendSystemContext: brainContext,
5529
5661
  }));
5530
5662
  } catch {
5531
- // Fallback: log that injection wasn't possible
5532
5663
  api.logger.debug?.("[ofiere] Could not register brain context hook — appendSystemContext may not be supported");
5533
5664
  }
5534
5665
  }