@byte5ai/palaia 2.3.6 → 2.7.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -144,7 +144,10 @@ function resolveExtensionAPIPath(): string | null {
144
144
 
145
145
  // Strategy 3: Sibling in global node_modules (plugin installed alongside openclaw)
146
146
  try {
147
- const thisFile = typeof __dirname !== "undefined" ? __dirname : path.dirname(new URL(import.meta.url).pathname);
147
+ // __dirname is always available in CJS (our tsconfig module); the ESM
148
+ // fallback via import.meta.url is kept for jiti/ESM loaders at runtime.
149
+ // @ts-expect-error import.meta is valid at runtime under jiti/ESM but TS module=commonjs rejects it
150
+ const thisFile: string = typeof __dirname !== "undefined" ? __dirname : path.dirname(new URL(import.meta.url).pathname);
148
151
  // Walk up from plugin src/dist to node_modules, then into openclaw
149
152
  let dir = thisFile;
150
153
  for (let i = 0; i < 6; i++) {
@@ -234,12 +237,8 @@ WHAT TO CAPTURE (be thorough — capture anything worth remembering):
234
237
  - Project context changes: scope changes, timeline shifts, requirement updates, priority changes
235
238
  - Workflow patterns the user established ("my process is...", "I always do X before Y")
236
239
 
237
- STRICT TASK CLASSIFICATION RULES: a "task" MUST have ALL three of:
238
- 1. A clear, completable action (not just an observation or idea)
239
- 2. An identifiable responsible party (explicitly named or unambiguously inferable from context)
240
- 3. A concrete deliverable or measurable end state
241
- If ANY of these is missing, classify as "memory" instead of "task". When in doubt, use "memory".
242
- Observations, learnings, insights, opinions, and general knowledge are ALWAYS "memory", never "task".
240
+ IMPORTANT: Never classify as "task". Tasks are manually created sticky notes (post-its) — they must only come from explicit user/agent intent via palaia write --type task. Auto-capture must use "memory" or "process" only.
241
+ Observations, learnings, insights, opinions, action items, and general knowledge are ALWAYS "memory" or "process", never "task".
243
242
 
244
243
  Only extract genuinely significant knowledge. Skip small talk, acknowledgments, routine exchanges.
245
244
  Do NOT extract if similar knowledge was likely captured in a recent exchange. Prefer quality over quantity. Skip routine status updates and acknowledgments.
@@ -458,7 +457,7 @@ export function trimToRecentExchanges(
458
457
  export async function extractWithLLM(
459
458
  messages: unknown[],
460
459
  config: any,
461
- pluginConfig?: { captureModel?: string },
460
+ pluginConfig?: { captureModel?: string; workspace?: string },
462
461
  knownProjects?: CachedProject[],
463
462
  ): Promise<ExtractionResult[]> {
464
463
  const runEmbeddedPiAgent = await getEmbeddedPiAgent();
@@ -469,11 +468,11 @@ export async function extractWithLLM(
469
468
  }
470
469
 
471
470
  const allTexts = extractMessageTexts(messages);
472
- // Strip Palaia-injected recall context from user messages to prevent feedback loop
471
+ // Strip palaia-injected recall context and private blocks from user messages
473
472
  const cleanedTexts = allTexts.map(t =>
474
473
  t.role === "user"
475
- ? { ...t, text: stripPalaiaInjectedContext(t.text) }
476
- : t
474
+ ? { ...t, text: stripPrivateBlocks(strippalaiaInjectedContext(t.text)) }
475
+ : { ...t, text: stripPrivateBlocks(t.text) }
477
476
  );
478
477
  // Only extract from recent exchanges — full history causes LLM timeouts
479
478
  // and dilutes extraction quality
@@ -516,7 +515,7 @@ export async function extractWithLLM(
516
515
  const result = await runEmbeddedPiAgent({
517
516
  sessionId,
518
517
  sessionFile,
519
- workspaceDir: config?.agents?.defaults?.workspace ?? process.cwd(),
518
+ workspaceDir: pluginConfig?.workspace || config?.agents?.defaults?.workspace || process.cwd(),
520
519
  config,
521
520
  prompt,
522
521
  timeoutMs: 15_000,
@@ -548,7 +547,8 @@ export async function extractWithLLM(
548
547
  const content = typeof item.content === "string" ? item.content.trim() : "";
549
548
  if (!content) continue;
550
549
 
551
- const validTypes = new Set(["memory", "process", "task"]);
550
+ const validTypes = new Set(["memory", "process"]);
551
+ // Tasks are manual-only (post-its) — auto-capture never creates tasks
552
552
  const type = validTypes.has(item.type) ? item.type : "memory";
553
553
 
554
554
  const validTags = new Set([
@@ -611,9 +611,9 @@ const SIGNIFICANCE_RULES: Array<{
611
611
  { pattern: /(?:mistake was|fehler war|should have|hätten sollen|next time)/i, tag: "lesson", type: "memory" },
612
612
  // Surprises
613
613
  { pattern: /(?:surprising|überraschend|unexpected|unerwartet|didn'?t expect|nicht erwartet|plot twist)/i, tag: "surprise", type: "memory" },
614
- // Commitments and tasks
615
- { pattern: /(?:i will|ich werde|todo:|action item|must do|muss noch|need to|commit to|verspreche)/i, tag: "commitment", type: "task" },
616
- { pattern: /(?:deadline|frist|due date|bis zum|by end of|spätestens)/i, tag: "commitment", type: "task" },
614
+ // Commitments (captured as memory — tasks are manual-only post-its)
615
+ { pattern: /(?:i will|ich werde|todo:|action item|must do|muss noch|need to|commit to|verspreche)/i, tag: "commitment", type: "memory" },
616
+ { pattern: /(?:deadline|frist|due date|bis zum|by end of|spätestens)/i, tag: "commitment", type: "memory" },
617
617
  // Processes and workflows
618
618
  { pattern: /(?:the process is|der prozess|steps?:|workflow:|how to|anleitung|recipe:|checklist)/i, tag: "process", type: "process" },
619
619
  { pattern: /(?:first,?\s.*then|schritt \d|step \d|1\.\s.*2\.\s)/i, tag: "process", type: "process" },
@@ -722,19 +722,33 @@ export function extractSignificance(
722
722
  }
723
723
 
724
724
  /**
725
- * Strip Palaia-injected recall context from message text.
725
+ * Strip palaia-injected recall context from message text.
726
726
  * The recall block is prepended to user messages by before_prompt_build via prependContext.
727
727
  * OpenClaw merges it into the user message, so agent_end sees it as user content.
728
728
  * Without stripping, auto-capture re-captures the injected memories -> feedback loop.
729
729
  *
730
730
  * The block has a stable structure:
731
- * - Starts with "## Active Memory (Palaia)"
731
+ * - Starts with "## Active Memory (palaia)"
732
732
  * - Contains [t/m], [t/pr], [t/tk] prefixed entries
733
733
  * - Ends with "[palaia] auto-capture=on..." nudge line
734
734
  */
735
- export function stripPalaiaInjectedContext(text: string): string {
736
- // Pattern: "## Active Memory (Palaia)" ... "[palaia] auto-capture=on..." + optional trailing newlines
735
+ export function strippalaiaInjectedContext(text: string): string {
736
+ // Pattern: "## Active Memory (palaia)" ... "[palaia] auto-capture=on..." + optional trailing newlines
737
737
  // The nudge line is always present and marks the end of the injected block
738
- const PALAIA_BLOCK_RE = /## Active Memory \(Palaia\)[\s\S]*?\[palaia\][^\n]*\n*/;
739
- return text.replace(PALAIA_BLOCK_RE, '').trim();
738
+ const PALAIA_BLOCK_RE = /## Active Memory \(palaia\)[\s\S]*?\[palaia\][^\n]*\n*/;
739
+ // Also strip Session Briefing blocks
740
+ const BRIEFING_BLOCK_RE = /## Session Briefing \(palaia\)[\s\S]*?(?=\n##|\n\n\n|$)/;
741
+ return text
742
+ .replace(PALAIA_BLOCK_RE, '')
743
+ .replace(BRIEFING_BLOCK_RE, '')
744
+ .trim();
745
+ }
746
+
747
+ /**
748
+ * Strip <private>...</private> blocks from text.
749
+ * Content inside private tags is excluded from memory capture.
750
+ * Inspired by claude-mem's privacy marker system.
751
+ */
752
+ export function stripPrivateBlocks(text: string): string {
753
+ return text.replace(/<private>[\s\S]*?<\/private>/gi, '').trim();
740
754
  }
@@ -1,5 +1,5 @@
1
1
  /**
2
- * Lifecycle hooks for the Palaia OpenClaw plugin.
2
+ * Lifecycle hooks for the palaia OpenClaw plugin.
3
3
  *
4
4
  * - before_prompt_build: Query-based contextual recall (Issue #65).
5
5
  * Returns appendSystemContext with brain instruction when memory is used.
@@ -81,7 +81,7 @@ export {
81
81
  isNoiseContent,
82
82
  shouldAttemptCapture,
83
83
  extractSignificance,
84
- stripPalaiaInjectedContext,
84
+ strippalaiaInjectedContext,
85
85
  } from "./capture.js";
86
86
 
87
87
  // Reaction exports
@@ -119,6 +119,8 @@ import {
119
119
  extractMessageTexts,
120
120
  buildRecallQuery,
121
121
  rerankByTypeWeight,
122
+ formatEntryLine,
123
+ shouldUseCompactMode,
122
124
  } from "./recall.js";
123
125
 
124
126
  import {
@@ -128,7 +130,7 @@ import {
128
130
  resolveCaptureModel,
129
131
  shouldAttemptCapture,
130
132
  extractSignificance,
131
- stripPalaiaInjectedContext,
133
+ strippalaiaInjectedContext,
132
134
  trimToRecentExchanges,
133
135
  setLogger as setCaptureLogger,
134
136
  getLlmImportFailureLogged,
@@ -151,6 +153,9 @@ import {
151
153
  filterBlocked,
152
154
  } from "../priorities.js";
153
155
 
156
+ import { formatBriefing } from "./session.js";
157
+ import { getOrCreateSessionState } from "./state.js";
158
+
154
159
  // ============================================================================
155
160
  // Logger (Issue: api.logger integration)
156
161
  // ============================================================================
@@ -190,6 +195,10 @@ export function registerHooks(api: OpenClawPluginApi, config: PalaiaPluginConfig
190
195
 
191
196
  const opts = buildRunnerOpts(config);
192
197
 
198
+ // Note: Session lifecycle hooks (session_start, session_end, before_reset,
199
+ // llm_input, llm_output, after_tool_call) are registered in index.ts entry
200
+ // point BEFORE this function, so they work for both ContextEngine and legacy paths.
201
+
193
202
  // ── Startup checks (H-2, H-3, captureModel validation) ────────
194
203
  (async () => {
195
204
  // H-2: Warn if no agent is configured
@@ -269,7 +278,7 @@ export function registerHooks(api: OpenClawPluginApi, config: PalaiaPluginConfig
269
278
  // ── /palaia status command ─────────────────────────────────────
270
279
  api.registerCommand({
271
280
  name: "palaia-status",
272
- description: "Show Palaia memory status",
281
+ description: "Show palaia memory status",
273
282
  async handler(_args: string) {
274
283
  try {
275
284
  const state = await loadPluginState(config.workspace);
@@ -284,7 +293,7 @@ export function registerHooks(api: OpenClawPluginApi, config: PalaiaPluginConfig
284
293
 
285
294
  return { text: formatStatusResponse(state, stats, config) };
286
295
  } catch (error) {
287
- return { text: `Palaia status error: ${error}` };
296
+ return { text: `palaia status error: ${error}` };
288
297
  }
289
298
  },
290
299
  });
@@ -326,7 +335,7 @@ export function registerHooks(api: OpenClawPluginApi, config: PalaiaPluginConfig
326
335
  }
327
336
  });
328
337
 
329
- // ── before_prompt_build (Issue #65: Query-based Recall) ────────
338
+ // ── before_prompt_build (Issue #65: Query-based Recall + v3.0 Session Briefing) ──
330
339
  if (config.memoryInject) {
331
340
  api.on("before_prompt_build", async (event: any, ctx: any) => {
332
341
  // Prune stale entries to prevent memory leaks from crashed sessions (C-2)
@@ -337,6 +346,34 @@ export function registerHooks(api: OpenClawPluginApi, config: PalaiaPluginConfig
337
346
  const hookOpts = buildRunnerOpts(config, { workspace: resolved.workspace });
338
347
 
339
348
  try {
349
+ // ── Session Briefing Injection (v3.0) ─────────────────────
350
+ // If a session briefing is pending (from session_start or model switch),
351
+ // prepend it to the recall context for seamless session continuity.
352
+ let briefingText = "";
353
+ let briefingSummary: string | null = null; // Kept for smart query fallback
354
+ const sessionKey = resolveSessionKeyFromCtx(ctx);
355
+ if (sessionKey) {
356
+ const sessState = getOrCreateSessionState(sessionKey);
357
+ // Wait for session_start briefing load (max 3s to avoid blocking)
358
+ if (sessState.briefingReady) {
359
+ await Promise.race([
360
+ sessState.briefingReady,
361
+ new Promise<void>(r => setTimeout(r, 3000)),
362
+ ]);
363
+ }
364
+ // Capture summary BEFORE clearing, for smart query fallback below
365
+ briefingSummary = sessState.pendingBriefing?.summary ?? null;
366
+ if (sessState.pendingBriefing && !sessState.briefingDelivered) {
367
+ briefingText = formatBriefing(sessState.pendingBriefing, config.sessionBriefingMaxChars);
368
+ sessState.briefingDelivered = true;
369
+ // Clear pending briefing after delivery (unless model switch re-triggers)
370
+ if (!sessState.modelSwitchDetected) {
371
+ sessState.pendingBriefing = null;
372
+ }
373
+ sessState.modelSwitchDetected = false;
374
+ }
375
+ }
376
+
340
377
  // Load and resolve priorities (Issue #121)
341
378
  const prio = await loadPriorities(resolved.workspace);
342
379
  const project = config.captureProject || undefined;
@@ -347,15 +384,25 @@ export function registerHooks(api: OpenClawPluginApi, config: PalaiaPluginConfig
347
384
  tier: config.tier,
348
385
  }, resolved.agentId, project);
349
386
 
350
- const maxChars = resolvedPrio.maxInjectedChars || 4000;
387
+ // Reduce recall budget by briefing size
388
+ const maxChars = Math.max((resolvedPrio.maxInjectedChars || 4000) - briefingText.length, 500);
351
389
  const limit = Math.min(config.maxResults || 10, 20);
352
390
  let entries: QueryResult["results"] = [];
353
391
 
354
392
  if (config.recallMode === "query") {
355
- const userMessage = event.messages
393
+ let userMessage = event.messages
356
394
  ? buildRecallQuery(event.messages)
357
395
  : (event.prompt || null);
358
396
 
397
+ // ── Smart Query Fallback (v3.0) ───────────────────────
398
+ // If query is too short or matches a continuation pattern,
399
+ // use the session summary as query for better recall results.
400
+ const CONTINUATION_PATTERN = /^(ja|ok|weiter|mach|genau|do it|yes|continue|go|proceed|sure|klar|passt|yep|yup|exactly|right)\b/i;
401
+ if (briefingSummary && userMessage && (userMessage.length < 10 || CONTINUATION_PATTERN.test(userMessage.trim()))) {
402
+ userMessage = briefingSummary.slice(0, 500);
403
+ logger.info("[palaia] Smart query fallback: using session summary as recall query");
404
+ }
405
+
359
406
  if (userMessage && userMessage.length >= 5) {
360
407
  // Try embed server first (fast path: ~0.5s), then CLI fallback (~3-14s)
361
408
  let serverQueried = false;
@@ -372,6 +419,7 @@ export function registerHooks(api: OpenClawPluginApi, config: PalaiaPluginConfig
372
419
  text: userMessage,
373
420
  top_k: limit,
374
421
  include_cold: resolvedPrio.tier === "all",
422
+ ...(resolvedPrio.scopeVisibility ? { scope_visibility: resolvedPrio.scopeVisibility } : {}),
375
423
  }, config.timeoutMs || 3000);
376
424
  if (resp?.result?.results && Array.isArray(resp.result.results)) {
377
425
  entries = resp.result.results;
@@ -417,36 +465,37 @@ export function registerHooks(api: OpenClawPluginApi, config: PalaiaPluginConfig
417
465
  entries = result.results;
418
466
  }
419
467
  } catch {
468
+ // Still deliver briefing even if recall fails completely
469
+ if (briefingText) {
470
+ return { prependContext: briefingText };
471
+ }
420
472
  return;
421
473
  }
422
474
  }
423
475
 
424
- if (entries.length === 0) return;
476
+ // If no recall entries but briefing exists, deliver briefing alone
477
+ if (entries.length === 0) {
478
+ if (briefingText) {
479
+ return { prependContext: briefingText };
480
+ }
481
+ return;
482
+ }
425
483
 
426
484
  // Apply type-weighted reranking and blocked filtering (Issue #121)
427
- const rankedRaw = rerankByTypeWeight(entries, resolvedPrio.recallTypeWeight);
485
+ const rankedRaw = rerankByTypeWeight(entries, resolvedPrio.recallTypeWeight, config.recallRecencyBoost, config.manualEntryBoost);
428
486
  const ranked = filterBlocked(rankedRaw, resolvedPrio.blocked);
429
487
 
430
- // Build context string with char budget (compact format for token efficiency)
431
- const SCOPE_SHORT: Record<string, string> = { team: "t", private: "p", public: "pub" };
432
- const TYPE_SHORT: Record<string, string> = { memory: "m", process: "pr", task: "tk" };
433
-
434
- let text = "## Active Memory (Palaia)\n\n";
488
+ // Build context string with char budget
489
+ // Progressive disclosure: compact mode for large stores (title + first line + ID)
490
+ const compact = shouldUseCompactMode(ranked.length);
491
+ let text = "## Active Memory (palaia)\n\n";
492
+ if (compact) {
493
+ text += "_Compact mode — use `memory_get <id>` for full details._\n\n";
494
+ }
435
495
  let chars = text.length;
436
496
 
437
497
  for (const entry of ranked) {
438
- const scopeKey = SCOPE_SHORT[entry.scope] || entry.scope;
439
- const typeKey = TYPE_SHORT[entry.type] || entry.type;
440
- const prefix = `[${scopeKey}/${typeKey}]`;
441
-
442
- // If body starts with title (common), skip title to save tokens
443
- let line: string;
444
- if (entry.body.toLowerCase().startsWith(entry.title.toLowerCase())) {
445
- line = `${prefix} ${entry.body}\n\n`;
446
- } else {
447
- line = `${prefix} ${entry.title}\n${entry.body}\n\n`;
448
- }
449
-
498
+ const line = formatEntryLine(entry, compact);
450
499
  if (chars + line.length > maxChars) break;
451
500
  text += line;
452
501
  chars += line.length;
@@ -463,7 +512,7 @@ export function registerHooks(api: OpenClawPluginApi, config: PalaiaPluginConfig
463
512
  }
464
513
  const { nudges } = checkNudges(pluginState);
465
514
  if (nudges.length > 0) {
466
- nudgeContext = "\n\n## Agent Nudge (Palaia)\n\n" + nudges.join("\n\n");
515
+ nudgeContext = "\n\n## Agent Nudge (palaia)\n\n" + nudges.join("\n\n");
467
516
  }
468
517
  await savePluginState(pluginState, resolved.workspace);
469
518
  } catch {
@@ -483,7 +532,6 @@ export function registerHooks(api: OpenClawPluginApi, config: PalaiaPluginConfig
483
532
  const hasRelevantRecall = !isListFallback && entries.some(
484
533
  (e) => typeof e.score === "number" && e.score >= resolvedPrio.recallMinScore,
485
534
  );
486
- const sessionKey = resolveSessionKeyFromCtx(ctx);
487
535
  if (sessionKey && hasRelevantRecall) {
488
536
  const turnState = getOrCreateTurnState(sessionKey);
489
537
  turnState.recallOccurred = true;
@@ -506,9 +554,9 @@ export function registerHooks(api: OpenClawPluginApi, config: PalaiaPluginConfig
506
554
 
507
555
  // Return prependContext + appendSystemContext for recall emoji
508
556
  return {
509
- prependContext: text + nudgeContext,
557
+ prependContext: briefingText + text,
510
558
  appendSystemContext: config.showMemorySources
511
- ? "You used Palaia memory in this turn. Add \u{1f9e0} at the very end of your response (after everything else, on its own line)."
559
+ ? "You used palaia memory in this turn. Add \u{1f9e0} at the very end of your response (after everything else, on its own line)."
512
560
  : undefined,
513
561
  };
514
562
  } catch (error) {
@@ -539,6 +587,7 @@ export function registerHooks(api: OpenClawPluginApi, config: PalaiaPluginConfig
539
587
  const hookOpts = buildRunnerOpts(config, { workspace: resolved.workspace });
540
588
 
541
589
  if (!event.success || !event.messages || event.messages.length === 0) {
590
+ logger.info(`[palaia] Auto-capture skipped: success=${event.success}, messages=${event.messages?.length ?? 0}`);
542
591
  return;
543
592
  }
544
593
 
@@ -549,6 +598,7 @@ export function registerHooks(api: OpenClawPluginApi, config: PalaiaPluginConfig
549
598
 
550
599
  const userTurns = allTexts.filter((t) => t.role === "user").length;
551
600
  if (userTurns < config.captureMinTurns) {
601
+ logger.info(`[palaia] Auto-capture skipped: ${userTurns} user turns < captureMinTurns=${config.captureMinTurns}`);
552
602
  return;
553
603
  }
554
604
 
@@ -559,14 +609,17 @@ export function registerHooks(api: OpenClawPluginApi, config: PalaiaPluginConfig
559
609
  collectedHints.push(...hints);
560
610
  }
561
611
 
562
- // Strip Palaia-injected recall context from user messages to prevent feedback loop.
612
+ // Strip palaia-injected recall context and private blocks from messages.
563
613
  // The recall block is prepended to user messages by before_prompt_build.
564
614
  // Without stripping, auto-capture would re-capture previously recalled memories.
565
- const cleanedTexts = allTexts.map(t =>
566
- t.role === "user"
567
- ? { ...t, text: stripPalaiaInjectedContext(t.text) }
568
- : t
569
- );
615
+ // Private blocks (<private>...</private>) must be excluded from capture.
616
+ const { stripPrivateBlocks } = await import("./capture.js");
617
+ const cleanedTexts = allTexts.map(t => ({
618
+ ...t,
619
+ text: stripPrivateBlocks(
620
+ t.role === "user" ? strippalaiaInjectedContext(t.text) : t.text
621
+ ),
622
+ }));
570
623
 
571
624
  // Only extract from recent exchanges — full history causes LLM timeouts
572
625
  // and dilutes extraction quality
@@ -581,11 +634,29 @@ export function registerHooks(api: OpenClawPluginApi, config: PalaiaPluginConfig
581
634
  const exchangeText = exchangeParts.join("\n");
582
635
 
583
636
  if (!shouldAttemptCapture(exchangeText)) {
637
+ logger.info(`[palaia] Auto-capture skipped: content did not pass significance filter (${exchangeText.length} chars)`);
584
638
  return;
585
639
  }
586
640
 
587
641
  const knownProjects = await loadProjects(hookOpts);
588
642
 
643
+ // Resolve effective capture scope from priorities (per-agent override, #147)
644
+ let effectiveCaptureScope = config.captureScope || "";
645
+ try {
646
+ const prio = await loadPriorities(resolved.workspace);
647
+ const resolvedCapturePrio = resolvePriorities(prio, {
648
+ recallTypeWeight: config.recallTypeWeight,
649
+ recallMinScore: config.recallMinScore,
650
+ maxInjectedChars: config.maxInjectedChars,
651
+ tier: config.tier,
652
+ }, agentName);
653
+ if (resolvedCapturePrio.captureScope) {
654
+ effectiveCaptureScope = resolvedCapturePrio.captureScope;
655
+ }
656
+ } catch {
657
+ // Fall through to config default
658
+ }
659
+
589
660
  // Helper: build CLI args with metadata
590
661
  const buildWriteArgs = (
591
662
  content: string,
@@ -601,9 +672,9 @@ export function registerHooks(api: OpenClawPluginApi, config: PalaiaPluginConfig
601
672
  "--tags", tags.join(",") || "auto-capture",
602
673
  ];
603
674
 
604
- // Scope guardrail: config.captureScope overrides everything; otherwise max team (no public)
605
- const scope = config.captureScope
606
- ? sanitizeScope(config.captureScope, "team", true)
675
+ // Scope guardrail: priorities captureScope > config.captureScope > hint/LLM scope
676
+ const scope = effectiveCaptureScope
677
+ ? sanitizeScope(effectiveCaptureScope, "team", true)
607
678
  : sanitizeScope(itemScope, "team", false);
608
679
  args.push("--scope", scope);
609
680
 
@@ -665,6 +736,7 @@ export function registerHooks(api: OpenClawPluginApi, config: PalaiaPluginConfig
665
736
  try {
666
737
  const results = await extractWithLLM(event.messages, api.config, {
667
738
  captureModel: config.captureModel,
739
+ workspace: resolved.workspace,
668
740
  }, knownProjects);
669
741
 
670
742
  await storeLLMResults(results);
@@ -684,6 +756,7 @@ export function registerHooks(api: OpenClawPluginApi, config: PalaiaPluginConfig
684
756
  // Retry without captureModel -> resolveCaptureModel will use primary model
685
757
  const fallbackResults = await extractWithLLM(event.messages, api.config, {
686
758
  captureModel: undefined,
759
+ workspace: resolved.workspace,
687
760
  }, knownProjects);
688
761
  await storeLLMResults(fallbackResults);
689
762
  llmHandled = true;
@@ -708,6 +781,7 @@ export function registerHooks(api: OpenClawPluginApi, config: PalaiaPluginConfig
708
781
  if (config.captureFrequency === "significant") {
709
782
  const significance = extractSignificance(exchangeText);
710
783
  if (!significance) {
784
+ logger.info("[palaia] Auto-capture skipped: rule-based extraction found no significance (need ≥2 distinct tags)");
711
785
  return;
712
786
  }
713
787
  captureData = significance;
@@ -821,7 +895,7 @@ export function registerHooks(api: OpenClawPluginApi, config: PalaiaPluginConfig
821
895
  // ── Startup Recovery Service ───────────────────────────────────
822
896
  api.registerService({
823
897
  id: "palaia-recovery",
824
- start: async () => {
898
+ start: async (_ctx) => {
825
899
  const result = await recover(opts);
826
900
  if (result.replayed > 0) {
827
901
  logger.info(`[palaia] WAL recovery: replayed ${result.replayed} entries`);
@@ -6,7 +6,7 @@
6
6
  */
7
7
 
8
8
  import type { RecallTypeWeights } from "../config.js";
9
- import { stripPalaiaInjectedContext } from "./capture.js";
9
+ import { strippalaiaInjectedContext } from "./capture.js";
10
10
 
11
11
  // ============================================================================
12
12
  // Types
@@ -26,6 +26,7 @@ export interface QueryResult {
26
26
  title?: string;
27
27
  type?: string;
28
28
  tags?: string[];
29
+ created?: string;
29
30
  }>;
30
31
  }
31
32
 
@@ -78,7 +79,7 @@ export function buildFootnote(
78
79
  const dateStr = formatShortDate(e.date);
79
80
  return dateStr ? `"${e.title}" (${dateStr})` : `"${e.title}"`;
80
81
  });
81
- return `\n\n\u{1f4ce} Palaia: ${parts.join(", ")}`;
82
+ return `\n\n\u{1f4ce} palaia: ${parts.join(", ")}`;
82
83
  }
83
84
 
84
85
  // Re-export formatShortDate from state for use here
@@ -95,7 +96,7 @@ const TRANSPARENCY_RECALL_THRESHOLD = 50;
95
96
  const TRANSPARENCY_DAYS_THRESHOLD = 7;
96
97
 
97
98
  const SATISFACTION_NUDGE_TEXT =
98
- "Your user has been using Palaia for a while now. " +
99
+ "Your user has been using palaia for a while now. " +
99
100
  "Ask them casually if they're happy with the memory system. " +
100
101
  "If there are issues, suggest `palaia doctor`.";
101
102
 
@@ -267,7 +268,7 @@ function isSystemOnlyContent(text: string): boolean {
267
268
  export function buildRecallQuery(messages: unknown[]): string {
268
269
  const texts = extractMessageTexts(messages).map(t =>
269
270
  t.role === "user"
270
- ? { ...t, text: stripPalaiaInjectedContext(t.text) }
271
+ ? { ...t, text: strippalaiaInjectedContext(t.text) }
271
272
  : t
272
273
  );
273
274
 
@@ -342,16 +343,41 @@ export interface RankedEntry {
342
343
  bm25Score?: number;
343
344
  embedScore?: number;
344
345
  weightedScore: number;
346
+ created?: string;
347
+ tags?: string[];
348
+ }
349
+
350
+ /**
351
+ * Calculate recency boost factor.
352
+ * Returns a multiplier: 1.0 (no boost) to 1.0 + boostFactor (max boost for very recent).
353
+ * Formula: 1 + boostFactor * exp(-hoursAgo / 24)
354
+ */
355
+ function calcRecencyBoost(created: string | undefined, boostFactor: number): number {
356
+ if (!boostFactor || !created) return 1.0;
357
+ try {
358
+ const hoursAgo = (Date.now() - new Date(created).getTime()) / (1000 * 60 * 60);
359
+ if (hoursAgo < 0 || isNaN(hoursAgo)) return 1.0;
360
+ return 1.0 + boostFactor * Math.exp(-hoursAgo / 24);
361
+ } catch {
362
+ return 1.0;
363
+ }
345
364
  }
346
365
 
347
366
  export function rerankByTypeWeight(
348
367
  results: QueryResult["results"],
349
- weights: RecallTypeWeights,
368
+ weights: Record<string, number>,
369
+ recencyBoost = 0,
370
+ manualEntryBoost = 1.3,
350
371
  ): RankedEntry[] {
351
372
  return results
352
373
  .map((r) => {
353
374
  const type = r.type || "memory";
354
375
  const weight = weights[type] ?? 1.0;
376
+ const recency = calcRecencyBoost(r.created, recencyBoost);
377
+ // Manual entries (no auto-capture tag) get a boost over auto-captured ones.
378
+ // This ensures intentionally stored knowledge ranks higher than conversation noise.
379
+ const isAutoCapture = r.tags?.includes("auto-capture") ?? false;
380
+ const sourceBoost = isAutoCapture ? 1.0 : manualEntryBoost;
355
381
  return {
356
382
  id: r.id,
357
383
  body: r.content || r.body || "",
@@ -362,8 +388,50 @@ export function rerankByTypeWeight(
362
388
  score: r.score,
363
389
  bm25Score: r.bm25_score,
364
390
  embedScore: r.embed_score,
365
- weightedScore: r.score * weight,
391
+ weightedScore: r.score * weight * recency * sourceBoost,
392
+ created: r.created,
393
+ tags: r.tags,
366
394
  };
367
395
  })
368
396
  .sort((a, b) => b.weightedScore - a.weightedScore);
369
397
  }
398
+
399
+ // ── Context Formatting ──────────────────────────────────────────────────
400
+
401
+ const SCOPE_SHORT: Record<string, string> = { team: "t", private: "p", public: "pub" };
402
+ const TYPE_SHORT: Record<string, string> = { memory: "m", process: "pr", task: "tk" };
403
+
404
+ /**
405
+ * Format a ranked entry as an injectable context line.
406
+ *
407
+ * In compact mode (progressive disclosure), only title + first line + ID are shown.
408
+ * The agent can use `memory_get <id>` for the full entry.
409
+ */
410
+ export function formatEntryLine(entry: RankedEntry, compact: boolean): string {
411
+ const scopeKey = SCOPE_SHORT[entry.scope] || entry.scope;
412
+ const typeKey = TYPE_SHORT[entry.type] || entry.type;
413
+ const prefix = `[${scopeKey}/${typeKey}]`;
414
+
415
+ if (compact) {
416
+ // Compact: title + first line of body + ID reference
417
+ const firstLine = entry.body.split("\n")[0]?.slice(0, 120) || "";
418
+ const titlePart = entry.body.toLowerCase().startsWith(entry.title.toLowerCase())
419
+ ? firstLine
420
+ : `${entry.title} — ${firstLine}`;
421
+ return `${prefix} ${titlePart} [id:${entry.id}]\n`;
422
+ }
423
+
424
+ // Full: title + complete body
425
+ if (entry.body.toLowerCase().startsWith(entry.title.toLowerCase())) {
426
+ return `${prefix} ${entry.body}\n\n`;
427
+ }
428
+ return `${prefix} ${entry.title}\n${entry.body}\n\n`;
429
+ }
430
+
431
+ /**
432
+ * Determine if compact mode should be used based on result count.
433
+ * Above threshold, use compact mode to fit more entries in budget.
434
+ */
435
+ export function shouldUseCompactMode(totalResults: number, threshold = 100): boolean {
436
+ return totalResults > threshold;
437
+ }