clementine-agent 1.18.26 → 1.18.27

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -33,6 +33,7 @@ export declare function contextThrashRecoveryNotice(): string;
33
33
  export declare function buildContextThrashRecoveryPrompt(userRequest: string, priorFailureText?: string): string;
34
34
  /** Format a millisecond duration as a human-friendly "X ago" string. */
35
35
  export declare function formatTimeAgo(ms: number): string;
36
+ export declare function scrubInternalContextBlocks(text: string): string;
36
37
  export declare function looksLikeOneMillionContextError(value: unknown): boolean;
37
38
  export declare function oneMillionContextRecoveryMessage(): string;
38
39
  export declare function looksLikeProviderApiErrorResponse(value: unknown): boolean;
@@ -123,7 +124,7 @@ export declare class PersonalAssistant {
123
124
  }>;
124
125
  updatedAt: string;
125
126
  };
126
- /** Inject a background work result into the session so the next chat naturally references it. */
127
+ /** Inject a background work result into the session as silent follow-up context. */
127
128
  injectPendingContext(sessionKey: string, userPrompt: string, result: string): void;
128
129
  private initMemoryStore;
129
130
  /**
@@ -321,7 +322,9 @@ export declare class PersonalAssistant {
321
322
  * a query. Used to give the DM session visibility of cron/heartbeat outputs
322
323
  * so follow-up conversation has context.
323
324
  */
324
- injectContext(sessionKey: string, userText: string, assistantText: string): void;
325
+ injectContext(sessionKey: string, userText: string, assistantText: string, opts?: {
326
+ pending?: boolean;
327
+ }): void;
325
328
  getRecentActivity(sinceIso: string, maxEntries?: number): Array<{
326
329
  sessionKey: string;
327
330
  role: string;
@@ -373,6 +373,13 @@ function getContextWindow(model) {
373
373
  function capContextBlock(text, maxChars) {
374
374
  return capOutput(String(text ?? ''), maxChars);
375
375
  }
376
/**
 * Strip internal context-governance wrappers from model output before it is
 * shown to the user. Removes each bracketed block (opening tag through its
 * matching closing tag plus trailing whitespace) and trims the remainder.
 */
export function scrubInternalContextBlocks(text) {
    const internalBlockPatterns = [
        /\[Context governance:[^\]]*\][\s\S]*?\[\/Context governance:[^\]]*\]\s*/gi,
        /\[Active working set\][\s\S]*?\[\/Active working set\]\s*/gi,
        /\[Recent proactive notification context\][\s\S]*?\[\/Recent proactive notification context\]\s*/gi,
    ];
    let scrubbed = text;
    for (const pattern of internalBlockPatterns) {
        scrubbed = scrubbed.replace(pattern, '');
    }
    return scrubbed.trim();
}
376
383
  function capContextItem(text) {
377
384
  return capContextBlock(text, CRON_PROGRESS_ITEM_MAX_CHARS).replace(/\s+/g, ' ').trim();
378
385
  }
@@ -1182,7 +1189,7 @@ export class PersonalAssistant {
1182
1189
  getMcpStatus() {
1183
1190
  return { servers: this._lastMcpStatus, updatedAt: this._lastMcpStatusTime };
1184
1191
  }
1185
- /** Inject a background work result into the session so the next chat naturally references it. */
1192
+ /** Inject a background work result into the session as silent follow-up context. */
1186
1193
  injectPendingContext(sessionKey, userPrompt, result) {
1187
1194
  const pending = this.pendingContext.get(sessionKey) ?? [];
1188
1195
  pending.push({ user: userPrompt.slice(0, 500), assistant: result.slice(0, 2000) });
@@ -3043,7 +3050,10 @@ You have a cost budget per message — not a hard turn limit. Work until the tas
3043
3050
  contextLines.push(`[${user}]\n${assistant}`);
3044
3051
  }
3045
3052
  effectivePrompt =
3046
- `[Since we last talked, you did some background work. Naturally mention what happened — lead with anything that needs attention, briefly note routine completions. Don't dump raw tool calls or list job names. Be conversational.\nBackground:\n${contextLines.join('\n\n')}]\n\n${effectivePrompt}`;
3053
+ `[Background work context — REFERENCE ONLY, not new user input.\n` +
3054
+ `Use this silently to understand follow-ups. Mention it only if the user asks about status, results, fixes, or what changed, or if it is a new urgent blocker. ` +
3055
+ `Do not lead greetings or casual small talk with stale heartbeat, cron, or background-task details.\n` +
3056
+ `Background:\n${contextLines.join('\n\n')}]\n\n${effectivePrompt}`;
3047
3057
  }
3048
3058
  }
3049
3059
  // Inject stall nudge if the previous query for this session showed stall signals
@@ -3070,6 +3080,7 @@ You have a cost budget per message — not a hard turn limit. Work until the tas
3070
3080
  const CHAT_TIMEOUT_MS = 30 * 60 * 1000;
3071
3081
  const guard = new StallGuard();
3072
3082
  let [responseText, sessionId] = await this.runQuery(effectivePrompt, key, onText, model, profile, securityAnnotation, effectiveMaxTurns, projectOverride, onToolActivity, verboseLevel, abortController, guard, CHAT_TIMEOUT_MS, intent, turnPolicy, toolset);
3083
+ responseText = scrubInternalContextBlocks(responseText);
3073
3084
  // If we got a context-length / prompt-too-long error, retry with a fresh session
3074
3085
  const errLower = responseText.toLowerCase();
3075
3086
  const isContextOverflow = errLower.includes('prompt is too long') ||
@@ -6184,7 +6195,7 @@ You have a cost budget per message — not a hard turn limit. Work until the tas
6184
6195
  * a query. Used to give the DM session visibility of cron/heartbeat outputs
6185
6196
  * so follow-up conversation has context.
6186
6197
  */
6187
- injectContext(sessionKey, userText, assistantText) {
6198
+ injectContext(sessionKey, userText, assistantText, opts = {}) {
6188
6199
  const trimmedUser = capContextBlock(userText, INJECTED_CONTEXT_MAX_CHARS);
6189
6200
  const trimmedAssistant = capContextBlock(assistantText, INJECTED_CONTEXT_MAX_CHARS);
6190
6201
  // Add to in-memory exchange history
@@ -6199,12 +6210,14 @@ You have a cost budget per message — not a hard turn limit. Work until the tas
6199
6210
  // Queue as pending context so the next chat() prepends it even
6200
6211
  // when an active SDK session exists (session recovery alone won't
6201
6212
  // help because the SDK session has no knowledge of this exchange).
6202
- const pending = this.pendingContext.get(sessionKey) ?? [];
6203
- pending.push({ user: trimmedUser, assistant: trimmedAssistant });
6204
- // Keep at most 3 pending to avoid bloating the next prompt
6205
- if (pending.length > 3)
6206
- pending.shift();
6207
- this.pendingContext.set(sessionKey, pending);
6213
+ if (opts.pending !== false) {
6214
+ const pending = this.pendingContext.get(sessionKey) ?? [];
6215
+ pending.push({ user: trimmedUser, assistant: trimmedAssistant });
6216
+ // Keep at most 3 pending to avoid bloating the next prompt
6217
+ if (pending.length > 3)
6218
+ pending.shift();
6219
+ this.pendingContext.set(sessionKey, pending);
6220
+ }
6208
6221
  this.sessionTimestamps.set(sessionKey, new Date());
6209
6222
  this.saveSessions();
6210
6223
  // Persist to transcript store
@@ -67,7 +67,7 @@ export const TOOL_BUNDLES = [
67
67
  },
68
68
  {
69
69
  id: 'github',
70
- patterns: [/\b(github|pull request|pull requests|prs?|issues?)\b/i],
70
+ patterns: [/\b(github|pull request|pull requests|prs?)\b/i, /\b(repo|github)\s+issues?\b/i],
71
71
  externalMcpServers: ['github'],
72
72
  composioToolkits: ['github'],
73
73
  },
@@ -7044,6 +7044,29 @@ If the tool returns nothing or errors, return an empty array \`[]\`.`,
7044
7044
  res.status(500).json({ error: String(err) });
7045
7045
  }
7046
7046
  });
7047
+ // Coverage + recall telemetry for both chunks and transcripts. Powers the
7048
+ // Memory Coverage card showing whether dense recall is actually earning its
7049
+ // keep on the current corpus.
7050
+ app.get('/api/memory/coverage', async (_req, res) => {
7051
+ try {
7052
+ const gateway = await getGateway();
7053
+ const store = gateway.assistant?.memoryStore;
7054
+ if (!store) {
7055
+ res.status(503).json({ error: 'Memory store not available' });
7056
+ return;
7057
+ }
7058
+ const transcripts = typeof store.getTranscriptDenseCoverage === 'function'
7059
+ ? store.getTranscriptDenseCoverage()
7060
+ : { embedded: 0, total: 0, model: null };
7061
+ const recall = typeof store.getRecallTelemetrySummary === 'function'
7062
+ ? store.getRecallTelemetrySummary(7)
7063
+ : { total: 0, semanticOnly: 0, lexicalOnly: 0, bothModes: 0, avgTopScore: 0 };
7064
+ res.json({ ok: true, transcripts, recall });
7065
+ }
7066
+ catch (err) {
7067
+ res.status(500).json({ error: String(err) });
7068
+ }
7069
+ });
7047
7070
  // Quick-add: append a sentence to today's daily note from the dashboard.
7048
7071
  // Mirrors the agent's memory_write({action:'append_daily'}) path so the
7049
7072
  // note gets indexed identically.
@@ -24903,6 +24926,38 @@ async function refreshCoverageStrip() {
24903
24926
  html += '<span style="margin-left:auto;color:var(--text-muted)">All chunks indexed</span>';
24904
24927
  }
24905
24928
  html += '</div>';
24929
+
24930
+ // Second strip: transcripts dense coverage + 7-day recall hit-rate.
24931
+ try {
24932
+ var rc = await apiFetch('/api/memory/coverage');
24933
+ var dc = await rc.json();
24934
+ if (dc.ok) {
24935
+ var tx = dc.transcripts || { embedded: 0, total: 0 };
24936
+ var rec = dc.recall || { total: 0, semanticOnly: 0, lexicalOnly: 0, bothModes: 0, avgTopScore: 0 };
24937
+ var txPct = tx.total > 0 ? Math.round((tx.embedded / tx.total) * 100) : 0;
24938
+ var txColor = txPct >= 95 ? '#10b981' : txPct >= 50 ? '#f59e0b' : '#ef4444';
24939
+ var recallTotal = rec.total || 0;
24940
+ var hit = function(n){ return recallTotal > 0 ? Math.round((n / recallTotal) * 100) : 0; };
24941
+ html += '<div style="display:flex;align-items:center;gap:14px;padding:10px 14px;margin-top:8px;background:var(--bg-secondary);border:1px solid var(--border);border-radius:8px;flex-wrap:wrap;font-size:12px">';
24942
+ html += '<span style="color:var(--text-muted)">Conversation recall:</span>';
24943
+ html += '<span><span style="color:' + txColor + '">●</span> Transcripts ' + txPct + '% '
24944
+ + '<span style="color:var(--text-muted)">(' + tx.embedded.toLocaleString() + '/' + tx.total.toLocaleString() + ')</span></span>';
24945
+ if (recallTotal > 0) {
24946
+ html += '<span style="color:var(--text-muted)">|</span>';
24947
+ html += '<span title="Last 7 days">Hit-rate: '
24948
+ + 'semantic ' + hit(rec.semanticOnly) + '% · '
24949
+ + 'lexical ' + hit(rec.lexicalOnly) + '% · '
24950
+ + 'both ' + hit(rec.bothModes) + '% '
24951
+ + '<span style="color:var(--text-muted)">(' + recallTotal.toLocaleString() + ' queries)</span></span>';
24952
+ } else {
24953
+ html += '<span style="color:var(--text-muted)">No recall queries logged in the last 7 days.</span>';
24954
+ }
24955
+ if (tx.total > 0 && tx.embedded < tx.total) {
24956
+ html += '<span style="margin-left:auto;color:var(--text-muted)">' + (tx.total - tx.embedded).toLocaleString() + ' transcripts unembedded — run <code>clementine memory reembed --target transcripts</code></span>';
24957
+ }
24958
+ html += '</div>';
24959
+ }
24960
+ } catch (e) { /* coverage strip is best-effort */ }
24906
24961
  el.innerHTML = html;
24907
24962
  } catch (err) {
24908
24963
  el.innerHTML = '';
package/dist/cli/index.js CHANGED
@@ -3276,9 +3276,10 @@ memoryModelCmd
3276
3276
  });
3277
3277
  memoryCmd
3278
3278
  .command('reembed')
3279
- .description('Backfill dense neural embeddings for all chunks (or all stale chunks if model changed). Default model: Snowflake/snowflake-arctic-embed-m-v1.5 — first run downloads ~440MB to ~/.clementine/models/.')
3280
- .option('--limit <n>', 'Max chunks to embed in this run (default: all)')
3279
+ .description('Backfill dense neural embeddings for chunks and/or transcripts. Default model: Snowflake/snowflake-arctic-embed-m-v1.5 — first run downloads ~440MB to ~/.clementine/models/.')
3280
+ .option('--limit <n>', 'Max items to embed in this run (default: all)')
3281
3281
  .option('--model <id>', 'Override embedding model id (e.g. Xenova/bge-base-en-v1.5)')
3282
+ .option('--target <kind>', 'What to backfill: chunks | transcripts | all', 'all')
3282
3283
  .action(async (opts) => {
3283
3284
  const BOLD = '\x1b[1m';
3284
3285
  const DIM = '\x1b[0;90m';
@@ -3290,6 +3291,11 @@ memoryCmd
3290
3291
  if (opts.model) {
3291
3292
  process.env.EMBEDDING_DENSE_MODEL = opts.model;
3292
3293
  }
3294
+ const target = (opts.target ?? 'all').toLowerCase();
3295
+ if (!['chunks', 'transcripts', 'all'].includes(target)) {
3296
+ console.error(` ${RED}Invalid --target${RESET}: "${opts.target}". Use chunks | transcripts | all.`);
3297
+ process.exit(1);
3298
+ }
3293
3299
  const limit = opts.limit ? parseInt(opts.limit, 10) : undefined;
3294
3300
  const { MemoryStore } = await import('../memory/store.js');
3295
3301
  const embeddings = await import('../memory/embeddings.js');
@@ -3298,7 +3304,7 @@ memoryCmd
3298
3304
  const store = new MemoryStore(DB_PATH, VAULT_DIR);
3299
3305
  store.initialize();
3300
3306
  console.log();
3301
- console.log(` ${BOLD}Dense embedding backfill${RESET}`);
3307
+ console.log(` ${BOLD}Dense embedding backfill${RESET} ${DIM}(target: ${target})${RESET}`);
3302
3308
  console.log(` Model: ${embeddings.currentDenseModel()}`);
3303
3309
  console.log(` ${DIM}Loading model (first run downloads ~440MB)…${RESET}`);
3304
3310
  const ready = await embeddings.probeDenseReady();
@@ -3309,28 +3315,59 @@ memoryCmd
3309
3315
  }
3310
3316
  console.log(` ${GREEN}✓${RESET} Model ready (${embeddings.denseDimension()}-dim).`);
3311
3317
  console.log();
3312
- const startTime = Date.now();
3313
- let lastReport = 0;
3314
- const result = await store.backfillDenseEmbeddings({
3315
- limit,
3316
- onProgress: (done, total) => {
3317
- // Throttle to avoid spamming
3318
+ const reportProgress = (label, startTime) => {
3319
+ let lastReport = 0;
3320
+ return (done, total) => {
3318
3321
  const now = Date.now();
3319
3322
  if (now - lastReport < 500 && done < total)
3320
3323
  return;
3321
3324
  lastReport = now;
3322
3325
  const pct = total > 0 ? Math.round((done / total) * 100) : 0;
3323
3326
  const elapsed = Math.round((now - startTime) / 1000);
3324
- process.stdout.write(`\r Progress: ${BOLD}${done.toLocaleString()}${RESET}/${total.toLocaleString()} (${pct}%) ${DIM}${elapsed}s${RESET} `);
3325
- },
3326
- });
3327
- process.stdout.write('\n\n');
3328
- const elapsed = Math.round((Date.now() - startTime) / 1000);
3329
- console.log(` ${GREEN}✓${RESET} Embedded ${BOLD}${result.embedded.toLocaleString()}${RESET} chunks ${DIM}(${elapsed}s elapsed)${RESET}`);
3330
- if (result.failed > 0) {
3331
- console.log(` ${YELLOW}!${RESET} Failed: ${result.failed.toLocaleString()} ${DIM}(model returned null — usually empty or invalid input)${RESET}`);
3327
+ process.stdout.write(`\r ${label}: ${BOLD}${done.toLocaleString()}${RESET}/${total.toLocaleString()} (${pct}%) ${DIM}${elapsed}s${RESET} `);
3328
+ };
3329
+ };
3330
+ let totalEmbedded = 0;
3331
+ let totalFailed = 0;
3332
+ let lastModel = '';
3333
+ if (target === 'chunks' || target === 'all') {
3334
+ const startTime = Date.now();
3335
+ const result = await store.backfillDenseEmbeddings({
3336
+ limit,
3337
+ onProgress: reportProgress('Chunks', startTime),
3338
+ });
3339
+ process.stdout.write('\n');
3340
+ const elapsed = Math.round((Date.now() - startTime) / 1000);
3341
+ console.log(` ${GREEN}✓${RESET} Embedded ${BOLD}${result.embedded.toLocaleString()}${RESET} chunks ${DIM}(${elapsed}s elapsed)${RESET}`);
3342
+ if (result.failed > 0) {
3343
+ console.log(` ${YELLOW}!${RESET} Chunk failures: ${result.failed.toLocaleString()}`);
3344
+ }
3345
+ totalEmbedded += result.embedded;
3346
+ totalFailed += result.failed;
3347
+ lastModel = result.model;
3348
+ }
3349
+ if (target === 'transcripts' || target === 'all') {
3350
+ const startTime = Date.now();
3351
+ const result = await store.backfillTranscriptDenseEmbeddings({
3352
+ limit,
3353
+ onProgress: reportProgress('Transcripts', startTime),
3354
+ });
3355
+ process.stdout.write('\n');
3356
+ const elapsed = Math.round((Date.now() - startTime) / 1000);
3357
+ console.log(` ${GREEN}✓${RESET} Embedded ${BOLD}${result.embedded.toLocaleString()}${RESET} transcripts ${DIM}(${elapsed}s elapsed)${RESET}`);
3358
+ if (result.failed > 0) {
3359
+ console.log(` ${YELLOW}!${RESET} Transcript failures: ${result.failed.toLocaleString()}`);
3360
+ }
3361
+ totalEmbedded += result.embedded;
3362
+ totalFailed += result.failed;
3363
+ lastModel = result.model;
3364
+ }
3365
+ console.log();
3366
+ console.log(` Total embedded: ${BOLD}${totalEmbedded.toLocaleString()}${RESET}`);
3367
+ if (totalFailed > 0) {
3368
+ console.log(` ${DIM}(model returned null on ${totalFailed.toLocaleString()} — usually empty or invalid input)${RESET}`);
3332
3369
  }
3333
- console.log(` Model: ${result.model}`);
3370
+ console.log(` Model: ${lastModel}`);
3334
3371
  console.log();
3335
3372
  console.log(` ${DIM}Run \`clementine memory status\` to see updated coverage.${RESET}`);
3336
3373
  console.log();
@@ -0,0 +1,44 @@
1
+ /**
2
+ * Active working set context.
3
+ *
4
+ * This is the short-lived "what matters right now" layer above semantic
5
+ * memory. It is deliberately bounded and deterministic so every channel can
6
+ * stay aware of active operational state without dragging full transcripts or
7
+ * run logs into the model.
8
+ */
9
/**
 * One entry in the active working set: a single operational fact drawn from
 * a notification, background task, long-running ("unleashed") job, or the
 * recent-turn ledger, normalized for prompt injection.
 */
export interface ActiveContextItem {
    /** Which collector produced this item. */
    source: 'notification' | 'background-task' | 'unleashed' | 'turn-ledger';
    /** Short human-readable identifier, e.g. "<taskId> <status>". */
    label: string;
    /** Bounded one-line summary (collectors cap this to ~220 chars). */
    detail: string;
    /** Sort weight: higher-priority items are listed first and truncated last. */
    priority: number;
    /** ISO timestamp of the underlying event; priority ties sort by recency. */
    timestamp?: string;
    /** Id of the persisted context event, when event recording is enabled. */
    eventId?: string;
    /** Id of the originating task / notification / job directory. */
    sourceId?: string;
    /** True when this item already appears in the turn ledger or event log. */
    alreadyLogged?: boolean;
    /** True when this item was already shown to the user in a prior turn. */
    alreadySurfaced?: boolean;
    /** True when the persisted context event has been acknowledged. */
    acknowledged?: boolean;
    /** True when the persisted context event has been resolved. */
    resolved?: boolean;
    /** True when the item is fresh enough to lead a proactive greeting line. */
    greetingEligible?: boolean;
}
/** Result of building the working set for one session. */
export interface ActiveContextSnapshot {
    sessionKey: string;
    /** Ranked, bounded item list (at most `maxItems`). */
    items: ActiveContextItem[];
    /** Rendered "[Context governance: …]" block, or null when no items. */
    promptBlock: string | null;
    /** Optional proactive greeting for a fresh high-priority item, else null. */
    greetingLine: string | null;
}
/** Inputs controlling snapshot construction. */
export interface ActiveContextOptions {
    /** Root data directory containing background-tasks/ and unleashed/. */
    baseDir: string;
    /** Clock override for tests; defaults to Date.now(). */
    now?: number;
    /** Item cap for the snapshot (default 6, minimum 1). */
    maxItems?: number;
    /** Set false to skip persisting context events while collecting. */
    recordEvents?: boolean;
    /**
     * Dense embedding coverage for transcripts. When < 50%, the prompt block
     * carries an inline note so the model knows recall is degraded.
     */
    transcriptCoverage?: {
        embedded: number;
        total: number;
    };
}
export declare function buildActiveContextSnapshot(sessionKey: string, opts: ActiveContextOptions): ActiveContextSnapshot;
//# sourceMappingURL=active-context.d.ts.map
@@ -0,0 +1,330 @@
1
+ /**
2
+ * Active working set context.
3
+ *
4
+ * This is the short-lived "what matters right now" layer above semantic
5
+ * memory. It is deliberately bounded and deterministic so every channel can
6
+ * stay aware of active operational state without dragging full transcripts or
7
+ * run logs into the model.
8
+ */
9
+ import { existsSync, readdirSync, readFileSync, statSync } from 'node:fs';
10
+ import path from 'node:path';
11
+ import { listBackgroundTasks } from '../agent/background-tasks.js';
12
+ import { listRecentNotificationEvents } from './notification-context.js';
13
+ import { readRecentTurnLedger } from './turn-ledger.js';
14
+ import { isLiveUnleashedStatus } from './unleashed-status.js';
15
+ import { recordContextEvent } from './context-events.js';
16
// Terminal (done/failed/aborted) background tasks stay in the working set for a day.
const RECENT_TASK_TTL_MS = 24 * 60 * 60 * 1000;
// Errored long-running ("unleashed") jobs stay visible for 12 hours.
const RECENT_UNLEASHED_ERROR_TTL_MS = 12 * 60 * 60 * 1000;
// Only active tasks newer than 20 minutes may lead a proactive greeting.
const FRESH_ACTIVE_TASK_GREETING_TTL_MS = 20 * 60 * 1000;
// Per-item detail cap enforced by cap().
const MAX_DETAIL_CHARS = 220;
20
/** Squash every whitespace run to a single space and strip the ends. */
function compactWhitespace(text) {
    return text.split(/\s+/).filter((part) => part.length > 0).join(' ');
}
23
/** Collapse whitespace, then truncate to `max` chars with a "..." suffix. */
function cap(text, max = MAX_DETAIL_CHARS) {
    const flattened = compactWhitespace(text);
    if (flattened.length <= max) {
        return flattened;
    }
    return `${flattened.slice(0, max - 3)}...`;
}
27
/** Parse an ISO timestamp to epoch ms; 0 for missing or unparseable input. */
function timestampMs(value) {
    if (!value) {
        return 0;
    }
    const parsed = Date.parse(value);
    return Number.isFinite(parsed) ? parsed : 0;
}
31
/**
 * Canonicalize text for substring matching: lowercase, split on any run of
 * non-letter/non-digit characters, and rejoin with single spaces.
 */
function normalizeForSurface(text) {
    return text
        .toLowerCase()
        .split(/[^\p{L}\p{N}]+/u)
        .filter((token) => token.length > 0)
        .join(' ');
}
38
/**
 * Read the session's recent turn ledger (best-effort; failures yield an
 * empty history) and flatten it into two normalized haystacks:
 * - loggedText: user message + response + error previews (everything logged)
 * - surfacedText: response + error previews only (what the user actually saw)
 */
function buildSurfaceHistory(sessionKey, opts) {
    let entries;
    try {
        entries = readRecentTurnLedger(sessionKey, 12, opts.baseDir);
    }
    catch {
        entries = [];
    }
    const loggedParts = [];
    const surfacedParts = [];
    for (const entry of entries) {
        const response = entry.responsePreview ?? '';
        const error = entry.errorPreview ?? '';
        loggedParts.push([entry.userMessagePreview, response, error].join(' '));
        surfacedParts.push([response, error].join(' '));
    }
    return {
        entries,
        loggedText: normalizeForSurface(loggedParts.join(' ')),
        surfacedText: normalizeForSurface(surfacedParts.join(' ')),
    };
}
65
/**
 * True when any needle, after normalization, is at least 4 chars long and
 * occurs as a substring of the (already normalized) haystack.
 */
function mentionsAny(haystack, needles) {
    if (!haystack) {
        return false;
    }
    for (const needle of needles) {
        const normalized = normalizeForSurface(needle ?? '');
        if (normalized.length >= 4 && haystack.includes(normalized)) {
            return true;
        }
    }
    return false;
}
73
/**
 * Milliseconds since the task's most relevant timestamp (completed, started,
 * or created, in that order); Infinity when no timestamp parses.
 */
function taskAgeMs(task, now) {
    const anchor = timestampMs(task.completedAt ?? task.startedAt ?? task.createdAt);
    if (anchor <= 0) {
        return Number.POSITIVE_INFINITY;
    }
    return now - anchor;
}
77
/** Restrict working-set collection to tasks owned by the given session. */
function taskMatchesSession(task, sessionKey) {
    const owner = task.sessionKey;
    return owner === sessionKey;
}
80
/** Persist a context event unless event recording is explicitly disabled. */
function maybeRecordContextEvent(opts, input) {
    if (opts.recordEvents === false) {
        return null;
    }
    const recordOpts = { baseDir: opts.baseDir, now: opts.now };
    return recordContextEvent(input, recordOpts);
}
85
// Collect working-set items from this session's background tasks.
// Keeps anything still pending/running plus terminal tasks younger than
// RECENT_TASK_TTL_MS, capped at 5 tasks. Each task is also mirrored into the
// persistent context-event log (unless opts.recordEvents === false) so
// surfaced/acknowledged/resolved state survives across turns.
function backgroundTaskItems(sessionKey, opts, surfaceHistory) {
    const now = opts.now ?? Date.now();
    const dir = path.join(opts.baseDir, 'background-tasks');
    return listBackgroundTasks({}, { dir })
        .filter((task) => taskMatchesSession(task, sessionKey))
        .filter((task) => task.status === 'pending' || task.status === 'running' || taskAgeMs(task, now) <= RECENT_TASK_TTL_MS)
        .slice(0, 5)
        .map((task) => {
        const active = task.status === 'pending' || task.status === 'running';
        const failed = task.status === 'failed' || task.status === 'aborted';
        // Billing/auth/usage-limit errors are blockers: they get 'urgent'
        // severity and a priority boost below.
        const blocker = /usage limit|billing|credit balance|monthly usage|auth/i.test(task.error ?? '');
        const ageMs = taskAgeMs(task, now);
        // One-line summary: error text for failures, result for completions,
        // otherwise the original prompt — all capped by cap().
        const detail = failed
            ? `Failed: ${cap(task.error ?? task.prompt)}`
            : task.status === 'done'
                ? `Done: ${cap(task.result ?? task.prompt)}`
                : cap(task.prompt);
        // Priority for non-active tasks: failed blockers 65, other failures 60,
        // routine completions 30.
        const terminalPriority = failed ? (blocker ? 65 : 60) : 30;
        const severity = blocker ? 'urgent' : failed ? 'warning' : active ? 'normal' : 'low';
        const eventAt = task.completedAt ?? task.startedAt ?? task.createdAt;
        // Record (or refresh) the persistent context event for this task;
        // returns null when recording is disabled.
        const event = maybeRecordContextEvent(opts, {
            source: 'background-task',
            sourceId: task.id,
            sessionKey,
            title: `${task.id} ${task.status}`,
            summary: detail,
            status: task.status,
            severity,
            eventAt,
            loggedAt: eventAt,
            // Completed tasks are immediately marked resolved.
            ...(task.status === 'done' ? { resolvedAt: task.completedAt ?? eventAt } : {}),
            fingerprintParts: [sessionKey, 'background-task', task.id],
            metadata: { fromAgent: task.fromAgent },
        });
        // Fall back to scanning the turn ledger for mentions of the task id
        // when the event store has no logged/surfaced record.
        const alreadyLogged = Boolean(event?.loggedAt) || mentionsAny(surfaceHistory.loggedText, [task.id]);
        const alreadySurfaced = Boolean(event?.surfacedAt) || mentionsAny(surfaceHistory.surfacedText, [task.id]);
        return {
            source: 'background-task',
            label: `${task.id} ${task.status}`,
            detail,
            // Active work outranks everything terminal; blockers outrank both.
            priority: active ? (blocker ? 85 : 78) : terminalPriority,
            timestamp: eventAt,
            eventId: event?.id,
            sourceId: task.id,
            alreadyLogged,
            alreadySurfaced,
            acknowledged: Boolean(event?.acknowledgedAt),
            resolved: Boolean(event?.resolvedAt),
            // Only brand-new (≤20 min), never-surfaced active work may lead a
            // proactive greeting.
            greetingEligible: active
                && !alreadySurfaced
                && ageMs <= FRESH_ACTIVE_TASK_GREETING_TTL_MS,
        };
    });
}
139
// Collect working-set items from recently-sent proactive notifications
// (cron failures, SLA breaches, etc.), capped at 5. Because these were
// already delivered to the user, they are marked logged+surfaced and are
// never greeting-eligible; they exist purely as memory anchors.
function notificationItems(sessionKey, opts) {
    return listRecentNotificationEvents(sessionKey, { baseDir: opts.baseDir, now: opts.now }, 5)
        .map((event) => {
        const jobs = event.jobNames?.length ? ` (${event.jobNames.join(', ')})` : '';
        const body = `${event.summary} ${event.textPreview}`;
        // Detect "nothing actually wrong" notifications (zero-failure
        // heartbeats, recovery notices) so they can be down-ranked.
        const noisyOrRecovered = /\b0\/0 recent runs failed\b/i.test(body)
            || /\b(no active failures|has recovered|fully recovered|running healthy|no fix needed)\b/i.test(body);
        // cron_failure 72 > cron_sla 65 > everything else 40.
        const basePriority = event.type === 'cron_failure' ? 72 : event.type === 'cron_sla' ? 65 : 40;
        // Mirror into the persistent event log; surfacedAt is set because the
        // notification was already sent to the user.
        const contextEvent = maybeRecordContextEvent(opts, {
            source: 'notification',
            sourceId: event.id,
            sessionKey: event.sessionKey ?? sessionKey,
            title: `${event.type}: ${event.title}${jobs}`,
            summary: event.summary || event.textPreview,
            status: event.type === 'cron_failure' ? 'failed' : 'active',
            severity: noisyOrRecovered ? 'low' : event.type === 'cron_failure' ? 'warning' : 'normal',
            eventAt: event.sentAt,
            loggedAt: event.sentAt,
            surfacedAt: event.sentAt,
            // Fingerprint dedupes repeated notifications for the same
            // session/type/jobs/summary combination.
            fingerprintParts: [
                event.sessionKey ?? sessionKey,
                'notification',
                event.type,
                event.jobNames?.join(',') ?? event.title,
                event.summary || event.textPreview,
            ],
            metadata: { notificationId: event.id, jobNames: event.jobNames ?? [] },
        });
        return {
            source: 'notification',
            label: `${event.type}: ${event.title}${jobs}`,
            detail: cap(event.summary || event.textPreview),
            // Noise/recovery notices are clamped to at most 30.
            priority: noisyOrRecovered ? Math.min(basePriority, 30) : basePriority,
            timestamp: event.sentAt,
            eventId: contextEvent?.id,
            sourceId: event.id,
            alreadyLogged: true,
            alreadySurfaced: true,
            acknowledged: Boolean(contextEvent?.acknowledgedAt),
            resolved: Boolean(contextEvent?.resolvedAt),
            greetingEligible: false,
        };
    });
}
183
/** Read and parse a JSON file; null on any read or parse failure. */
function readJsonFile(file) {
    try {
        const raw = readFileSync(file, 'utf-8');
        return JSON.parse(raw);
    }
    catch {
        return null;
    }
}
191
// Collect working-set items from long-running "unleashed" jobs by scanning
// <baseDir>/unleashed/*/status.json. A job is included when it is live
// (per isLiveUnleashedStatus) or errored within the last 12 hours.
// NOTE(review): unlike the other collectors, no sessionKey is passed to the
// recorded context event — unleashed events appear global; confirm intended.
function unleashedItems(opts, surfaceHistory) {
    const now = opts.now ?? Date.now();
    const dir = path.join(opts.baseDir, 'unleashed');
    if (!existsSync(dir))
        return [];
    const out = [];
    let names = [];
    try {
        // Each subdirectory of unleashed/ is one job.
        names = readdirSync(dir)
            .filter((name) => {
            try {
                return statSync(path.join(dir, name)).isDirectory();
            }
            catch {
                return false;
            }
        });
    }
    catch {
        return [];
    }
    for (const name of names) {
        const status = readJsonFile(path.join(dir, name, 'status.json'));
        if (!status)
            continue;
        const rawStatus = String(status.status ?? 'running');
        // Most-recent timestamp available: updated > finished > started.
        const updatedAt = String(status.updatedAt ?? status.finishedAt ?? status.startedAt ?? '');
        const updatedMs = timestampMs(updatedAt);
        const live = isLiveUnleashedStatus(status, now);
        const recentError = rawStatus === 'error' && updatedMs > 0 && now - updatedMs <= RECENT_UNLEASHED_ERROR_TTL_MS;
        if (!live && !recentError)
            continue;
        const phase = status.phase == null ? '' : ` phase ${String(status.phase)}`;
        // Live AND not errored — the only state that can lead a greeting.
        const liveWork = live && rawStatus !== 'error';
        const label = `${name} ${rawStatus}${phase}`;
        const detail = live ? 'Active long-running work.' : 'Recent long-running job error.';
        const contextEvent = maybeRecordContextEvent(opts, {
            source: 'unleashed',
            sourceId: name,
            title: label,
            summary: detail,
            status: rawStatus === 'error' ? 'failed' : liveWork ? 'running' : 'unknown',
            severity: rawStatus === 'error' ? 'warning' : 'normal',
            eventAt: updatedAt || new Date(now).toISOString(),
            loggedAt: updatedAt || new Date(now).toISOString(),
            fingerprintParts: ['unleashed', name],
            metadata: { phase: status.phase ?? null },
        });
        // Fall back to ledger mentions of the job name when the event store
        // has no logged/surfaced record.
        const alreadyLogged = Boolean(contextEvent?.loggedAt) || mentionsAny(surfaceHistory.loggedText, [name]);
        const alreadySurfaced = Boolean(contextEvent?.surfacedAt) || mentionsAny(surfaceHistory.surfacedText, [name]);
        out.push({
            source: 'unleashed',
            label,
            detail,
            // Live work (70) slightly outranks a recent error (62).
            priority: rawStatus === 'error' ? 62 : 70,
            timestamp: updatedAt,
            eventId: contextEvent?.id,
            sourceId: name,
            alreadyLogged,
            alreadySurfaced,
            acknowledged: Boolean(contextEvent?.acknowledgedAt),
            resolved: Boolean(contextEvent?.resolvedAt),
            greetingEligible: liveWork && !alreadySurfaced,
        });
    }
    return out;
}
258
/**
 * Surface up to two notable recent turns (failed delivery, expected action,
 * or tool use) from the ledger as low-priority memory anchors. Ledger turns
 * have by definition already happened, so they are always marked logged,
 * surfaced, and acknowledged, and never greeting-eligible.
 */
function turnLedgerItems(surfaceHistory) {
    const items = [];
    for (const entry of surfaceHistory.entries) {
        const failedDelivery = entry.deliveryStatus === 'failed';
        const notable = failedDelivery || entry.actionExpected || entry.toolCallsMade > 0;
        if (!notable) {
            continue;
        }
        items.push({
            source: 'turn-ledger',
            label: `recent turn ${entry.deliveryStatus}`,
            detail: cap(entry.responsePreview || entry.errorPreview || entry.userMessagePreview),
            priority: failedDelivery ? 60 : entry.actionExpected ? 45 : 25,
            timestamp: entry.createdAt,
            alreadyLogged: true,
            alreadySurfaced: true,
            acknowledged: true,
            greetingEligible: false,
        });
        if (items.length === 2) {
            break;
        }
    }
    return items;
}
274
// Render the ranked items as the "[Context governance: active working set]"
// prompt block, or null when there is nothing to show. Each item line carries
// state tags (fresh / context only / already surfaced / logged / acknowledged
// / resolved) so the model can tell anchors from new information. When
// transcript embedding coverage is below 50%, an inline recall-degradation
// note is included.
function formatPromptBlock(items, coverage) {
    if (items.length === 0)
        return null;
    const lines = items.map((item) => {
        // 'fresh' and 'context only' are mutually exclusive; the rest stack.
        const tags = [
            item.greetingEligible ? 'fresh' : 'context only',
            item.alreadySurfaced ? 'already surfaced' : null,
            item.alreadyLogged ? 'logged' : null,
            item.acknowledged ? 'acknowledged' : null,
            item.resolved ? 'resolved' : null,
        ].filter(Boolean).join(', ');
        const id = item.eventId ? ` id=${item.eventId}` : '';
        return `- ${item.source}${id}: ${item.label}${tags ? ` (${tags})` : ''} — ${item.detail}`;
    });
    // Warn the model when under half the transcript turns are embedded.
    const coverageNote = coverage && coverage.total > 0 && coverage.embedded / coverage.total < 0.5
        ? `Recall note: memory partially indexed (${coverage.embedded.toLocaleString()} of ${coverage.total.toLocaleString()} turns embedded). Paraphrased recall may miss; lexical recall still works.`
        : null;
    return [
        '[Context governance: active working set]',
        'REFERENCE ONLY — recalled operational context, not new user input.',
        'Use this before deciding whether a vague/casual turn is new work, a follow-up, or a status request.',
        'Context-only, logged, acknowledged, resolved, or already-surfaced items are memory anchors for explicit follow-ups. Do not repeat heartbeat/alert details on greetings or casual small talk unless the user asks for status, a fix, or what changed.',
        ...(coverageNote ? [coverageNote] : []),
        ...lines,
        '[/Context governance: active working set]',
    ].join('\n');
}
301
/**
 * Build a proactive greeting for the first greeting-eligible item with
 * priority >= 75 (items arrive already sorted by priority); null when none.
 */
function formatGreetingLine(items) {
    for (const item of items) {
        if (item.greetingEligible && item.priority >= 75) {
            return `Hey. I am here. Still working on ${item.label}: ${item.detail}`;
        }
    }
    return null;
}
307
/**
 * Assemble the bounded active working set for one session: gather candidate
 * items from background tasks, notifications, unleashed jobs, and the turn
 * ledger; rank by priority (ties broken by recency); keep the top `maxItems`
 * (default 6, minimum 1); and render the prompt block plus optional greeting.
 */
export function buildActiveContextSnapshot(sessionKey, opts) {
    const maxItems = Math.max(1, opts.maxItems ?? 6);
    const surfaceHistory = buildSurfaceHistory(sessionKey, opts);
    const candidates = [
        ...backgroundTaskItems(sessionKey, opts, surfaceHistory),
        ...notificationItems(sessionKey, opts),
        ...unleashedItems(opts, surfaceHistory),
        ...turnLedgerItems(surfaceHistory),
    ];
    candidates.sort((a, b) => {
        if (a.priority !== b.priority) {
            return b.priority - a.priority;
        }
        return timestampMs(b.timestamp) - timestampMs(a.timestamp);
    });
    const items = candidates.slice(0, maxItems);
    return {
        sessionKey,
        items,
        promptBlock: formatPromptBlock(items, opts.transcriptCoverage),
        greetingLine: formatGreetingLine(items),
    };
}
330
+ //# sourceMappingURL=active-context.js.map