clementine-agent 1.18.26 → 1.18.28

@@ -33,6 +33,7 @@ export declare function contextThrashRecoveryNotice(): string;
  export declare function buildContextThrashRecoveryPrompt(userRequest: string, priorFailureText?: string): string;
  /** Format a millisecond duration as a human-friendly "X ago" string. */
  export declare function formatTimeAgo(ms: number): string;
+ export declare function scrubInternalContextBlocks(text: string): string;
  export declare function looksLikeOneMillionContextError(value: unknown): boolean;
  export declare function oneMillionContextRecoveryMessage(): string;
  export declare function looksLikeProviderApiErrorResponse(value: unknown): boolean;
@@ -123,7 +124,7 @@ export declare class PersonalAssistant {
  }>;
  updatedAt: string;
  };
- /** Inject a background work result into the session so the next chat naturally references it. */
+ /** Inject a background work result into the session as silent follow-up context. */
  injectPendingContext(sessionKey: string, userPrompt: string, result: string): void;
  private initMemoryStore;
  /**
@@ -321,7 +322,9 @@ export declare class PersonalAssistant {
  * a query. Used to give the DM session visibility of cron/heartbeat outputs
  * so follow-up conversation has context.
  */
- injectContext(sessionKey: string, userText: string, assistantText: string): void;
+ injectContext(sessionKey: string, userText: string, assistantText: string, opts?: {
+ pending?: boolean;
+ }): void;
  getRecentActivity(sinceIso: string, maxEntries?: number): Array<{
  sessionKey: string;
  role: string;
@@ -373,6 +373,13 @@ function getContextWindow(model) {
  function capContextBlock(text, maxChars) {
  return capOutput(String(text ?? ''), maxChars);
  }
+ export function scrubInternalContextBlocks(text) {
+ return text
+ .replace(/\[Context governance:[^\]]*\][\s\S]*?\[\/Context governance:[^\]]*\]\s*/gi, '')
+ .replace(/\[Active working set\][\s\S]*?\[\/Active working set\]\s*/gi, '')
+ .replace(/\[Recent proactive notification context\][\s\S]*?\[\/Recent proactive notification context\]\s*/gi, '')
+ .trim();
+ }
  function capContextItem(text) {
  return capContextBlock(text, CRON_PROGRESS_ITEM_MAX_CHARS).replace(/\s+/g, ' ').trim();
  }
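
A minimal usage sketch of the new scrubber (the import path and sample text are illustrative assumptions, not part of this diff): bracketed internal blocks are removed from a reply before it reaches the user.

// Hypothetical import path; the function is exported from the assistant module's dist build.
import { scrubInternalContextBlocks } from 'clementine-agent/dist/agent/assistant.js';

const raw = [
  '[Active working set]',
  'deploy watcher: 2 events pending', // internal bookkeeping the user should not see
  '[/Active working set]',
  'Deploy finished; all checks are green.',
].join('\n');

// Only the user-facing sentence survives; the bracketed block and surrounding whitespace are stripped.
console.log(scrubInternalContextBlocks(raw));
// -> "Deploy finished; all checks are green."
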
@@ -1182,7 +1189,7 @@ export class PersonalAssistant {
  getMcpStatus() {
  return { servers: this._lastMcpStatus, updatedAt: this._lastMcpStatusTime };
  }
- /** Inject a background work result into the session so the next chat naturally references it. */
+ /** Inject a background work result into the session as silent follow-up context. */
  injectPendingContext(sessionKey, userPrompt, result) {
  const pending = this.pendingContext.get(sessionKey) ?? [];
  pending.push({ user: userPrompt.slice(0, 500), assistant: result.slice(0, 2000) });
@@ -3043,7 +3050,10 @@ You have a cost budget per message — not a hard turn limit. Work until the tas
  contextLines.push(`[${user}]\n${assistant}`);
  }
  effectivePrompt =
- `[Since we last talked, you did some background work. Naturally mention what happened — lead with anything that needs attention, briefly note routine completions. Don't dump raw tool calls or list job names. Be conversational.\nBackground:\n${contextLines.join('\n\n')}]\n\n${effectivePrompt}`;
+ `[Background work context — REFERENCE ONLY, not new user input.\n` +
+ `Use this silently to understand follow-ups. Mention it only if the user asks about status, results, fixes, or what changed, or if it is a new urgent blocker. ` +
+ `Do not lead greetings or casual small talk with stale heartbeat, cron, or background-task details.\n` +
+ `Background:\n${contextLines.join('\n\n')}]\n\n${effectivePrompt}`;
  }
  }
  // Inject stall nudge if the previous query for this session showed stall signals
@@ -3070,6 +3080,7 @@ You have a cost budget per message — not a hard turn limit. Work until the tas
  const CHAT_TIMEOUT_MS = 30 * 60 * 1000;
  const guard = new StallGuard();
  let [responseText, sessionId] = await this.runQuery(effectivePrompt, key, onText, model, profile, securityAnnotation, effectiveMaxTurns, projectOverride, onToolActivity, verboseLevel, abortController, guard, CHAT_TIMEOUT_MS, intent, turnPolicy, toolset);
+ responseText = scrubInternalContextBlocks(responseText);
  // If we got a context-length / prompt-too-long error, retry with a fresh session
  const errLower = responseText.toLowerCase();
  const isContextOverflow = errLower.includes('prompt is too long') ||
@@ -6184,7 +6195,7 @@ You have a cost budget per message — not a hard turn limit. Work until the tas
  * a query. Used to give the DM session visibility of cron/heartbeat outputs
  * so follow-up conversation has context.
  */
- injectContext(sessionKey, userText, assistantText) {
+ injectContext(sessionKey, userText, assistantText, opts = {}) {
  const trimmedUser = capContextBlock(userText, INJECTED_CONTEXT_MAX_CHARS);
  const trimmedAssistant = capContextBlock(assistantText, INJECTED_CONTEXT_MAX_CHARS);
  // Add to in-memory exchange history
@@ -6199,12 +6210,14 @@ You have a cost budget per message — not a hard turn limit. Work until the tas
  // Queue as pending context so the next chat() prepends it even
  // when an active SDK session exists (session recovery alone won't
  // help because the SDK session has no knowledge of this exchange).
- const pending = this.pendingContext.get(sessionKey) ?? [];
- pending.push({ user: trimmedUser, assistant: trimmedAssistant });
- // Keep at most 3 pending to avoid bloating the next prompt
- if (pending.length > 3)
- pending.shift();
- this.pendingContext.set(sessionKey, pending);
+ if (opts.pending !== false) {
+ const pending = this.pendingContext.get(sessionKey) ?? [];
+ pending.push({ user: trimmedUser, assistant: trimmedAssistant });
+ // Keep at most 3 pending to avoid bloating the next prompt
+ if (pending.length > 3)
+ pending.shift();
+ this.pendingContext.set(sessionKey, pending);
+ }
  this.sessionTimestamps.set(sessionKey, new Date());
  this.saveSessions();
  // Persist to transcript store
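
A hedged caller-side sketch of the new opts.pending flag (the assistant instance and session key are placeholders): pending: false still records the exchange in history and the transcript store, but skips the pending-context queue, so the next chat() prompt is not prepended with it.

// assistant: a PersonalAssistant instance; 'dm:alice' is a placeholder session key.
// Default (queued): the next chat() call prepends this exchange as background context.
assistant.injectContext('dm:alice', 'Nightly backup cron', 'Backup completed, 12 GB uploaded.');

// With pending: false the exchange stays visible in history/transcripts but is never injected into the next prompt.
assistant.injectContext('dm:alice', 'Heartbeat check', 'All services healthy.', { pending: false });
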
@@ -67,7 +67,7 @@ export const TOOL_BUNDLES = [
  },
  {
  id: 'github',
- patterns: [/\b(github|pull request|pull requests|prs?|issues?)\b/i],
+ patterns: [/\b(github|pull request|pull requests|prs?)\b/i, /\b(repo|github)\s+issues?\b/i],
  externalMcpServers: ['github'],
  composioToolkits: ['github'],
  },
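
The effect of the narrowed patterns, sketched as a quick check (the sample messages are made up): a bare mention of "issues" no longer pulls in the github bundle; it now takes "repo issues"/"github issues" or an explicit GitHub/PR term.

const githubPatterns = [/\b(github|pull request|pull requests|prs?)\b/i, /\b(repo|github)\s+issues?\b/i];
const matchesGithub = (text: string) => githubPatterns.some((p) => p.test(text));

matchesGithub('any issues with the deploy?'); // false (the old /issues?/ alternative would have matched)
matchesGithub('triage the repo issues');      // true  (second pattern)
matchesGithub('review my open prs');          // true  (first pattern)
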
@@ -7044,6 +7044,58 @@ If the tool returns nothing or errors, return an empty array \`[]\`.`,
  res.status(500).json({ error: String(err) });
  }
  });
+ // Recent episodes — durable consolidated session summaries.
+ app.get('/api/memory/episodes', async (req, res) => {
+ try {
+ const gateway = await getGateway();
+ const store = gateway.assistant?.memoryStore;
+ if (!store || typeof store.listRecentEpisodes !== 'function') {
+ res.status(503).json({ error: 'Episodes store not available' });
+ return;
+ }
+ const limit = Math.min(parseInt(String(req.query.limit ?? '30'), 10) || 30, 200);
+ const sessionKey = req.query.session ? String(req.query.session) : undefined;
+ const sinceParam = req.query.since ? String(req.query.since) : '';
+ // since: '24h' | '7d' | '30d' | '' (all) | ISO string
+ let sinceIso;
+ if (sinceParam === '24h')
+ sinceIso = new Date(Date.now() - 24 * 3600_000).toISOString();
+ else if (sinceParam === '7d')
+ sinceIso = new Date(Date.now() - 7 * 24 * 3600_000).toISOString();
+ else if (sinceParam === '30d')
+ sinceIso = new Date(Date.now() - 30 * 24 * 3600_000).toISOString();
+ else if (sinceParam)
+ sinceIso = sinceParam;
+ const episodes = store.listRecentEpisodes({ limit, sessionKey, sinceIso });
+ res.json({ ok: true, episodes });
+ }
+ catch (err) {
+ res.status(500).json({ error: String(err) });
+ }
+ });
+ // Coverage + recall telemetry for both chunks and transcripts. Powers the
+ // Memory Coverage card showing whether dense recall is actually earning its
+ // keep on the current corpus.
+ app.get('/api/memory/coverage', async (_req, res) => {
+ try {
+ const gateway = await getGateway();
+ const store = gateway.assistant?.memoryStore;
+ if (!store) {
+ res.status(503).json({ error: 'Memory store not available' });
+ return;
+ }
+ const transcripts = typeof store.getTranscriptDenseCoverage === 'function'
+ ? store.getTranscriptDenseCoverage()
+ : { embedded: 0, total: 0, model: null };
+ const recall = typeof store.getRecallTelemetrySummary === 'function'
+ ? store.getRecallTelemetrySummary(7)
+ : { total: 0, semanticOnly: 0, lexicalOnly: 0, bothModes: 0, avgTopScore: 0 };
+ res.json({ ok: true, transcripts, recall });
+ }
+ catch (err) {
+ res.status(500).json({ error: String(err) });
+ }
+ });
  // Quick-add: append a sentence to today's daily note from the dashboard.
  // Mirrors the agent's memory_write({action:'append_daily'}) path so the
  // note gets indexed identically.
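
A minimal client-side sketch of the two new endpoints (plain fetch here, whereas the dashboard goes through its apiFetch helper; the episode fields shown are the ones the dashboard reads below):

// Episodes from the last 7 days.
const epRes = await fetch('/api/memory/episodes?limit=30&since=7d');
const ep: any = await epRes.json();
if (ep.ok) {
  for (const e of ep.episodes) {
    // Fields used by the dashboard: createdAt, sessionKey, summary, topics, outcome, openLoops.
    console.log(e.createdAt, e.sessionKey, e.summary);
  }
}

// Coverage + recall telemetry: transcripts = { embedded, total, model },
// recall = { total, semanticOnly, lexicalOnly, bothModes, avgTopScore }.
const covRes = await fetch('/api/memory/coverage');
const cov: any = await covRes.json();
if (cov.ok) {
  const pct = cov.transcripts.total > 0
    ? Math.round((cov.transcripts.embedded / cov.transcripts.total) * 100)
    : 0;
  console.log(`transcripts embedded: ${pct}%, recall queries (7d): ${cov.recall.total}`);
}
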
@@ -14975,6 +15027,23 @@ if('serviceWorker' in navigator){navigator.serviceWorker.getRegistrations().then
  <div class="skel-block" style="padding:14px"><div class="skel-row med"></div><div class="skel-row short"></div></div>
  </div>
  </div>
+ <div class="card" style="margin-bottom:14px">
+ <div class="card-header" style="display:flex;align-items:center;justify-content:space-between;gap:8px;flex-wrap:wrap">
+ <span>Recent episodes</span>
+ <div style="display:flex;align-items:center;gap:8px">
+ <select id="episodes-filter-since" onchange="refreshRecentEpisodes()" style="font-size:12px;padding:4px 6px;border:1px solid var(--border);border-radius:4px;background:var(--bg-input);color:var(--text)">
+ <option value="24h">Last 24h</option>
+ <option value="7d" selected>Last 7d</option>
+ <option value="30d">Last 30d</option>
+ <option value="">All</option>
+ </select>
+ <span style="font-size:11px;color:var(--text-muted)">Consolidated session summaries</span>
+ </div>
+ </div>
+ <div class="card-body" id="panel-recent-episodes" style="padding:0">
+ <div class="skel-block" style="padding:14px"><div class="skel-row med"></div><div class="skel-row short"></div></div>
+ </div>
+ </div>
  <div class="card">
  <div class="card-header" style="display:flex;align-items:center;justify-content:space-between">
  <span>Self-correction (supersedes)</span>
@@ -18485,6 +18554,7 @@ function switchTab(group, tab) {
  // Consolidated Memory tab: search results + stats + MEMORY.md + recent writes + supersedes + coverage strip.
  refreshMemory();
  if (typeof refreshRecentWrites === 'function') refreshRecentWrites();
+ if (typeof refreshRecentEpisodes === 'function') refreshRecentEpisodes();
  if (typeof refreshSupersedes === 'function') refreshSupersedes();
  if (typeof refreshCoverageStrip === 'function') refreshCoverageStrip();
  }
@@ -24842,6 +24912,7 @@ async function submitQuickAddMemory() {
  setTimeout(function() {
  closeQuickAddMemory();
  if (typeof refreshRecentWrites === 'function') refreshRecentWrites();
+ if (typeof refreshRecentEpisodes === 'function') refreshRecentEpisodes();
  if (typeof refreshMemory === 'function') refreshMemory();
  }, 600);
  } catch (err) {
@@ -24903,6 +24974,38 @@ async function refreshCoverageStrip() {
  html += '<span style="margin-left:auto;color:var(--text-muted)">All chunks indexed</span>';
  }
  html += '</div>';
+
+ // Second strip: transcripts dense coverage + 7-day recall hit-rate.
+ try {
+ var rc = await apiFetch('/api/memory/coverage');
+ var dc = await rc.json();
+ if (dc.ok) {
+ var tx = dc.transcripts || { embedded: 0, total: 0 };
+ var rec = dc.recall || { total: 0, semanticOnly: 0, lexicalOnly: 0, bothModes: 0, avgTopScore: 0 };
+ var txPct = tx.total > 0 ? Math.round((tx.embedded / tx.total) * 100) : 0;
+ var txColor = txPct >= 95 ? '#10b981' : txPct >= 50 ? '#f59e0b' : '#ef4444';
+ var recallTotal = rec.total || 0;
+ var hit = function(n){ return recallTotal > 0 ? Math.round((n / recallTotal) * 100) : 0; };
+ html += '<div style="display:flex;align-items:center;gap:14px;padding:10px 14px;margin-top:8px;background:var(--bg-secondary);border:1px solid var(--border);border-radius:8px;flex-wrap:wrap;font-size:12px">';
+ html += '<span style="color:var(--text-muted)">Conversation recall:</span>';
+ html += '<span><span style="color:' + txColor + '">●</span> Transcripts ' + txPct + '% '
+ + '<span style="color:var(--text-muted)">(' + tx.embedded.toLocaleString() + '/' + tx.total.toLocaleString() + ')</span></span>';
+ if (recallTotal > 0) {
+ html += '<span style="color:var(--text-muted)">|</span>';
+ html += '<span title="Last 7 days">Hit-rate: '
+ + 'semantic ' + hit(rec.semanticOnly) + '% · '
+ + 'lexical ' + hit(rec.lexicalOnly) + '% · '
+ + 'both ' + hit(rec.bothModes) + '% '
+ + '<span style="color:var(--text-muted)">(' + recallTotal.toLocaleString() + ' queries)</span></span>';
+ } else {
+ html += '<span style="color:var(--text-muted)">No recall queries logged in the last 7 days.</span>';
+ }
+ if (tx.total > 0 && tx.embedded < tx.total) {
+ html += '<span style="margin-left:auto;color:var(--text-muted)">' + (tx.total - tx.embedded).toLocaleString() + ' transcripts unembedded — run <code>clementine memory reembed --target transcripts</code></span>';
+ }
+ html += '</div>';
+ }
+ } catch (e) { /* coverage strip is best-effort */ }
  el.innerHTML = html;
  } catch (err) {
  el.innerHTML = '';
@@ -24963,6 +25066,55 @@ async function refreshRecentWrites() {
  }
  }

+ async function refreshRecentEpisodes() {
+ var el = document.getElementById('panel-recent-episodes');
+ if (!el) return;
+ try {
+ var sel = document.getElementById('episodes-filter-since');
+ var since = sel ? sel.value : '7d';
+ var url = '/api/memory/episodes?limit=30' + (since ? '&since=' + encodeURIComponent(since) : '');
+ var r = await apiFetch(url);
+ var d = await r.json();
+ if (!d.ok || !Array.isArray(d.episodes)) {
+ el.innerHTML = '<div class="empty-state" style="padding:14px">' + esc(d.error || 'No data') + '</div>';
+ return;
+ }
+ if (d.episodes.length === 0) {
+ el.innerHTML = '<div class="empty-state" style="padding:14px">No episodes yet. They land automatically when a session has been idle for ~20 min with at least 3 exchanges.</div>';
+ return;
+ }
+ var html = '<table class="data-table" style="width:100%">';
+ html += '<thead><tr>'
+ + '<th style="width:120px">When</th>'
+ + '<th style="width:160px">Session</th>'
+ + '<th>Summary</th>'
+ + '<th style="width:140px">Topics</th>'
+ + '<th style="width:120px">Outcome</th>'
+ + '<th style="width:50px;text-align:right">Open</th>'
+ + '</tr></thead><tbody>';
+ for (var i = 0; i < d.episodes.length; i++) {
+ var ep = d.episodes[i];
+ var when = '';
+ try { when = new Date(ep.createdAt + 'Z').toLocaleString(); } catch { when = ep.createdAt; }
+ var topics = (ep.topics || []).slice(0, 3).map(esc).join(', ');
+ var openCount = (ep.openLoops || []).length;
+ var openColor = openCount > 0 ? '#f59e0b' : 'var(--text-muted)';
+ html += '<tr>'
+ + '<td style="font-size:11px;color:var(--text-muted)">' + esc(when) + '</td>'
+ + '<td style="font-size:11px">' + esc(ep.sessionKey) + '</td>'
+ + '<td style="font-size:12px">' + esc(ep.summary) + '</td>'
+ + '<td style="font-size:11px;color:var(--text-muted)">' + (topics || '—') + '</td>'
+ + '<td style="font-size:11px">' + esc(ep.outcome || '—') + '</td>'
+ + '<td style="text-align:right;font-weight:600;color:' + openColor + '">' + openCount + '</td>'
+ + '</tr>';
+ }
+ html += '</tbody></table>';
+ el.innerHTML = html;
+ } catch (err) {
+ el.innerHTML = '<div class="empty-state" style="padding:14px">Failed to load: ' + esc(String(err)) + '</div>';
+ }
+ }
+
  async function memoryHealthAction(action, extra) {
  var labels = { 'janitor': 'cleanup', 'rebuild-fts': 'FTS rebuild', 'fix-orphans': 'orphan fix', 'install-dense-model': 'local embedding model install/verify', 'reembed-dense': 'dense embedding backfill' };
  if (!confirm('Run ' + (labels[action] || action) + ' now?')) return;
package/dist/cli/index.js CHANGED
@@ -3276,9 +3276,10 @@ memoryModelCmd
  });
  memoryCmd
  .command('reembed')
- .description('Backfill dense neural embeddings for all chunks (or all stale chunks if model changed). Default model: Snowflake/snowflake-arctic-embed-m-v1.5 — first run downloads ~440MB to ~/.clementine/models/.')
- .option('--limit <n>', 'Max chunks to embed in this run (default: all)')
+ .description('Backfill dense neural embeddings for chunks and/or transcripts. Default model: Snowflake/snowflake-arctic-embed-m-v1.5 — first run downloads ~440MB to ~/.clementine/models/.')
+ .option('--limit <n>', 'Max items to embed in this run (default: all)')
  .option('--model <id>', 'Override embedding model id (e.g. Xenova/bge-base-en-v1.5)')
+ .option('--target <kind>', 'What to backfill: chunks | transcripts | all', 'all')
  .action(async (opts) => {
  const BOLD = '\x1b[1m';
  const DIM = '\x1b[0;90m';
@@ -3290,6 +3291,11 @@ memoryCmd
  if (opts.model) {
  process.env.EMBEDDING_DENSE_MODEL = opts.model;
  }
+ const target = (opts.target ?? 'all').toLowerCase();
+ if (!['chunks', 'transcripts', 'all'].includes(target)) {
+ console.error(` ${RED}Invalid --target${RESET}: "${opts.target}". Use chunks | transcripts | all.`);
+ process.exit(1);
+ }
  const limit = opts.limit ? parseInt(opts.limit, 10) : undefined;
  const { MemoryStore } = await import('../memory/store.js');
  const embeddings = await import('../memory/embeddings.js');
@@ -3298,7 +3304,7 @@ memoryCmd
  const store = new MemoryStore(DB_PATH, VAULT_DIR);
  store.initialize();
  console.log();
- console.log(` ${BOLD}Dense embedding backfill${RESET}`);
+ console.log(` ${BOLD}Dense embedding backfill${RESET} ${DIM}(target: ${target})${RESET}`);
  console.log(` Model: ${embeddings.currentDenseModel()}`);
  console.log(` ${DIM}Loading model (first run downloads ~440MB)…${RESET}`);
  const ready = await embeddings.probeDenseReady();
@@ -3309,28 +3315,59 @@ memoryCmd
  }
  console.log(` ${GREEN}✓${RESET} Model ready (${embeddings.denseDimension()}-dim).`);
  console.log();
- const startTime = Date.now();
- let lastReport = 0;
- const result = await store.backfillDenseEmbeddings({
- limit,
- onProgress: (done, total) => {
- // Throttle to avoid spamming
+ const reportProgress = (label, startTime) => {
+ let lastReport = 0;
+ return (done, total) => {
  const now = Date.now();
  if (now - lastReport < 500 && done < total)
  return;
  lastReport = now;
  const pct = total > 0 ? Math.round((done / total) * 100) : 0;
  const elapsed = Math.round((now - startTime) / 1000);
- process.stdout.write(`\r Progress: ${BOLD}${done.toLocaleString()}${RESET}/${total.toLocaleString()} (${pct}%) ${DIM}${elapsed}s${RESET} `);
- },
- });
- process.stdout.write('\n\n');
- const elapsed = Math.round((Date.now() - startTime) / 1000);
- console.log(` ${GREEN}✓${RESET} Embedded ${BOLD}${result.embedded.toLocaleString()}${RESET} chunks ${DIM}(${elapsed}s elapsed)${RESET}`);
- if (result.failed > 0) {
- console.log(` ${YELLOW}!${RESET} Failed: ${result.failed.toLocaleString()} ${DIM}(model returned null — usually empty or invalid input)${RESET}`);
+ process.stdout.write(`\r ${label}: ${BOLD}${done.toLocaleString()}${RESET}/${total.toLocaleString()} (${pct}%) ${DIM}${elapsed}s${RESET} `);
+ };
+ };
+ let totalEmbedded = 0;
+ let totalFailed = 0;
+ let lastModel = '';
+ if (target === 'chunks' || target === 'all') {
+ const startTime = Date.now();
+ const result = await store.backfillDenseEmbeddings({
+ limit,
+ onProgress: reportProgress('Chunks', startTime),
+ });
+ process.stdout.write('\n');
+ const elapsed = Math.round((Date.now() - startTime) / 1000);
+ console.log(` ${GREEN}✓${RESET} Embedded ${BOLD}${result.embedded.toLocaleString()}${RESET} chunks ${DIM}(${elapsed}s elapsed)${RESET}`);
+ if (result.failed > 0) {
+ console.log(` ${YELLOW}!${RESET} Chunk failures: ${result.failed.toLocaleString()}`);
+ }
+ totalEmbedded += result.embedded;
+ totalFailed += result.failed;
+ lastModel = result.model;
+ }
+ if (target === 'transcripts' || target === 'all') {
+ const startTime = Date.now();
+ const result = await store.backfillTranscriptDenseEmbeddings({
+ limit,
+ onProgress: reportProgress('Transcripts', startTime),
+ });
+ process.stdout.write('\n');
+ const elapsed = Math.round((Date.now() - startTime) / 1000);
+ console.log(` ${GREEN}✓${RESET} Embedded ${BOLD}${result.embedded.toLocaleString()}${RESET} transcripts ${DIM}(${elapsed}s elapsed)${RESET}`);
+ if (result.failed > 0) {
+ console.log(` ${YELLOW}!${RESET} Transcript failures: ${result.failed.toLocaleString()}`);
+ }
+ totalEmbedded += result.embedded;
+ totalFailed += result.failed;
+ lastModel = result.model;
+ }
+ console.log();
+ console.log(` Total embedded: ${BOLD}${totalEmbedded.toLocaleString()}${RESET}`);
+ if (totalFailed > 0) {
+ console.log(` ${DIM}(model returned null on ${totalFailed.toLocaleString()} — usually empty or invalid input)${RESET}`);
  }
- console.log(` Model: ${result.model}`);
+ console.log(` Model: ${lastModel}`);
  console.log();
  console.log(` ${DIM}Run \`clementine memory status\` to see updated coverage.${RESET}`);
  console.log();
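
For reference, a programmatic equivalent of "clementine memory reembed --target all", sketched with the same store calls used above (the package import paths and database/vault paths are assumptions; the method names and result shape are inferred from these call sites):

import { MemoryStore } from 'clementine-agent/dist/memory/store.js'; // path assumed from the CLI's relative import
import * as embeddings from 'clementine-agent/dist/memory/embeddings.js';

const store = new MemoryStore('/path/to/memory.db', '/path/to/vault'); // placeholder paths
store.initialize();

if (await embeddings.probeDenseReady()) {
  const onProgress = (done: number, total: number) => process.stdout.write(`\r${done}/${total}`);
  // Both backfills resolve to { embedded, failed, model }, as the CLI summary above relies on.
  const chunks = await store.backfillDenseEmbeddings({ onProgress });
  const transcripts = await store.backfillTranscriptDenseEmbeddings({ onProgress });
  console.log(`\nembedded ${chunks.embedded + transcripts.embedded} items`);
}
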
@@ -0,0 +1,44 @@
+ /**
+ * Active working set context.
+ *
+ * This is the short-lived "what matters right now" layer above semantic
+ * memory. It is deliberately bounded and deterministic so every channel can
+ * stay aware of active operational state without dragging full transcripts or
+ * run logs into the model.
+ */
+ export interface ActiveContextItem {
+ source: 'notification' | 'background-task' | 'unleashed' | 'turn-ledger';
+ label: string;
+ detail: string;
+ priority: number;
+ timestamp?: string;
+ eventId?: string;
+ sourceId?: string;
+ alreadyLogged?: boolean;
+ alreadySurfaced?: boolean;
+ acknowledged?: boolean;
+ resolved?: boolean;
+ greetingEligible?: boolean;
+ }
+ export interface ActiveContextSnapshot {
+ sessionKey: string;
+ items: ActiveContextItem[];
+ promptBlock: string | null;
+ greetingLine: string | null;
+ }
+ export interface ActiveContextOptions {
+ baseDir: string;
+ now?: number;
+ maxItems?: number;
+ recordEvents?: boolean;
+ /**
+ * Dense embedding coverage for transcripts. When < 50%, the prompt block
+ * carries an inline note so the model knows recall is degraded.
+ */
+ transcriptCoverage?: {
+ embedded: number;
+ total: number;
+ };
+ }
+ export declare function buildActiveContextSnapshot(sessionKey: string, opts: ActiveContextOptions): ActiveContextSnapshot;
+ //# sourceMappingURL=active-context.d.ts.map
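
A usage sketch against the new declarations (the import path, baseDir value, and option choices are assumptions; only the types themselves come from this diff):

import { buildActiveContextSnapshot } from './active-context.js'; // assumed sibling of the .d.ts above

const snapshot = buildActiveContextSnapshot('dm:alice', {            // placeholder session key
  baseDir: '/home/user/.clementine',                                 // placeholder state directory
  maxItems: 5,
  recordEvents: false,                                               // assumed: a read-only peek that records no events
  transcriptCoverage: { embedded: 120, total: 400 },                 // under 50%, so the prompt block should flag degraded recall
});

if (snapshot.promptBlock) {
  console.log(snapshot.promptBlock);   // bounded block, presumably prepended to the session prompt
}
if (snapshot.greetingLine) {
  console.log(snapshot.greetingLine);  // one-line surface drawn from greeting-eligible items
}
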