@edihasaj/recall 0.5.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. package/LICENSE +21 -0
  2. package/README.md +409 -0
  3. package/dist/chunk-4CV4JOE5.js +27 -0
  4. package/dist/chunk-4CV4JOE5.js.map +1 -0
  5. package/dist/chunk-A5UIRZU6.js +469 -0
  6. package/dist/chunk-A5UIRZU6.js.map +1 -0
  7. package/dist/chunk-AYHFPCGY.js +964 -0
  8. package/dist/chunk-AYHFPCGY.js.map +1 -0
  9. package/dist/chunk-DNFKAHS6.js +204 -0
  10. package/dist/chunk-DNFKAHS6.js.map +1 -0
  11. package/dist/chunk-GC5XMBG4.js +551 -0
  12. package/dist/chunk-GC5XMBG4.js.map +1 -0
  13. package/dist/chunk-IILLSHLM.js +3021 -0
  14. package/dist/chunk-IILLSHLM.js.map +1 -0
  15. package/dist/chunk-LVQW6WHK.js +146 -0
  16. package/dist/chunk-LVQW6WHK.js.map +1 -0
  17. package/dist/chunk-LZ6PMQRX.js +955 -0
  18. package/dist/chunk-LZ6PMQRX.js.map +1 -0
  19. package/dist/chunk-PC43MBX5.js +2960 -0
  20. package/dist/chunk-PC43MBX5.js.map +1 -0
  21. package/dist/chunk-VEPXEHRZ.js +1763 -0
  22. package/dist/chunk-VEPXEHRZ.js.map +1 -0
  23. package/dist/cleanup-TVOX2S2S.js +28 -0
  24. package/dist/cleanup-TVOX2S2S.js.map +1 -0
  25. package/dist/cli.js +3425 -0
  26. package/dist/cli.js.map +1 -0
  27. package/dist/daemon.js +1298 -0
  28. package/dist/daemon.js.map +1 -0
  29. package/dist/dispatcher-UGMU6THT.js +15 -0
  30. package/dist/dispatcher-UGMU6THT.js.map +1 -0
  31. package/dist/keychain-5QG52ANO.js +22 -0
  32. package/dist/keychain-5QG52ANO.js.map +1 -0
  33. package/dist/mcp.js +21 -0
  34. package/dist/mcp.js.map +1 -0
  35. package/dist/quality-Z7LPMMBC.js +17 -0
  36. package/dist/quality-Z7LPMMBC.js.map +1 -0
  37. package/dist/sync-server.js +225 -0
  38. package/dist/sync-server.js.map +1 -0
  39. package/dist/tasks-UOLSPXJQ.js +61 -0
  40. package/dist/tasks-UOLSPXJQ.js.map +1 -0
  41. package/dist/usage-CY3V72YN.js +101 -0
  42. package/dist/usage-CY3V72YN.js.map +1 -0
  43. package/drizzle/0000_initial_create.sql +240 -0
  44. package/drizzle/0001_rich_liz_osborn.sql +21 -0
  45. package/drizzle/0002_unknown_spot.sql +18 -0
  46. package/drizzle/0003_red_wendigo.sql +19 -0
  47. package/drizzle/0004_early_carlie_cooper.sql +1 -0
  48. package/drizzle/0005_simple_emma_frost.sql +96 -0
  49. package/drizzle/0006_keen_mongoose.sql +2 -0
  50. package/drizzle/0007_flawless_maximus.sql +15 -0
  51. package/drizzle/meta/0000_snapshot.json +1630 -0
  52. package/drizzle/meta/0001_snapshot.json +1773 -0
  53. package/drizzle/meta/0002_snapshot.json +1891 -0
  54. package/drizzle/meta/0003_snapshot.json +2014 -0
  55. package/drizzle/meta/0004_snapshot.json +2022 -0
  56. package/drizzle/meta/0005_snapshot.json +2064 -0
  57. package/drizzle/meta/0006_snapshot.json +2078 -0
  58. package/drizzle/meta/0007_snapshot.json +2183 -0
  59. package/drizzle/meta/_journal.json +62 -0
  60. package/package.json +64 -0
  61. package/scripts/recall-claude +7 -0
  62. package/scripts/recall-codex +7 -0
  63. package/scripts/recall-session +71 -0
package/dist/chunk-GC5XMBG4.js
@@ -0,0 +1,551 @@
+ import {
+   TaskClaimConflictError,
+   claimTask,
+   listTasks,
+   releaseTask,
+   submitTask
+ } from "./chunk-IILLSHLM.js";
+ import {
+   llmUsage
+ } from "./chunk-A5UIRZU6.js";
+ import {
+   getProviderConfig,
+   hasProviderConfigured,
+   init_keychain
+ } from "./chunk-DNFKAHS6.js";
+
+ // src/llm/client.ts
+ init_keychain();
+ import { randomUUID } from "crypto";
+ var LlmCredentialError = class extends Error {
+ };
+ var LlmRequestError = class extends Error {
+ };
+ var DEFAULT_MODELS = {
+   openai: "gpt-4o-mini",
+   anthropic: "claude-haiku-4-5-20251001",
+   // For Azure the "model" is the deployment name, set by the user when they
+   // provisioned the deployment. We leave it empty so we always fall through
+   // to the deployment from AzureOpenAiConfig.
+   "azure-openai": ""
+ };
+ var COST_PER_M_TOKENS = {
+   "gpt-4o-mini": { input: 0.15, output: 0.6 },
+   "gpt-4o": { input: 2.5, output: 10 },
+   "claude-haiku-4-5-20251001": { input: 1, output: 5 },
+   "claude-sonnet-4-6": { input: 3, output: 15 },
+   "claude-opus-4-7": { input: 15, output: 75 }
+ };
+ async function callLlm(db, input) {
+   const provider = input.provider;
+   const config = getProviderConfig(provider);
+   if (!config) {
+     throw new LlmCredentialError(missingCredentialMessage(provider));
+   }
+   const model = input.model ?? (provider === "azure-openai" ? config.deployment : DEFAULT_MODELS[provider]);
+   const started = Date.now();
+   let result = null;
+   let errorMessage;
+   try {
+     if (provider === "openai") {
+       result = await callOpenAi(config.key, model, input);
+     } else if (provider === "anthropic") {
+       result = await callAnthropic(config.key, model, input);
+     } else {
+       result = await callAzureOpenAi(config, model, input);
+     }
+     return result;
+   } catch (err) {
+     errorMessage = err instanceof Error ? err.message : String(err);
+     throw err;
+   } finally {
+     try {
+       await recordUsage(db, {
+         provider,
+         model,
+         task_kind: input.task_kind,
+         task_id: input.task_id ?? null,
+         repo: input.repo ?? null,
+         usage: result?.usage ?? { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0, cost_usd: null },
+         duration_ms: Date.now() - started,
+         ok: Boolean(result),
+         error: errorMessage
+       });
+     } catch {
+     }
+   }
+ }
+ async function callOpenAi(apiKey, model, input) {
+   const started = Date.now();
+   const response = await fetch("https://api.openai.com/v1/chat/completions", {
+     method: "POST",
+     headers: {
+       "Content-Type": "application/json",
+       Authorization: `Bearer ${apiKey}`
+     },
+     body: JSON.stringify({
+       model,
+       messages: [
+         { role: "system", content: input.system },
+         { role: "user", content: input.user }
+       ],
+       max_completion_tokens: input.max_output_tokens ?? 2048,
+       temperature: input.temperature ?? 0
+     })
+   });
+   if (!response.ok) {
+     const body = await safeText(response);
+     throw new LlmRequestError(`OpenAI ${response.status}: ${body.slice(0, 400)}`);
+   }
+   const payload = await response.json();
+   const text = payload.choices?.[0]?.message?.content?.trim() ?? "";
+   const prompt_tokens = payload.usage?.prompt_tokens ?? 0;
+   const completion_tokens = payload.usage?.completion_tokens ?? 0;
+   const total_tokens = payload.usage?.total_tokens ?? prompt_tokens + completion_tokens;
+   return {
+     text,
+     model,
+     provider: "openai",
+     duration_ms: Date.now() - started,
+     usage: {
+       prompt_tokens,
+       completion_tokens,
+       total_tokens,
+       cost_usd: computeCost(model, prompt_tokens, completion_tokens)
+     }
+   };
+ }
+ async function callAzureOpenAi(config, deployment, input) {
+   const started = Date.now();
+   const url = `${config.endpoint}/openai/deployments/${encodeURIComponent(deployment)}/chat/completions?api-version=${encodeURIComponent(config.api_version)}`;
+   const response = await fetch(url, {
+     method: "POST",
+     headers: {
+       "Content-Type": "application/json",
+       "api-key": config.key
+     },
+     body: JSON.stringify({
+       messages: [
+         { role: "system", content: input.system },
+         { role: "user", content: input.user }
+       ],
+       max_completion_tokens: input.max_output_tokens ?? 2048,
+       temperature: input.temperature ?? 0
+     })
+   });
+   if (!response.ok) {
+     const body = await safeText(response);
+     throw new LlmRequestError(`Azure OpenAI ${response.status}: ${body.slice(0, 400)}`);
+   }
+   const payload = await response.json();
+   const text = payload.choices?.[0]?.message?.content?.trim() ?? "";
+   const prompt_tokens = payload.usage?.prompt_tokens ?? 0;
+   const completion_tokens = payload.usage?.completion_tokens ?? 0;
+   const total_tokens = payload.usage?.total_tokens ?? prompt_tokens + completion_tokens;
+   return {
+     text,
+     model: deployment,
+     provider: "azure-openai",
+     duration_ms: Date.now() - started,
+     usage: {
+       prompt_tokens,
+       completion_tokens,
+       total_tokens,
+       cost_usd: computeCost(deployment, prompt_tokens, completion_tokens)
+     }
+   };
+ }
+ async function callAnthropic(apiKey, model, input) {
+   const started = Date.now();
+   const response = await fetch("https://api.anthropic.com/v1/messages", {
+     method: "POST",
+     headers: {
+       "Content-Type": "application/json",
+       "x-api-key": apiKey,
+       "anthropic-version": "2023-06-01"
+     },
+     body: JSON.stringify({
+       model,
+       system: input.system,
+       messages: [{ role: "user", content: input.user }],
+       max_tokens: input.max_output_tokens ?? 2048,
+       temperature: input.temperature ?? 0
+     })
+   });
+   if (!response.ok) {
+     const body = await safeText(response);
+     throw new LlmRequestError(`Anthropic ${response.status}: ${body.slice(0, 400)}`);
+   }
+   const payload = await response.json();
+   const text = (payload.content ?? []).filter((block) => block.type === "text" && typeof block.text === "string").map((block) => block.text).join("").trim();
+   const prompt_tokens = payload.usage?.input_tokens ?? 0;
+   const completion_tokens = payload.usage?.output_tokens ?? 0;
+   return {
+     text,
+     model,
+     provider: "anthropic",
+     duration_ms: Date.now() - started,
+     usage: {
+       prompt_tokens,
+       completion_tokens,
+       total_tokens: prompt_tokens + completion_tokens,
+       cost_usd: computeCost(model, prompt_tokens, completion_tokens)
+     }
+   };
+ }
+ function computeCost(model, inputTokens, outputTokens) {
+   const rates = COST_PER_M_TOKENS[model];
+   if (!rates) return null;
+   return inputTokens / 1e6 * rates.input + outputTokens / 1e6 * rates.output;
+ }
+ async function recordUsage(db, row) {
+   await db.insert(llmUsage).values({
+     id: randomUUID(),
+     provider: row.provider,
+     model: row.model,
+     task_kind: row.task_kind,
+     task_id: row.task_id,
+     repo: row.repo,
+     prompt_tokens: row.usage.prompt_tokens,
+     completion_tokens: row.usage.completion_tokens,
+     total_tokens: row.usage.total_tokens,
+     cost_usd: row.usage.cost_usd ?? null,
+     duration_ms: row.duration_ms,
+     ok: row.ok,
+     error: row.error ?? null,
+     created_at: (/* @__PURE__ */ new Date()).toISOString()
+   });
+ }
+ function missingCredentialMessage(provider) {
+   switch (provider) {
+     case "openai":
+       return `No API key for provider "openai". Set it via \`recall maintenance credentials set openai <key>\` or the OPENAI_API_KEY env var.`;
+     case "anthropic":
+       return `No API key for provider "anthropic". Set it via \`recall maintenance credentials set anthropic <key>\` or the ANTHROPIC_API_KEY env var.`;
+     case "azure-openai":
+       return `Azure OpenAI is not fully configured. Run \`recall maintenance credentials set azure --endpoint <url> --deployment <name> --api-version <version> <key>\` or set AZURE_OPENAI_{ENDPOINT,DEPLOYMENT,API_VERSION,API_KEY}.`;
+   }
+ }
+ async function safeText(response) {
+   try {
+     return await response.text();
+   } catch {
+     return "";
+   }
+ }
+
+ // src/maintenance/dispatcher.ts
+ init_keychain();
+ var DISPATCH_AGENT = "recall:dispatcher";
+ var DEFAULT_LEASE_SECONDS = 120;
+ async function dispatchPendingTasks(db, options = {}) {
+   const provider = resolveProvider(options.provider);
+   const report = {
+     provider,
+     model: null,
+     dry_run: Boolean(options.dryRun),
+     attempted: 0,
+     applied: 0,
+     rejected: 0,
+     released: 0,
+     outcomes: []
+   };
+   if (!provider) return report;
+   const pending = listTasks(db, {
+     status: "pending",
+     kinds: options.kinds,
+     repo: options.repo,
+     limit: options.maxTasks ?? 5
+   });
+   for (const task of pending) {
+     if (options.dryRun) {
+       report.outcomes.push({
+         task_id: task.id,
+         kind: task.kind,
+         repo: task.repo,
+         status: "skipped",
+         reason: "dry-run"
+       });
+       continue;
+     }
+     report.attempted += 1;
+     const outcome = await runSingle(db, task, provider, options.model);
+     report.outcomes.push(outcome);
+     if (outcome.status === "applied") report.applied += 1;
+     else if (outcome.status === "rejected") report.rejected += 1;
+     else if (outcome.status === "released") report.released += 1;
+     if (outcome.prompt_tokens != null && !report.model) {
+       const last = report.outcomes[report.outcomes.length - 1];
+       report.model = last.task_id ? options.model ?? null : null;
+     }
+   }
+   return report;
+ }
+ async function runSingle(db, task, provider, model) {
+   let claimed;
+   try {
+     const claim = claimTask(db, task.id, DISPATCH_AGENT, DEFAULT_LEASE_SECONDS);
+     claimed = claim.task;
+   } catch (err) {
+     if (err instanceof TaskClaimConflictError) {
+       return {
+         task_id: task.id,
+         kind: task.kind,
+         repo: task.repo,
+         status: "skipped",
+         reason: err.reason
+       };
+     }
+     throw err;
+   }
+   const prompt = buildPrompt(claimed);
+   if (!prompt) {
+     releaseTask(db, claimed.id, DISPATCH_AGENT);
+     return {
+       task_id: claimed.id,
+       kind: claimed.kind,
+       repo: claimed.repo,
+       status: "released",
+       reason: "no prompt builder"
+     };
+   }
+   try {
+     const llmResult = await callLlm(db, {
+       provider,
+       model,
+       system: prompt.system,
+       user: prompt.user,
+       max_output_tokens: prompt.max_output_tokens,
+       task_kind: claimed.kind,
+       task_id: claimed.id,
+       repo: claimed.repo
+     });
+     const parsed = parseJson(llmResult.text);
+     if (!parsed) {
+       releaseTask(db, claimed.id, DISPATCH_AGENT);
+       return {
+         task_id: claimed.id,
+         kind: claimed.kind,
+         repo: claimed.repo,
+         status: "released",
+         reason: "llm did not return valid JSON",
+         prompt_tokens: llmResult.usage.prompt_tokens,
+         completion_tokens: llmResult.usage.completion_tokens,
+         cost_usd: llmResult.usage.cost_usd,
+         duration_ms: llmResult.duration_ms
+       };
+     }
+     const submit = submitTask(db, claimed.id, DISPATCH_AGENT, parsed);
+     if (submit.status === "applied") {
+       return {
+         task_id: claimed.id,
+         kind: claimed.kind,
+         repo: claimed.repo,
+         status: "applied",
+         target_id: submit.target_id,
+         changed_fields: submit.changed_fields,
+         prompt_tokens: llmResult.usage.prompt_tokens,
+         completion_tokens: llmResult.usage.completion_tokens,
+         cost_usd: llmResult.usage.cost_usd,
+         duration_ms: llmResult.duration_ms
+       };
+     }
+     return {
+       task_id: claimed.id,
+       kind: claimed.kind,
+       repo: claimed.repo,
+       status: "rejected",
+       reason: submit.reason,
+       prompt_tokens: llmResult.usage.prompt_tokens,
+       completion_tokens: llmResult.usage.completion_tokens,
+       cost_usd: llmResult.usage.cost_usd,
+       duration_ms: llmResult.duration_ms
+     };
+   } catch (err) {
+     releaseTask(db, claimed.id, DISPATCH_AGENT);
+     const reason = err instanceof LlmCredentialError ? err.message : err instanceof Error ? err.message : String(err);
+     return {
+       task_id: claimed.id,
+       kind: claimed.kind,
+       repo: claimed.repo,
+       status: "released",
+       reason
+     };
+   }
+ }
+ function resolveProvider(preferred) {
+   const candidates = preferred ? [preferred] : ["anthropic", "azure-openai", "openai"];
+   for (const provider of candidates) {
+     if (hasProviderConfigured(provider)) return provider;
+   }
+   return null;
+ }
+ function parseJson(text) {
+   const trimmed = text.trim();
+   if (trimmed.length === 0) return null;
+   const stripped = trimmed.replace(/^```(?:json)?\s*/i, "").replace(/\s*```$/i, "").trim();
+   try {
+     return JSON.parse(stripped);
+   } catch {
+     const first = stripped.indexOf("{");
+     const last = stripped.lastIndexOf("}");
+     if (first >= 0 && last > first) {
+       try {
+         return JSON.parse(stripped.slice(first, last + 1));
+       } catch {
+         return null;
+       }
+     }
+     return null;
+   }
+ }
+ function buildPrompt(task) {
+   switch (task.kind) {
+     case "verify_capture":
+       return buildVerifyCapturePrompt(task);
+     case "refine_candidate":
+       return buildRefineCandidatePrompt(task);
+     case "summarize_history":
+       return buildSummarizeHistoryPrompt(task);
+     case "merge_duplicates":
+       return buildMergeDuplicatesPrompt(task);
+     case "summarize_session":
+       return buildSummarizeSessionPrompt(task);
+     case "synthesize_repo":
+       return buildSynthesizeRepoPrompt(task);
+     default:
+       return null;
+   }
+ }
+ function buildVerifyCapturePrompt(task) {
+   const payload = task.payload;
+   const system = [
+     "You verify a captured candidate rule for a coding-agent memory store.",
+     "Decide if it is a durable rule worth saving, salvageable but needs rewriting, or noise/narration.",
+     "Be strict \u2014 false positives produce wrong agent behavior. When unsure, prefer reject over save.",
+     "Reject voice transcripts, descriptive clauses about what the user does ('things I never use'), one-shot task chatter, and any text whose intent is unclear without surrounding context.",
+     "When rewriting, output a single canonical sentence in imperative mood. Keep scope as tight as the evidence supports.",
+     'Flag is_destructive_risky=true when the rule pairs a destructive verb (remove/delete/drop/wipe) with high-risk targets (settings/config/files/secrets/branches), OR when it is shaped as a literal-trigger rule ("when user says X, do Y") \u2014 both require explicit user confirm regardless.',
+     JSON_ONLY
+   ].join(" ");
+   const user = [
+     `Candidate text: ${JSON.stringify(payload.text ?? "")}`,
+     `Inferred scope: ${payload.inferred_scope ?? "repo"}`,
+     `Inferred path_scope: ${JSON.stringify(payload.inferred_path_scope ?? null)}`,
+     `Repo: ${JSON.stringify(payload.repo ?? null)}`,
+     `Capture context: ${JSON.stringify(payload.capture_context ?? null)}`,
+     "",
+     'Return JSON: {"verdict": "save"|"rewrite"|"reject", "cleaned_text"?: string, "scope"?: "session"|"path"|"repo"|"team"|"global", "path_scope"?: string|null, "is_destructive_risky"?: boolean, "reason"?: string}'
+   ].join("\n");
+   return { system, user };
+ }
+ var JSON_ONLY = "Respond with a single JSON object matching the required schema, no prose, no markdown fences.";
+ function buildRefineCandidatePrompt(task) {
+   const payload = task.payload;
+   const system = [
+     "You refine candidate memories in a coding-agent memory store.",
+     "Keep only durable rules/commands/gotchas. Clamp scope tighter when the evidence is path-specific.",
+     JSON_ONLY
+   ].join(" ");
+   const user = [
+     `Current memory text: ${JSON.stringify(payload.text ?? "")}`,
+     `Current scope: ${payload.current_scope ?? "repo"}`,
+     `Current path_scope: ${JSON.stringify(payload.current_path_scope ?? null)}`,
+     `Repo: ${JSON.stringify(payload.repo ?? null)}`,
+     `Repetition count: ${payload.repetition_count ?? 0}`,
+     "",
+     'Return JSON: {"refined_text": string, "scope": "session"|"path"|"repo"|"team"|"global", "path_scope": string|null, "rationale": string, "verdict"?: "rewrite"|"reject"}'
+   ].join("\n");
+   return { system, user };
+ }
+ function buildSummarizeHistoryPrompt(task) {
+   const payload = task.payload;
+   const system = [
+     "You compress activity snippets in a coding-agent memory store.",
+     "Keep the essential facts; drop filler. <= 3 short sentences.",
+     JSON_ONLY
+   ].join(" ");
+   const user = [
+     `Kind: ${payload.kind ?? "unknown"}`,
+     `Repo: ${JSON.stringify(payload.repo ?? null)}`,
+     `Current text: ${JSON.stringify(payload.current_text ?? "")}`,
+     "",
+     'Return JSON: {"summary_text": string, "tags": [string, ...]}'
+   ].join("\n");
+   return { system, user };
+ }
+ function buildMergeDuplicatesPrompt(task) {
+   const payload = task.payload;
+   const system = [
+     "You pick the best memory among near-duplicates in a coding-agent memory store.",
+     "Choose the single winning id. You may also rewrite the winner's text for clarity, and tighten its scope if evidence supports it.",
+     JSON_ONLY
+   ].join(" ");
+   const user = [
+     `Repo: ${JSON.stringify(payload.repo ?? null)}`,
+     `Cluster:`,
+     JSON.stringify(payload.cluster ?? [], null, 2),
+     "",
+     'Return JSON: {"winner_id": uuid, "winner_text"?: string, "winner_scope"?: "session"|"path"|"repo"|"team", "winner_path_scope"?: string|null, "rationale"?: string}'
+   ].join("\n");
+   return { system, user };
+ }
+ function buildSummarizeSessionPrompt(task) {
+   const payload = task.payload;
+   const system = [
+     "You condense a coding-agent session into a brief durable summary.",
+     "<= 5 short bullet points; no filler.",
+     JSON_ONLY
+   ].join(" ");
+   const user = [
+     `Session: ${payload.session_id ?? "unknown"}`,
+     `Repo: ${JSON.stringify(payload.repo ?? null)}`,
+     `Events: ${JSON.stringify(payload.events ?? [], null, 2).slice(0, 12e3)}`,
+     "",
+     'Return JSON: {"summary_text": string}'
+   ].join("\n");
+   return { system, user };
+ }
+ function buildSynthesizeRepoPrompt(task) {
+   const payload = task.payload;
+   const system = [
+     "You synthesize a concise repo-level summary from the stable memory set.",
+     "Focus on commands, rules, gotchas, and decisions that repeat across sessions.",
+     JSON_ONLY
+   ].join(" ");
+   const user = [
+     `Repo: ${JSON.stringify(payload.repo ?? null)}`,
+     `Memory set: ${JSON.stringify(payload.memories ?? [], null, 2).slice(0, 12e3)}`,
+     "",
+     'Return JSON: {"summary_text": string}'
+   ].join("\n");
+   return { system, user };
+ }
+ function formatDispatchReport(report) {
+   const lines = [
+     "# Recall Maintenance Dispatch",
+     `Provider: ${report.provider ?? "(none \u2014 no API key)"}`,
+     `Dry run: ${report.dry_run ? "yes" : "no"}`,
+     `Attempted: ${report.attempted}`,
+     `Applied: ${report.applied}`,
+     `Rejected: ${report.rejected}`,
+     `Released: ${report.released}`
+   ];
+   if (report.outcomes.length > 0) {
+     lines.push("", "## Outcomes");
+     for (const o of report.outcomes) {
+       const cost = o.cost_usd != null ? ` $${o.cost_usd.toFixed(4)}` : "";
+       const tokens = o.prompt_tokens != null ? ` tokens=${(o.prompt_tokens ?? 0) + (o.completion_tokens ?? 0)}` : "";
+       const reason = o.reason ? ` \u2014 ${o.reason}` : "";
+       lines.push(` ${o.task_id.slice(0, 8)} ${o.kind.padEnd(20)} ${o.status.padEnd(10)}${tokens}${cost}${reason}`);
+     }
+   }
+   return lines.join("\n");
+ }
+
+ export {
+   dispatchPendingTasks,
+   buildPrompt,
+   formatDispatchReport
+ };
+ //# sourceMappingURL=chunk-GC5XMBG4.js.map
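
For orientation, this chunk exports dispatchPendingTasks, buildPrompt, and formatDispatchReport. A minimal sketch of how a caller inside the bundle might drive the dispatcher is shown below; the openRecallDb helper and the option values are illustrative assumptions, not part of this diff.

// Sketch only: openRecallDb is a hypothetical stand-in for however the CLI
// obtains the drizzle-backed db handle that dispatchPendingTasks expects.
import { dispatchPendingTasks, formatDispatchReport } from "./chunk-GC5XMBG4.js";

const db = await openRecallDb();
const report = await dispatchPendingTasks(db, {
  provider: "anthropic", // omit to fall back to the first configured provider
  maxTasks: 5,           // same default as the listTasks limit above
  dryRun: true           // dry-run records each pending task as "skipped"
});
console.log(formatDispatchReport(report));
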
package/dist/chunk-GC5XMBG4.js.map
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/llm/client.ts","../src/maintenance/dispatcher.ts"],"sourcesContent":["import { randomUUID } from \"node:crypto\";\nimport {\n getProviderConfig,\n type AzureOpenAiConfig,\n type LlmProvider,\n} from \"../credentials/keychain.js\";\nimport type { RecallDb } from \"../db/client.js\";\nimport { llmUsage } from \"../db/schema.js\";\n\nexport type { LlmProvider };\n\nexport interface LlmCallInput {\n provider: LlmProvider;\n model?: string;\n system: string;\n user: string;\n max_output_tokens?: number;\n temperature?: number;\n task_kind: string;\n task_id?: string | null;\n repo?: string | null;\n}\n\nexport interface LlmUsageRow {\n prompt_tokens: number;\n completion_tokens: number;\n total_tokens: number;\n cost_usd: number | null;\n}\n\nexport interface LlmCallResult {\n text: string;\n usage: LlmUsageRow;\n model: string;\n provider: LlmProvider;\n duration_ms: number;\n}\n\nexport class LlmCredentialError extends Error {}\nexport class LlmRequestError extends Error {}\n\nexport const DEFAULT_MODELS: Record<LlmProvider, string> = {\n openai: \"gpt-4o-mini\",\n anthropic: \"claude-haiku-4-5-20251001\",\n // For Azure the \"model\" is the deployment name, set by the user when they\n // provisioned the deployment. We leave it empty so we always fall through\n // to the deployment from AzureOpenAiConfig.\n \"azure-openai\": \"\",\n};\n\n// Rough per-1M token rates ($). Kept conservative; tighten when model pricing shifts.\n// Map by exact model id; unknown models fall through to null cost (still tracked, just un-priced).\nconst COST_PER_M_TOKENS: Record<string, { input: number; output: number }> = {\n \"gpt-4o-mini\": { input: 0.15, output: 0.60 },\n \"gpt-4o\": { input: 2.5, output: 10.0 },\n \"claude-haiku-4-5-20251001\": { input: 1.0, output: 5.0 },\n \"claude-sonnet-4-6\": { input: 3.0, output: 15.0 },\n \"claude-opus-4-7\": { input: 15.0, output: 75.0 },\n};\n\nexport async function callLlm(\n db: RecallDb,\n input: LlmCallInput,\n): Promise<LlmCallResult> {\n const provider = input.provider;\n const config = getProviderConfig(provider);\n if (!config) {\n throw new LlmCredentialError(missingCredentialMessage(provider));\n }\n const model = input.model ?? (provider === \"azure-openai\"\n ? (config as AzureOpenAiConfig).deployment\n : DEFAULT_MODELS[provider]);\n\n const started = Date.now();\n let result: LlmCallResult | null = null;\n let errorMessage: string | undefined;\n\n try {\n if (provider === \"openai\") {\n result = await callOpenAi((config as { key: string }).key, model, input);\n } else if (provider === \"anthropic\") {\n result = await callAnthropic((config as { key: string }).key, model, input);\n } else {\n result = await callAzureOpenAi(config as AzureOpenAiConfig, model, input);\n }\n return result;\n } catch (err) {\n errorMessage = err instanceof Error ? err.message : String(err);\n throw err;\n } finally {\n try {\n await recordUsage(db, {\n provider,\n model,\n task_kind: input.task_kind,\n task_id: input.task_id ?? null,\n repo: input.repo ?? null,\n usage: result?.usage ?? 
{ prompt_tokens: 0, completion_tokens: 0, total_tokens: 0, cost_usd: null },\n duration_ms: Date.now() - started,\n ok: Boolean(result),\n error: errorMessage,\n });\n } catch {\n // telemetry must never break the caller\n }\n }\n}\n\nasync function callOpenAi(\n apiKey: string,\n model: string,\n input: LlmCallInput,\n): Promise<LlmCallResult> {\n const started = Date.now();\n const response = await fetch(\"https://api.openai.com/v1/chat/completions\", {\n method: \"POST\",\n headers: {\n \"Content-Type\": \"application/json\",\n Authorization: `Bearer ${apiKey}`,\n },\n body: JSON.stringify({\n model,\n messages: [\n { role: \"system\", content: input.system },\n { role: \"user\", content: input.user },\n ],\n max_completion_tokens: input.max_output_tokens ?? 2048,\n temperature: input.temperature ?? 0,\n }),\n });\n\n if (!response.ok) {\n const body = await safeText(response);\n throw new LlmRequestError(`OpenAI ${response.status}: ${body.slice(0, 400)}`);\n }\n\n const payload = await response.json() as {\n choices?: Array<{ message?: { content?: string } }>;\n usage?: { prompt_tokens?: number; completion_tokens?: number; total_tokens?: number };\n };\n const text = payload.choices?.[0]?.message?.content?.trim() ?? \"\";\n const prompt_tokens = payload.usage?.prompt_tokens ?? 0;\n const completion_tokens = payload.usage?.completion_tokens ?? 0;\n const total_tokens = payload.usage?.total_tokens ?? prompt_tokens + completion_tokens;\n\n return {\n text,\n model,\n provider: \"openai\",\n duration_ms: Date.now() - started,\n usage: {\n prompt_tokens,\n completion_tokens,\n total_tokens,\n cost_usd: computeCost(model, prompt_tokens, completion_tokens),\n },\n };\n}\n\nasync function callAzureOpenAi(\n config: AzureOpenAiConfig,\n deployment: string,\n input: LlmCallInput,\n): Promise<LlmCallResult> {\n const started = Date.now();\n const url = `${config.endpoint}/openai/deployments/${encodeURIComponent(deployment)}/chat/completions?api-version=${encodeURIComponent(config.api_version)}`;\n const response = await fetch(url, {\n method: \"POST\",\n headers: {\n \"Content-Type\": \"application/json\",\n \"api-key\": config.key,\n },\n body: JSON.stringify({\n messages: [\n { role: \"system\", content: input.system },\n { role: \"user\", content: input.user },\n ],\n max_completion_tokens: input.max_output_tokens ?? 2048,\n temperature: input.temperature ?? 0,\n }),\n });\n\n if (!response.ok) {\n const body = await safeText(response);\n throw new LlmRequestError(`Azure OpenAI ${response.status}: ${body.slice(0, 400)}`);\n }\n\n const payload = await response.json() as {\n choices?: Array<{ message?: { content?: string } }>;\n usage?: { prompt_tokens?: number; completion_tokens?: number; total_tokens?: number };\n };\n const text = payload.choices?.[0]?.message?.content?.trim() ?? \"\";\n const prompt_tokens = payload.usage?.prompt_tokens ?? 0;\n const completion_tokens = payload.usage?.completion_tokens ?? 0;\n const total_tokens = payload.usage?.total_tokens ?? 
prompt_tokens + completion_tokens;\n\n return {\n text,\n model: deployment,\n provider: \"azure-openai\",\n duration_ms: Date.now() - started,\n usage: {\n prompt_tokens,\n completion_tokens,\n total_tokens,\n cost_usd: computeCost(deployment, prompt_tokens, completion_tokens),\n },\n };\n}\n\nasync function callAnthropic(\n apiKey: string,\n model: string,\n input: LlmCallInput,\n): Promise<LlmCallResult> {\n const started = Date.now();\n const response = await fetch(\"https://api.anthropic.com/v1/messages\", {\n method: \"POST\",\n headers: {\n \"Content-Type\": \"application/json\",\n \"x-api-key\": apiKey,\n \"anthropic-version\": \"2023-06-01\",\n },\n body: JSON.stringify({\n model,\n system: input.system,\n messages: [{ role: \"user\", content: input.user }],\n max_tokens: input.max_output_tokens ?? 2048,\n temperature: input.temperature ?? 0,\n }),\n });\n\n if (!response.ok) {\n const body = await safeText(response);\n throw new LlmRequestError(`Anthropic ${response.status}: ${body.slice(0, 400)}`);\n }\n\n const payload = await response.json() as {\n content?: Array<{ type?: string; text?: string }>;\n usage?: { input_tokens?: number; output_tokens?: number };\n };\n const text = (payload.content ?? [])\n .filter((block) => block.type === \"text\" && typeof block.text === \"string\")\n .map((block) => block.text)\n .join(\"\")\n .trim();\n const prompt_tokens = payload.usage?.input_tokens ?? 0;\n const completion_tokens = payload.usage?.output_tokens ?? 0;\n\n return {\n text,\n model,\n provider: \"anthropic\",\n duration_ms: Date.now() - started,\n usage: {\n prompt_tokens,\n completion_tokens,\n total_tokens: prompt_tokens + completion_tokens,\n cost_usd: computeCost(model, prompt_tokens, completion_tokens),\n },\n };\n}\n\nfunction computeCost(model: string, inputTokens: number, outputTokens: number): number | null {\n const rates = COST_PER_M_TOKENS[model];\n if (!rates) return null;\n return (\n (inputTokens / 1_000_000) * rates.input +\n (outputTokens / 1_000_000) * rates.output\n );\n}\n\nasync function recordUsage(\n db: RecallDb,\n row: {\n provider: LlmProvider;\n model: string;\n task_kind: string;\n task_id: string | null;\n repo: string | null;\n usage: LlmUsageRow;\n duration_ms: number;\n ok: boolean;\n error?: string;\n },\n): Promise<void> {\n await db.insert(llmUsage).values({\n id: randomUUID(),\n provider: row.provider,\n model: row.model,\n task_kind: row.task_kind,\n task_id: row.task_id,\n repo: row.repo,\n prompt_tokens: row.usage.prompt_tokens,\n completion_tokens: row.usage.completion_tokens,\n total_tokens: row.usage.total_tokens,\n cost_usd: row.usage.cost_usd ?? null,\n duration_ms: row.duration_ms,\n ok: row.ok,\n error: row.error ?? null,\n created_at: new Date().toISOString(),\n });\n}\n\nfunction missingCredentialMessage(provider: LlmProvider): string {\n switch (provider) {\n case \"openai\":\n return `No API key for provider \"openai\". Set it via \\`recall maintenance credentials set openai <key>\\` or the OPENAI_API_KEY env var.`;\n case \"anthropic\":\n return `No API key for provider \"anthropic\". Set it via \\`recall maintenance credentials set anthropic <key>\\` or the ANTHROPIC_API_KEY env var.`;\n case \"azure-openai\":\n return `Azure OpenAI is not fully configured. 
Run \\`recall maintenance credentials set azure --endpoint <url> --deployment <name> --api-version <version> <key>\\` or set AZURE_OPENAI_{ENDPOINT,DEPLOYMENT,API_VERSION,API_KEY}.`;\n }\n}\n\nasync function safeText(response: Response): Promise<string> {\n try {\n return await response.text();\n } catch {\n return \"\";\n }\n}\n","import type { RecallDb } from \"../db/client.js\";\nimport type { MaintenanceTask, MaintenanceTaskKind } from \"../types.js\";\nimport {\n TaskClaimConflictError,\n claimTask,\n listTasks,\n releaseTask,\n submitTask,\n} from \"./tasks.js\";\nimport { callLlm, LlmCredentialError, type LlmProvider } from \"../llm/client.js\";\nimport { hasProviderConfigured } from \"../credentials/keychain.js\";\n\nconst DISPATCH_AGENT = \"recall:dispatcher\";\nconst DEFAULT_LEASE_SECONDS = 120;\n\nexport interface DispatchOptions {\n provider?: LlmProvider;\n model?: string;\n maxTasks?: number;\n kinds?: MaintenanceTaskKind[];\n repo?: string;\n dryRun?: boolean;\n}\n\nexport interface DispatchOutcome {\n task_id: string;\n kind: MaintenanceTaskKind;\n repo: string | null;\n status: \"applied\" | \"rejected\" | \"released\" | \"skipped\";\n reason?: string;\n target_id?: string;\n changed_fields?: string[];\n prompt_tokens?: number;\n completion_tokens?: number;\n cost_usd?: number | null;\n duration_ms?: number;\n}\n\nexport interface DispatchReport {\n provider: LlmProvider | null;\n model: string | null;\n dry_run: boolean;\n attempted: number;\n applied: number;\n rejected: number;\n released: number;\n outcomes: DispatchOutcome[];\n}\n\nexport async function dispatchPendingTasks(\n db: RecallDb,\n options: DispatchOptions = {},\n): Promise<DispatchReport> {\n const provider = resolveProvider(options.provider);\n const report: DispatchReport = {\n provider,\n model: null,\n dry_run: Boolean(options.dryRun),\n attempted: 0,\n applied: 0,\n rejected: 0,\n released: 0,\n outcomes: [],\n };\n if (!provider) return report;\n\n const pending = listTasks(db, {\n status: \"pending\",\n kinds: options.kinds,\n repo: options.repo,\n limit: options.maxTasks ?? 5,\n });\n\n for (const task of pending) {\n if (options.dryRun) {\n report.outcomes.push({\n task_id: task.id,\n kind: task.kind,\n repo: task.repo,\n status: \"skipped\",\n reason: \"dry-run\",\n });\n continue;\n }\n report.attempted += 1;\n const outcome = await runSingle(db, task, provider, options.model);\n report.outcomes.push(outcome);\n if (outcome.status === \"applied\") report.applied += 1;\n else if (outcome.status === \"rejected\") report.rejected += 1;\n else if (outcome.status === \"released\") report.released += 1;\n if (outcome.prompt_tokens != null && !report.model) {\n // remember the model the first successful call used, for display\n const last = report.outcomes[report.outcomes.length - 1];\n report.model = (last as DispatchOutcome & { model?: string }).task_id ? options.model ?? 
null : null;\n }\n }\n\n return report;\n}\n\nasync function runSingle(\n db: RecallDb,\n task: MaintenanceTask,\n provider: LlmProvider,\n model?: string,\n): Promise<DispatchOutcome> {\n let claimed: MaintenanceTask;\n try {\n const claim = claimTask(db, task.id, DISPATCH_AGENT, DEFAULT_LEASE_SECONDS);\n claimed = claim.task;\n } catch (err) {\n if (err instanceof TaskClaimConflictError) {\n return {\n task_id: task.id,\n kind: task.kind,\n repo: task.repo,\n status: \"skipped\",\n reason: err.reason,\n };\n }\n throw err;\n }\n\n const prompt = buildPrompt(claimed);\n if (!prompt) {\n releaseTask(db, claimed.id, DISPATCH_AGENT);\n return {\n task_id: claimed.id,\n kind: claimed.kind,\n repo: claimed.repo,\n status: \"released\",\n reason: \"no prompt builder\",\n };\n }\n\n try {\n const llmResult = await callLlm(db, {\n provider,\n model,\n system: prompt.system,\n user: prompt.user,\n max_output_tokens: prompt.max_output_tokens,\n task_kind: claimed.kind,\n task_id: claimed.id,\n repo: claimed.repo,\n });\n\n const parsed = parseJson(llmResult.text);\n if (!parsed) {\n releaseTask(db, claimed.id, DISPATCH_AGENT);\n return {\n task_id: claimed.id,\n kind: claimed.kind,\n repo: claimed.repo,\n status: \"released\",\n reason: \"llm did not return valid JSON\",\n prompt_tokens: llmResult.usage.prompt_tokens,\n completion_tokens: llmResult.usage.completion_tokens,\n cost_usd: llmResult.usage.cost_usd,\n duration_ms: llmResult.duration_ms,\n };\n }\n\n const submit = submitTask(db, claimed.id, DISPATCH_AGENT, parsed);\n if (submit.status === \"applied\") {\n return {\n task_id: claimed.id,\n kind: claimed.kind,\n repo: claimed.repo,\n status: \"applied\",\n target_id: submit.target_id,\n changed_fields: submit.changed_fields,\n prompt_tokens: llmResult.usage.prompt_tokens,\n completion_tokens: llmResult.usage.completion_tokens,\n cost_usd: llmResult.usage.cost_usd,\n duration_ms: llmResult.duration_ms,\n };\n }\n return {\n task_id: claimed.id,\n kind: claimed.kind,\n repo: claimed.repo,\n status: \"rejected\",\n reason: submit.reason,\n prompt_tokens: llmResult.usage.prompt_tokens,\n completion_tokens: llmResult.usage.completion_tokens,\n cost_usd: llmResult.usage.cost_usd,\n duration_ms: llmResult.duration_ms,\n };\n } catch (err) {\n releaseTask(db, claimed.id, DISPATCH_AGENT);\n const reason = err instanceof LlmCredentialError\n ? err.message\n : err instanceof Error\n ? err.message\n : String(err);\n return {\n task_id: claimed.id,\n kind: claimed.kind,\n repo: claimed.repo,\n status: \"released\",\n reason,\n };\n }\n}\n\nfunction resolveProvider(preferred?: LlmProvider): LlmProvider | null {\n const candidates: LlmProvider[] = preferred\n ? [preferred]\n : [\"anthropic\", \"azure-openai\", \"openai\"];\n for (const provider of candidates) {\n if (hasProviderConfigured(provider)) return provider;\n }\n return null;\n}\n\nfunction parseJson(text: string): unknown | null {\n const trimmed = text.trim();\n if (trimmed.length === 0) return null;\n // Strip code fences if present.\n const stripped = trimmed\n .replace(/^```(?:json)?\\s*/i, \"\")\n .replace(/\\s*```$/i, \"\")\n .trim();\n try {\n return JSON.parse(stripped);\n } catch {\n // Some models return a leading sentence before the JSON. 
Try to locate the first {.\n const first = stripped.indexOf(\"{\");\n const last = stripped.lastIndexOf(\"}\");\n if (first >= 0 && last > first) {\n try {\n return JSON.parse(stripped.slice(first, last + 1));\n } catch {\n return null;\n }\n }\n return null;\n }\n}\n\nexport interface Prompt {\n system: string;\n user: string;\n max_output_tokens?: number;\n}\n\nexport function buildPrompt(task: MaintenanceTask): Prompt | null {\n switch (task.kind) {\n case \"verify_capture\":\n return buildVerifyCapturePrompt(task);\n case \"refine_candidate\":\n return buildRefineCandidatePrompt(task);\n case \"summarize_history\":\n return buildSummarizeHistoryPrompt(task);\n case \"merge_duplicates\":\n return buildMergeDuplicatesPrompt(task);\n case \"summarize_session\":\n return buildSummarizeSessionPrompt(task);\n case \"synthesize_repo\":\n return buildSynthesizeRepoPrompt(task);\n default:\n return null;\n }\n}\n\nfunction buildVerifyCapturePrompt(task: MaintenanceTask): Prompt {\n const payload = task.payload as {\n memory_id?: string;\n text?: string;\n inferred_scope?: string;\n inferred_path_scope?: string | null;\n repo?: string | null;\n capture_context?: unknown;\n };\n const system = [\n \"You verify a captured candidate rule for a coding-agent memory store.\",\n \"Decide if it is a durable rule worth saving, salvageable but needs rewriting, or noise/narration.\",\n \"Be strict — false positives produce wrong agent behavior. When unsure, prefer reject over save.\",\n \"Reject voice transcripts, descriptive clauses about what the user does ('things I never use'), one-shot task chatter, and any text whose intent is unclear without surrounding context.\",\n \"When rewriting, output a single canonical sentence in imperative mood. Keep scope as tight as the evidence supports.\",\n \"Flag is_destructive_risky=true when the rule pairs a destructive verb (remove/delete/drop/wipe) with high-risk targets (settings/config/files/secrets/branches), OR when it is shaped as a literal-trigger rule (\\\"when user says X, do Y\\\") — both require explicit user confirm regardless.\",\n JSON_ONLY,\n ].join(\" \");\n const user = [\n `Candidate text: ${JSON.stringify(payload.text ?? \"\")}`,\n `Inferred scope: ${payload.inferred_scope ?? \"repo\"}`,\n `Inferred path_scope: ${JSON.stringify(payload.inferred_path_scope ?? null)}`,\n `Repo: ${JSON.stringify(payload.repo ?? null)}`,\n `Capture context: ${JSON.stringify(payload.capture_context ?? null)}`,\n \"\",\n 'Return JSON: {\"verdict\": \"save\"|\"rewrite\"|\"reject\", \"cleaned_text\"?: string, \"scope\"?: \"session\"|\"path\"|\"repo\"|\"team\"|\"global\", \"path_scope\"?: string|null, \"is_destructive_risky\"?: boolean, \"reason\"?: string}',\n ].join(\"\\n\");\n return { system, user };\n}\n\nconst JSON_ONLY = \"Respond with a single JSON object matching the required schema, no prose, no markdown fences.\";\n\nfunction buildRefineCandidatePrompt(task: MaintenanceTask): Prompt {\n const payload = task.payload as {\n memory_id?: string;\n text?: string;\n current_scope?: string;\n current_path_scope?: string | null;\n repo?: string | null;\n repetition_count?: number;\n };\n const system = [\n \"You refine candidate memories in a coding-agent memory store.\",\n \"Keep only durable rules/commands/gotchas. Clamp scope tighter when the evidence is path-specific.\",\n JSON_ONLY,\n ].join(\" \");\n const user = [\n `Current memory text: ${JSON.stringify(payload.text ?? \"\")}`,\n `Current scope: ${payload.current_scope ?? 
\"repo\"}`,\n `Current path_scope: ${JSON.stringify(payload.current_path_scope ?? null)}`,\n `Repo: ${JSON.stringify(payload.repo ?? null)}`,\n `Repetition count: ${payload.repetition_count ?? 0}`,\n \"\",\n 'Return JSON: {\"refined_text\": string, \"scope\": \"session\"|\"path\"|\"repo\"|\"team\"|\"global\", \"path_scope\": string|null, \"rationale\": string, \"verdict\"?: \"rewrite\"|\"reject\"}',\n ].join(\"\\n\");\n return { system, user };\n}\n\nfunction buildSummarizeHistoryPrompt(task: MaintenanceTask): Prompt {\n const payload = task.payload as {\n current_text?: string;\n kind?: string;\n repo?: string | null;\n };\n const system = [\n \"You compress activity snippets in a coding-agent memory store.\",\n \"Keep the essential facts; drop filler. <= 3 short sentences.\",\n JSON_ONLY,\n ].join(\" \");\n const user = [\n `Kind: ${payload.kind ?? \"unknown\"}`,\n `Repo: ${JSON.stringify(payload.repo ?? null)}`,\n `Current text: ${JSON.stringify(payload.current_text ?? \"\")}`,\n \"\",\n 'Return JSON: {\"summary_text\": string, \"tags\": [string, ...]}',\n ].join(\"\\n\");\n return { system, user };\n}\n\nfunction buildMergeDuplicatesPrompt(task: MaintenanceTask): Prompt {\n const payload = task.payload as {\n cluster?: Array<{ id: string; text: string; confidence?: number; scope?: string; path_scope?: string | null }>;\n repo?: string | null;\n };\n const system = [\n \"You pick the best memory among near-duplicates in a coding-agent memory store.\",\n \"Choose the single winning id. You may also rewrite the winner's text for clarity, and tighten its scope if evidence supports it.\",\n JSON_ONLY,\n ].join(\" \");\n const user = [\n `Repo: ${JSON.stringify(payload.repo ?? null)}`,\n `Cluster:`,\n JSON.stringify(payload.cluster ?? [], null, 2),\n \"\",\n 'Return JSON: {\"winner_id\": uuid, \"winner_text\"?: string, \"winner_scope\"?: \"session\"|\"path\"|\"repo\"|\"team\", \"winner_path_scope\"?: string|null, \"rationale\"?: string}',\n ].join(\"\\n\");\n return { system, user };\n}\n\nfunction buildSummarizeSessionPrompt(task: MaintenanceTask): Prompt {\n const payload = task.payload as { events?: unknown[]; session_id?: string; repo?: string | null };\n const system = [\n \"You condense a coding-agent session into a brief durable summary.\",\n \"<= 5 short bullet points; no filler.\",\n JSON_ONLY,\n ].join(\" \");\n const user = [\n `Session: ${payload.session_id ?? \"unknown\"}`,\n `Repo: ${JSON.stringify(payload.repo ?? null)}`,\n `Events: ${JSON.stringify(payload.events ?? [], null, 2).slice(0, 12_000)}`,\n \"\",\n 'Return JSON: {\"summary_text\": string}',\n ].join(\"\\n\");\n return { system, user };\n}\n\nfunction buildSynthesizeRepoPrompt(task: MaintenanceTask): Prompt {\n const payload = task.payload as { repo?: string | null; memories?: unknown[] };\n const system = [\n \"You synthesize a concise repo-level summary from the stable memory set.\",\n \"Focus on commands, rules, gotchas, and decisions that repeat across sessions.\",\n JSON_ONLY,\n ].join(\" \");\n const user = [\n `Repo: ${JSON.stringify(payload.repo ?? null)}`,\n `Memory set: ${JSON.stringify(payload.memories ?? [], null, 2).slice(0, 12_000)}`,\n \"\",\n 'Return JSON: {\"summary_text\": string}',\n ].join(\"\\n\");\n return { system, user };\n}\n\nexport function formatDispatchReport(report: DispatchReport): string {\n const lines: string[] = [\n \"# Recall Maintenance Dispatch\",\n `Provider: ${report.provider ?? \"(none — no API key)\"}`,\n `Dry run: ${report.dry_run ? 
\"yes\" : \"no\"}`,\n `Attempted: ${report.attempted}`,\n `Applied: ${report.applied}`,\n `Rejected: ${report.rejected}`,\n `Released: ${report.released}`,\n ];\n if (report.outcomes.length > 0) {\n lines.push(\"\", \"## Outcomes\");\n for (const o of report.outcomes) {\n const cost = o.cost_usd != null ? ` $${o.cost_usd.toFixed(4)}` : \"\";\n const tokens = o.prompt_tokens != null ? ` tokens=${(o.prompt_tokens ?? 0) + (o.completion_tokens ?? 0)}` : \"\";\n const reason = o.reason ? ` — ${o.reason}` : \"\";\n lines.push(` ${o.task_id.slice(0, 8)} ${o.kind.padEnd(20)} ${o.status.padEnd(10)}${tokens}${cost}${reason}`);\n }\n }\n return lines.join(\"\\n\");\n}\n"],"mappings":";;;;;;;;;;;;;;;;;AACA;AADA,SAAS,kBAAkB;AAsCpB,IAAM,qBAAN,cAAiC,MAAM;AAAC;AACxC,IAAM,kBAAN,cAA8B,MAAM;AAAC;AAErC,IAAM,iBAA8C;AAAA,EACzD,QAAQ;AAAA,EACR,WAAW;AAAA;AAAA;AAAA;AAAA,EAIX,gBAAgB;AAClB;AAIA,IAAM,oBAAuE;AAAA,EAC3E,eAAe,EAAE,OAAO,MAAM,QAAQ,IAAK;AAAA,EAC3C,UAAU,EAAE,OAAO,KAAK,QAAQ,GAAK;AAAA,EACrC,6BAA6B,EAAE,OAAO,GAAK,QAAQ,EAAI;AAAA,EACvD,qBAAqB,EAAE,OAAO,GAAK,QAAQ,GAAK;AAAA,EAChD,mBAAmB,EAAE,OAAO,IAAM,QAAQ,GAAK;AACjD;AAEA,eAAsB,QACpB,IACA,OACwB;AACxB,QAAM,WAAW,MAAM;AACvB,QAAM,SAAS,kBAAkB,QAAQ;AACzC,MAAI,CAAC,QAAQ;AACX,UAAM,IAAI,mBAAmB,yBAAyB,QAAQ,CAAC;AAAA,EACjE;AACA,QAAM,QAAQ,MAAM,UAAU,aAAa,iBACtC,OAA6B,aAC9B,eAAe,QAAQ;AAE3B,QAAM,UAAU,KAAK,IAAI;AACzB,MAAI,SAA+B;AACnC,MAAI;AAEJ,MAAI;AACF,QAAI,aAAa,UAAU;AACzB,eAAS,MAAM,WAAY,OAA2B,KAAK,OAAO,KAAK;AAAA,IACzE,WAAW,aAAa,aAAa;AACnC,eAAS,MAAM,cAAe,OAA2B,KAAK,OAAO,KAAK;AAAA,IAC5E,OAAO;AACL,eAAS,MAAM,gBAAgB,QAA6B,OAAO,KAAK;AAAA,IAC1E;AACA,WAAO;AAAA,EACT,SAAS,KAAK;AACZ,mBAAe,eAAe,QAAQ,IAAI,UAAU,OAAO,GAAG;AAC9D,UAAM;AAAA,EACR,UAAE;AACA,QAAI;AACF,YAAM,YAAY,IAAI;AAAA,QACpB;AAAA,QACA;AAAA,QACA,WAAW,MAAM;AAAA,QACjB,SAAS,MAAM,WAAW;AAAA,QAC1B,MAAM,MAAM,QAAQ;AAAA,QACpB,OAAO,QAAQ,SAAS,EAAE,eAAe,GAAG,mBAAmB,GAAG,cAAc,GAAG,UAAU,KAAK;AAAA,QAClG,aAAa,KAAK,IAAI,IAAI;AAAA,QAC1B,IAAI,QAAQ,MAAM;AAAA,QAClB,OAAO;AAAA,MACT,CAAC;AAAA,IACH,QAAQ;AAAA,IAER;AAAA,EACF;AACF;AAEA,eAAe,WACb,QACA,OACA,OACwB;AACxB,QAAM,UAAU,KAAK,IAAI;AACzB,QAAM,WAAW,MAAM,MAAM,8CAA8C;AAAA,IACzE,QAAQ;AAAA,IACR,SAAS;AAAA,MACP,gBAAgB;AAAA,MAChB,eAAe,UAAU,MAAM;AAAA,IACjC;AAAA,IACA,MAAM,KAAK,UAAU;AAAA,MACnB;AAAA,MACA,UAAU;AAAA,QACR,EAAE,MAAM,UAAU,SAAS,MAAM,OAAO;AAAA,QACxC,EAAE,MAAM,QAAQ,SAAS,MAAM,KAAK;AAAA,MACtC;AAAA,MACA,uBAAuB,MAAM,qBAAqB;AAAA,MAClD,aAAa,MAAM,eAAe;AAAA,IACpC,CAAC;AAAA,EACH,CAAC;AAED,MAAI,CAAC,SAAS,IAAI;AAChB,UAAM,OAAO,MAAM,SAAS,QAAQ;AACpC,UAAM,IAAI,gBAAgB,UAAU,SAAS,MAAM,KAAK,KAAK,MAAM,GAAG,GAAG,CAAC,EAAE;AAAA,EAC9E;AAEA,QAAM,UAAU,MAAM,SAAS,KAAK;AAIpC,QAAM,OAAO,QAAQ,UAAU,CAAC,GAAG,SAAS,SAAS,KAAK,KAAK;AAC/D,QAAM,gBAAgB,QAAQ,OAAO,iBAAiB;AACtD,QAAM,oBAAoB,QAAQ,OAAO,qBAAqB;AAC9D,QAAM,eAAe,QAAQ,OAAO,gBAAgB,gBAAgB;AAEpE,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA,UAAU;AAAA,IACV,aAAa,KAAK,IAAI,IAAI;AAAA,IAC1B,OAAO;AAAA,MACL;AAAA,MACA;AAAA,MACA;AAAA,MACA,UAAU,YAAY,OAAO,eAAe,iBAAiB;AAAA,IAC/D;AAAA,EACF;AACF;AAEA,eAAe,gBACb,QACA,YACA,OACwB;AACxB,QAAM,UAAU,KAAK,IAAI;AACzB,QAAM,MAAM,GAAG,OAAO,QAAQ,uBAAuB,mBAAmB,UAAU,CAAC,iCAAiC,mBAAmB,OAAO,WAAW,CAAC;AAC1J,QAAM,WAAW,MAAM,MAAM,KAAK;AAAA,IAChC,QAAQ;AAAA,IACR,SAAS;AAAA,MACP,gBAAgB;AAAA,MAChB,WAAW,OAAO;AAAA,IACpB;AAAA,IACA,MAAM,KAAK,UAAU;AAAA,MACnB,UAAU;AAAA,QACR,EAAE,MAAM,UAAU,SAAS,MAAM,OAAO;AAAA,QACxC,EAAE,MAAM,QAAQ,SAAS,MAAM,KAAK;AAAA,MACtC;AAAA,MACA,uBAAuB,MAAM,qBAAqB;AAAA,MAClD,aAAa,MAAM,eAAe;AAAA,IACpC,CAAC;AAAA,EACH,CAAC;AAED,MAAI,CAAC,SAAS,IAAI;AAChB,UAAM,OAAO,MAAM,SAAS,QAAQ;AACpC,UAAM,IAAI,gBAAgB,gBAAgB,SAAS,MAAM,KAAK,KAAK,MAAM,GAAG,GAAG,CAAC,EAAE;AAAA,EACpF;AAEA,QAAM,UAAU,MAAM,
SAAS,KAAK;AAIpC,QAAM,OAAO,QAAQ,UAAU,CAAC,GAAG,SAAS,SAAS,KAAK,KAAK;AAC/D,QAAM,gBAAgB,QAAQ,OAAO,iBAAiB;AACtD,QAAM,oBAAoB,QAAQ,OAAO,qBAAqB;AAC9D,QAAM,eAAe,QAAQ,OAAO,gBAAgB,gBAAgB;AAEpE,SAAO;AAAA,IACL;AAAA,IACA,OAAO;AAAA,IACP,UAAU;AAAA,IACV,aAAa,KAAK,IAAI,IAAI;AAAA,IAC1B,OAAO;AAAA,MACL;AAAA,MACA;AAAA,MACA;AAAA,MACA,UAAU,YAAY,YAAY,eAAe,iBAAiB;AAAA,IACpE;AAAA,EACF;AACF;AAEA,eAAe,cACb,QACA,OACA,OACwB;AACxB,QAAM,UAAU,KAAK,IAAI;AACzB,QAAM,WAAW,MAAM,MAAM,yCAAyC;AAAA,IACpE,QAAQ;AAAA,IACR,SAAS;AAAA,MACP,gBAAgB;AAAA,MAChB,aAAa;AAAA,MACb,qBAAqB;AAAA,IACvB;AAAA,IACA,MAAM,KAAK,UAAU;AAAA,MACnB;AAAA,MACA,QAAQ,MAAM;AAAA,MACd,UAAU,CAAC,EAAE,MAAM,QAAQ,SAAS,MAAM,KAAK,CAAC;AAAA,MAChD,YAAY,MAAM,qBAAqB;AAAA,MACvC,aAAa,MAAM,eAAe;AAAA,IACpC,CAAC;AAAA,EACH,CAAC;AAED,MAAI,CAAC,SAAS,IAAI;AAChB,UAAM,OAAO,MAAM,SAAS,QAAQ;AACpC,UAAM,IAAI,gBAAgB,aAAa,SAAS,MAAM,KAAK,KAAK,MAAM,GAAG,GAAG,CAAC,EAAE;AAAA,EACjF;AAEA,QAAM,UAAU,MAAM,SAAS,KAAK;AAIpC,QAAM,QAAQ,QAAQ,WAAW,CAAC,GAC/B,OAAO,CAAC,UAAU,MAAM,SAAS,UAAU,OAAO,MAAM,SAAS,QAAQ,EACzE,IAAI,CAAC,UAAU,MAAM,IAAI,EACzB,KAAK,EAAE,EACP,KAAK;AACR,QAAM,gBAAgB,QAAQ,OAAO,gBAAgB;AACrD,QAAM,oBAAoB,QAAQ,OAAO,iBAAiB;AAE1D,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA,UAAU;AAAA,IACV,aAAa,KAAK,IAAI,IAAI;AAAA,IAC1B,OAAO;AAAA,MACL;AAAA,MACA;AAAA,MACA,cAAc,gBAAgB;AAAA,MAC9B,UAAU,YAAY,OAAO,eAAe,iBAAiB;AAAA,IAC/D;AAAA,EACF;AACF;AAEA,SAAS,YAAY,OAAe,aAAqB,cAAqC;AAC5F,QAAM,QAAQ,kBAAkB,KAAK;AACrC,MAAI,CAAC,MAAO,QAAO;AACnB,SACG,cAAc,MAAa,MAAM,QACjC,eAAe,MAAa,MAAM;AAEvC;AAEA,eAAe,YACb,IACA,KAWe;AACf,QAAM,GAAG,OAAO,QAAQ,EAAE,OAAO;AAAA,IAC/B,IAAI,WAAW;AAAA,IACf,UAAU,IAAI;AAAA,IACd,OAAO,IAAI;AAAA,IACX,WAAW,IAAI;AAAA,IACf,SAAS,IAAI;AAAA,IACb,MAAM,IAAI;AAAA,IACV,eAAe,IAAI,MAAM;AAAA,IACzB,mBAAmB,IAAI,MAAM;AAAA,IAC7B,cAAc,IAAI,MAAM;AAAA,IACxB,UAAU,IAAI,MAAM,YAAY;AAAA,IAChC,aAAa,IAAI;AAAA,IACjB,IAAI,IAAI;AAAA,IACR,OAAO,IAAI,SAAS;AAAA,IACpB,aAAY,oBAAI,KAAK,GAAE,YAAY;AAAA,EACrC,CAAC;AACH;AAEA,SAAS,yBAAyB,UAA+B;AAC/D,UAAQ,UAAU;AAAA,IAChB,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AACH,aAAO;AAAA,EACX;AACF;AAEA,eAAe,SAAS,UAAqC;AAC3D,MAAI;AACF,WAAO,MAAM,SAAS,KAAK;AAAA,EAC7B,QAAQ;AACN,WAAO;AAAA,EACT;AACF;;;ACvTA;AAEA,IAAM,iBAAiB;AACvB,IAAM,wBAAwB;AAoC9B,eAAsB,qBACpB,IACA,UAA2B,CAAC,GACH;AACzB,QAAM,WAAW,gBAAgB,QAAQ,QAAQ;AACjD,QAAM,SAAyB;AAAA,IAC7B;AAAA,IACA,OAAO;AAAA,IACP,SAAS,QAAQ,QAAQ,MAAM;AAAA,IAC/B,WAAW;AAAA,IACX,SAAS;AAAA,IACT,UAAU;AAAA,IACV,UAAU;AAAA,IACV,UAAU,CAAC;AAAA,EACb;AACA,MAAI,CAAC,SAAU,QAAO;AAEtB,QAAM,UAAU,UAAU,IAAI;AAAA,IAC5B,QAAQ;AAAA,IACR,OAAO,QAAQ;AAAA,IACf,MAAM,QAAQ;AAAA,IACd,OAAO,QAAQ,YAAY;AAAA,EAC7B,CAAC;AAED,aAAW,QAAQ,SAAS;AAC1B,QAAI,QAAQ,QAAQ;AAClB,aAAO,SAAS,KAAK;AAAA,QACnB,SAAS,KAAK;AAAA,QACd,MAAM,KAAK;AAAA,QACX,MAAM,KAAK;AAAA,QACX,QAAQ;AAAA,QACR,QAAQ;AAAA,MACV,CAAC;AACD;AAAA,IACF;AACA,WAAO,aAAa;AACpB,UAAM,UAAU,MAAM,UAAU,IAAI,MAAM,UAAU,QAAQ,KAAK;AACjE,WAAO,SAAS,KAAK,OAAO;AAC5B,QAAI,QAAQ,WAAW,UAAW,QAAO,WAAW;AAAA,aAC3C,QAAQ,WAAW,WAAY,QAAO,YAAY;AAAA,aAClD,QAAQ,WAAW,WAAY,QAAO,YAAY;AAC3D,QAAI,QAAQ,iBAAiB,QAAQ,CAAC,OAAO,OAAO;AAElD,YAAM,OAAO,OAAO,SAAS,OAAO,SAAS,SAAS,CAAC;AACvD,aAAO,QAAS,KAA8C,UAAU,QAAQ,SAAS,OAAO;AAAA,IAClG;AAAA,EACF;AAEA,SAAO;AACT;AAEA,eAAe,UACb,IACA,MACA,UACA,OAC0B;AAC1B,MAAI;AACJ,MAAI;AACF,UAAM,QAAQ,UAAU,IAAI,KAAK,IAAI,gBAAgB,qBAAqB;AAC1E,cAAU,MAAM;AAAA,EAClB,SAAS,KAAK;AACZ,QAAI,eAAe,wBAAwB;AACzC,aAAO;AAAA,QACL,SAAS,KAAK;AAAA,QACd,MAAM,KAAK;AAAA,QACX,MAAM,KAAK;AAAA,QACX,QAAQ;AAAA,QACR,QAAQ,IAAI;AAAA,MACd;AAAA,IACF;AACA,UAAM;AAAA,EACR;AAEA,QAAM,SAAS,YAAY,OAAO;AAClC,MAAI,CAAC,QAAQ;AACX,gBAAY,IAAI,QAAQ,IAAI,cAAc;AAC1C,WAAO;AAAA,MACL,SAAS,QAAQ;AAAA,MACjB,MAAM,QAAQ;AAAA,MACd,MAAM,QAAQ;AAAA,MAC
d,QAAQ;AAAA,MACR,QAAQ;AAAA,IACV;AAAA,EACF;AAEA,MAAI;AACF,UAAM,YAAY,MAAM,QAAQ,IAAI;AAAA,MAClC;AAAA,MACA;AAAA,MACA,QAAQ,OAAO;AAAA,MACf,MAAM,OAAO;AAAA,MACb,mBAAmB,OAAO;AAAA,MAC1B,WAAW,QAAQ;AAAA,MACnB,SAAS,QAAQ;AAAA,MACjB,MAAM,QAAQ;AAAA,IAChB,CAAC;AAED,UAAM,SAAS,UAAU,UAAU,IAAI;AACvC,QAAI,CAAC,QAAQ;AACX,kBAAY,IAAI,QAAQ,IAAI,cAAc;AAC1C,aAAO;AAAA,QACL,SAAS,QAAQ;AAAA,QACjB,MAAM,QAAQ;AAAA,QACd,MAAM,QAAQ;AAAA,QACd,QAAQ;AAAA,QACR,QAAQ;AAAA,QACR,eAAe,UAAU,MAAM;AAAA,QAC/B,mBAAmB,UAAU,MAAM;AAAA,QACnC,UAAU,UAAU,MAAM;AAAA,QAC1B,aAAa,UAAU;AAAA,MACzB;AAAA,IACF;AAEA,UAAM,SAAS,WAAW,IAAI,QAAQ,IAAI,gBAAgB,MAAM;AAChE,QAAI,OAAO,WAAW,WAAW;AAC/B,aAAO;AAAA,QACL,SAAS,QAAQ;AAAA,QACjB,MAAM,QAAQ;AAAA,QACd,MAAM,QAAQ;AAAA,QACd,QAAQ;AAAA,QACR,WAAW,OAAO;AAAA,QAClB,gBAAgB,OAAO;AAAA,QACvB,eAAe,UAAU,MAAM;AAAA,QAC/B,mBAAmB,UAAU,MAAM;AAAA,QACnC,UAAU,UAAU,MAAM;AAAA,QAC1B,aAAa,UAAU;AAAA,MACzB;AAAA,IACF;AACA,WAAO;AAAA,MACL,SAAS,QAAQ;AAAA,MACjB,MAAM,QAAQ;AAAA,MACd,MAAM,QAAQ;AAAA,MACd,QAAQ;AAAA,MACR,QAAQ,OAAO;AAAA,MACf,eAAe,UAAU,MAAM;AAAA,MAC/B,mBAAmB,UAAU,MAAM;AAAA,MACnC,UAAU,UAAU,MAAM;AAAA,MAC1B,aAAa,UAAU;AAAA,IACzB;AAAA,EACF,SAAS,KAAK;AACZ,gBAAY,IAAI,QAAQ,IAAI,cAAc;AAC1C,UAAM,SAAS,eAAe,qBAC1B,IAAI,UACJ,eAAe,QACb,IAAI,UACJ,OAAO,GAAG;AAChB,WAAO;AAAA,MACL,SAAS,QAAQ;AAAA,MACjB,MAAM,QAAQ;AAAA,MACd,MAAM,QAAQ;AAAA,MACd,QAAQ;AAAA,MACR;AAAA,IACF;AAAA,EACF;AACF;AAEA,SAAS,gBAAgB,WAA6C;AACpE,QAAM,aAA4B,YAC9B,CAAC,SAAS,IACV,CAAC,aAAa,gBAAgB,QAAQ;AAC1C,aAAW,YAAY,YAAY;AACjC,QAAI,sBAAsB,QAAQ,EAAG,QAAO;AAAA,EAC9C;AACA,SAAO;AACT;AAEA,SAAS,UAAU,MAA8B;AAC/C,QAAM,UAAU,KAAK,KAAK;AAC1B,MAAI,QAAQ,WAAW,EAAG,QAAO;AAEjC,QAAM,WAAW,QACd,QAAQ,qBAAqB,EAAE,EAC/B,QAAQ,YAAY,EAAE,EACtB,KAAK;AACR,MAAI;AACF,WAAO,KAAK,MAAM,QAAQ;AAAA,EAC5B,QAAQ;AAEN,UAAM,QAAQ,SAAS,QAAQ,GAAG;AAClC,UAAM,OAAO,SAAS,YAAY,GAAG;AACrC,QAAI,SAAS,KAAK,OAAO,OAAO;AAC9B,UAAI;AACF,eAAO,KAAK,MAAM,SAAS,MAAM,OAAO,OAAO,CAAC,CAAC;AAAA,MACnD,QAAQ;AACN,eAAO;AAAA,MACT;AAAA,IACF;AACA,WAAO;AAAA,EACT;AACF;AAQO,SAAS,YAAY,MAAsC;AAChE,UAAQ,KAAK,MAAM;AAAA,IACjB,KAAK;AACH,aAAO,yBAAyB,IAAI;AAAA,IACtC,KAAK;AACH,aAAO,2BAA2B,IAAI;AAAA,IACxC,KAAK;AACH,aAAO,4BAA4B,IAAI;AAAA,IACzC,KAAK;AACH,aAAO,2BAA2B,IAAI;AAAA,IACxC,KAAK;AACH,aAAO,4BAA4B,IAAI;AAAA,IACzC,KAAK;AACH,aAAO,0BAA0B,IAAI;AAAA,IACvC;AACE,aAAO;AAAA,EACX;AACF;AAEA,SAAS,yBAAyB,MAA+B;AAC/D,QAAM,UAAU,KAAK;AAQrB,QAAM,SAAS;AAAA,IACb;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,EAAE,KAAK,GAAG;AACV,QAAM,OAAO;AAAA,IACX,mBAAmB,KAAK,UAAU,QAAQ,QAAQ,EAAE,CAAC;AAAA,IACrD,mBAAmB,QAAQ,kBAAkB,MAAM;AAAA,IACnD,wBAAwB,KAAK,UAAU,QAAQ,uBAAuB,IAAI,CAAC;AAAA,IAC3E,SAAS,KAAK,UAAU,QAAQ,QAAQ,IAAI,CAAC;AAAA,IAC7C,oBAAoB,KAAK,UAAU,QAAQ,mBAAmB,IAAI,CAAC;AAAA,IACnE;AAAA,IACA;AAAA,EACF,EAAE,KAAK,IAAI;AACX,SAAO,EAAE,QAAQ,KAAK;AACxB;AAEA,IAAM,YAAY;AAElB,SAAS,2BAA2B,MAA+B;AACjE,QAAM,UAAU,KAAK;AAQrB,QAAM,SAAS;AAAA,IACb;AAAA,IACA;AAAA,IACA;AAAA,EACF,EAAE,KAAK,GAAG;AACV,QAAM,OAAO;AAAA,IACX,wBAAwB,KAAK,UAAU,QAAQ,QAAQ,EAAE,CAAC;AAAA,IAC1D,kBAAkB,QAAQ,iBAAiB,MAAM;AAAA,IACjD,uBAAuB,KAAK,UAAU,QAAQ,sBAAsB,IAAI,CAAC;AAAA,IACzE,SAAS,KAAK,UAAU,QAAQ,QAAQ,IAAI,CAAC;AAAA,IAC7C,qBAAqB,QAAQ,oBAAoB,CAAC;AAAA,IAClD;AAAA,IACA;AAAA,EACF,EAAE,KAAK,IAAI;AACX,SAAO,EAAE,QAAQ,KAAK;AACxB;AAEA,SAAS,4BAA4B,MAA+B;AAClE,QAAM,UAAU,KAAK;AAKrB,QAAM,SAAS;AAAA,IACb;AAAA,IACA;AAAA,IACA;AAAA,EACF,EAAE,KAAK,GAAG;AACV,QAAM,OAAO;AAAA,IACX,SAAS,QAAQ,QAAQ,SAAS;AAAA,IAClC,SAAS,KAAK,UAAU,QAAQ,QAAQ,IAAI,CAAC;AAAA,IAC7C,iBAAiB,KAAK,UAAU,QAAQ,gBAAgB,EAAE,CAAC;AAAA,IAC3D;AAAA,IACA;AAAA,EACF,EAAE,KAAK,IAAI;AACX,SAAO,EAAE,QAAQ,KAAK;AACxB;AAEA,SAAS,2BAA2B,MAA+B;AACjE,QAAM,UAAU,KAAK;AAIrB,QAAM,SAAS;AAAA,IACb;AAAA,IACA;AAAA,IA
CA;AAAA,EACF,EAAE,KAAK,GAAG;AACV,QAAM,OAAO;AAAA,IACX,SAAS,KAAK,UAAU,QAAQ,QAAQ,IAAI,CAAC;AAAA,IAC7C;AAAA,IACA,KAAK,UAAU,QAAQ,WAAW,CAAC,GAAG,MAAM,CAAC;AAAA,IAC7C;AAAA,IACA;AAAA,EACF,EAAE,KAAK,IAAI;AACX,SAAO,EAAE,QAAQ,KAAK;AACxB;AAEA,SAAS,4BAA4B,MAA+B;AAClE,QAAM,UAAU,KAAK;AACrB,QAAM,SAAS;AAAA,IACb;AAAA,IACA;AAAA,IACA;AAAA,EACF,EAAE,KAAK,GAAG;AACV,QAAM,OAAO;AAAA,IACX,YAAY,QAAQ,cAAc,SAAS;AAAA,IAC3C,SAAS,KAAK,UAAU,QAAQ,QAAQ,IAAI,CAAC;AAAA,IAC7C,WAAW,KAAK,UAAU,QAAQ,UAAU,CAAC,GAAG,MAAM,CAAC,EAAE,MAAM,GAAG,IAAM,CAAC;AAAA,IACzE;AAAA,IACA;AAAA,EACF,EAAE,KAAK,IAAI;AACX,SAAO,EAAE,QAAQ,KAAK;AACxB;AAEA,SAAS,0BAA0B,MAA+B;AAChE,QAAM,UAAU,KAAK;AACrB,QAAM,SAAS;AAAA,IACb;AAAA,IACA;AAAA,IACA;AAAA,EACF,EAAE,KAAK,GAAG;AACV,QAAM,OAAO;AAAA,IACX,SAAS,KAAK,UAAU,QAAQ,QAAQ,IAAI,CAAC;AAAA,IAC7C,eAAe,KAAK,UAAU,QAAQ,YAAY,CAAC,GAAG,MAAM,CAAC,EAAE,MAAM,GAAG,IAAM,CAAC;AAAA,IAC/E;AAAA,IACA;AAAA,EACF,EAAE,KAAK,IAAI;AACX,SAAO,EAAE,QAAQ,KAAK;AACxB;AAEO,SAAS,qBAAqB,QAAgC;AACnE,QAAM,QAAkB;AAAA,IACtB;AAAA,IACA,eAAe,OAAO,YAAY,0BAAqB;AAAA,IACvD,eAAe,OAAO,UAAU,QAAQ,IAAI;AAAA,IAC5C,eAAe,OAAO,SAAS;AAAA,IAC/B,eAAe,OAAO,OAAO;AAAA,IAC7B,eAAe,OAAO,QAAQ;AAAA,IAC9B,eAAe,OAAO,QAAQ;AAAA,EAChC;AACA,MAAI,OAAO,SAAS,SAAS,GAAG;AAC9B,UAAM,KAAK,IAAI,aAAa;AAC5B,eAAW,KAAK,OAAO,UAAU;AAC/B,YAAM,OAAO,EAAE,YAAY,OAAO,KAAK,EAAE,SAAS,QAAQ,CAAC,CAAC,KAAK;AACjE,YAAM,SAAS,EAAE,iBAAiB,OAAO,YAAY,EAAE,iBAAiB,MAAM,EAAE,qBAAqB,EAAE,KAAK;AAC5G,YAAM,SAAS,EAAE,SAAS,WAAM,EAAE,MAAM,KAAK;AAC7C,YAAM,KAAK,KAAK,EAAE,QAAQ,MAAM,GAAG,CAAC,CAAC,IAAI,EAAE,KAAK,OAAO,EAAE,CAAC,IAAI,EAAE,OAAO,OAAO,EAAE,CAAC,GAAG,MAAM,GAAG,IAAI,GAAG,MAAM,EAAE;AAAA,IAC9G;AAAA,EACF;AACA,SAAO,MAAM,KAAK,IAAI;AACxB;","names":[]}