@vellumai/assistant 0.5.2 → 0.5.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/ARCHITECTURE.md +109 -0
- package/docs/skills.md +100 -0
- package/package.json +1 -1
- package/src/__tests__/conversation-agent-loop-overflow.test.ts +7 -0
- package/src/__tests__/conversation-agent-loop.test.ts +7 -0
- package/src/__tests__/conversation-memory-dirty-tail.test.ts +150 -0
- package/src/__tests__/conversation-provider-retry-repair.test.ts +7 -0
- package/src/__tests__/conversation-wipe.test.ts +226 -0
- package/src/__tests__/db-memory-archive-migration.test.ts +372 -0
- package/src/__tests__/db-memory-brief-state-migration.test.ts +213 -0
- package/src/__tests__/db-memory-reducer-checkpoints.test.ts +273 -0
- package/src/__tests__/inline-command-runner.test.ts +311 -0
- package/src/__tests__/inline-skill-authoring-guard.test.ts +220 -0
- package/src/__tests__/inline-skill-load-permissions.test.ts +435 -0
- package/src/__tests__/list-messages-attachments.test.ts +96 -0
- package/src/__tests__/memory-brief-open-loops.test.ts +530 -0
- package/src/__tests__/memory-brief-time.test.ts +285 -0
- package/src/__tests__/memory-brief-wrapper.test.ts +311 -0
- package/src/__tests__/memory-chunk-archive.test.ts +400 -0
- package/src/__tests__/memory-chunk-dual-write.test.ts +453 -0
- package/src/__tests__/memory-episode-archive.test.ts +370 -0
- package/src/__tests__/memory-episode-dual-write.test.ts +626 -0
- package/src/__tests__/memory-observation-archive.test.ts +375 -0
- package/src/__tests__/memory-observation-dual-write.test.ts +318 -0
- package/src/__tests__/memory-recall-quality.test.ts +2 -2
- package/src/__tests__/memory-reducer-store.test.ts +728 -0
- package/src/__tests__/memory-reducer-types.test.ts +699 -0
- package/src/__tests__/memory-reducer.test.ts +698 -0
- package/src/__tests__/memory-regressions.test.ts +6 -4
- package/src/__tests__/memory-simplified-config.test.ts +281 -0
- package/src/__tests__/parse-identity-fields.test.ts +129 -0
- package/src/__tests__/skill-load-inline-command.test.ts +598 -0
- package/src/__tests__/skill-load-inline-includes.test.ts +644 -0
- package/src/__tests__/skills-inline-command-expansions.test.ts +301 -0
- package/src/__tests__/skills-transitive-hash.test.ts +333 -0
- package/src/__tests__/vellum-self-knowledge-inline-command.test.ts +320 -0
- package/src/__tests__/workspace-migration-backfill-installation-id.test.ts +4 -4
- package/src/config/bundled-skills/app-builder/SKILL.md +8 -8
- package/src/config/bundled-skills/skill-management/SKILL.md +1 -1
- package/src/config/bundled-skills/skill-management/TOOLS.json +2 -2
- package/src/config/feature-flag-registry.json +16 -0
- package/src/config/loader.ts +1 -0
- package/src/config/raw-config-utils.ts +28 -0
- package/src/config/schema.ts +12 -0
- package/src/config/schemas/memory-simplified.ts +101 -0
- package/src/config/schemas/memory.ts +4 -0
- package/src/config/skills.ts +50 -4
- package/src/daemon/conversation-agent-loop-handlers.ts +8 -3
- package/src/daemon/conversation-agent-loop.ts +71 -1
- package/src/daemon/conversation-lifecycle.ts +11 -1
- package/src/daemon/conversation-runtime-assembly.ts +2 -1
- package/src/daemon/conversation-surfaces.ts +31 -8
- package/src/daemon/conversation.ts +40 -23
- package/src/daemon/handlers/config-embeddings.ts +10 -2
- package/src/daemon/handlers/config-model.ts +0 -9
- package/src/daemon/handlers/identity.ts +12 -1
- package/src/daemon/lifecycle.ts +9 -1
- package/src/daemon/message-types/conversations.ts +0 -1
- package/src/daemon/server.ts +1 -1
- package/src/followups/followup-store.ts +47 -1
- package/src/memory/archive-store.ts +400 -0
- package/src/memory/brief-formatting.ts +33 -0
- package/src/memory/brief-open-loops.ts +266 -0
- package/src/memory/brief-time.ts +161 -0
- package/src/memory/brief.ts +75 -0
- package/src/memory/conversation-crud.ts +245 -101
- package/src/memory/db-init.ts +12 -0
- package/src/memory/indexer.ts +106 -15
- package/src/memory/job-handlers/embedding.test.ts +1 -0
- package/src/memory/job-handlers/embedding.ts +83 -0
- package/src/memory/job-utils.ts +1 -1
- package/src/memory/jobs-store.ts +6 -0
- package/src/memory/jobs-worker.ts +12 -0
- package/src/memory/migrations/185-memory-brief-state.ts +52 -0
- package/src/memory/migrations/186-memory-archive.ts +109 -0
- package/src/memory/migrations/187-memory-reducer-checkpoints.ts +19 -0
- package/src/memory/migrations/index.ts +3 -0
- package/src/memory/qdrant-client.ts +23 -4
- package/src/memory/reducer-store.ts +271 -0
- package/src/memory/reducer-types.ts +99 -0
- package/src/memory/reducer.ts +453 -0
- package/src/memory/schema/conversations.ts +3 -0
- package/src/memory/schema/index.ts +2 -0
- package/src/memory/schema/memory-archive.ts +121 -0
- package/src/memory/schema/memory-brief.ts +55 -0
- package/src/memory/search/semantic.ts +17 -4
- package/src/oauth/oauth-store.ts +3 -1
- package/src/permissions/checker.ts +89 -6
- package/src/permissions/defaults.ts +14 -0
- package/src/runtime/routes/conversation-management-routes.ts +6 -0
- package/src/runtime/routes/conversation-query-routes.ts +7 -0
- package/src/runtime/routes/conversation-routes.ts +52 -5
- package/src/runtime/routes/identity-routes.ts +2 -35
- package/src/runtime/routes/llm-context-normalization.ts +14 -1
- package/src/runtime/routes/memory-item-routes.ts +90 -5
- package/src/runtime/routes/secret-routes.ts +2 -0
- package/src/runtime/routes/surface-action-routes.ts +68 -1
- package/src/schedule/schedule-store.ts +21 -0
- package/src/skills/inline-command-expansions.ts +204 -0
- package/src/skills/inline-command-render.ts +127 -0
- package/src/skills/inline-command-runner.ts +242 -0
- package/src/skills/transitive-version-hash.ts +88 -0
- package/src/tasks/task-store.ts +43 -1
- package/src/tools/permission-checker.ts +8 -1
- package/src/tools/skills/load.ts +140 -6
- package/src/util/platform.ts +18 -0
- package/src/workspace/migrations/{002-backfill-installation-id.ts → 011-backfill-installation-id.ts} +1 -1
- package/src/workspace/migrations/registry.ts +1 -1
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
import { and, eq, lte, or } from "drizzle-orm";
|
|
1
|
+
import { and, desc, eq, lte, or } from "drizzle-orm";
|
|
2
2
|
import { v4 as uuid } from "uuid";
|
|
3
3
|
|
|
4
4
|
import { getDb } from "../memory/db.js";
|
|
@@ -176,3 +176,49 @@ export function markNudged(id: string): FollowUp {
|
|
|
176
176
|
|
|
177
177
|
return getFollowUp(id)!;
|
|
178
178
|
}
|
|
179
|
+
|
|
180
|
+
// ── Brief Helpers ──────────────────────────────────────────────────────
|
|
181
|
+
|
|
182
|
+
/**
|
|
183
|
+
* Lightweight projection of a follow-up used by the brief compiler.
|
|
184
|
+
*/
|
|
185
|
+
export interface BriefFollowUp {
|
|
186
|
+
id: string;
|
|
187
|
+
channel: string;
|
|
188
|
+
conversationId: string;
|
|
189
|
+
contactId: string | null;
|
|
190
|
+
expectedResponseBy: number | null;
|
|
191
|
+
status: FollowUpStatus;
|
|
192
|
+
updatedAt: number;
|
|
193
|
+
}
|
|
194
|
+
|
|
195
|
+
/**
|
|
196
|
+
* Return pending and overdue follow-ups for the brief compiler.
|
|
197
|
+
* Ordered by expectedResponseBy ascending (most urgent first).
|
|
198
|
+
*/
|
|
199
|
+
export function getPendingAndOverdueFollowUps(): BriefFollowUp[] {
|
|
200
|
+
const db = getDb();
|
|
201
|
+
|
|
202
|
+
const rows = db
|
|
203
|
+
.select({
|
|
204
|
+
id: followups.id,
|
|
205
|
+
channel: followups.channel,
|
|
206
|
+
conversationId: followups.conversationId,
|
|
207
|
+
contactId: followups.contactId,
|
|
208
|
+
expectedResponseBy: followups.expectedResponseBy,
|
|
209
|
+
status: followups.status,
|
|
210
|
+
updatedAt: followups.updatedAt,
|
|
211
|
+
})
|
|
212
|
+
.from(followups)
|
|
213
|
+
.where(
|
|
214
|
+
or(
|
|
215
|
+
eq(followups.status, "pending"),
|
|
216
|
+
eq(followups.status, "overdue"),
|
|
217
|
+
eq(followups.status, "nudged"),
|
|
218
|
+
),
|
|
219
|
+
)
|
|
220
|
+
.orderBy(desc(followups.expectedResponseBy))
|
|
221
|
+
.all();
|
|
222
|
+
|
|
223
|
+
return rows as BriefFollowUp[];
|
|
224
|
+
}
|
|
@@ -0,0 +1,400 @@
|
|
|
1
|
+
import { createHash } from "node:crypto";
|
|
2
|
+
|
|
3
|
+
import { eq } from "drizzle-orm";
|
|
4
|
+
import { v4 as uuid } from "uuid";
|
|
5
|
+
|
|
6
|
+
import { estimateTextTokens } from "../context/token-estimator.js";
|
|
7
|
+
import { getLogger } from "../util/logger.js";
|
|
8
|
+
import { getDb, rawChanges } from "./db.js";
|
|
9
|
+
import { enqueueMemoryJob, type MemoryJobType } from "./jobs-store.js";
|
|
10
|
+
import {
|
|
11
|
+
memoryChunks,
|
|
12
|
+
memoryEpisodes,
|
|
13
|
+
memoryObservations,
|
|
14
|
+
} from "./schema.js";
|
|
15
|
+
|
|
16
|
+
const log = getLogger("memory-archive-store");
|
|
17
|
+
|
|
18
|
+
// ── Content hashing ─────────────────────────────────────────────────
|
|
19
|
+
|
|
20
|
+
/**
|
|
21
|
+
* Compute a SHA-256 content hash for a chunk's content, scoped by scopeId.
|
|
22
|
+
* Used for idempotent upserts — if the hash already exists within the same
|
|
23
|
+
* scope, the chunk is skipped.
|
|
24
|
+
*/
|
|
25
|
+
export function computeChunkContentHash(
|
|
26
|
+
scopeId: string,
|
|
27
|
+
content: string,
|
|
28
|
+
): string {
|
|
29
|
+
return createHash("sha256").update(`${scopeId}|${content}`).digest("hex");
|
|
30
|
+
}
|
|
31
|
+
|
|
32
|
+
/**
|
|
33
|
+
* Compute a SHA-256 hash of the observation content, scoped by scopeId.
|
|
34
|
+
* Used for idempotent chunk deduplication.
|
|
35
|
+
*/
|
|
36
|
+
export function computeObservationContentHash(
|
|
37
|
+
scopeId: string,
|
|
38
|
+
content: string,
|
|
39
|
+
): string {
|
|
40
|
+
return createHash("sha256").update(`${scopeId}|${content}`).digest("hex");
|
|
41
|
+
}
|
|
42
|
+
|
|
43
|
+
// ── Token estimation ────────────────────────────────────────────────
|
|
44
|
+
|
|
45
|
+
/**
|
|
46
|
+
* Rough token count estimate based on character length.
|
|
47
|
+
* Uses the common ~4 chars/token heuristic for English text.
|
|
48
|
+
*/
|
|
49
|
+
export function estimateTokens(text: string): number {
|
|
50
|
+
return Math.max(1, Math.ceil(text.length / 4));
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
// ── Chunk upsert ────────────────────────────────────────────────────

export interface UpsertChunkInput {
  /** Scope for memory isolation. Defaults to "default". */
  scopeId?: string;
  /** FK to the parent observation. */
  observationId: string;
  /** The chunk text to embed and recall. */
  content: string;
  /** Optional pre-computed token estimate. If omitted, estimated from content length. */
  tokenEstimate?: number;
}

export interface UpsertChunkResult {
  chunkId: string;
  /** True if a new row was inserted; false if an existing row matched the content hash. */
  inserted: boolean;
}

/**
 * Idempotently upsert a chunk into the archive. If a chunk with the same
 * (scopeId, contentHash) already exists, the insert is skipped and the
 * existing row's id is returned. Otherwise a new row is inserted and an
 * `embed_chunk` job is enqueued.
 *
 * NOTE(review): this path estimates tokens with the local estimateTokens()
 * while insertObservation() uses estimateTextTokens() for the same content —
 * the two paths can record different tokenEstimate values for an identical
 * chunk depending on which one wins the dedup race. Confirm whether that
 * divergence is intentional.
 */
export function upsertChunk(input: UpsertChunkInput): UpsertChunkResult {
  const scopeId = input.scopeId ?? "default";
  const contentHash = computeChunkContentHash(scopeId, input.content);
  const tokenEstimate = input.tokenEstimate ?? estimateTokens(input.content);
  const db = getDb();

  // Check for an existing chunk with the same content hash in this scope
  // (the hash input already embeds scopeId, so matching on contentHash alone
  // is still scope-safe — cross-scope duplicates hash differently).
  const existing = db
    .select({ id: memoryChunks.id })
    .from(memoryChunks)
    .where(eq(memoryChunks.contentHash, contentHash))
    .get();

  if (existing) {
    log.debug(
      { scopeId, contentHash, existingId: existing.id },
      "Chunk already exists, skipping insert",
    );
    // Dedup hit: report the existing row's id, no job enqueued.
    return { chunkId: existing.id, inserted: false };
  }

  const chunkId = uuid();
  const now = Date.now();

  db.insert(memoryChunks)
    .values({
      id: chunkId,
      scopeId,
      observationId: input.observationId,
      content: input.content,
      tokenEstimate,
      contentHash,
      createdAt: now,
    })
    .run();

  // Enqueue an embedding job for the new chunk
  // (row must exist first so the job payload's chunkId is resolvable).
  enqueueMemoryJob("embed_chunk", {
    chunkId,
    scopeId,
  });

  log.debug(
    { chunkId, scopeId, contentHash },
    "Inserted new chunk and enqueued embed_chunk job",
  );

  return { chunkId, inserted: true };
}
|
|
127
|
+
|
|
128
|
+
/**
|
|
129
|
+
* Upsert multiple chunks for a single observation. Returns results for
|
|
130
|
+
* each input in the same order.
|
|
131
|
+
*/
|
|
132
|
+
export function upsertChunks(inputs: UpsertChunkInput[]): UpsertChunkResult[] {
|
|
133
|
+
return inputs.map((input) => upsertChunk(input));
|
|
134
|
+
}
|
|
135
|
+
|
|
136
|
+
// ── Chunk queries ───────────────────────────────────────────────────
|
|
137
|
+
|
|
138
|
+
/**
|
|
139
|
+
* Get a chunk by its ID.
|
|
140
|
+
*/
|
|
141
|
+
export function getChunkById(
|
|
142
|
+
chunkId: string,
|
|
143
|
+
): typeof memoryChunks.$inferSelect | undefined {
|
|
144
|
+
const db = getDb();
|
|
145
|
+
return db
|
|
146
|
+
.select()
|
|
147
|
+
.from(memoryChunks)
|
|
148
|
+
.where(eq(memoryChunks.id, chunkId))
|
|
149
|
+
.get();
|
|
150
|
+
}
|
|
151
|
+
|
|
152
|
+
/**
|
|
153
|
+
* Get all chunks for a given observation.
|
|
154
|
+
*/
|
|
155
|
+
export function getChunksByObservationId(
|
|
156
|
+
observationId: string,
|
|
157
|
+
): Array<typeof memoryChunks.$inferSelect> {
|
|
158
|
+
const db = getDb();
|
|
159
|
+
return db
|
|
160
|
+
.select()
|
|
161
|
+
.from(memoryChunks)
|
|
162
|
+
.where(eq(memoryChunks.observationId, observationId))
|
|
163
|
+
.all();
|
|
164
|
+
}
|
|
165
|
+
|
|
166
|
+
// ── Episode insertion helpers ───────────────────────────────────────

/**
 * Parameters shared by both episode insertion entry points.
 */
export interface InsertEpisodeParams {
  // Memory-isolation scope; defaults to "default" when omitted.
  scopeId?: string;
  // Conversation this episode summarizes.
  conversationId: string;
  // Short human-readable episode title.
  title: string;
  // Narrative summary text; this is what gets embedded for recall.
  summary: string;
  // Caller-supplied token estimate for the summary (not recomputed here).
  tokenEstimate: number;
  // Optional provenance tag; stored as null when omitted.
  source?: string;
  // Epoch-ms bounds of the conversation span the episode covers.
  startAt: number;
  endAt: number;
}

/**
 * Insert an episode row produced by conversation compaction.
 * Compaction episodes summarize a contiguous block of turns that was
 * compressed to free context-window space.
 *
 * An `embed_episode` job is enqueued automatically so the episode
 * becomes searchable via vector recall.
 */
export function insertCompactionEpisode(params: InsertEpisodeParams): {
  episodeId: string;
  jobId: string;
} {
  // Thin wrapper over the shared insert-then-enqueue implementation.
  return insertEpisodeAndEnqueue(params);
}

/**
 * Insert an episode row produced by resolution (end-of-conversation)
 * summarization. Resolution episodes capture the full narrative arc
 * of a completed conversation.
 *
 * An `embed_episode` job is enqueued automatically so the episode
 * becomes searchable via vector recall.
 */
export function insertResolutionEpisode(params: InsertEpisodeParams): {
  episodeId: string;
  jobId: string;
} {
  // Same implementation as compaction; the two entry points exist so call
  // sites read as intent rather than mechanism.
  return insertEpisodeAndEnqueue(params);
}

// ── Internal (episode) ──────────────────────────────────────────────

/**
 * Shared implementation: insert the episode row, then enqueue an
 * `embed_episode` job referencing it. The insert precedes the enqueue —
 * presumably so a worker that claims the job immediately can resolve the
 * episode by id; confirm against the jobs-worker implementation.
 *
 * Returns the new episode's id and the enqueued job's id.
 */
function insertEpisodeAndEnqueue(params: InsertEpisodeParams): {
  episodeId: string;
  jobId: string;
} {
  const db = getDb();
  const episodeId = uuid();
  const now = Date.now();

  db.insert(memoryEpisodes)
    .values({
      id: episodeId,
      scopeId: params.scopeId ?? "default",
      conversationId: params.conversationId,
      title: params.title,
      summary: params.summary,
      tokenEstimate: params.tokenEstimate,
      source: params.source ?? null,
      startAt: params.startAt,
      endAt: params.endAt,
      createdAt: now,
      updatedAt: now,
    })
    .run();

  // `satisfies` validates the literal against MemoryJobType at compile time
  // without widening it.
  const jobId = enqueueMemoryJob("embed_episode" satisfies MemoryJobType, {
    episodeId,
  });

  log.debug(
    { episodeId, jobId, conversationId: params.conversationId },
    "Inserted episode and enqueued embed job",
  );

  return { episodeId, jobId };
}
|
|
246
|
+
|
|
247
|
+
// ── Observation types ───────────────────────────────────────────────

export interface InsertObservationParams {
  // Conversation the observation belongs to.
  conversationId: string;
  // Optional FK to the originating message; stored as null when omitted.
  messageId?: string | null;
  // Speaker role for the observed content.
  role: string;
  // Raw observed text; also becomes the chunk content verbatim.
  content: string;
  // Memory-isolation scope; defaults to "default".
  scopeId?: string;
  // Content modality; defaults to "text".
  modality?: string;
  // Optional provenance tag; stored as null when omitted.
  source?: string | null;
}

export interface InsertedObservation {
  observationId: string;
  // Null when the chunk was deduplicated against an existing row.
  chunkId: string | null;
  contentHash: string;
  // Null when no new chunk was created (nothing to embed).
  embeddingJobId: string | null;
}

// ── Observation insert helpers ──────────────────────────────────────

/**
 * Insert a raw observation row and its associated chunk. If a chunk with the
 * same content hash already exists in the scope, the chunk insert is skipped
 * (idempotent dual-write safety). An `embed_observation` job is enqueued when
 * a new chunk is created.
 *
 * The chunk reuses the observation's content hash — valid because
 * computeObservationContentHash and computeChunkContentHash hash the same
 * `scopeId|content` payload.
 *
 * NOTE(review): this path estimates tokens via estimateTextTokens() while
 * upsertChunk() uses the local estimateTokens(); confirm the divergence is
 * intentional.
 *
 * Returns the observation ID, chunk ID (null if deduplicated), content hash,
 * and embedding job ID (null if no new chunk was created).
 */
export function insertObservation(
  params: InsertObservationParams,
): InsertedObservation {
  const db = getDb();
  const now = Date.now();
  const scopeId = params.scopeId ?? "default";
  const modality = params.modality ?? "text";

  const observationId = uuid();
  const contentHash = computeObservationContentHash(scopeId, params.content);

  // Insert the observation row (always inserted, even when the chunk below
  // turns out to be a duplicate).
  db.insert(memoryObservations)
    .values({
      id: observationId,
      scopeId,
      conversationId: params.conversationId,
      messageId: params.messageId ?? null,
      role: params.role,
      content: params.content,
      modality,
      source: params.source ?? null,
      createdAt: now,
    })
    .run();

  // Attempt to insert the chunk — the unique index on (scope_id, content_hash)
  // will cause a conflict if this content was already chunked. We use
  // onConflictDoNothing to silently skip the duplicate.
  const chunkId = uuid();
  const tokenEstimate = estimateTextTokens(params.content);

  db.insert(memoryChunks)
    .values({
      id: chunkId,
      scopeId,
      observationId,
      content: params.content,
      tokenEstimate,
      contentHash,
      createdAt: now,
    })
    .onConflictDoNothing({
      target: [memoryChunks.scopeId, memoryChunks.contentHash],
    })
    .run();

  // Drizzle bun:sqlite .run() returns void; use rawChanges() to detect no-ops
  // NOTE(review): this assumes no other statement runs on the connection
  // between the insert above and this read — true in this synchronous body.
  const chunkInserted = rawChanges() > 0;

  let embeddingJobId: string | null = null;
  if (chunkInserted) {
    // Fresh chunk: schedule embedding so it becomes vector-searchable.
    embeddingJobId = enqueueMemoryJob("embed_observation", {
      observationId,
      chunkId,
    });
    log.debug(
      { observationId, chunkId, contentHash, embeddingJobId },
      "Inserted observation with new chunk, enqueued embed job",
    );
  } else {
    log.debug(
      { observationId, contentHash },
      "Inserted observation, chunk deduplicated by content hash",
    );
  }

  return {
    observationId,
    chunkId: chunkInserted ? chunkId : null,
    contentHash,
    embeddingJobId,
  };
}
|
|
351
|
+
|
|
352
|
+
/**
|
|
353
|
+
* Insert multiple observations in a single transaction.
|
|
354
|
+
* Returns the results for each insertion.
|
|
355
|
+
*/
|
|
356
|
+
export function insertObservations(
|
|
357
|
+
observations: InsertObservationParams[],
|
|
358
|
+
): InsertedObservation[] {
|
|
359
|
+
const db = getDb();
|
|
360
|
+
const results: InsertedObservation[] = [];
|
|
361
|
+
db.transaction((tx) => {
|
|
362
|
+
// We don't use `tx` directly for individual inserts because
|
|
363
|
+
// insertObservation uses getDb() internally. Instead, the transaction
|
|
364
|
+
// wrapper ensures atomicity at the SQLite level.
|
|
365
|
+
void tx;
|
|
366
|
+
for (const obs of observations) {
|
|
367
|
+
results.push(insertObservation(obs));
|
|
368
|
+
}
|
|
369
|
+
});
|
|
370
|
+
return results;
|
|
371
|
+
}
|
|
372
|
+
|
|
373
|
+
/**
|
|
374
|
+
* Look up an observation by ID.
|
|
375
|
+
*/
|
|
376
|
+
export function getObservation(
|
|
377
|
+
observationId: string,
|
|
378
|
+
): typeof memoryObservations.$inferSelect | undefined {
|
|
379
|
+
const db = getDb();
|
|
380
|
+
return db
|
|
381
|
+
.select()
|
|
382
|
+
.from(memoryObservations)
|
|
383
|
+
.where(eq(memoryObservations.id, observationId))
|
|
384
|
+
.get();
|
|
385
|
+
}
|
|
386
|
+
|
|
387
|
+
/**
|
|
388
|
+
* Look up a chunk by observation ID. Returns the first chunk linked to the
|
|
389
|
+
* given observation, or undefined if none exists.
|
|
390
|
+
*/
|
|
391
|
+
export function getChunkByObservationId(
|
|
392
|
+
observationId: string,
|
|
393
|
+
): typeof memoryChunks.$inferSelect | undefined {
|
|
394
|
+
const db = getDb();
|
|
395
|
+
return db
|
|
396
|
+
.select()
|
|
397
|
+
.from(memoryChunks)
|
|
398
|
+
.where(eq(memoryChunks.observationId, observationId))
|
|
399
|
+
.get();
|
|
400
|
+
}
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Shared formatting helpers for rendering capped markdown sections
|
|
3
|
+
* inside the `<memory_brief>` wrapper.
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
export interface BriefEntry {
|
|
7
|
+
/** One-line markdown bullet text (no leading `- `). */
|
|
8
|
+
text: string;
|
|
9
|
+
}
|
|
10
|
+
|
|
11
|
+
/**
|
|
12
|
+
* Render a titled markdown section with a capped number of bullet entries.
|
|
13
|
+
*
|
|
14
|
+
* Returns `null` when `entries` is empty so callers can easily omit absent
|
|
15
|
+
* sections. The output is a markdown string like:
|
|
16
|
+
*
|
|
17
|
+
* ```
|
|
18
|
+
* ### Time-Relevant Context
|
|
19
|
+
* - Meeting with Alice in 2 hours
|
|
20
|
+
* - Quarterly review deadline tomorrow
|
|
21
|
+
* ```
|
|
22
|
+
*/
|
|
23
|
+
export function renderBriefSection(
|
|
24
|
+
title: string,
|
|
25
|
+
entries: BriefEntry[],
|
|
26
|
+
maxEntries: number,
|
|
27
|
+
): string | null {
|
|
28
|
+
if (entries.length === 0) return null;
|
|
29
|
+
|
|
30
|
+
const capped = entries.slice(0, maxEntries);
|
|
31
|
+
const bullets = capped.map((e) => `- ${e.text}`).join("\n");
|
|
32
|
+
return `### ${title}\n${bullets}`;
|
|
33
|
+
}
|