@usewhisper/mcp-server 0.1.0 → 0.2.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +3 -1
- package/dist/autosubscribe-6EDKPBE2.js +4068 -0
- package/dist/autosubscribe-ISDETQIB.js +436 -0
- package/dist/autosubscribe-ISDETQIB.js.map +1 -0
- package/dist/chunk-3WGYBAYR.js +8387 -0
- package/dist/chunk-5KIJNY6Z.js +370 -0
- package/dist/chunk-B3VWOHUA.js +271 -0
- package/dist/chunk-C57DHKTL.js +459 -0
- package/dist/chunk-FTWUJBAH.js +387 -0
- package/dist/chunk-FTWUJBAH.js.map +1 -0
- package/dist/chunk-H3HSKH2P.js +4841 -0
- package/dist/chunk-L6DXSM2U.js +457 -0
- package/dist/chunk-L6DXSM2U.js.map +1 -0
- package/dist/chunk-OBLI4FE4.js +276 -0
- package/dist/chunk-OBLI4FE4.js.map +1 -0
- package/dist/chunk-QGM4M3NI.js +37 -0
- package/dist/chunk-UYWE7HSU.js +369 -0
- package/dist/chunk-UYWE7HSU.js.map +1 -0
- package/dist/chunk-X2DL2GWT.js +33 -0
- package/dist/chunk-X2DL2GWT.js.map +1 -0
- package/dist/chunk-X7HNNNJJ.js +1079 -0
- package/dist/consolidation-FOVQTWNQ.js +222 -0
- package/dist/consolidation-IFQ52E44.js +210 -0
- package/dist/consolidation-IFQ52E44.js.map +1 -0
- package/dist/context-sharing-6CCFIAKL.js +276 -0
- package/dist/context-sharing-6CCFIAKL.js.map +1 -0
- package/dist/context-sharing-PH64JTXS.js +308 -0
- package/dist/cost-optimization-6OIKRSBV.js +196 -0
- package/dist/cost-optimization-6OIKRSBV.js.map +1 -0
- package/dist/cost-optimization-BH5NAX33.js +287 -0
- package/dist/cost-optimization-BH5NAX33.js.map +1 -0
- package/dist/cost-optimization-F3L5BS5F.js +303 -0
- package/dist/ingest-2LPTWUUM.js +16 -0
- package/dist/ingest-QE2BTV72.js +15 -0
- package/dist/ingest-QE2BTV72.js.map +1 -0
- package/dist/oracle-J47QCSEW.js +263 -0
- package/dist/oracle-MDP5MZRC.js +257 -0
- package/dist/oracle-MDP5MZRC.js.map +1 -0
- package/dist/search-BLVHWLWC.js +14 -0
- package/dist/search-CZ5NYL5B.js +13 -0
- package/dist/search-CZ5NYL5B.js.map +1 -0
- package/dist/server.d.ts +2 -0
- package/dist/server.js +686 -1000
- package/dist/server.js.map +1 -1
- package/package.json +5 -2
package/dist/chunk-L6DXSM2U.js
@@ -0,0 +1,457 @@
+import {
+  detectRelations,
+  extractEventDate,
+  shouldInvalidateMemory
+} from "./chunk-UYWE7HSU.js";
+import {
+  db,
+  embedSingle
+} from "./chunk-X2DL2GWT.js";
+
+// src/engine/memory/extractor.ts
+import Anthropic from "@anthropic-ai/sdk";
+var anthropic = new Anthropic({
+  apiKey: process.env.ANTHROPIC_API_KEY || ""
+});
+var EXTRACTION_PROMPT = `You are an expert memory extraction system. Your job is to extract atomic, unambiguous memories from conversation chunks.
+
+**Critical Rules:**
+1. Each memory must be a SINGLE fact/preference/event/relationship
+2. Resolve ALL pronouns (he/she/it/they/them) to actual names using context
+3. Resolve ALL ambiguous references ("the company", "that project") to specific entities
+4. Extract temporal information when events occurred (not when mentioned)
+5. Be conservative - only extract high-confidence memories
+
+**Memory Types:**
+- factual: Objective facts ("John works at Google")
+- preference: User preferences ("Sarah prefers dark mode")
+- event: Events with timestamps ("Team met on Jan 15, 2024")
+- relationship: Relationships ("Alex reports to Maria")
+- opinion: Subjective views ("User thinks Python is easier than Rust")
+- goal: Future intentions ("User wants to learn machine learning")
+- instruction: Persistent instructions ("Always use formal tone with clients")
+
+**Disambiguation:**
+- Replace "he" \u2192 actual name using context
+- Replace "she" \u2192 actual name using context
+- Replace "it" \u2192 specific thing using context
+- Replace "the company" \u2192 company name
+- Replace "that project" \u2192 project name
+
+**Example:**
+Input: "He said he prefers using React. The project will launch next week."
+Context: Previous message: "Alex joined the team yesterday. He's working on the dashboard project."
+
+Bad Output:
+- "He prefers React" \u274C (ambiguous)
+- "The project launches next week" \u274C (what project?)
+
+Good Output:
+- content: "Alex prefers using React for development"
+  type: preference
+  entities: ["Alex", "React"]
+  eventDate: null
+
+- content: "Dashboard project launch scheduled for [specific date if mentioned]"
+  type: event
+  entities: ["Dashboard project"]
+  eventDate: [calculated date] or null if not specific`;
+async function extractMemories(chunk, context) {
+  const contextStr = buildContextString(context);
+  const prompt = `${EXTRACTION_PROMPT}
+
+${contextStr}
+
+**Current chunk to analyze:**
+${chunk}
+
+**Document Date (when this was said):** ${context.documentDate.toISOString()}
+
+Extract memories and return a JSON array. For each memory:
+{
+  "content": "clear, unambiguous statement with no pronouns",
+  "memoryType": "factual|preference|event|relationship|opinion|goal|instruction",
+  "entityMentions": ["list", "of", "entities", "mentioned"],
+  "eventDate": "ISO date string or null",
+  "confidence": 0.0-1.0,
+  "reasoning": "brief explanation of extraction"
+}
+
+Return ONLY the JSON array, no other text.`;
+  try {
+    const response = await anthropic.messages.create({
+      model: "claude-sonnet-4.5",
+      max_tokens: 4096,
+      temperature: 0,
+      // Deterministic for extraction
+      messages: [
+        {
+          role: "user",
+          content: prompt
+        }
+      ]
+    });
+    const textContent = response.content.find((c) => c.type === "text");
+    if (!textContent || textContent.type !== "text") {
+      throw new Error("No text response from Claude");
+    }
+    const text = textContent.text.trim();
+    const jsonMatch = text.match(/```json\n?([\s\S]*?)\n?```/) || text.match(/\[[\s\S]*\]/);
+    const jsonStr = jsonMatch ? jsonMatch[1] || jsonMatch[0] : text;
+    const rawMemories = JSON.parse(jsonStr);
+    if (!Array.isArray(rawMemories)) {
+      console.error("Expected array of memories, got:", rawMemories);
+      return [];
+    }
+    return rawMemories.map((m) => ({
+      content: m.content,
+      memoryType: m.memoryType,
+      entityMentions: m.entityMentions || [],
+      eventDate: m.eventDate ? new Date(m.eventDate) : null,
+      confidence: m.confidence || 0.7,
+      reasoning: m.reasoning
+    }));
+  } catch (error) {
+    console.error("Memory extraction failed:", error);
+    return [];
+  }
+}
+function buildContextString(context) {
+  const parts = [];
+  if (context.previousMessages && context.previousMessages.length > 0) {
+    parts.push("**Context from previous messages:**");
+    parts.push(context.previousMessages.slice(-5).join("\n"));
+  }
+  if (context.entityContext && context.entityContext.size > 0) {
+    parts.push("\n**Known entities:**");
+    context.entityContext.forEach((name, pronoun) => {
+      parts.push(`- "${pronoun}" refers to ${name}`);
+    });
+  }
+  if (parts.length === 0) {
+    return "**Context:** None available";
+  }
+  return parts.join("\n");
+}
+function buildEntityContext(recentMemories) {
+  const entityMap = /* @__PURE__ */ new Map();
+  for (const memory of recentMemories) {
+    for (const entity of memory.entityMentions) {
+      if (/^[A-Z][a-z]+(?:\s[A-Z][a-z]+)*$/.test(entity)) {
+        entityMap.set("he", entity);
+        entityMap.set("she", entity);
+        entityMap.set("they", entity);
+      }
+    }
+  }
+  return entityMap;
+}
+function validateMemory(memory) {
+  if (memory.confidence < 0.6) {
+    return false;
+  }
+  if (memory.content.length < 10) {
+    return false;
+  }
+  const pronouns = /\b(he|she|it|they|them|his|her|their)\b/i;
+  if (pronouns.test(memory.content)) {
+    console.warn("Memory contains unresolved pronouns:", memory.content);
+    return false;
+  }
+  const vagueRefs = /\b(the company|that project|this thing|the system)\b/i;
+  if (vagueRefs.test(memory.content)) {
+    console.warn("Memory contains vague references:", memory.content);
+    return false;
+  }
+  return true;
+}
+
+// src/engine/memory/ingest.ts
+async function ingestSession(params) {
+  const { sessionId, projectId, orgId, userId, messages } = params;
+  const result = {
+    memoriesCreated: 0,
+    relationsCreated: 0,
+    memoriesInvalidated: 0,
+    errors: []
+  };
+  if (messages.length === 0) {
+    return result;
+  }
+  try {
+    const context = {
+      sessionId,
+      userId: userId || "unknown",
+      projectId,
+      orgId,
+      documentDate: messages[messages.length - 1].timestamp,
+      previousMessages: messages.slice(0, -1).map((m) => `${m.role}: ${m.content}`)
+    };
+    const recentMemories = await db.memory.findMany({
+      where: {
+        sessionId,
+        projectId,
+        isActive: true
+      },
+      orderBy: {
+        createdAt: "desc"
+      },
+      take: 20,
+      select: {
+        content: true,
+        entityMentions: true
+      }
+    });
+    context.entityContext = buildEntityContext(recentMemories);
+    const latestMessage = messages[messages.length - 1].content;
+    const extractedMemories = await extractMemories(latestMessage, context);
+    const validMemories = extractedMemories.filter(validateMemory);
+    if (validMemories.length === 0) {
+      return result;
+    }
+    const existingMemories = await db.memory.findMany({
+      where: {
+        projectId,
+        userId,
+        isActive: true
+      },
+      orderBy: {
+        createdAt: "desc"
+      },
+      take: 100,
+      // Check against last 100 memories
+      select: {
+        id: true,
+        content: true,
+        memoryType: true,
+        entityMentions: true,
+        documentDate: true
+      }
+    });
+    for (const extracted of validMemories) {
+      try {
+        const relations = await detectRelations(
+          {
+            content: extracted.content,
+            memoryType: extracted.memoryType,
+            entityMentions: extracted.entityMentions
+          },
+          existingMemories
+        );
+        const eventDate = extracted.eventDate || await extractEventDate(
+          extracted.content,
+          context.documentDate
+        );
+        const embedding = await embedSingle(extracted.content);
+        const memory = await db.memory.create({
+          data: {
+            projectId,
+            orgId,
+            userId,
+            sessionId,
+            memoryType: extracted.memoryType,
+            content: extracted.content,
+            embedding,
+            entityMentions: extracted.entityMentions,
+            confidence: extracted.confidence,
+            documentDate: context.documentDate,
+            eventDate,
+            validFrom: /* @__PURE__ */ new Date(),
+            metadata: {
+              reasoning: extracted.reasoning,
+              extractedFrom: "session_ingestion"
+            }
+          }
+        });
+        result.memoriesCreated++;
+        for (const relation of relations) {
+          try {
+            await db.memoryRelation.create({
+              data: {
+                fromMemoryId: memory.id,
+                toMemoryId: relation.toMemoryId,
+                relationType: relation.relationType,
+                confidence: relation.confidence,
+                reasoning: relation.reasoning
+              }
+            });
+            result.relationsCreated++;
+            if (shouldInvalidateMemory(relation.relationType)) {
+              await db.memory.update({
+                where: { id: relation.toMemoryId },
+                data: {
+                  validUntil: /* @__PURE__ */ new Date(),
+                  supersededBy: memory.id
+                }
+              });
+              const oldMemory = await db.memory.findUnique({
+                where: { id: relation.toMemoryId },
+                select: { version: true }
+              });
+              if (oldMemory) {
+                await db.memory.update({
+                  where: { id: memory.id },
+                  data: { version: oldMemory.version + 1 }
+                });
+              }
+              result.memoriesInvalidated++;
+            }
+          } catch (error) {
+            result.errors.push(`Failed to create relation: ${error}`);
+          }
+        }
+      } catch (error) {
+        result.errors.push(`Failed to process memory: ${error}`);
+      }
+    }
+    return result;
+  } catch (error) {
+    result.errors.push(`Ingestion failed: ${error}`);
+    return result;
+  }
+}
+async function ingestChunk(params) {
+  const { chunkId, chunkContent, projectId, orgId, documentDate, metadata } = params;
+  const result = {
+    memoriesCreated: 0,
+    relationsCreated: 0,
+    memoriesInvalidated: 0,
+    errors: []
+  };
+  try {
+    const context = {
+      sessionId: `chunk_${chunkId}`,
+      userId: "system",
+      projectId,
+      orgId,
+      documentDate
+    };
+    const extractedMemories = await extractMemories(chunkContent, context);
+    const validMemories = extractedMemories.filter(validateMemory);
+    for (const extracted of validMemories) {
+      const eventDate = extracted.eventDate || await extractEventDate(
+        extracted.content,
+        documentDate
+      );
+      const embedding = await embedSingle(extracted.content);
+      await db.memory.create({
+        data: {
+          projectId,
+          orgId,
+          memoryType: extracted.memoryType,
+          content: extracted.content,
+          embedding,
+          entityMentions: extracted.entityMentions,
+          confidence: extracted.confidence,
+          documentDate,
+          eventDate,
+          validFrom: /* @__PURE__ */ new Date(),
+          sourceChunkId: chunkId,
+          scope: "DOCUMENT",
+          // Document-level scope
+          metadata: {
+            ...metadata,
+            reasoning: extracted.reasoning
+          }
+        }
+      });
+      result.memoriesCreated++;
+    }
+    return result;
+  } catch (error) {
+    result.errors.push(`Chunk ingestion failed: ${error}`);
+    return result;
+  }
+}
+async function ingestChunksBatch(params) {
+  const { chunks, projectId, orgId, documentDate } = params;
+  const aggregateResult = {
+    memoriesCreated: 0,
+    relationsCreated: 0,
+    memoriesInvalidated: 0,
+    errors: []
+  };
+  const batchSize = 10;
+  for (let i = 0; i < chunks.length; i += batchSize) {
+    const batch = chunks.slice(i, i + batchSize);
+    const results = await Promise.all(
+      batch.map(
+        (chunk) => ingestChunk({
+          chunkId: chunk.id,
+          chunkContent: chunk.content,
+          projectId,
+          orgId,
+          documentDate,
+          metadata: chunk.metadata
+        })
+      )
+    );
+    for (const result of results) {
+      aggregateResult.memoriesCreated += result.memoriesCreated;
+      aggregateResult.relationsCreated += result.relationsCreated;
+      aggregateResult.memoriesInvalidated += result.memoriesInvalidated;
+      aggregateResult.errors.push(...result.errors);
+    }
+  }
+  return aggregateResult;
+}
+async function updateMemory(params) {
+  const { memoryId, newContent, reasoning } = params;
+  const oldMemory = await db.memory.findUnique({
+    where: { id: memoryId }
+  });
+  if (!oldMemory) {
+    throw new Error("Memory not found");
+  }
+  const embedding = await embedSingle(newContent);
+  const newMemory = await db.memory.create({
+    data: {
+      projectId: oldMemory.projectId,
+      orgId: oldMemory.orgId,
+      userId: oldMemory.userId,
+      sessionId: oldMemory.sessionId,
+      memoryType: oldMemory.memoryType,
+      content: newContent,
+      embedding,
+      entityMentions: oldMemory.entityMentions,
+      confidence: oldMemory.confidence,
+      documentDate: oldMemory.documentDate,
+      eventDate: oldMemory.eventDate,
+      validFrom: /* @__PURE__ */ new Date(),
+      version: oldMemory.version + 1,
+      scope: oldMemory.scope,
+      metadata: {
+        ...oldMemory.metadata,
+        updateReasoning: reasoning
+      }
+    }
+  });
+  await db.memory.update({
+    where: { id: memoryId },
+    data: {
+      validUntil: /* @__PURE__ */ new Date(),
+      supersededBy: newMemory.id
+    }
+  });
+  await db.memoryRelation.create({
+    data: {
+      fromMemoryId: newMemory.id,
+      toMemoryId: memoryId,
+      relationType: "updates",
+      confidence: 1,
+      reasoning: reasoning || "Manual update"
+    }
+  });
+  return {
+    newMemoryId: newMemory.id,
+    oldMemoryId: memoryId
+  };
+}
+
+export {
+  ingestSession,
+  ingestChunk,
+  ingestChunksBatch,
+  updateMemory
+};
+//# sourceMappingURL=chunk-L6DXSM2U.js.map
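For orientation, here is a minimal sketch of how the ingestion API added in this chunk might be driven. Everything outside the exported names is an assumption: the import specifier is hypothetical (the hashed chunk path is internal, so a real consumer would reach these functions through whatever entry point the package exposes), and a configured database plus an `ANTHROPIC_API_KEY` are required per the code above.

```ts
// Hypothetical import path; the hashed dist chunk is not a public entry point.
import { ingestSession, updateMemory } from "@usewhisper/mcp-server";

async function demo(): Promise<void> {
  // ingestSession extracts memories from the LAST message only; the earlier
  // messages serve as disambiguation context (pronoun/entity resolution).
  const result = await ingestSession({
    sessionId: "session-123",          // hypothetical ids throughout
    projectId: "project-abc",
    orgId: "org-1",                    // optional
    userId: "user-42",                 // optional; context defaults to "unknown"
    messages: [
      { role: "user", content: "Alex joined the team yesterday.", timestamp: new Date("2024-01-14") },
      { role: "user", content: "He prefers using React for the dashboard.", timestamp: new Date("2024-01-15") },
    ],
  });
  // Result shape, per the chunk:
  // { memoriesCreated, relationsCreated, memoriesInvalidated, errors }
  console.log(result);

  // A memory survives validateMemory only if confidence >= 0.6, content is at
  // least 10 characters, and it contains no unresolved pronouns or vague
  // references ("the company", "that project", ...), e.g.:
  // { content: "Alex prefers using React for development",
  //   memoryType: "preference", entityMentions: ["Alex", "React"],
  //   eventDate: null, confidence: 0.9 }

  // updateMemory creates a new version, stamps validUntil/supersededBy on the
  // old row, and links the pair with an "updates" relation.
  await updateMemory({
    memoryId: "mem-1",
    newContent: "Alex prefers using Vue for development",
    reasoning: "User changed framework preference",
  });
}

demo().catch(console.error);
```

Note that `ingestSession` only extracts from the final message, treating the preceding ones as context, so continuous capture implies one call per conversational turn; `ingestChunksBatch` covers the document-indexing path, processing chunks in parallel batches of 10.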
package/dist/chunk-L6DXSM2U.js.map
@@ -0,0 +1 @@
+{"version":3,"sources":["../../src/engine/memory/extractor.ts","../../src/engine/memory/ingest.ts"],"sourcesContent":["…"],"mappings":"…","names":[]}