@usewhisper/mcp-server 0.3.0 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +182 -154
- package/dist/autosubscribe-6EDKPBE2.js +4068 -4068
- package/dist/autosubscribe-GHO6YR5A.js +4068 -4068
- package/dist/autosubscribe-ISDETQIB.js +435 -435
- package/dist/chunk-3WGYBAYR.js +8387 -8387
- package/dist/chunk-52VJYCZ7.js +455 -455
- package/dist/chunk-5KBZQHDL.js +189 -189
- package/dist/chunk-5KIJNY6Z.js +370 -370
- package/dist/chunk-7SN3CKDK.js +1076 -1076
- package/dist/chunk-B3VWOHUA.js +271 -271
- package/dist/chunk-C57DHKTL.js +459 -459
- package/dist/chunk-EI5CE3EY.js +616 -616
- package/dist/chunk-FTWUJBAH.js +386 -386
- package/dist/chunk-H3HSKH2P.js +4841 -4841
- package/dist/chunk-JO3ORBZD.js +616 -616
- package/dist/chunk-L6DXSM2U.js +456 -456
- package/dist/chunk-LMEYV4JD.js +368 -368
- package/dist/chunk-MEFLJ4PV.js +8385 -8385
- package/dist/chunk-OBLI4FE4.js +275 -275
- package/dist/chunk-PPGYJJED.js +271 -271
- package/dist/chunk-QGM4M3NI.js +37 -37
- package/dist/chunk-T7KMSTWP.js +399 -399
- package/dist/chunk-TWEIYHI6.js +399 -399
- package/dist/chunk-UYWE7HSU.js +368 -368
- package/dist/chunk-X2DL2GWT.js +32 -32
- package/dist/chunk-X7HNNNJJ.js +1079 -1079
- package/dist/consolidation-2GCKI4RE.js +220 -220
- package/dist/consolidation-4JOPW6BG.js +220 -220
- package/dist/consolidation-FOVQTWNQ.js +222 -222
- package/dist/consolidation-IFQ52E44.js +209 -209
- package/dist/context-sharing-4ITCNKG4.js +307 -307
- package/dist/context-sharing-6CCFIAKL.js +275 -275
- package/dist/context-sharing-GYKLXHZA.js +307 -307
- package/dist/context-sharing-PH64JTXS.js +308 -308
- package/dist/context-sharing-Y6LTZZOF.js +307 -307
- package/dist/cost-optimization-6OIKRSBV.js +195 -195
- package/dist/cost-optimization-7DVSTL6R.js +307 -307
- package/dist/cost-optimization-BH5NAX33.js +286 -286
- package/dist/cost-optimization-F3L5BS5F.js +303 -303
- package/dist/ingest-2LPTWUUM.js +16 -16
- package/dist/ingest-7T5FAZNC.js +15 -15
- package/dist/ingest-EBNIE7XB.js +15 -15
- package/dist/ingest-FSHT5BCS.js +15 -15
- package/dist/ingest-QE2BTV72.js +14 -14
- package/dist/oracle-3RLQF3DP.js +259 -259
- package/dist/oracle-FKRTQUUG.js +282 -282
- package/dist/oracle-J47QCSEW.js +263 -263
- package/dist/oracle-MDP5MZRC.js +256 -256
- package/dist/search-BLVHWLWC.js +14 -14
- package/dist/search-CZ5NYL5B.js +12 -12
- package/dist/search-EG6TYWWW.js +13 -13
- package/dist/search-I22QQA7T.js +13 -13
- package/dist/search-T7H5G6DW.js +13 -13
- package/dist/server.d.ts +2 -2
- package/dist/server.js +1973 -169
- package/dist/server.js.map +1 -1
- package/package.json +51 -51
@@ -1,220 +1,220 @@

The old and new sides of this hunk are identical: all 220 lines of the chunk are removed and re-added unchanged. The chunk content, shown once:

```js
import {
  db,
  embedSingle
} from "./chunk-3WGYBAYR.js";
import "./chunk-QGM4M3NI.js";

// ../src/engine/memory/consolidation.ts
import OpenAI from "openai";
var openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY || ""
});
async function findDuplicateMemories(params) {
  const {
    projectId,
    userId,
    similarityThreshold = 0.95,
    limit = 50
  } = params;
  const maxMemories = Math.min(Math.max(limit, 10), 100);
  const memories = await db.memory.findMany({
    where: {
      projectId,
      userId,
      isActive: true,
      validUntil: null
    },
    orderBy: { importance: "desc" },
    take: maxMemories
  });
  const clusters = [];
  const processed = /* @__PURE__ */ new Set();
  for (let i = 0; i < memories.length; i++) {
    const memory = memories[i];
    if (processed.has(memory.id)) continue;
    const similar = [];
    const candidates = memories.slice(i + 1);
    const batchSimilarities = await calculateBatchSimilarity(memory.id, candidates.map((c) => c.id));
    for (let j = 0; j < candidates.length; j++) {
      const other = candidates[j];
      if (processed.has(other.id)) continue;
      const similarity = batchSimilarities[j];
      if (similarity >= similarityThreshold) {
        similar.push({ ...other, similarity });
        processed.add(other.id);
      }
    }
    if (similar.length > 0) {
      clusters.push({
        representative: memory,
        duplicates: similar,
        similarity: similar.reduce((sum, m) => sum + m.similarity, 0) / similar.length
      });
      processed.add(memory.id);
    }
  }
  return clusters;
}
async function calculateBatchSimilarity(memoryId, otherIds) {
  if (otherIds.length === 0) return [];
  const placeholders = otherIds.map((_, i) => `(m1.embedding <=> $${i + 2}::vector)`).join(" + ");
  const conditions = otherIds.map((id, i) => `m2.id = $${i + 2}`).join(" OR ");
  const result = await db.$queryRaw`
    SELECT
      1 - (m1.embedding <=> m2.embedding) as similarity,
      m2.id as id
    FROM memories m1, memories m2
    WHERE m1.id = ${memoryId} AND (${conditions})
  `;
  const similarityMap = new Map(result.map((r) => [r.id, r.similarity]));
  return otherIds.map((id) => similarityMap.get(id) || 0);
}
async function mergeDuplicateMemories(cluster) {
  const memories = [cluster.representative, ...cluster.duplicates];
  const prompt = `You are merging duplicate memories into a single, comprehensive memory.

**Memories to merge:**
${memories.map(
    (m, i) => `${i + 1}. "${m.content}" (confidence: ${m.confidence}, date: ${m.documentDate?.toISOString() || "unknown"})`
  ).join("\n")}

**Instructions:**
1. Combine all unique information from these memories
2. Resolve any contradictions by keeping the most recent or most confident information
3. Extract all unique entity mentions
4. Use the highest confidence score
5. Keep the most recent document date

Return JSON:
{
  "merged_content": "comprehensive merged memory",
  "entity_mentions": ["list", "of", "entities"],
  "confidence": 0.0-1.0,
  "reasoning": "brief explanation of how you merged"
}`;
  const response = await openai.chat.completions.create({
    model: "gpt-4o",
    max_tokens: 2048,
    temperature: 0,
    messages: [{ role: "user", content: prompt }],
    response_format: { type: "json_object" }
  });
  const text = response.choices[0]?.message?.content?.trim();
  if (!text) {
    throw new Error("Failed to merge memories");
  }
  const jsonMatch = text.match(/```json\n?([\s\S]*?)\n?```/) || text.match(/\{[\s\S]*\}/);
  const jsonStr = jsonMatch ? jsonMatch[1] || jsonMatch[0] : text;
  const result = JSON.parse(jsonStr);
  const embedding = await embedSingle(result.merged_content);
  const mergedMemory = await db.memory.create({
    data: {
      projectId: cluster.representative.projectId,
      orgId: cluster.representative.orgId,
      userId: cluster.representative.userId,
      sessionId: cluster.representative.sessionId,
      memoryType: cluster.representative.memoryType,
      content: result.merged_content,
      embedding,
      entityMentions: result.entity_mentions || [],
      confidence: result.confidence || cluster.representative.confidence,
      documentDate: cluster.representative.documentDate,
      eventDate: cluster.representative.eventDate,
      validFrom: /* @__PURE__ */ new Date(),
      importance: Math.max(...memories.map((m) => m.importance || 0.5)),
      metadata: {
        mergedFrom: memories.map((m) => m.id),
        mergeReasoning: result.reasoning,
        mergedAt: (/* @__PURE__ */ new Date()).toISOString()
      }
    }
  });
  for (const memory of memories) {
    await db.memory.update({
      where: { id: memory.id },
      data: {
        isActive: false,
        validUntil: /* @__PURE__ */ new Date(),
        supersededBy: mergedMemory.id
      }
    });
  }
  return mergedMemory.id;
}
async function consolidateMemories(params) {
  const { projectId, userId, similarityThreshold = 0.95, dryRun = false } = params;
  console.log(`\u{1F50D} Finding duplicate memories in project ${projectId}...`);
  const clusters = await findDuplicateMemories({
    projectId,
    userId,
    similarityThreshold
  });
  console.log(`\u{1F4CA} Found ${clusters.length} memory clusters`);
  if (dryRun) {
    for (const cluster of clusters) {
      console.log(`
Cluster (similarity: ${cluster.similarity.toFixed(2)}):`);
      console.log(` Representative: "${cluster.representative.content}"`);
      console.log(` Duplicates: ${cluster.duplicates.length}`);
      cluster.duplicates.forEach((d) => {
        console.log(` - "${d.content}"`);
      });
    }
    return {
      clustersFound: clusters.length,
      memoriesMerged: 0,
      memoriesDeactivated: 0
    };
  }
  let memoriesMerged = 0;
  let memoriesDeactivated = 0;
  for (const cluster of clusters) {
    try {
      console.log(`\u{1F517} Merging cluster with ${cluster.duplicates.length + 1} memories...`);
      await mergeDuplicateMemories(cluster);
      memoriesMerged++;
      memoriesDeactivated += cluster.duplicates.length + 1;
      console.log(`\u2705 Merged successfully`);
    } catch (error) {
      console.error(`\u274C Failed to merge cluster:`, error);
    }
  }
  console.log(
    `
\u2705 Consolidation complete: ${memoriesMerged} clusters merged, ${memoriesDeactivated} memories deactivated`
  );
  return {
    clustersFound: clusters.length,
    memoriesMerged,
    memoriesDeactivated
  };
}
async function scheduledConsolidation(orgId) {
  console.log(`\u{1F504} Running scheduled consolidation for org ${orgId}...`);
  const projects = await db.project.findMany({
    where: { orgId }
  });
  for (const project of projects) {
    try {
      const result = await consolidateMemories({
        projectId: project.id,
        similarityThreshold: 0.92
        // Slightly lower for scheduled runs
      });
      if (result.memoriesMerged > 0) {
        console.log(
          `\u{1F4CA} Project ${project.name}: merged ${result.memoriesMerged} clusters`
        );
      }
    } catch (error) {
      console.error(`Failed to consolidate project ${project.name}:`, error);
    }
  }
  console.log("\u2705 Scheduled consolidation complete");
}
export {
  consolidateMemories,
  findDuplicateMemories,
  mergeDuplicateMemories,
  scheduledConsolidation
};
```
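For orientation, here is a minimal sketch of how these exported entry points might be driven from a maintenance script. It is illustrative only: the import path, IDs, and values are assumptions; only the function names, their parameters, and the returned counters come from the chunk above.

```js
// Hypothetical maintenance script; the import path and all IDs below are
// placeholders, not part of the published package.
import { consolidateMemories, scheduledConsolidation } from "./consolidation.js";

// Preview duplicate clusters without writing anything: dryRun returns early,
// so memoriesMerged and memoriesDeactivated come back as 0.
const preview = await consolidateMemories({
  projectId: "proj_123",     // placeholder project ID
  userId: "user_456",        // placeholder user ID
  similarityThreshold: 0.95, // the module's default threshold
  dryRun: true
});
console.log(`${preview.clustersFound} duplicate clusters found`);

// Merge for real once the preview looks sane.
if (preview.clustersFound > 0) {
  await consolidateMemories({ projectId: "proj_123", userId: "user_456" });
}

// Or sweep every project in an org at the looser 0.92 threshold.
await scheduledConsolidation("org_789"); // placeholder org ID
```

Note that `consolidateMemories` logs and swallows per-cluster merge failures, so `memoriesMerged` can come back lower than `clustersFound`; a dry run first is the safe way to see what would be collapsed.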