memories-lite 0.10.1 → 0.99.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,16 +2,22 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.DEFAULT_MEMORY_CONFIG = void 0;
  const DEFAULT_SCORING_CONFIG = {
- // Focused on user preferences with AI - removed episodic and procedural memories
- todo: { alpha: 0.40, beta: 0.50, gamma: 0.10, halfLifeDays: 40 }, // ~40 days
- factual: { alpha: 0.70, beta: 0.20, gamma: 0.10, halfLifeDays: 150 }, // ~150 days
+ //
+ // assistant_preference: user preferences with the assistant (style, language, etc.)
  assistant_preference: { alpha: 0.60, beta: 0.05, gamma: 0.35, halfLifeDays: Infinity },
- default: { alpha: 0.5, beta: 0.3, gamma: 0.1, halfLifeDays: 30 } // Fallback default
+ //
+ // discussion: discussion memories (operational summaries)
+ // - alpha=1: score based solely on cosine similarity
+ // - beta=0: no recency
+ // - gamma=0: no base importance
+ discussion: { alpha: 1, beta: 0, gamma: 0, halfLifeDays: Infinity },
+ //
+ // default: fallback when the type is missing or unknown
+ default: { alpha: 0.5, beta: 0.3, gamma: 0.1, halfLifeDays: 30 }
  };
  exports.DEFAULT_MEMORY_CONFIG = {
  disableHistory: true,
- enableGraph: false,
- version: "v1.1",
+ version: "v2.0",
  embedder: {
  provider: "openai",
  config: {
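The new `discussion` weights above only make sense against the per-type scoring model the comments hint at: `alpha` weights cosine similarity, `beta` weights recency, `gamma` weights base importance, with `halfLifeDays` driving the decay. Below is a minimal TypeScript sketch of such a weighted score, assuming a standard exponential half-life for the recency term; the function name and exact formula are inferred from the comments, not taken from the package source.

```typescript
// Hypothetical sketch of how the scoring weights above are likely combined.
// The signature and the recency formula are assumptions inferred from the
// diff comments, not the package's actual internals.
interface ScoringWeights {
  alpha: number;        // weight of cosine similarity
  beta: number;         // weight of recency
  gamma: number;        // weight of base importance
  halfLifeDays: number; // exponential half-life of the recency term
}

function scoreMemory(
  similarity: number,   // cosine similarity in [0, 1]
  ageDays: number,      // age of the memory in days
  importance: number,   // base importance in [0, 1]
  w: ScoringWeights,
): number {
  // halfLifeDays: Infinity => recency term stays at 1 (no decay), as used
  // for assistant_preference and discussion above.
  const recency = Number.isFinite(w.halfLifeDays)
    ? Math.pow(0.5, ageDays / w.halfLifeDays)
    : 1;
  return w.alpha * similarity + w.beta * recency + w.gamma * importance;
}

// With the "discussion" weights (alpha=1, beta=0, gamma=0) the score reduces
// to the raw cosine similarity, matching the comment in the diff.
```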
@@ -26,6 +32,7 @@ exports.DEFAULT_MEMORY_CONFIG = {
  collectionName: "memories",
  dimension: 768,
  scoring: DEFAULT_SCORING_CONFIG,
+ searchThreshold: 0.50, // Minimum score threshold for returning a result
  },
  },
  llm: {
@@ -36,20 +43,6 @@ exports.DEFAULT_MEMORY_CONFIG = {
  modelProperties: undefined,
  },
  },
- graphStore: {
- provider: "neo4j",
- config: {
- url: process.env.NEO4J_URL || "neo4j://localhost:7687",
- username: process.env.NEO4J_USERNAME || "neo4j",
- password: process.env.NEO4J_PASSWORD || "password",
- },
- llm: {
- provider: "openai",
- config: {
- model: "gpt-5-mini",
- },
- },
- },
  historyStore: {
  provider: "dummy",
  config: {
@@ -47,9 +47,8 @@ class ConfigManager {
  dimension: userConf.dimension || defaultConf.dimension,
  // Merge scoring deeply if present in userConf, otherwise use default
  scoring: userConf.scoring ? {
- todo: { ...defaultConf.scoring?.todo, ...userConf.scoring.todo },
- factual: { ...defaultConf.scoring?.factual, ...userConf.scoring.factual },
  assistant_preference: { ...defaultConf.scoring?.assistant_preference, ...userConf.scoring.assistant_preference },
+ discussion: { ...defaultConf.scoring?.discussion, ...userConf.scoring.discussion },
  default: { ...defaultConf.scoring?.default, ...userConf.scoring.default },
  } : defaultConf.scoring,
  ...userConf, // Include any other passthrough fields from user
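Because of the deep merge above, a caller only needs to supply the scoring fields they want to change. A hedged usage sketch, assuming the package is imported by its npm name and that user overrides flow through `vectorStore.config` the same way the defaults shown earlier do:

```typescript
// Hypothetical override of the default scoring and search threshold.
// The constructor and the vectorStore.config fields follow this diff;
// the import path and the chosen values are illustrative.
import { MemoriesLite } from "memories-lite";

const memory = new MemoriesLite({
  vectorStore: {
    provider: "lite",
    config: {
      collectionName: "memories",
      dimension: 768,
      searchThreshold: 0.50,
      scoring: {
        // Per-type entries are spread over DEFAULT_SCORING_CONFIG at merge
        // time, so partial objects are tolerated at runtime; a full object
        // is shown here to keep the sketch type-safe.
        discussion: { alpha: 1, beta: 0, gamma: 0, halfLifeDays: 90 },
      },
    },
  },
});
```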
@@ -63,9 +62,8 @@ class ConfigManager {
  client: undefined,
  // Merge scoring deeply if present in userConf, otherwise use default
  scoring: userConf?.scoring ? {
- todo: { ...defaultConf.scoring?.todo, ...userConf.scoring.todo },
- factual: { ...defaultConf.scoring?.factual, ...userConf.scoring.factual },
  assistant_preference: { ...defaultConf.scoring?.assistant_preference, ...userConf.scoring.assistant_preference },
+ discussion: { ...defaultConf.scoring?.discussion, ...userConf.scoring.discussion },
  default: { ...defaultConf.scoring?.default, ...userConf.scoring.default },
  } : defaultConf.scoring,
  recencyCleanupThreshold: userConf?.recencyCleanupThreshold ?? defaultConf.recencyCleanupThreshold, // Merge cleanup threshold
@@ -98,17 +96,12 @@ class ConfigManager {
  })(),
  },
  historyDbPath: userConfig.historyDbPath || defaults_1.DEFAULT_MEMORY_CONFIG.historyDbPath,
- customPrompt: userConfig.customPrompt,
- graphStore: {
- ...defaults_1.DEFAULT_MEMORY_CONFIG.graphStore,
- ...userConfig.graphStore,
- },
+ capturePrompt: userConfig.capturePrompt,
  historyStore: {
  ...defaults_1.DEFAULT_MEMORY_CONFIG.historyStore,
  ...userConfig.historyStore,
  },
  disableHistory: userConfig.disableHistory || defaults_1.DEFAULT_MEMORY_CONFIG.disableHistory,
- enableGraph: userConfig.enableGraph || defaults_1.DEFAULT_MEMORY_CONFIG.enableGraph,
  };
  // Validate the merged config
  return types_1.MemoryConfigSchema.parse(mergedConfig);
@@ -3,24 +3,33 @@ import { VectorStore } from "../vectorstores/base";
  import { AddMemoryOptions, SearchMemoryOptions, DeleteAllMemoryOptions, GetAllMemoryOptions } from "./memory.types";
  export declare class MemoriesLite {
  private config;
- private customPrompt;
+ private capturePrompt;
  private embedder;
  private vectorStoreConfig;
  private llm;
  private db;
  private collectionName;
  private apiVersion;
- private graphMemory?;
- private enableGraph;
  telemetryId: string;
  constructor(config?: Partial<MemoryConfig>);
  private _initializeTelemetry;
  private _getTelemetryId;
  private _captureEvent;
  private $t;
+ /**
+ * Captures a discussion and generates a synthesis (title + summary)
+ * Uses getDiscussionSynthesisMessages to produce the synthesis
+ */
  private addToVectorStore;
  static fromConfig(configDict: Record<string, any>): MemoriesLite;
  getVectorStore(userId: string): Promise<VectorStore>;
+ /**
+ * Captures a discussion and generates a memory (title + summary)
+ *
+ * @param messages - Discussion messages or raw text
+ * @param userId - User ID
+ * @param config - Options, including capturePrompt to customize the synthesis
+ */
  capture(messages: string | Message[], userId: string, config: AddMemoryOptions): Promise<SearchResult>;
  get(memoryId: string, userId: string): Promise<MemoryItem | null>;
  retrieve(query: string, userId: string, config: SearchMemoryOptions): Promise<SearchResult>;
@@ -16,7 +16,7 @@ class MemoriesLite {
  constructor(config = {}) {
  // Merge and validate config
  this.config = manager_1.ConfigManager.mergeConfig(config);
- this.customPrompt = this.config.customPrompt;
+ this.capturePrompt = this.config.capturePrompt;
  this.embedder = factory_1.EmbedderFactory.create(this.config.embedder.provider, this.config.embedder.config);
  //
  // vectorStore.provider is "lite"
@@ -39,12 +39,7 @@ class MemoriesLite {
  }
  this.collectionName = this.config.vectorStore.config.collectionName;
  this.apiVersion = this.config.version || "v1.0";
- this.enableGraph = this.config.enableGraph || false;
  this.telemetryId = "anonymous";
- // Initialize graph memory if configured
- if (this.enableGraph && this.config.graphStore) {
- // this.graphMemory = new MemoryGraph(this.config);
- }
  // Initialize telemetry if vector store is initialized
  // this._initializeTelemetry();
  }
@@ -56,7 +51,6 @@ class MemoriesLite {
  api_version: this.apiVersion,
  client_type: "Memory",
  collection_name: this.collectionName,
- enable_graph: this.enableGraph,
  });
  }
  catch (error) { }
@@ -93,133 +87,68 @@ class MemoriesLite {
  // return text.replace(/<thinking>[\s\S]*?(?:<\/thinking>)/g,'').replace(/^<step.*$/g,'');
  return text.replace(/<thinking>[\s\S]*?<\/thinking>/g, '').replace(/^<step.*$/gm, '').replace(/<memories>[\s\S]*?<\/memories>/g, '');
  }
- async addToVectorStore(messages, metadata, userId, filters, customFacts) {
+ /**
+ * Captures a discussion and generates a synthesis (title + summary)
+ * Uses getDiscussionSynthesisMessages to produce the synthesis
+ */
+ async addToVectorStore(messages, metadata, userId, filters, capturePrompt) {
  const $t = this.$t;
  const vectorStore = await this.getVectorStore(userId);
- const parsedMessages = messages.filter((m) => typeof m.content === 'string' && m.role == 'user').map((m) => `${m.role == 'user' ? '**USER**: ' : '**ASSISTANT**: '}${$t(m.content)}\n`).join("\n");
- // Disinterest handling is delegated to the LLM via prompt guidelines
- const [systemPrompt, userPrompt] = (0, prompts_1.getFactRetrievalMessages)(parsedMessages, customFacts || this.customPrompt);
+ //
+ // Format the messages for the synthesis
+ const parsedMessages = messages
+ .filter((m) => typeof m.content === 'string')
+ .map((m) => `**${m.role.toUpperCase()}**: ${$t(m.content)}`)
+ .join("\n\n");
+ //
+ // Generate the synthesis via the LLM
+ const [systemPrompt, userPrompt] = (0, prompts_1.getDiscussionSynthesisMessages)(parsedMessages, capturePrompt || this.capturePrompt);
  const response = await this.llm.generateResponse([
  { role: "system", content: systemPrompt },
  { role: "user", content: userPrompt },
- ], { ...(0, zod_1.zodResponseFormat)(prompts_1.FactRetrievalSchema_extended, "FactRetrieval") }, [], false);
- const parsedResponse = (response) => {
+ ], { ...(0, zod_1.zodResponseFormat)(prompts_1.DiscussionSynthesisSchema, "DiscussionSynthesis") }, [], false);
+ //
+ // Parse the response
+ const parsedResponse = (res) => {
  try {
- // structured output
- if (typeof response === 'object') {
- return response;
+ if (typeof res === 'object') {
+ return res;
  }
- const cleanResponse = (0, prompts_1.removeCodeBlocks)(response);
+ const cleanResponse = (0, prompts_1.removeCodeBlocks)(res);
  return JSON.parse(cleanResponse);
  }
  catch (e) {
- console.error("Failed to parse facts from LLM response:", response, e, response);
- return [];
+ console.error("Failed to parse synthesis from LLM response:", res, e);
+ return { title: "Sans titre", summary: "" };
  }
  };
- //
- // can use native structured output
- // Drop factual facts at capture level (do not store factual memories)
- // FIXME Drop factual should be done at prompt level
- const facts = parsedResponse(response).facts?.filter((f) => !f.existing) || [];
- // console.log("-- DBG extract:", userPrompt);
- // console.log("-- DBG facts:", facts);
- // Get embeddings for new facts
- const newMessageEmbeddings = {};
- const retrievedOldMemory = [];
- //
- // add the userId to the filters
- filters.userId = userId;
- // Create embeddings and search for similar memories
- for (const elem of facts) {
- const fact = elem.fact;
- const embedding = await this.embedder.embed(fact);
- newMessageEmbeddings[fact] = embedding;
- const existingMemories = await vectorStore.search(embedding, 5, filters);
- for (const mem of existingMemories) {
- retrievedOldMemory.push({ id: mem.id, text: mem.payload.data, type: mem.payload.type });
- }
+ const { title, summary } = parsedResponse(response);
+ if (!summary) {
+ console.warn("-- ⚠️ Empty summary from LLM, skipping memory creation");
+ return [];
  }
- // console.log("-- DBG old memories:", retrievedOldMemory);
- // Remove duplicates from old memories
- const uniqueOldMemories = retrievedOldMemory.filter((mem, index) => retrievedOldMemory.findIndex((m) => m.id === mem.id) === index);
- // Create UUID mapping for handling UUID hallucinations
- const tempUuidMapping = {};
- uniqueOldMemories.forEach((item, idx) => {
- tempUuidMapping[String(idx)] = item.id;
- uniqueOldMemories[idx].id = String(idx);
- });
- // Get memory update decisions
- const lastUserMessage = [...messages].reverse().find(m => m.role === 'user');
- const userInstruction = typeof lastUserMessage?.content === 'string' ? lastUserMessage?.content : '';
- const updatePrompt = (0, prompts_1.getUpdateMemoryMessages)(uniqueOldMemories, facts, 'French', userInstruction);
- // console.log("-- DBG updatePrompt:", updatePrompt);
- const updateResponse = await this.llm.generateResponse([{ role: "user", content: updatePrompt }], { ...(0, zod_1.zodResponseFormat)(prompts_1.MemoryUpdateSchema, "Memory") }, [], false);
- // console.log("-- DBG merge:", updatePrompt);
- const memoryActions = parsedResponse(updateResponse).memory || [];
- // Process memory actions
- const results = [];
- for (const action of memoryActions) {
- // Ignore any factual memory actions (ADD/UPDATE/DELETE) → void
- if (action.type === 'factual') {
- continue;
- }
- if (action.reason === "undefined") {
- console.log(`-- ⛔ LLM Error: ${action.event}, ${action.type}, "${action.text}"`);
- continue;
- }
- console.log(`-- DBG memory "${userId}": ${action.event}, ${action.type}, "${action.text}", why: "${action.reason}"`);
- try {
- switch (action.event) {
- case "ADD": {
- if (!action.type) {
- // log error
- console.error("Type is mandatory to manage memories:", action);
- continue;
- }
- metadata.type = action.type;
- const memoryId = await this.createMemory(action.text, newMessageEmbeddings, metadata, userId);
- results.push({
- id: memoryId,
- memory: action.text,
- type: action.type,
- metadata: { event: action.event },
- });
- break;
- }
- case "UPDATE": {
- const realMemoryId = tempUuidMapping[action.id];
- const type = metadata.type = uniqueOldMemories[action.id].type || action.type;
- await this.updateMemory(realMemoryId, action.text, newMessageEmbeddings, metadata, userId);
- results.push({
- id: realMemoryId,
- memory: action.text,
- type,
- metadata: {
- event: action.event,
- previousMemory: action.old_memory,
- },
- });
- break;
- }
- case "DELETE": {
- const realMemoryId = tempUuidMapping[action.id];
- await this.deleteMemory(realMemoryId, userId);
- results.push({
- id: realMemoryId,
- memory: action.text,
- type: action.type,
- metadata: { event: action.event },
- });
- break;
- }
- }
- }
- catch (error) {
- console.error(`Error processing memory action: ${error}`);
- }
- }
- return results;
+ //
+ // Create the embedding from the title alone (COMPARATIVE TEST)
+ const embedding = await this.embedder.embed(title);
+ //
+ // Prepare the metadata
+ const memoryType = metadata.type || 'discussion';
+ const memoryMetadata = {
+ ...metadata,
+ title,
+ type: memoryType,
+ userId,
+ };
+ //
+ // Store the memory
+ const memoryId = await this.createMemory(summary, { [summary]: embedding }, memoryMetadata, userId);
+ console.log(`-- 🧠 Memory created: "${title}" (${memoryType})`);
+ return [{
+ id: memoryId,
+ memory: summary,
+ type: memoryType,
+ metadata: { title, event: "ADD" },
+ }];
  }
  static fromConfig(configDict) {
  try {
@@ -234,14 +163,15 @@ class MemoriesLite {
  async getVectorStore(userId) {
  return lite_1.LiteVectorStore.from(userId, this.vectorStoreConfig);
  }
+ /**
+ * Captures a discussion and generates a memory (title + summary)
+ *
+ * @param messages - Discussion messages or raw text
+ * @param userId - User ID
+ * @param config - Options, including capturePrompt to customize the synthesis
+ */
  async capture(messages, userId, config) {
- // await this._captureEvent("add", {
- // message_count: Array.isArray(messages) ? messages.length : 1,
- // has_metadata: !!config.metadata,
- // has_filters: !!config.filters,
- // infer: config.infer,
- // });
- const { agentId, runId, metadata = {}, filters = {}, infer = true, customFacts } = config;
+ const { agentId, runId, metadata = {}, filters = {}, capturePrompt } = config;
  if (agentId)
  filters.agentId = metadata.agentId = agentId;
  if (runId)
@@ -253,21 +183,11 @@ class MemoriesLite {
  ? messages
  : [{ role: "user", content: messages }];
  const final_parsedMessages = await (0, memory_1.parse_vision_messages)(parsedMessages);
- // Add to vector store
- const vectorStoreResult = await this.addToVectorStore(final_parsedMessages, metadata, userId, filters, customFacts);
- // Add to graph store if available
- let graphResult;
- if (this.graphMemory) {
- try {
- graphResult = await this.graphMemory.add(final_parsedMessages.map((m) => m.content).join("\n"), filters);
- }
- catch (error) {
- console.error("Error adding to graph memory:", error);
- }
- }
+ //
+ // Generate the synthesis and store it
+ const vectorStoreResult = await this.addToVectorStore(final_parsedMessages, metadata, userId, filters, capturePrompt);
  return {
  results: vectorStoreResult,
- relations: graphResult?.relations,
  };
  }
  async get(memoryId, userId) {
@@ -324,16 +244,6 @@ class MemoriesLite {
  // Search vector store
  const queryEmbedding = await this.embedder.embed(query);
  const memories = await vectorStore.search(queryEmbedding, limit, filters);
- // Search graph store if available
- let graphResults = [];
- if (this.graphMemory) {
- try {
- graphResults = await this.graphMemory.search(query, filters);
- }
- catch (error) {
- console.error("Error searching graph memory:", error);
- }
- }
  const excludedKeys = new Set([
  "userId",
  "agentId",
@@ -359,7 +269,6 @@ class MemoriesLite {
  }));
  return {
  results,
- relations: graphResults,
  };
  }
  async update(memoryId, data, userId) {
@@ -408,9 +317,6 @@ class MemoriesLite {
  await this.db.reset();
  // Check provider before attempting deleteCol
  await vectorStore.deleteCol();
- if (this.graphMemory) {
- await this.graphMemory.deleteAll({ userId: "default" }); // Assuming this is okay, or needs similar check?
- }
  // Re-initialize factories/clients based on the original config
  this.embedder = factory_1.EmbedderFactory.create(this.config.embedder.provider, this.config.embedder.config);
  this.llm = factory_1.LLMFactory.create(this.config.llm.provider, this.config.llm.config);
@@ -8,8 +8,7 @@ export interface Entity {
  export interface AddMemoryOptions extends Entity {
  metadata?: Record<string, any>;
  filters?: SearchFilters;
- customFacts?: string;
- infer?: boolean;
+ capturePrompt?: string;
  }
  export interface SearchMemoryOptions extends Entity {
  limit?: number;
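`customFacts` and `infer` are gone; `capturePrompt` is now the only per-call knob on `capture()`. A usage sketch of the new option, based on the declarations in this diff; the import path, user ID, messages and prompt text are illustrative.

```typescript
import { MemoriesLite } from "memories-lite";

// Hypothetical call to capture() with the new capturePrompt option.
// Only the method signature and option names come from this diff.
async function captureExample(): Promise<void> {
  const memory = new MemoriesLite();

  const { results } = await memory.capture(
    [
      { role: "user", content: "The build fails on Node 22 with an undici error." },
      { role: "assistant", content: "Pinning undici to 6.x and regenerating the lockfile fixed it." },
    ],
    "user-123",
    {
      // Replaces DEFAULT_DISCUSSION_PROMPT for this call only
      capturePrompt: "Summarize the resolution path in English, 100 words max.",
      metadata: { type: "discussion" },
    },
  );

  // Each result carries the generated summary plus { title, event: "ADD" } metadata
  console.log(results);
}
```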
@@ -1,5 +1,34 @@
  import { z } from "zod";
  import { MemoryItem } from "../types";
+ /**
+ * Schema for the discussion synthesis
+ * Produces a short title and an operational summary
+ */
+ export declare const DiscussionSynthesisSchema: z.ZodObject<{
+ title: z.ZodString;
+ summary: z.ZodString;
+ }, "strip", z.ZodTypeAny, {
+ title: string;
+ summary: string;
+ }, {
+ title: string;
+ summary: string;
+ }>;
+ /**
+ * Default prompt for the discussion synthesis
+ * Can be overridden via capturePrompt in AddMemoryOptions
+ */
+ export declare const DEFAULT_DISCUSSION_PROMPT = "Tu es un expert en synth\u00E8se op\u00E9rationnelle.\n\n\u00C0 partir de cette discussion, g\u00E9n\u00E8re :\n1. TITRE: Un titre court et descriptif (10-20 mots) qui capture l'essence de la demande\n2. SUMMARY: Les points cl\u00E9s du chemin de r\u00E9solution en markdown (max 150 mots)\n\nCette synth\u00E8se servira \u00E0 retrouver et r\u00E9appliquer ce pattern de r\u00E9solution similaire.\nUtilise la m\u00EAme langue que la discussion.\n\nDiscussion \u00E0 synth\u00E9tiser:\n";
+ /**
+ * Generates the messages for the discussion synthesis
+ * @param discussion - Formatted discussion content
+ * @param capturePrompt - Optional custom prompt (replaces DEFAULT_DISCUSSION_PROMPT)
+ * @returns [systemPrompt, userPrompt]
+ */
+ export declare function getDiscussionSynthesisMessages(discussion: string, capturePrompt?: string): [string, string];
+ /**
+ * @deprecated Use DiscussionSynthesisSchema instead
+ */
  export declare const FactRetrievalSchema_simple: z.ZodObject<{
  facts: z.ZodArray<z.ZodString, "many">;
  }, "strip", z.ZodTypeAny, {
@@ -7,33 +36,40 @@ export declare const FactRetrievalSchema_simple: z.ZodObject<{
  }, {
  facts: string[];
  }>;
+ /**
+ * @deprecated Use DiscussionSynthesisSchema instead
+ * The todo and factual types were removed - only assistant_preference remains for compatibility
+ */
  export declare const FactRetrievalSchema_extended: z.ZodObject<{
  facts: z.ZodArray<z.ZodObject<{
  fact: z.ZodString;
  existing: z.ZodBoolean;
- type: z.ZodEnum<["assistant_preference", "factual", "todo"]>;
+ type: z.ZodEnum<["assistant_preference"]>;
  }, "strip", z.ZodTypeAny, {
- type: "todo" | "factual" | "assistant_preference";
+ type: "assistant_preference";
  fact: string;
  existing: boolean;
  }, {
- type: "todo" | "factual" | "assistant_preference";
+ type: "assistant_preference";
  fact: string;
  existing: boolean;
  }>, "many">;
  }, "strip", z.ZodTypeAny, {
  facts: {
- type: "todo" | "factual" | "assistant_preference";
+ type: "assistant_preference";
  fact: string;
  existing: boolean;
  }[];
  }, {
  facts: {
- type: "todo" | "factual" | "assistant_preference";
+ type: "assistant_preference";
  fact: string;
  existing: boolean;
  }[];
  }>;
+ /**
+ * @deprecated Memory updates are disabled - use capture() for new memories
+ */
  export declare const MemoryUpdateSchema: z.ZodObject<{
  memory: z.ZodArray<z.ZodObject<{
  id: z.ZodString;
@@ -41,16 +77,16 @@ export declare const MemoryUpdateSchema: z.ZodObject<{
  event: z.ZodEnum<["ADD", "UPDATE", "DELETE", "NONE"]>;
  old_memory: z.ZodNullable<z.ZodString>;
  reason: z.ZodString;
- type: z.ZodEnum<["factual", "todo", "assistant_preference"]>;
+ type: z.ZodEnum<["assistant_preference"]>;
  }, "strict", z.ZodTypeAny, {
- type: "todo" | "factual" | "assistant_preference";
+ type: "assistant_preference";
  id: string;
  text: string;
  event: "ADD" | "UPDATE" | "DELETE" | "NONE";
  old_memory: string | null;
  reason: string;
  }, {
- type: "todo" | "factual" | "assistant_preference";
+ type: "assistant_preference";
  id: string;
  text: string;
  event: "ADD" | "UPDATE" | "DELETE" | "NONE";
@@ -59,7 +95,7 @@ export declare const MemoryUpdateSchema: z.ZodObject<{
  }>, "many">;
  }, "strip", z.ZodTypeAny, {
  memory: {
- type: "todo" | "factual" | "assistant_preference";
+ type: "assistant_preference";
  id: string;
  text: string;
  event: "ADD" | "UPDATE" | "DELETE" | "NONE";
@@ -68,7 +104,7 @@ export declare const MemoryUpdateSchema: z.ZodObject<{
  }[];
  }, {
  memory: {
- type: "todo" | "factual" | "assistant_preference";
+ type: "assistant_preference";
  id: string;
  text: string;
  event: "ADD" | "UPDATE" | "DELETE" | "NONE";
@@ -76,16 +112,16 @@ export declare const MemoryUpdateSchema: z.ZodObject<{
  reason: string;
  }[];
  }>;
- /**
- * Practical Application:
- *
- * If the task is "factual" (e.g., "Where do I live?", "What's my job?") → retrieve factual memory.
- * If the task is about assistant behavior (e.g., "How should I respond?") → retrieve assistant_preference memory.
- * If the task is a user task/reminder (e.g., "Add a reminder to call the bank tomorrow") → retrieve todo memory.
- */
  export declare const MEMORY_STRING_SYSTEM = "# DIRECTIVES FOR MEMORIES\n- Information stored in memory is always enclosed within the <memories> tag.\n- Prioritize the latest user message over memories (the user's current question is authoritative).\n- Select at most the top-5 relevant memories using cosine similarity and recency; ignore the rest.\n- Adapt your answer based strictly on the <memories> section when relevant.\n- If the memories are irrelevant to the user's query, ignore them.\n- By default, do not reference this section or the memories in your response.\n- Use memories only to guide reasoning; do not respond to the memories themselves.";
  export declare const MEMORY_STRING_PREFIX = "Use these contextual memories to guide your response. Prioritize the user's question. Ignore irrelevant memories.";
+ /**
+ * @deprecated Use getDiscussionSynthesisMessages instead
+ * This function is kept for compatibility with the old system
+ */
  export declare function getFactRetrievalMessages(parsedMessages: string, customRules?: string, defaultLanguage?: string): [string, string];
+ /**
+ * @deprecated Memory updates are disabled by config
+ */
  export declare function getUpdateMemoryMessages(retrievedOldMemory: Array<{
  id: string;
  text: string;