memories-lite 0.9.5 → 0.99.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,13 +2,18 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.DEFAULT_MEMORY_CONFIG = void 0;
  const DEFAULT_SCORING_CONFIG = {
- // Values from memories-lite rule & user request
- procedural: { alpha: 0.30, beta: 0.40, gamma: 0.05, halfLifeDays: 1 / 24 }, // ~1 hour
- episodic: { alpha: 0.40, beta: 0.50, gamma: 0.10, halfLifeDays: 2 }, // ~2 days (user request 'temporary')
- todo: { alpha: 0.40, beta: 0.50, gamma: 0.10, halfLifeDays: 40 }, // ~40 days
- factual: { alpha: 0.70, beta: 0.20, gamma: 0.10, halfLifeDays: 150 }, // ~150 days
+ //
+ // assistant_preference: user preferences for the assistant (style, language, etc.)
  assistant_preference: { alpha: 0.60, beta: 0.05, gamma: 0.35, halfLifeDays: Infinity },
- default: { alpha: 0.5, beta: 0.3, gamma: 0.1, halfLifeDays: 30 } // Fallback default
+ //
+ // discussion: discussion memories (operational syntheses)
+ // - alpha=0: no cosine term in the final score
+ // - beta=1: score based on recency (constant since halfLife=∞)
+ // - gamma=0: no base importance
+ discussion: { alpha: 0, beta: 1, gamma: 0, halfLifeDays: Infinity },
+ //
+ // default: fallback when the type is missing or unknown
+ default: { alpha: 0.5, beta: 0.3, gamma: 0.1, halfLifeDays: 30 }
  };
  exports.DEFAULT_MEMORY_CONFIG = {
  disableHistory: true,
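
Note: each scoring entry pairs three weights with a half-life. The comments on the new `discussion` entry (alpha for cosine similarity, beta for recency, gamma for base importance) suggest a weighted sum with exponential half-life decay on the recency term. The scoring code itself is not part of this diff, so the sketch below is an assumption; `scoreMemory`, `cosine`, `ageDays` and `importance` are illustrative names.

```ts
// Sketch only: how alpha/beta/gamma/halfLifeDays appear to combine, inferred from
// the comments in this hunk (alpha -> cosine, beta -> recency, gamma -> importance).
// Not the package's actual implementation.
interface ScoringWeights {
  alpha: number;        // weight of cosine similarity
  beta: number;         // weight of recency
  gamma: number;        // weight of base importance
  halfLifeDays: number; // Infinity => recency never decays
}

function scoreMemory(
  w: ScoringWeights,
  cosine: number,   // similarity of query vs. memory embedding, 0..1
  ageDays: number,  // age of the memory in days
  importance = 1    // hypothetical base-importance term
): number {
  // Exponential half-life decay; with halfLifeDays = Infinity this stays at 1.
  const recency = Number.isFinite(w.halfLifeDays)
    ? Math.pow(0.5, ageDays / w.halfLifeDays)
    : 1;
  return w.alpha * cosine + w.beta * recency + w.gamma * importance;
}

// With the new 'discussion' weights (alpha=0, beta=1, gamma=0, halfLife=∞),
// every discussion memory scores a constant 1, so ranking effectively falls back
// to recency and the retrieval limit.
const discussion: ScoringWeights = { alpha: 0, beta: 1, gamma: 0, halfLifeDays: Infinity };
console.log(scoreMemory(discussion, 0.42, 300)); // 1
```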
@@ -34,7 +39,7 @@ exports.DEFAULT_MEMORY_CONFIG = {
  provider: "openai",
  config: {
  apiKey: process.env.OPENAI_API_KEY || "",
- model: "gpt-5-nano",
+ model: "gpt-5-mini",
  modelProperties: undefined,
  },
  },
@@ -48,7 +53,7 @@ exports.DEFAULT_MEMORY_CONFIG = {
  llm: {
  provider: "openai",
  config: {
- model: "gpt-5-nano",
+ model: "gpt-5-mini",
  },
  },
  },
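
Note: both default models move from "gpt-5-nano" to "gpt-5-mini". Judging by the provider/config/model shape of DEFAULT_MEMORY_CONFIG shown above, a caller could presumably keep the old model by overriding it in the config passed to `fromConfig`; the `embedder` key name and the import path below are assumptions, not confirmed by this diff.

```ts
// Hedged sketch: pinning the models via user config. The nested provider/config/model
// shape mirrors DEFAULT_MEMORY_CONFIG above; whether fromConfig accepts exactly this
// shape (and the "embedder" key name) is an assumption.
import { MemoriesLite } from "memories-lite"; // import path assumed from the package name

const memories = MemoriesLite.fromConfig({
  embedder: {
    provider: "openai",
    config: { apiKey: process.env.OPENAI_API_KEY || "", model: "gpt-5-nano" },
  },
  llm: {
    provider: "openai",
    config: { model: "gpt-5-nano" },
  },
});
```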
@@ -47,11 +47,8 @@ class ConfigManager {
  dimension: userConf.dimension || defaultConf.dimension,
  // Merge scoring deeply if present in userConf, otherwise use default
  scoring: userConf.scoring ? {
- todo: { ...defaultConf.scoring?.todo, ...userConf.scoring.todo },
- procedural: { ...defaultConf.scoring?.procedural, ...userConf.scoring.procedural },
- episodic: { ...defaultConf.scoring?.episodic, ...userConf.scoring.episodic },
- factual: { ...defaultConf.scoring?.factual, ...userConf.scoring.factual },
  assistant_preference: { ...defaultConf.scoring?.assistant_preference, ...userConf.scoring.assistant_preference },
+ discussion: { ...defaultConf.scoring?.discussion, ...userConf.scoring.discussion },
  default: { ...defaultConf.scoring?.default, ...userConf.scoring.default },
  } : defaultConf.scoring,
  ...userConf, // Include any other passthrough fields from user
@@ -65,11 +62,8 @@ class ConfigManager {
  client: undefined,
  // Merge scoring deeply if present in userConf, otherwise use default
  scoring: userConf?.scoring ? {
- todo: { ...defaultConf.scoring?.todo, ...userConf.scoring.todo },
- procedural: { ...defaultConf.scoring?.procedural, ...userConf.scoring.procedural },
- episodic: { ...defaultConf.scoring?.episodic, ...userConf.scoring.episodic },
- factual: { ...defaultConf.scoring?.factual, ...userConf.scoring.factual },
  assistant_preference: { ...defaultConf.scoring?.assistant_preference, ...userConf.scoring.assistant_preference },
+ discussion: { ...defaultConf.scoring?.discussion, ...userConf.scoring.discussion },
  default: { ...defaultConf.scoring?.default, ...userConf.scoring.default },
  } : defaultConf.scoring,
  recencyCleanupThreshold: userConf?.recencyCleanupThreshold ?? defaultConf.recencyCleanupThreshold, // Merge cleanup threshold
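
Note: both ConfigManager merge sites now deep-merge only `assistant_preference`, `discussion` and `default`; the removed per-type entries (`todo`, `procedural`, `episodic`, `factual`) no longer get an explicit merge. A small illustration of what the spread-based merge does to a partial user override (the override values are hypothetical):

```ts
// Illustration of the spread-based merge above: per-field user values win,
// and only assistant_preference, discussion and default are merged explicitly.
type TypeScoring = { alpha?: number; beta?: number; gamma?: number; halfLifeDays?: number };

const defaultScoring: Record<string, TypeScoring> = {
  assistant_preference: { alpha: 0.60, beta: 0.05, gamma: 0.35, halfLifeDays: Infinity },
  discussion: { alpha: 0, beta: 1, gamma: 0, halfLifeDays: Infinity },
  default: { alpha: 0.5, beta: 0.3, gamma: 0.1, halfLifeDays: 30 },
};

// Hypothetical partial override supplied by the user
const userScoring: Record<string, TypeScoring> = {
  discussion: { halfLifeDays: 90 },
  default: { alpha: 0.7 },
};

const merged = {
  assistant_preference: { ...defaultScoring.assistant_preference, ...userScoring.assistant_preference },
  discussion: { ...defaultScoring.discussion, ...userScoring.discussion },
  default: { ...defaultScoring.default, ...userScoring.default },
};
// merged.discussion -> { alpha: 0, beta: 1, gamma: 0, halfLifeDays: 90 }
// merged.default    -> { alpha: 0.7, beta: 0.3, gamma: 0.1, halfLifeDays: 30 }
```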
@@ -18,9 +18,20 @@ export declare class MemoriesLite {
  private _getTelemetryId;
  private _captureEvent;
  private $t;
+ /**
+ * Captures a discussion and generates a synthesis (title + summary)
+ * Uses getDiscussionSynthesisMessages to produce the synthesis
+ */
  private addToVectorStore;
  static fromConfig(configDict: Record<string, any>): MemoriesLite;
  getVectorStore(userId: string): Promise<VectorStore>;
+ /**
+ * Captures a discussion and generates a memory (title + summary)
+ *
+ * @param messages - Discussion messages or raw text
+ * @param userId - User ID
+ * @param config - Options, including capturePrompt to customize the synthesis
+ */
  capture(messages: string | Message[], userId: string, config: AddMemoryOptions): Promise<SearchResult>;
  get(memoryId: string, userId: string): Promise<MemoryItem | null>;
  retrieve(query: string, userId: string, config: SearchMemoryOptions): Promise<SearchResult>;
@@ -93,132 +93,68 @@ class MemoriesLite {
  // return text.replace(/<thinking>[\s\S]*?(?:<\/thinking>)/g,'').replace(/^<step.*$/g,'');
  return text.replace(/<thinking>[\s\S]*?<\/thinking>/g, '').replace(/^<step.*$/gm, '').replace(/<memories>[\s\S]*?<\/memories>/g, '');
  }
- async addToVectorStore(messages, metadata, userId, filters, customFacts) {
+ /**
+ * Captures a discussion and generates a synthesis (title + summary)
+ * Uses getDiscussionSynthesisMessages to produce the synthesis
+ */
+ async addToVectorStore(messages, metadata, userId, filters, capturePrompt) {
  const $t = this.$t;
  const vectorStore = await this.getVectorStore(userId);
- const parsedMessages = messages.filter((m) => typeof m.content === 'string' && m.role == 'user').map((m) => `${m.role == 'user' ? '**USER**: ' : '**ASSISTANT**: '}${$t(m.content)}\n`).join("\n");
- // Disinterest handling is delegated to the LLM via prompt guidelines
- const [systemPrompt, userPrompt] = (0, prompts_1.getFactRetrievalMessages)(parsedMessages, customFacts || this.customPrompt);
+ //
+ // Format the messages for the synthesis
+ const parsedMessages = messages
+ .filter((m) => typeof m.content === 'string')
+ .map((m) => `**${m.role.toUpperCase()}**: ${$t(m.content)}`)
+ .join("\n\n");
+ //
+ // Generate the synthesis via the LLM
+ const [systemPrompt, userPrompt] = (0, prompts_1.getDiscussionSynthesisMessages)(parsedMessages, capturePrompt || this.customPrompt);
  const response = await this.llm.generateResponse([
  { role: "system", content: systemPrompt },
  { role: "user", content: userPrompt },
- ], { ...(0, zod_1.zodResponseFormat)(prompts_1.FactRetrievalSchema_extended, "FactRetrieval") }, [], false);
- const parsedResponse = (response) => {
+ ], { ...(0, zod_1.zodResponseFormat)(prompts_1.DiscussionSynthesisSchema, "DiscussionSynthesis") }, [], false);
+ //
+ // Parse the response
+ const parsedResponse = (res) => {
  try {
- // structured output
- if (typeof response === 'object') {
- return response;
+ if (typeof res === 'object') {
+ return res;
  }
- const cleanResponse = (0, prompts_1.removeCodeBlocks)(response);
+ const cleanResponse = (0, prompts_1.removeCodeBlocks)(res);
  return JSON.parse(cleanResponse);
  }
  catch (e) {
- console.error("Failed to parse facts from LLM response:", response, e, response);
- return [];
+ console.error("Failed to parse synthesis from LLM response:", res, e);
+ return { title: "Sans titre", summary: "" };
  }
  };
- //
- // can use native structured output
- // Drop factual facts at capture level (do not store factual memories)
- // FIXME Drop factual should be done at prompt level
- const facts = parsedResponse(response).facts?.filter((f) => !f.existing && f.type !== 'factual') || [];
- // console.log("-- DBG extract:", userPrompt);
- // console.log("-- DBG facts:", facts);
- // Get embeddings for new facts
- const newMessageEmbeddings = {};
- const retrievedOldMemory = [];
- //
- // add the userId to the filters
- filters.userId = userId;
- // Create embeddings and search for similar memories
- for (const elem of facts) {
- const fact = elem.fact;
- const embedding = await this.embedder.embed(fact);
- newMessageEmbeddings[fact] = embedding;
- const existingMemories = await vectorStore.search(embedding, 5, filters);
- for (const mem of existingMemories) {
- retrievedOldMemory.push({ id: mem.id, text: mem.payload.data, type: mem.payload.type });
- }
+ const { title, summary } = parsedResponse(response);
+ if (!summary) {
+ console.warn("-- ⚠️ Empty summary from LLM, skipping memory creation");
+ return [];
  }
- // console.log("-- DBG old memories:", retrievedOldMemory);
- // Remove duplicates from old memories
- const uniqueOldMemories = retrievedOldMemory.filter((mem, index) => retrievedOldMemory.findIndex((m) => m.id === mem.id) === index);
- // Create UUID mapping for handling UUID hallucinations
- const tempUuidMapping = {};
- uniqueOldMemories.forEach((item, idx) => {
- tempUuidMapping[String(idx)] = item.id;
- uniqueOldMemories[idx].id = String(idx);
- });
- // Get memory update decisions
- const lastUserMessage = [...messages].reverse().find(m => m.role === 'user');
- const userInstruction = typeof lastUserMessage?.content === 'string' ? lastUserMessage?.content : '';
- const updatePrompt = (0, prompts_1.getUpdateMemoryMessages)(uniqueOldMemories, facts, 'French', userInstruction);
- const updateResponse = await this.llm.generateResponse([{ role: "user", content: updatePrompt }], { ...(0, zod_1.zodResponseFormat)(prompts_1.MemoryUpdateSchema, "Memory") }, [], false);
- // console.log("-- DBG merge:", updatePrompt);
- const memoryActions = parsedResponse(updateResponse).memory || [];
- // Process memory actions
- const results = [];
- for (const action of memoryActions) {
- // Ignore any factual memory actions (ADD/UPDATE/DELETE) → void
- if (action.type === 'factual') {
- continue;
- }
- if (action.reason === "undefined") {
- console.log(`-- ⛔ LLM Error: ${action.event}, ${action.type}, "${action.text}"`);
- continue;
- }
- console.log(`-- DBG memory "${userId}": ${action.event}, ${action.type}, "${action.text}", why: "${action.reason}"`);
- try {
- switch (action.event) {
- case "ADD": {
- if (!action.type) {
- // log error
- console.error("Type is mandatory to manage memories:", action);
- continue;
- }
- metadata.type = action.type;
- const memoryId = await this.createMemory(action.text, newMessageEmbeddings, metadata, userId);
- results.push({
- id: memoryId,
- memory: action.text,
- type: action.type,
- metadata: { event: action.event },
- });
- break;
- }
- case "UPDATE": {
- const realMemoryId = tempUuidMapping[action.id];
- const type = metadata.type = uniqueOldMemories[action.id].type || action.type;
- await this.updateMemory(realMemoryId, action.text, newMessageEmbeddings, metadata, userId);
- results.push({
- id: realMemoryId,
- memory: action.text,
- type,
- metadata: {
- event: action.event,
- previousMemory: action.old_memory,
- },
- });
- break;
- }
- case "DELETE": {
- const realMemoryId = tempUuidMapping[action.id];
- await this.deleteMemory(realMemoryId, userId);
- results.push({
- id: realMemoryId,
- memory: action.text,
- type: action.type,
- metadata: { event: action.event },
- });
- break;
- }
- }
- }
- catch (error) {
- console.error(`Error processing memory action: ${error}`);
- }
- }
- return results;
+ //
+ // Create the embedding from the summary (for semantic search)
+ const embedding = await this.embedder.embed(summary);
+ //
+ // Prepare the metadata
+ const memoryType = metadata.type || 'discussion';
+ const memoryMetadata = {
+ ...metadata,
+ title,
+ type: memoryType,
+ userId,
+ };
+ //
+ // Store the memory
+ const memoryId = await this.createMemory(summary, { [summary]: embedding }, memoryMetadata, userId);
+ console.log(`-- 🧠 Memory created: "${title}" (${memoryType})`);
+ return [{
+ id: memoryId,
+ memory: summary,
+ type: memoryType,
+ metadata: { title, event: "ADD" },
+ }];
  }
  static fromConfig(configDict) {
  try {
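
Note: the rewritten addToVectorStore drops fact extraction, similarity search and ADD/UPDATE/DELETE decisions in favor of a single structured-output call that returns `{ title, summary }`, embeds the summary and stores one memory. A self-contained sketch of that response shape, re-declaring DiscussionSynthesisSchema with zod (the real declaration appears later in this diff) and mirroring the fallback behaviour above:

```ts
// Sketch: the structured output the new addToVectorStore expects.
// DiscussionSynthesisSchema is re-declared here so the example stands alone.
import { z } from "zod";

const DiscussionSynthesisSchema = z.object({
  title: z.string(),   // short descriptive title
  summary: z.string(), // operational summary that gets embedded and stored
});
type DiscussionSynthesis = z.infer<typeof DiscussionSynthesisSchema>;

// Mirrors parsedResponse() above: accept an already-parsed object, otherwise
// parse JSON, and fall back to an empty synthesis on failure.
function parseSynthesis(raw: unknown): DiscussionSynthesis {
  try {
    const candidate = typeof raw === "object" && raw !== null ? raw : JSON.parse(String(raw));
    return DiscussionSynthesisSchema.parse(candidate);
  } catch (e) {
    console.error("Failed to parse synthesis from LLM response:", raw, e);
    return { title: "Sans titre", summary: "" };
  }
}

// An empty summary means no memory is created, matching the early return above.
const synthesis = parseSynthesis('{"title":"Fix CI cache key","summary":"Bumping the cache key resolved the stale build."}');
console.log(synthesis.title); // "Fix CI cache key"
```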
@@ -233,14 +169,15 @@ class MemoriesLite {
  async getVectorStore(userId) {
  return lite_1.LiteVectorStore.from(userId, this.vectorStoreConfig);
  }
+ /**
+ * Captures a discussion and generates a memory (title + summary)
+ *
+ * @param messages - Discussion messages or raw text
+ * @param userId - User ID
+ * @param config - Options, including capturePrompt to customize the synthesis
+ */
  async capture(messages, userId, config) {
- // await this._captureEvent("add", {
- // message_count: Array.isArray(messages) ? messages.length : 1,
- // has_metadata: !!config.metadata,
- // has_filters: !!config.filters,
- // infer: config.infer,
- // });
- const { agentId, runId, metadata = {}, filters = {}, infer = true, customFacts } = config;
+ const { agentId, runId, metadata = {}, filters = {}, capturePrompt } = config;
  if (agentId)
  filters.agentId = metadata.agentId = agentId;
  if (runId)
@@ -252,9 +189,11 @@ class MemoriesLite {
  ? messages
  : [{ role: "user", content: messages }];
  const final_parsedMessages = await (0, memory_1.parse_vision_messages)(parsedMessages);
- // Add to vector store
- const vectorStoreResult = await this.addToVectorStore(final_parsedMessages, metadata, userId, filters, customFacts);
- // Add to graph store if available
+ //
+ // Generate the synthesis and store it
+ const vectorStoreResult = await this.addToVectorStore(final_parsedMessages, metadata, userId, filters, capturePrompt);
+ //
+ // Graph store (if configured)
  let graphResult;
  if (this.graphMemory) {
  try {
@@ -8,8 +8,7 @@ export interface Entity {
  export interface AddMemoryOptions extends Entity {
  metadata?: Record<string, any>;
  filters?: SearchFilters;
- customFacts?: string;
- infer?: boolean;
+ capturePrompt?: string;
  }
  export interface SearchMemoryOptions extends Entity {
  limit?: number;
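
Note: AddMemoryOptions drops `customFacts` and `infer` in favor of a single optional `capturePrompt`. A hedged usage sketch of the 0.99.x capture() surface (the import path and the empty `fromConfig({})` call are assumptions based on the declarations in this diff):

```ts
// Usage sketch of the new capture() options: capturePrompt optionally replaces
// DEFAULT_DISCUSSION_PROMPT when generating the title/summary synthesis.
import { MemoriesLite } from "memories-lite"; // import path assumed from the package name

const memories = MemoriesLite.fromConfig({}); // assumed to fall back to DEFAULT_MEMORY_CONFIG

const result = await memories.capture(
  [
    { role: "user", content: "The deploy fails with a missing env var." },
    { role: "assistant", content: "Setting API_URL in the CI secrets fixed it." },
  ],
  "user-123",
  {
    metadata: { type: "discussion" },            // 'discussion' is also the default type
    capturePrompt: "Summarize the resolution path in under 80 words.", // optional
  }
);
// result carries the stored summary plus { title, event: "ADD" } metadata.
```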
@@ -1,5 +1,34 @@
  import { z } from "zod";
  import { MemoryItem } from "../types";
+ /**
+ * Schema for the discussion synthesis
+ * Produces a short title and an operational summary
+ */
+ export declare const DiscussionSynthesisSchema: z.ZodObject<{
+ title: z.ZodString;
+ summary: z.ZodString;
+ }, "strip", z.ZodTypeAny, {
+ title: string;
+ summary: string;
+ }, {
+ title: string;
+ summary: string;
+ }>;
+ /**
+ * Default prompt for the discussion synthesis
+ * Can be overridden via capturePrompt in AddMemoryOptions
+ */
+ export declare const DEFAULT_DISCUSSION_PROMPT = "Tu es un expert en synth\u00E8se op\u00E9rationnelle.\n\n\u00C0 partir de cette discussion, g\u00E9n\u00E8re :\n1. TITRE: Un titre court et descriptif (6-10 mots) qui capture l'essence de la demande\n2. SUMMARY: Les points cl\u00E9s du chemin de r\u00E9solution (50-100 mots)\n\nCette synth\u00E8se servira \u00E0 retrouver et r\u00E9appliquer ce pattern de r\u00E9solution similaire.\nUtilise la m\u00EAme langue que la discussion.\n\nDiscussion \u00E0 synth\u00E9tiser:\n";
+ /**
+ * Generates the messages for the discussion synthesis
+ * @param discussion - Formatted discussion content
+ * @param capturePrompt - Optional custom prompt (replaces DEFAULT_DISCUSSION_PROMPT)
+ * @returns [systemPrompt, userPrompt]
+ */
+ export declare function getDiscussionSynthesisMessages(discussion: string, capturePrompt?: string): [string, string];
+ /**
+ * @deprecated Use DiscussionSynthesisSchema instead
+ */
  export declare const FactRetrievalSchema_simple: z.ZodObject<{
  facts: z.ZodArray<z.ZodString, "many">;
  }, "strip", z.ZodTypeAny, {
@@ -7,33 +36,40 @@ export declare const FactRetrievalSchema_simple: z.ZodObject<{
  }, {
  facts: string[];
  }>;
+ /**
+ * @deprecated Use DiscussionSynthesisSchema instead
+ * The todo and factual types were removed - only assistant_preference remains for compatibility
+ */
  export declare const FactRetrievalSchema_extended: z.ZodObject<{
  facts: z.ZodArray<z.ZodObject<{
  fact: z.ZodString;
  existing: z.ZodBoolean;
- type: z.ZodEnum<["assistant_preference", "factual", "episodic", "procedural", "todo"]>;
+ type: z.ZodEnum<["assistant_preference"]>;
  }, "strip", z.ZodTypeAny, {
- type: "procedural" | "todo" | "episodic" | "factual" | "assistant_preference";
+ type: "assistant_preference";
  fact: string;
  existing: boolean;
  }, {
- type: "procedural" | "todo" | "episodic" | "factual" | "assistant_preference";
+ type: "assistant_preference";
  fact: string;
  existing: boolean;
  }>, "many">;
  }, "strip", z.ZodTypeAny, {
  facts: {
- type: "procedural" | "todo" | "episodic" | "factual" | "assistant_preference";
+ type: "assistant_preference";
  fact: string;
  existing: boolean;
  }[];
  }, {
  facts: {
- type: "procedural" | "todo" | "episodic" | "factual" | "assistant_preference";
+ type: "assistant_preference";
  fact: string;
  existing: boolean;
  }[];
  }>;
+ /**
+ * @deprecated Memory updates are disabled - use capture() for new memories
+ */
  export declare const MemoryUpdateSchema: z.ZodObject<{
  memory: z.ZodArray<z.ZodObject<{
  id: z.ZodString;
@@ -41,16 +77,16 @@ export declare const MemoryUpdateSchema: z.ZodObject<{
  event: z.ZodEnum<["ADD", "UPDATE", "DELETE", "NONE"]>;
  old_memory: z.ZodNullable<z.ZodString>;
  reason: z.ZodString;
- type: z.ZodEnum<["factual", "episodic", "todo", "procedural", "assistant_preference"]>;
+ type: z.ZodEnum<["assistant_preference"]>;
  }, "strict", z.ZodTypeAny, {
- type: "procedural" | "todo" | "episodic" | "factual" | "assistant_preference";
+ type: "assistant_preference";
  id: string;
  text: string;
  event: "ADD" | "UPDATE" | "DELETE" | "NONE";
  old_memory: string | null;
  reason: string;
  }, {
- type: "procedural" | "todo" | "episodic" | "factual" | "assistant_preference";
+ type: "assistant_preference";
  id: string;
  text: string;
  event: "ADD" | "UPDATE" | "DELETE" | "NONE";
@@ -59,7 +95,7 @@ export declare const MemoryUpdateSchema: z.ZodObject<{
  }>, "many">;
  }, "strip", z.ZodTypeAny, {
  memory: {
- type: "procedural" | "todo" | "episodic" | "factual" | "assistant_preference";
+ type: "assistant_preference";
  id: string;
  text: string;
  event: "ADD" | "UPDATE" | "DELETE" | "NONE";
@@ -68,7 +104,7 @@ export declare const MemoryUpdateSchema: z.ZodObject<{
  }[];
  }, {
  memory: {
- type: "procedural" | "todo" | "episodic" | "factual" | "assistant_preference";
+ type: "assistant_preference";
  id: string;
  text: string;
  event: "ADD" | "UPDATE" | "DELETE" | "NONE";
@@ -76,17 +112,16 @@ export declare const MemoryUpdateSchema: z.ZodObject<{
  reason: string;
  }[];
  }>;
- /**
- * Practical Application:
- *
- * If the task is "factual" (e.g., "Where do I live?") → retrieve factual memory.
- * If the task is temporal or event-based ("What was I doing yesterday?") → retrieve episodic memory.
- * If the task is a user task/reminder (e.g., "Add a reminder to call the bank tomorrow") → retrieve todo memory.
- */
  export declare const MEMORY_STRING_SYSTEM = "# DIRECTIVES FOR MEMORIES\n- Information stored in memory is always enclosed within the <memories> tag.\n- Prioritize the latest user message over memories (the user's current question is authoritative).\n- Select at most the top-5 relevant memories using cosine similarity and recency; ignore the rest.\n- Adapt your answer based strictly on the <memories> section when relevant.\n- If the memories are irrelevant to the user's query, ignore them.\n- By default, do not reference this section or the memories in your response.\n- Use memories only to guide reasoning; do not respond to the memories themselves.";
  export declare const MEMORY_STRING_PREFIX = "Use these contextual memories to guide your response. Prioritize the user's question. Ignore irrelevant memories.";
- export declare const MEMORY_STRING_SYSTEM_OLD = "# USER AND MEMORIES PREFERENCES:\n- Utilize the provided memories to guide your responses.\n- Disregard any memories that are not relevant.\n- By default, do not reference this section or the memories in your response.\n";
+ /**
+ * @deprecated Use getDiscussionSynthesisMessages instead
+ * This function is kept for compatibility with the legacy system
+ */
  export declare function getFactRetrievalMessages(parsedMessages: string, customRules?: string, defaultLanguage?: string): [string, string];
+ /**
+ * @deprecated Memory updates are disabled by config
+ */
  export declare function getUpdateMemoryMessages(retrievedOldMemory: Array<{
  id: string;
  text: string;
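
Note: MEMORY_STRING_SYSTEM states that memories are always enclosed in a <memories> tag, MEMORY_STRING_PREFIX is the short framing line, and the _OLD variant is removed. How the library wires these constants into a chat prompt is not shown in this diff, so the following is only a sketch and the export path is assumed:

```ts
// Sketch only: injecting retrieved memories into a system prompt using the
// exported constants. The exact wiring inside memories-lite is not part of this diff.
import { MEMORY_STRING_SYSTEM, MEMORY_STRING_PREFIX } from "memories-lite"; // export path assumed

function buildSystemPrompt(basePrompt: string, memories: string[]): string {
  if (memories.length === 0) return basePrompt;
  const block = memories.map((m) => `- ${m}`).join("\n");
  // Wrap the memories in the <memories> tag the directives refer to.
  return [
    basePrompt,
    MEMORY_STRING_SYSTEM,
    `${MEMORY_STRING_PREFIX}\n<memories>\n${block}\n</memories>`,
  ].join("\n\n");
}
```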