mem0ai 2.1.8 → 2.1.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions exactly as they appear in their respective public registries.
@@ -12,8 +12,9 @@ interface Message {
12
12
  content: string | MultiModalMessages;
13
13
  }
14
14
  interface EmbeddingConfig {
15
- apiKey: string;
15
+ apiKey?: string;
16
16
  model?: string;
17
+ url?: string;
17
18
  }
18
19
  interface VectorStoreConfig {
19
20
  collectionName: string;
@@ -366,6 +367,16 @@ declare class OpenAIEmbedder implements Embedder {
366
367
  embedBatch(texts: string[]): Promise<number[][]>;
367
368
  }
368
369
 
370
+ declare class OllamaEmbedder implements Embedder {
371
+ private ollama;
372
+ private model;
373
+ private initialized;
374
+ constructor(config: EmbeddingConfig);
375
+ embed(text: string): Promise<number[]>;
376
+ embedBatch(texts: string[]): Promise<number[][]>;
377
+ private ensureModelExists;
378
+ }
379
+
369
380
  interface LLMResponse {
370
381
  content: string;
371
382
  role: string;
@@ -424,6 +435,18 @@ declare class GroqLLM implements LLM {
424
435
  generateChat(messages: Message[]): Promise<LLMResponse>;
425
436
  }
426
437
 
438
+ declare class OllamaLLM implements LLM {
439
+ private ollama;
440
+ private model;
441
+ private initialized;
442
+ constructor(config: LLMConfig);
443
+ generateResponse(messages: Message[], responseFormat?: {
444
+ type: string;
445
+ }, tools?: any[]): Promise<string | LLMResponse>;
446
+ generateChat(messages: Message[]): Promise<LLMResponse>;
447
+ private ensureModelExists;
448
+ }
449
+
427
450
  interface VectorStore {
428
451
  insert(vectors: number[][], ids: string[], payloads: Record<string, any>[]): Promise<void>;
429
452
  search(query: number[], limit?: number, filters?: SearchFilters): Promise<VectorStoreResult[]>;
@@ -515,4 +538,4 @@ declare class VectorStoreFactory {
515
538
  static create(provider: string, config: VectorStoreConfig): VectorStore;
516
539
  }
517
540
 
518
- export { type AddMemoryOptions, AnthropicLLM, type DeleteAllMemoryOptions, type Embedder, EmbedderFactory, type EmbeddingConfig, type Entity, type GetAllMemoryOptions, type GraphStoreConfig, GroqLLM, type LLM, type LLMConfig, LLMFactory, type LLMResponse, Memory, type MemoryConfig, MemoryConfigSchema, type MemoryItem, MemoryVectorStore, type Message, type MultiModalMessages, type Neo4jConfig, OpenAIEmbedder, OpenAILLM, OpenAIStructuredLLM, Qdrant, RedisDB, type SearchFilters, type SearchMemoryOptions, type SearchResult, type VectorStore, type VectorStoreConfig, VectorStoreFactory, type VectorStoreResult };
541
+ export { type AddMemoryOptions, AnthropicLLM, type DeleteAllMemoryOptions, type Embedder, EmbedderFactory, type EmbeddingConfig, type Entity, type GetAllMemoryOptions, type GraphStoreConfig, GroqLLM, type LLM, type LLMConfig, LLMFactory, type LLMResponse, Memory, type MemoryConfig, MemoryConfigSchema, type MemoryItem, MemoryVectorStore, type Message, type MultiModalMessages, type Neo4jConfig, OllamaEmbedder, OllamaLLM, OpenAIEmbedder, OpenAILLM, OpenAIStructuredLLM, Qdrant, RedisDB, type SearchFilters, type SearchMemoryOptions, type SearchResult, type VectorStore, type VectorStoreConfig, VectorStoreFactory, type VectorStoreResult };
@@ -12,8 +12,9 @@ interface Message {
12
12
  content: string | MultiModalMessages;
13
13
  }
14
14
  interface EmbeddingConfig {
15
- apiKey: string;
15
+ apiKey?: string;
16
16
  model?: string;
17
+ url?: string;
17
18
  }
18
19
  interface VectorStoreConfig {
19
20
  collectionName: string;
@@ -366,6 +367,16 @@ declare class OpenAIEmbedder implements Embedder {
366
367
  embedBatch(texts: string[]): Promise<number[][]>;
367
368
  }
368
369
 
370
+ declare class OllamaEmbedder implements Embedder {
371
+ private ollama;
372
+ private model;
373
+ private initialized;
374
+ constructor(config: EmbeddingConfig);
375
+ embed(text: string): Promise<number[]>;
376
+ embedBatch(texts: string[]): Promise<number[][]>;
377
+ private ensureModelExists;
378
+ }
379
+
369
380
  interface LLMResponse {
370
381
  content: string;
371
382
  role: string;
@@ -424,6 +435,18 @@ declare class GroqLLM implements LLM {
424
435
  generateChat(messages: Message[]): Promise<LLMResponse>;
425
436
  }
426
437
 
438
+ declare class OllamaLLM implements LLM {
439
+ private ollama;
440
+ private model;
441
+ private initialized;
442
+ constructor(config: LLMConfig);
443
+ generateResponse(messages: Message[], responseFormat?: {
444
+ type: string;
445
+ }, tools?: any[]): Promise<string | LLMResponse>;
446
+ generateChat(messages: Message[]): Promise<LLMResponse>;
447
+ private ensureModelExists;
448
+ }
449
+
427
450
  interface VectorStore {
428
451
  insert(vectors: number[][], ids: string[], payloads: Record<string, any>[]): Promise<void>;
429
452
  search(query: number[], limit?: number, filters?: SearchFilters): Promise<VectorStoreResult[]>;
@@ -515,4 +538,4 @@ declare class VectorStoreFactory {
515
538
  static create(provider: string, config: VectorStoreConfig): VectorStore;
516
539
  }
517
540
 
518
- export { type AddMemoryOptions, AnthropicLLM, type DeleteAllMemoryOptions, type Embedder, EmbedderFactory, type EmbeddingConfig, type Entity, type GetAllMemoryOptions, type GraphStoreConfig, GroqLLM, type LLM, type LLMConfig, LLMFactory, type LLMResponse, Memory, type MemoryConfig, MemoryConfigSchema, type MemoryItem, MemoryVectorStore, type Message, type MultiModalMessages, type Neo4jConfig, OpenAIEmbedder, OpenAILLM, OpenAIStructuredLLM, Qdrant, RedisDB, type SearchFilters, type SearchMemoryOptions, type SearchResult, type VectorStore, type VectorStoreConfig, VectorStoreFactory, type VectorStoreResult };
541
+ export { type AddMemoryOptions, AnthropicLLM, type DeleteAllMemoryOptions, type Embedder, EmbedderFactory, type EmbeddingConfig, type Entity, type GetAllMemoryOptions, type GraphStoreConfig, GroqLLM, type LLM, type LLMConfig, LLMFactory, type LLMResponse, Memory, type MemoryConfig, MemoryConfigSchema, type MemoryItem, MemoryVectorStore, type Message, type MultiModalMessages, type Neo4jConfig, OllamaEmbedder, OllamaLLM, OpenAIEmbedder, OpenAILLM, OpenAIStructuredLLM, Qdrant, RedisDB, type SearchFilters, type SearchMemoryOptions, type SearchResult, type VectorStore, type VectorStoreConfig, VectorStoreFactory, type VectorStoreResult };
package/dist/oss/index.js CHANGED
@@ -37,6 +37,8 @@ __export(index_exports, {
37
37
  Memory: () => Memory,
38
38
  MemoryConfigSchema: () => MemoryConfigSchema,
39
39
  MemoryVectorStore: () => MemoryVectorStore,
40
+ OllamaEmbedder: () => OllamaEmbedder,
41
+ OllamaLLM: () => OllamaLLM,
40
42
  OpenAIEmbedder: () => OpenAIEmbedder,
41
43
  OpenAILLM: () => OpenAILLM,
42
44
  OpenAIStructuredLLM: () => OpenAIStructuredLLM,
@@ -116,6 +118,60 @@ var OpenAIEmbedder = class {
116
118
  }
117
119
  };
118
120
 
121
+ // src/oss/src/embeddings/ollama.ts
122
+ var import_ollama = require("ollama");
123
+
124
+ // src/oss/src/utils/logger.ts
125
+ var logger = {
126
+ info: (message) => console.log(`[INFO] ${message}`),
127
+ error: (message) => console.error(`[ERROR] ${message}`),
128
+ debug: (message) => console.debug(`[DEBUG] ${message}`),
129
+ warn: (message) => console.warn(`[WARN] ${message}`)
130
+ };
131
+
132
+ // src/oss/src/embeddings/ollama.ts
133
+ var OllamaEmbedder = class {
134
+ constructor(config) {
135
+ // Using this variable to avoid calling the Ollama server multiple times
136
+ this.initialized = false;
137
+ this.ollama = new import_ollama.Ollama({
138
+ host: config.url || "http://localhost:11434"
139
+ });
140
+ this.model = config.model || "nomic-embed-text:latest";
141
+ this.ensureModelExists().catch((err) => {
142
+ logger.error(`Error ensuring model exists: ${err}`);
143
+ });
144
+ }
145
+ async embed(text) {
146
+ try {
147
+ await this.ensureModelExists();
148
+ } catch (err) {
149
+ logger.error(`Error ensuring model exists: ${err}`);
150
+ }
151
+ const response = await this.ollama.embeddings({
152
+ model: this.model,
153
+ prompt: text
154
+ });
155
+ return response.embedding;
156
+ }
157
+ async embedBatch(texts) {
158
+ const response = await Promise.all(texts.map((text) => this.embed(text)));
159
+ return response;
160
+ }
161
+ async ensureModelExists() {
162
+ if (this.initialized) {
163
+ return true;
164
+ }
165
+ const local_models = await this.ollama.list();
166
+ if (!local_models.models.find((m) => m.name === this.model)) {
167
+ logger.info(`Pulling model ${this.model}...`);
168
+ await this.ollama.pull({ model: this.model });
169
+ }
170
+ this.initialized = true;
171
+ return true;
172
+ }
173
+ };
174
+
119
175
  // src/oss/src/llms/openai.ts
120
176
  var import_openai2 = __toESM(require("openai"));
121
177
  var OpenAILLM = class {
@@ -1073,12 +1129,243 @@ var RedisDB = class {
1073
1129
  }
1074
1130
  };
1075
1131
 
1132
+ // src/oss/src/llms/ollama.ts
1133
+ var import_ollama2 = require("ollama");
1134
+ var OllamaLLM = class {
1135
+ constructor(config) {
1136
+ // Using this variable to avoid calling the Ollama server multiple times
1137
+ this.initialized = false;
1138
+ var _a;
1139
+ this.ollama = new import_ollama2.Ollama({
1140
+ host: ((_a = config.config) == null ? void 0 : _a.url) || "http://localhost:11434"
1141
+ });
1142
+ this.model = config.model || "llama3.1:8b";
1143
+ this.ensureModelExists().catch((err) => {
1144
+ logger.error(`Error ensuring model exists: ${err}`);
1145
+ });
1146
+ }
1147
+ async generateResponse(messages, responseFormat, tools) {
1148
+ try {
1149
+ await this.ensureModelExists();
1150
+ } catch (err) {
1151
+ logger.error(`Error ensuring model exists: ${err}`);
1152
+ }
1153
+ const completion = await this.ollama.chat({
1154
+ model: this.model,
1155
+ messages: messages.map((msg) => {
1156
+ const role = msg.role;
1157
+ return {
1158
+ role,
1159
+ content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content)
1160
+ };
1161
+ }),
1162
+ ...(responseFormat == null ? void 0 : responseFormat.type) === "json_object" && { format: "json" },
1163
+ ...tools && { tools, tool_choice: "auto" }
1164
+ });
1165
+ const response = completion.message;
1166
+ if (response.tool_calls) {
1167
+ return {
1168
+ content: response.content || "",
1169
+ role: response.role,
1170
+ toolCalls: response.tool_calls.map((call) => ({
1171
+ name: call.function.name,
1172
+ arguments: JSON.stringify(call.function.arguments)
1173
+ }))
1174
+ };
1175
+ }
1176
+ return response.content || "";
1177
+ }
1178
+ async generateChat(messages) {
1179
+ try {
1180
+ await this.ensureModelExists();
1181
+ } catch (err) {
1182
+ logger.error(`Error ensuring model exists: ${err}`);
1183
+ }
1184
+ const completion = await this.ollama.chat({
1185
+ messages: messages.map((msg) => {
1186
+ const role = msg.role;
1187
+ return {
1188
+ role,
1189
+ content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content)
1190
+ };
1191
+ }),
1192
+ model: this.model
1193
+ });
1194
+ const response = completion.message;
1195
+ return {
1196
+ content: response.content || "",
1197
+ role: response.role
1198
+ };
1199
+ }
1200
+ async ensureModelExists() {
1201
+ if (this.initialized) {
1202
+ return true;
1203
+ }
1204
+ const local_models = await this.ollama.list();
1205
+ if (!local_models.models.find((m) => m.name === this.model)) {
1206
+ logger.info(`Pulling model ${this.model}...`);
1207
+ await this.ollama.pull({ model: this.model });
1208
+ }
1209
+ this.initialized = true;
1210
+ return true;
1211
+ }
1212
+ };
1213
+
1214
+ // src/oss/src/vector_stores/supabase.ts
1215
+ var import_supabase_js = require("@supabase/supabase-js");
1216
+ var SupabaseDB = class {
1217
+ constructor(config) {
1218
+ this.client = (0, import_supabase_js.createClient)(config.supabaseUrl, config.supabaseKey);
1219
+ this.tableName = config.tableName;
1220
+ this.embeddingColumnName = config.embeddingColumnName || "embedding";
1221
+ this.metadataColumnName = config.metadataColumnName || "metadata";
1222
+ this.initialize().catch((err) => {
1223
+ console.error("Failed to initialize Supabase:", err);
1224
+ throw err;
1225
+ });
1226
+ }
1227
+ async initialize() {
1228
+ try {
1229
+ const testVector = Array(1536).fill(0);
1230
+ const { error: testError } = await this.client.from(this.tableName).insert({
1231
+ id: "test_vector",
1232
+ [this.embeddingColumnName]: testVector,
1233
+ [this.metadataColumnName]: {}
1234
+ }).select();
1235
+ if (testError) {
1236
+ console.error("Test insert error:", testError);
1237
+ throw new Error(
1238
+ `Vector operations failed. Please ensure:
1239
+ 1. The vector extension is enabled
1240
+ 2. The table "${this.tableName}" exists with correct schema
1241
+ 3. The match_vectors function is created
1242
+ See the SQL migration instructions in the code comments.`
1243
+ );
1244
+ }
1245
+ await this.client.from(this.tableName).delete().eq("id", "test_vector");
1246
+ console.log("Connected to Supabase successfully");
1247
+ } catch (error) {
1248
+ console.error("Error during Supabase initialization:", error);
1249
+ throw error;
1250
+ }
1251
+ }
1252
+ async insert(vectors, ids, payloads) {
1253
+ try {
1254
+ const data = vectors.map((vector, idx) => ({
1255
+ id: ids[idx],
1256
+ [this.embeddingColumnName]: vector,
1257
+ [this.metadataColumnName]: {
1258
+ ...payloads[idx],
1259
+ created_at: (/* @__PURE__ */ new Date()).toISOString()
1260
+ }
1261
+ }));
1262
+ const { error } = await this.client.from(this.tableName).insert(data);
1263
+ if (error) throw error;
1264
+ } catch (error) {
1265
+ console.error("Error during vector insert:", error);
1266
+ throw error;
1267
+ }
1268
+ }
1269
+ async search(query, limit = 5, filters) {
1270
+ try {
1271
+ const rpcQuery = {
1272
+ query_embedding: query,
1273
+ match_count: limit
1274
+ };
1275
+ if (filters) {
1276
+ rpcQuery.filter = filters;
1277
+ }
1278
+ const { data, error } = await this.client.rpc("match_vectors", rpcQuery);
1279
+ if (error) throw error;
1280
+ if (!data) return [];
1281
+ const results = data;
1282
+ return results.map((result) => ({
1283
+ id: result.id,
1284
+ payload: result.metadata,
1285
+ score: result.similarity
1286
+ }));
1287
+ } catch (error) {
1288
+ console.error("Error during vector search:", error);
1289
+ throw error;
1290
+ }
1291
+ }
1292
+ async get(vectorId) {
1293
+ try {
1294
+ const { data, error } = await this.client.from(this.tableName).select("*").eq("id", vectorId).single();
1295
+ if (error) throw error;
1296
+ if (!data) return null;
1297
+ return {
1298
+ id: data.id,
1299
+ payload: data[this.metadataColumnName]
1300
+ };
1301
+ } catch (error) {
1302
+ console.error("Error getting vector:", error);
1303
+ throw error;
1304
+ }
1305
+ }
1306
+ async update(vectorId, vector, payload) {
1307
+ try {
1308
+ const { error } = await this.client.from(this.tableName).update({
1309
+ [this.embeddingColumnName]: vector,
1310
+ [this.metadataColumnName]: {
1311
+ ...payload,
1312
+ updated_at: (/* @__PURE__ */ new Date()).toISOString()
1313
+ }
1314
+ }).eq("id", vectorId);
1315
+ if (error) throw error;
1316
+ } catch (error) {
1317
+ console.error("Error during vector update:", error);
1318
+ throw error;
1319
+ }
1320
+ }
1321
+ async delete(vectorId) {
1322
+ try {
1323
+ const { error } = await this.client.from(this.tableName).delete().eq("id", vectorId);
1324
+ if (error) throw error;
1325
+ } catch (error) {
1326
+ console.error("Error deleting vector:", error);
1327
+ throw error;
1328
+ }
1329
+ }
1330
+ async deleteCol() {
1331
+ try {
1332
+ const { error } = await this.client.from(this.tableName).delete().neq("id", "");
1333
+ if (error) throw error;
1334
+ } catch (error) {
1335
+ console.error("Error deleting collection:", error);
1336
+ throw error;
1337
+ }
1338
+ }
1339
+ async list(filters, limit = 100) {
1340
+ try {
1341
+ let query = this.client.from(this.tableName).select("*", { count: "exact" }).limit(limit);
1342
+ if (filters) {
1343
+ Object.entries(filters).forEach(([key, value]) => {
1344
+ query = query.eq(`${this.metadataColumnName}->>${key}`, value);
1345
+ });
1346
+ }
1347
+ const { data, error, count } = await query;
1348
+ if (error) throw error;
1349
+ const results = data.map((item) => ({
1350
+ id: item.id,
1351
+ payload: item[this.metadataColumnName]
1352
+ }));
1353
+ return [results, count || 0];
1354
+ } catch (error) {
1355
+ console.error("Error listing vectors:", error);
1356
+ throw error;
1357
+ }
1358
+ }
1359
+ };
1360
+
1076
1361
  // src/oss/src/utils/factory.ts
1077
1362
  var EmbedderFactory = class {
1078
1363
  static create(provider, config) {
1079
1364
  switch (provider.toLowerCase()) {
1080
1365
  case "openai":
1081
1366
  return new OpenAIEmbedder(config);
1367
+ case "ollama":
1368
+ return new OllamaEmbedder(config);
1082
1369
  default:
1083
1370
  throw new Error(`Unsupported embedder provider: ${provider}`);
1084
1371
  }
@@ -1095,6 +1382,8 @@ var LLMFactory = class {
1095
1382
  return new AnthropicLLM(config);
1096
1383
  case "groq":
1097
1384
  return new GroqLLM(config);
1385
+ case "ollama":
1386
+ return new OllamaLLM(config);
1098
1387
  default:
1099
1388
  throw new Error(`Unsupported LLM provider: ${provider}`);
1100
1389
  }
@@ -1111,6 +1400,9 @@ var VectorStoreFactory = class {
1111
1400
  case "redis":
1112
1401
  return new RedisDB(config);
1113
1402
  // Type assertion needed as config is extended
1403
+ case "supabase":
1404
+ return new SupabaseDB(config);
1405
+ // Type assertion needed as config is extended
1114
1406
  default:
1115
1407
  throw new Error(`Unsupported vector store provider: ${provider}`);
1116
1408
  }
@@ -1707,14 +1999,6 @@ function getDeleteMessages(existingMemoriesString, data, userId) {
1707
1999
  ];
1708
2000
  }
1709
2001
 
1710
- // src/oss/src/utils/logger.ts
1711
- var logger = {
1712
- info: (message) => console.log(`[INFO] ${message}`),
1713
- error: (message) => console.error(`[ERROR] ${message}`),
1714
- debug: (message) => console.debug(`[DEBUG] ${message}`),
1715
- warn: (message) => console.warn(`[WARN] ${message}`)
1716
- };
1717
-
1718
2002
  // src/oss/src/memory/graph_memory.ts
1719
2003
  var MemoryGraph = class {
1720
2004
  constructor(config) {
@@ -2661,6 +2945,8 @@ ${parsedMessages}`] : getFactRetrievalMessages(parsedMessages);
2661
2945
  Memory,
2662
2946
  MemoryConfigSchema,
2663
2947
  MemoryVectorStore,
2948
+ OllamaEmbedder,
2949
+ OllamaLLM,
2664
2950
  OpenAIEmbedder,
2665
2951
  OpenAILLM,
2666
2952
  OpenAIStructuredLLM,