memories-lite 0.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/MEMORIES.md +39 -0
- package/README.md +221 -0
- package/TECHNICAL.md +135 -0
- package/dist/config/defaults.d.ts +2 -0
- package/dist/config/defaults.js +61 -0
- package/dist/config/manager.d.ts +4 -0
- package/dist/config/manager.js +121 -0
- package/dist/embeddings/base.d.ts +4 -0
- package/dist/embeddings/base.js +2 -0
- package/dist/embeddings/google.d.ts +10 -0
- package/dist/embeddings/google.js +28 -0
- package/dist/embeddings/openai.d.ts +10 -0
- package/dist/embeddings/openai.js +31 -0
- package/dist/graphs/configs.d.ts +14 -0
- package/dist/graphs/configs.js +19 -0
- package/dist/graphs/tools.d.ts +271 -0
- package/dist/graphs/tools.js +220 -0
- package/dist/graphs/utils.d.ts +9 -0
- package/dist/graphs/utils.js +105 -0
- package/dist/index.d.ts +14 -0
- package/dist/index.js +30 -0
- package/dist/llms/base.d.ts +16 -0
- package/dist/llms/base.js +2 -0
- package/dist/llms/google.d.ts +11 -0
- package/dist/llms/google.js +44 -0
- package/dist/llms/openai.d.ts +9 -0
- package/dist/llms/openai.js +73 -0
- package/dist/llms/openai_structured.d.ts +11 -0
- package/dist/llms/openai_structured.js +72 -0
- package/dist/memory/index.d.ts +42 -0
- package/dist/memory/index.js +499 -0
- package/dist/memory/memory.types.d.ts +23 -0
- package/dist/memory/memory.types.js +2 -0
- package/dist/prompts/index.d.ts +102 -0
- package/dist/prompts/index.js +233 -0
- package/dist/storage/DummyHistoryManager.d.ts +7 -0
- package/dist/storage/DummyHistoryManager.js +19 -0
- package/dist/storage/MemoryHistoryManager.d.ts +8 -0
- package/dist/storage/MemoryHistoryManager.js +36 -0
- package/dist/storage/base.d.ts +6 -0
- package/dist/storage/base.js +2 -0
- package/dist/storage/index.d.ts +3 -0
- package/dist/storage/index.js +19 -0
- package/dist/types/index.d.ts +1071 -0
- package/dist/types/index.js +100 -0
- package/dist/utils/bm25.d.ts +13 -0
- package/dist/utils/bm25.js +51 -0
- package/dist/utils/factory.d.ts +13 -0
- package/dist/utils/factory.js +49 -0
- package/dist/utils/logger.d.ts +7 -0
- package/dist/utils/logger.js +9 -0
- package/dist/utils/memory.d.ts +3 -0
- package/dist/utils/memory.js +44 -0
- package/dist/utils/telemetry.d.ts +11 -0
- package/dist/utils/telemetry.js +74 -0
- package/dist/utils/telemetry.types.d.ts +27 -0
- package/dist/utils/telemetry.types.js +2 -0
- package/dist/vectorstores/base.d.ts +11 -0
- package/dist/vectorstores/base.js +2 -0
- package/dist/vectorstores/lite.d.ts +40 -0
- package/dist/vectorstores/lite.js +319 -0
- package/dist/vectorstores/llm.d.ts +31 -0
- package/dist/vectorstores/llm.js +88 -0
- package/jest.config.js +22 -0
- package/memories-lite.db +0 -0
- package/package.json +38 -0
- package/src/config/defaults.ts +61 -0
- package/src/config/manager.ts +132 -0
- package/src/embeddings/base.ts +4 -0
- package/src/embeddings/google.ts +32 -0
- package/src/embeddings/openai.ts +33 -0
- package/src/graphs/configs.ts +30 -0
- package/src/graphs/tools.ts +267 -0
- package/src/graphs/utils.ts +114 -0
- package/src/index.ts +14 -0
- package/src/llms/base.ts +20 -0
- package/src/llms/google.ts +56 -0
- package/src/llms/openai.ts +85 -0
- package/src/llms/openai_structured.ts +82 -0
- package/src/memory/index.ts +723 -0
- package/src/memory/memory.types.ts +27 -0
- package/src/prompts/index.ts +268 -0
- package/src/storage/DummyHistoryManager.ts +27 -0
- package/src/storage/MemoryHistoryManager.ts +58 -0
- package/src/storage/base.ts +14 -0
- package/src/storage/index.ts +3 -0
- package/src/types/index.ts +243 -0
- package/src/utils/bm25.ts +64 -0
- package/src/utils/factory.ts +59 -0
- package/src/utils/logger.ts +13 -0
- package/src/utils/memory.ts +48 -0
- package/src/utils/telemetry.ts +98 -0
- package/src/utils/telemetry.types.ts +34 -0
- package/src/vectorstores/base.ts +27 -0
- package/src/vectorstores/lite.ts +402 -0
- package/src/vectorstores/llm.ts +126 -0
- package/tests/lite.spec.ts +158 -0
- package/tests/memory.facts.test.ts +211 -0
- package/tests/memory.test.ts +406 -0
- package/tsconfig.json +16 -0
- package/tsconfig.tsbuildinfo +1 -0
package/dist/index.d.ts
ADDED
```diff
@@ -0,0 +1,14 @@
+export * from "./memory";
+export * from "./memory/memory.types";
+export * from "./types";
+export * from "./prompts";
+export * from "./embeddings/base";
+export * from "./embeddings/openai";
+export * from "./embeddings/google";
+export * from "./llms/base";
+export * from "./llms/openai";
+export * from "./llms/openai_structured";
+export * from "./vectorstores/base";
+export * from "./vectorstores/llm";
+export * from "./vectorstores/lite";
+export * from "./utils/factory";
```
package/dist/index.js
ADDED
```diff
@@ -0,0 +1,30 @@
+"use strict";
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+      desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
+}) : (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    o[k2] = m[k];
+}));
+var __exportStar = (this && this.__exportStar) || function(m, exports) {
+    for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+__exportStar(require("./memory"), exports);
+__exportStar(require("./memory/memory.types"), exports);
+__exportStar(require("./types"), exports);
+__exportStar(require("./prompts"), exports);
+__exportStar(require("./embeddings/base"), exports);
+__exportStar(require("./embeddings/openai"), exports);
+__exportStar(require("./embeddings/google"), exports);
+__exportStar(require("./llms/base"), exports);
+__exportStar(require("./llms/openai"), exports);
+__exportStar(require("./llms/openai_structured"), exports);
+__exportStar(require("./vectorstores/base"), exports);
+__exportStar(require("./vectorstores/llm"), exports);
+__exportStar(require("./vectorstores/lite"), exports);
+__exportStar(require("./utils/factory"), exports);
```
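Both the declaration and the runtime barrel re-export the same module set, so everything below is reachable from the package root. A minimal consumer-side sketch (names come from the exports above; note that `./llms/google` is not re-exported here, only its embeddings counterpart):

```ts
// Sketch only: MemoriesLite is declared in ./memory (see its .d.ts later in
// this diff) and OpenAILLM in ./llms/openai; both flow through the barrel.
import { MemoriesLite, OpenAILLM } from "memories-lite";

// The constructor takes Partial<MemoryConfig>, so zero-config is allowed by the types.
const memories = new MemoriesLite();
```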
package/dist/llms/base.d.ts
ADDED
```diff
@@ -0,0 +1,16 @@
+import { Message } from "../types";
+export interface LLMResponse {
+    content: string;
+    role: string;
+    toolCalls?: Array<{
+        name: string;
+        arguments: string;
+    }>;
+}
+export interface LLM {
+    generateResponse(messages: Array<{
+        role: string;
+        content: string;
+    }>, response_format?: any, tools?: any[], structuredOutput?: boolean, model?: string): Promise<any>;
+    generateChat(messages: Message[]): Promise<LLMResponse>;
+}
```
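`LLM` is the provider contract the rest of the package codes against: a loosely typed `generateResponse` for prompt-style calls and a `generateChat` that returns an `LLMResponse`. A minimal sketch of a custom provider satisfying it (the echo behavior is purely illustrative):

```ts
import { LLM, LLMResponse, Message } from "memories-lite";

// Illustrative stub that satisfies the LLM contract declared above.
class EchoLLM implements LLM {
  async generateResponse(
    messages: Array<{ role: string; content: string }>,
    _responseFormat?: any,
    _tools?: any[],
    _structuredOutput?: boolean,
    _model?: string
  ): Promise<any> {
    // Echo the last message back; a real provider would call its API here.
    return messages[messages.length - 1]?.content ?? "";
  }

  async generateChat(messages: Message[]): Promise<LLMResponse> {
    const last = messages[messages.length - 1];
    // Mirror the package's own string-or-JSON handling of message content.
    const text =
      typeof last?.content === "string" ? last.content : JSON.stringify(last?.content ?? "");
    return { content: text, role: "assistant" };
  }
}
```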
package/dist/llms/google.d.ts
ADDED
```diff
@@ -0,0 +1,11 @@
+import { LLM, LLMResponse } from "./base";
+import { LLMConfig, Message } from "../types";
+export declare class GoogleLLM implements LLM {
+    private google;
+    private model;
+    constructor(config: LLMConfig);
+    generateResponse(messages: Message[], responseFormat?: {
+        type: string;
+    }, tools?: any[]): Promise<string | LLMResponse>;
+    generateChat(messages: Message[]): Promise<LLMResponse>;
+}
```
package/dist/llms/google.js
ADDED
```diff
@@ -0,0 +1,44 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.GoogleLLM = void 0;
+const genai_1 = require("@google/genai");
+class GoogleLLM {
+    constructor(config) {
+        this.google = new genai_1.GoogleGenAI({ apiKey: config.apiKey });
+        this.model = config.model || "gemini-2.0-flash";
+    }
+    async generateResponse(messages, responseFormat, tools) {
+        const completion = await this.google.models.generateContent({
+            contents: messages.map((msg) => ({
+                parts: [
+                    {
+                        text: typeof msg.content === "string"
+                            ? msg.content
+                            : JSON.stringify(msg.content),
+                    },
+                ],
+                role: msg.role === "system" ? "model" : "user",
+            })),
+            model: this.model,
+            // config: {
+            //     responseSchema: {}, // Add response schema if needed
+            // },
+        });
+        const text = completion.text
+            ?.replace(/^```json\n/, "")
+            .replace(/\n```$/, "");
+        return text || "";
+    }
+    async generateChat(messages) {
+        const completion = await this.google.models.generateContent({
+            contents: messages,
+            model: this.model,
+        });
+        const response = completion.candidates[0].content;
+        return {
+            content: response.parts[0].text || "",
+            role: response.role,
+        };
+    }
+}
+exports.GoogleLLM = GoogleLLM;
```
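Two implementation details worth noting above: "system" messages are remapped to Gemini's "model" role, and a wrapping Markdown JSON code fence is stripped from the reply before it is returned. A usage sketch, inside an async context (the deep-import path is an assumption, since `./llms/google` is not re-exported from the root barrel shown earlier):

```ts
// Deep import is an assumption: ./llms/google is absent from the root barrel.
import { GoogleLLM } from "memories-lite/dist/llms/google";

async function demo() {
  const llm = new GoogleLLM({
    apiKey: process.env.GEMINI_API_KEY!, // read as config.apiKey in the constructor
    model: "gemini-2.0-flash",           // the constructor's fallback default anyway
  });
  // Returns plain text; any wrapping JSON code fence has already been stripped.
  const text = await llm.generateResponse([
    { role: "user", content: 'Reply with the JSON object {"ok": true}' },
  ]);
  console.log(text);
}
```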
package/dist/llms/openai.d.ts
ADDED
```diff
@@ -0,0 +1,9 @@
+import { LLM, LLMResponse } from "./base";
+import { LLMConfig, Message } from "../types";
+export declare class OpenAILLM implements LLM {
+    private openai;
+    private model;
+    constructor(config: LLMConfig);
+    generateResponse(messages: Message[], responseFormat?: any, tools?: any[], structuredOutput?: boolean, model?: string): Promise<string | LLMResponse>;
+    generateChat(messages: Message[]): Promise<LLMResponse>;
+}
```
package/dist/llms/openai.js
ADDED
```diff
@@ -0,0 +1,73 @@
+"use strict";
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.OpenAILLM = void 0;
+const openai_1 = __importDefault(require("openai"));
+class OpenAILLM {
+    constructor(config) {
+        this.openai = new openai_1.default({ apiKey: config.apiKey });
+        this.model = config.model || "gpt-4.1-mini";
+    }
+    async generateResponse(messages, responseFormat, tools, structuredOutput = false, model) {
+        const msg = messages.map((msg) => {
+            const role = msg.role;
+            return {
+                role,
+                content: typeof msg.content === "string"
+                    ? msg.content
+                    : JSON.stringify(msg.content),
+            };
+        });
+        // DEPRECATED: use structured ouput message with `await openai.beta.chat.completions.parse(...)`
+        // https://platform.openai.com/docs/guides/structured-outputs?api-mode=chat&lang=javascript
+        // ⚠️ SyntaxError: Unexpected non-whitespace character after JSON at position 13
+        const fnCall = structuredOutput ?
+            this.openai.beta.chat.completions.parse.bind(this.openai.beta.chat.completions) : this.openai.chat.completions.create.bind(this.openai.chat.completions);
+        const completion = await fnCall({
+            messages: msg,
+            model: model || this.model,
+            top_p: .8, temperature: .2,
+            ...(structuredOutput && { logit_bias: { "1734": -100 } }),
+            response_format: responseFormat,
+            ...(tools && { tools, tool_choice: "auto" }),
+        });
+        const response = completion.choices[0].message;
+        if (response.tool_calls) {
+            return {
+                content: response.content || "",
+                role: response.role,
+                toolCalls: response.tool_calls.map((call) => ({
+                    name: call.function.name,
+                    arguments: call.function.arguments,
+                })),
+            };
+        }
+        const result = structuredOutput ? response.parsed : response.content;
+        //const result = response.content;
+        // console.log("--- DBG query:", msg);
+        // console.log("\n\n--- DBG result:", result);
+        return result || "";
+    }
+    async generateChat(messages) {
+        const completion = await this.openai.chat.completions.create({
+            messages: messages.map((msg) => {
+                const role = msg.role;
+                return {
+                    role,
+                    content: typeof msg.content === "string"
+                        ? msg.content
+                        : JSON.stringify(msg.content),
+                };
+            }),
+            model: this.model,
+        });
+        const response = completion.choices[0].message;
+        return {
+            content: response.content || "",
+            role: response.role,
+        };
+    }
+}
+exports.OpenAILLM = OpenAILLM;
```
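In the class above, the `structuredOutput` flag switches the call path from `chat.completions.create` to `beta.chat.completions.parse` and returns `message.parsed` instead of `message.content`; sampling is pinned (`top_p: 0.8`, `temperature: 0.2`) and a single `logit_bias` entry is applied in structured mode. A sketch of the plain path, inside an async context:

```ts
import { OpenAILLM } from "memories-lite";

async function demo() {
  const llm = new OpenAILLM({ apiKey: process.env.OPENAI_API_KEY! });
  // Plain (non-structured) path: chat.completions.create with a JSON response format.
  const reply = await llm.generateResponse(
    [{ role: "user", content: "Return a JSON object with a `city` field for: Alice lives in Paris." }],
    { type: "json_object" } // responseFormat; shape assumed from the declaration above
  );
  console.log(reply); // string content, or an LLMResponse when tool calls come back
}
```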
package/dist/llms/openai_structured.d.ts
ADDED
```diff
@@ -0,0 +1,11 @@
+import { LLM, LLMResponse } from "./base";
+import { LLMConfig, Message } from "../types";
+export declare class OpenAIStructuredLLM implements LLM {
+    private openai;
+    private model;
+    constructor(config: LLMConfig);
+    generateResponse(messages: Message[], responseFormat?: {
+        type: string;
+    } | null, tools?: any[]): Promise<string | LLMResponse>;
+    generateChat(messages: Message[]): Promise<LLMResponse>;
+}
```
package/dist/llms/openai_structured.js
ADDED
```diff
@@ -0,0 +1,72 @@
+"use strict";
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.OpenAIStructuredLLM = void 0;
+const openai_1 = __importDefault(require("openai"));
+class OpenAIStructuredLLM {
+    constructor(config) {
+        this.openai = new openai_1.default({ apiKey: config.apiKey });
+        this.model = config.model || "gpt-4-turbo-preview";
+    }
+    async generateResponse(messages, responseFormat, tools) {
+        const completion = await this.openai.chat.completions.create({
+            messages: messages.map((msg) => ({
+                role: msg.role,
+                content: typeof msg.content === "string"
+                    ? msg.content
+                    : JSON.stringify(msg.content),
+            })),
+            model: this.model,
+            ...(tools
+                ? {
+                    tools: tools.map((tool) => ({
+                        type: "function",
+                        function: {
+                            name: tool.function.name,
+                            description: tool.function.description,
+                            parameters: tool.function.parameters,
+                        },
+                    })),
+                    tool_choice: "auto",
+                }
+                : responseFormat
+                    ? {
+                        response_format: {
+                            type: responseFormat.type,
+                        },
+                    }
+                    : {}),
+        });
+        const response = completion.choices[0].message;
+        if (response.tool_calls) {
+            return {
+                content: response.content || "",
+                role: response.role,
+                toolCalls: response.tool_calls.map((call) => ({
+                    name: call.function.name,
+                    arguments: call.function.arguments,
+                })),
+            };
+        }
+        return response.content || "";
+    }
+    async generateChat(messages) {
+        const completion = await this.openai.chat.completions.create({
+            messages: messages.map((msg) => ({
+                role: msg.role,
+                content: typeof msg.content === "string"
+                    ? msg.content
+                    : JSON.stringify(msg.content),
+            })),
+            model: this.model,
+        });
+        const response = completion.choices[0].message;
+        return {
+            content: response.content || "",
+            role: response.role,
+        };
+    }
+}
+exports.OpenAIStructuredLLM = OpenAIStructuredLLM;
```
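When `tools` are supplied, `OpenAIStructuredLLM.generateResponse` forwards them as OpenAI function tools with `tool_choice: "auto"`; the `responseFormat` branch is only applied when no tools are given. The mapping reads `tool.function.name`, `.description`, and `.parameters`, so the expected input shape mirrors OpenAI's function-tool format (the tool below is hypothetical):

```ts
// Hypothetical tool definition matching the shape read by generateResponse above.
const tools = [
  {
    type: "function",
    function: {
      name: "save_memory",
      description: "Persist a fact about the user.",
      parameters: {
        type: "object",
        properties: { fact: { type: "string" } },
        required: ["fact"],
      },
    },
  },
];
// If the model decides to call it, the return value is an LLMResponse whose
// toolCalls carry { name, arguments } pairs; otherwise a plain string.
```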
package/dist/memory/index.d.ts
ADDED
```diff
@@ -0,0 +1,42 @@
+import { MemoryConfig, MemoryItem, Message, SearchResult } from "../types";
+import { VectorStore } from "../vectorstores/base";
+import { AddMemoryOptions, SearchMemoryOptions, DeleteAllMemoryOptions, GetAllMemoryOptions } from "./memory.types";
+export declare class MemoriesLite {
+    private config;
+    private customPrompt;
+    private embedder;
+    private vectorStoreConfig;
+    private llm;
+    private db;
+    private collectionName;
+    private apiVersion;
+    private graphMemory?;
+    private enableGraph;
+    telemetryId: string;
+    constructor(config?: Partial<MemoryConfig>);
+    private _initializeTelemetry;
+    private _getTelemetryId;
+    private _captureEvent;
+    private $t;
+    private addToVectorStore;
+    static fromConfig(configDict: Record<string, any>): MemoriesLite;
+    getVectorStore(userId: string): Promise<VectorStore>;
+    capture(messages: string | Message[], userId: string, config: AddMemoryOptions): Promise<SearchResult>;
+    get(memoryId: string, userId: string): Promise<MemoryItem | null>;
+    retrieve(query: string, userId: string, config: SearchMemoryOptions): Promise<SearchResult>;
+    update(memoryId: string, data: string, userId: string): Promise<{
+        message: string;
+    }>;
+    delete(memoryId: string, userId: string): Promise<{
+        message: string;
+    }>;
+    deleteAll(userId: string, config: DeleteAllMemoryOptions): Promise<{
+        message: string;
+    }>;
+    history(memoryId: string): Promise<any[]>;
+    reset(userId: string): Promise<void>;
+    getAll(userId: string, config: GetAllMemoryOptions): Promise<SearchResult>;
+    private createMemory;
+    private updateMemory;
+    private deleteMemory;
+}
```
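The public surface is small and consistently `userId`-scoped: `capture` ingests messages, `retrieve` searches, and `get`/`getAll`/`update`/`delete`/`deleteAll`/`history`/`reset` manage the store. A usage sketch, inside an async context (the options objects' fields live in `memory.types` and are not shown in this excerpt, so the empty objects are an assumption):

```ts
import { MemoriesLite } from "memories-lite";

async function demo() {
  // Partial<MemoryConfig>, so zero-config is allowed by the types.
  const memories = new MemoriesLite();

  // Ingest a conversation turn for one user...
  await memories.capture(
    [{ role: "user", content: "I prefer window seats." }],
    "user-123",
    {} // AddMemoryOptions: fields not shown in this excerpt
  );

  // ...and search it back, scoped to the same user.
  const results = await memories.retrieve("seating preference", "user-123", {});
  console.log(results);
}
```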