memories-lite 0.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. package/MEMORIES.md +39 -0
  2. package/README.md +221 -0
  3. package/TECHNICAL.md +135 -0
  4. package/dist/config/defaults.d.ts +2 -0
  5. package/dist/config/defaults.js +61 -0
  6. package/dist/config/manager.d.ts +4 -0
  7. package/dist/config/manager.js +121 -0
  8. package/dist/embeddings/base.d.ts +4 -0
  9. package/dist/embeddings/base.js +2 -0
  10. package/dist/embeddings/google.d.ts +10 -0
  11. package/dist/embeddings/google.js +28 -0
  12. package/dist/embeddings/openai.d.ts +10 -0
  13. package/dist/embeddings/openai.js +31 -0
  14. package/dist/graphs/configs.d.ts +14 -0
  15. package/dist/graphs/configs.js +19 -0
  16. package/dist/graphs/tools.d.ts +271 -0
  17. package/dist/graphs/tools.js +220 -0
  18. package/dist/graphs/utils.d.ts +9 -0
  19. package/dist/graphs/utils.js +105 -0
  20. package/dist/index.d.ts +14 -0
  21. package/dist/index.js +30 -0
  22. package/dist/llms/base.d.ts +16 -0
  23. package/dist/llms/base.js +2 -0
  24. package/dist/llms/google.d.ts +11 -0
  25. package/dist/llms/google.js +44 -0
  26. package/dist/llms/openai.d.ts +9 -0
  27. package/dist/llms/openai.js +73 -0
  28. package/dist/llms/openai_structured.d.ts +11 -0
  29. package/dist/llms/openai_structured.js +72 -0
  30. package/dist/memory/index.d.ts +42 -0
  31. package/dist/memory/index.js +499 -0
  32. package/dist/memory/memory.types.d.ts +23 -0
  33. package/dist/memory/memory.types.js +2 -0
  34. package/dist/prompts/index.d.ts +102 -0
  35. package/dist/prompts/index.js +233 -0
  36. package/dist/storage/DummyHistoryManager.d.ts +7 -0
  37. package/dist/storage/DummyHistoryManager.js +19 -0
  38. package/dist/storage/MemoryHistoryManager.d.ts +8 -0
  39. package/dist/storage/MemoryHistoryManager.js +36 -0
  40. package/dist/storage/base.d.ts +6 -0
  41. package/dist/storage/base.js +2 -0
  42. package/dist/storage/index.d.ts +3 -0
  43. package/dist/storage/index.js +19 -0
  44. package/dist/types/index.d.ts +1071 -0
  45. package/dist/types/index.js +100 -0
  46. package/dist/utils/bm25.d.ts +13 -0
  47. package/dist/utils/bm25.js +51 -0
  48. package/dist/utils/factory.d.ts +13 -0
  49. package/dist/utils/factory.js +49 -0
  50. package/dist/utils/logger.d.ts +7 -0
  51. package/dist/utils/logger.js +9 -0
  52. package/dist/utils/memory.d.ts +3 -0
  53. package/dist/utils/memory.js +44 -0
  54. package/dist/utils/telemetry.d.ts +11 -0
  55. package/dist/utils/telemetry.js +74 -0
  56. package/dist/utils/telemetry.types.d.ts +27 -0
  57. package/dist/utils/telemetry.types.js +2 -0
  58. package/dist/vectorstores/base.d.ts +11 -0
  59. package/dist/vectorstores/base.js +2 -0
  60. package/dist/vectorstores/lite.d.ts +40 -0
  61. package/dist/vectorstores/lite.js +319 -0
  62. package/dist/vectorstores/llm.d.ts +31 -0
  63. package/dist/vectorstores/llm.js +88 -0
  64. package/jest.config.js +22 -0
  65. package/memories-lite.db +0 -0
  66. package/package.json +38 -0
  67. package/src/config/defaults.ts +61 -0
  68. package/src/config/manager.ts +132 -0
  69. package/src/embeddings/base.ts +4 -0
  70. package/src/embeddings/google.ts +32 -0
  71. package/src/embeddings/openai.ts +33 -0
  72. package/src/graphs/configs.ts +30 -0
  73. package/src/graphs/tools.ts +267 -0
  74. package/src/graphs/utils.ts +114 -0
  75. package/src/index.ts +14 -0
  76. package/src/llms/base.ts +20 -0
  77. package/src/llms/google.ts +56 -0
  78. package/src/llms/openai.ts +85 -0
  79. package/src/llms/openai_structured.ts +82 -0
  80. package/src/memory/index.ts +723 -0
  81. package/src/memory/memory.types.ts +27 -0
  82. package/src/prompts/index.ts +268 -0
  83. package/src/storage/DummyHistoryManager.ts +27 -0
  84. package/src/storage/MemoryHistoryManager.ts +58 -0
  85. package/src/storage/base.ts +14 -0
  86. package/src/storage/index.ts +3 -0
  87. package/src/types/index.ts +243 -0
  88. package/src/utils/bm25.ts +64 -0
  89. package/src/utils/factory.ts +59 -0
  90. package/src/utils/logger.ts +13 -0
  91. package/src/utils/memory.ts +48 -0
  92. package/src/utils/telemetry.ts +98 -0
  93. package/src/utils/telemetry.types.ts +34 -0
  94. package/src/vectorstores/base.ts +27 -0
  95. package/src/vectorstores/lite.ts +402 -0
  96. package/src/vectorstores/llm.ts +126 -0
  97. package/tests/lite.spec.ts +158 -0
  98. package/tests/memory.facts.test.ts +211 -0
  99. package/tests/memory.test.ts +406 -0
  100. package/tsconfig.json +16 -0
  101. package/tsconfig.tsbuildinfo +1 -0
package/src/llms/google.ts
@@ -0,0 +1,56 @@
+ import { GoogleGenAI } from "@google/genai";
+ import { LLM, LLMResponse } from "./base";
+ import { LLMConfig, Message } from "../types";
+
+ export class GoogleLLM implements LLM {
+   private google: GoogleGenAI;
+   private model: string;
+
+   constructor(config: LLMConfig) {
+     this.google = new GoogleGenAI({ apiKey: config.apiKey });
+     this.model = config.model || "gemini-2.0-flash";
+   }
+
+   async generateResponse(
+     messages: Message[],
+     responseFormat?: { type: string },
+     tools?: any[],
+   ): Promise<string | LLMResponse> {
+     const completion = await this.google.models.generateContent({
+       contents: messages.map((msg) => ({
+         parts: [
+           {
+             text:
+               typeof msg.content === "string"
+                 ? msg.content
+                 : JSON.stringify(msg.content),
+           },
+         ],
+         role: msg.role === "system" ? "model" : "user",
+       })),
+
+       model: this.model,
+       // config: {
+       //   responseSchema: {}, // Add response schema if needed
+       // },
+     });
+
+     const text = completion.text
+       ?.replace(/^```json\n/, "")
+       .replace(/\n```$/, "");
+
+     return text || "";
+   }
+
+   async generateChat(messages: Message[]): Promise<LLMResponse> {
+     const completion = await this.google.models.generateContent({
+       contents: messages,
+       model: this.model,
+     });
+     const response = completion.candidates![0].content;
+     return {
+       content: response!.parts![0].text || "",
+       role: response!.role!,
+     };
+   }
+ }
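
For context, a minimal sketch of how GoogleLLM might be driven; the GEMINI_API_KEY variable and the two-field config literal are assumptions for illustration, not something this diff shows:

import { GoogleLLM } from "./google";

async function demo() {
  // Hypothetical config: only apiKey and model are assumed here.
  const llm = new GoogleLLM({
    apiKey: process.env.GEMINI_API_KEY!,
    model: "gemini-2.0-flash",
  });

  // generateResponse flattens each message into a single text part,
  // maps "system" to Gemini's "model" role, and strips ```json fences
  // from the reply before returning plain text.
  const reply = await llm.generateResponse([
    { role: "system", content: "You extract user facts as JSON." },
    { role: "user", content: "I moved to Lisbon and I prefer dark mode." },
  ]);
  console.log(reply);
}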
package/src/llms/openai.ts
@@ -0,0 +1,85 @@
+ import OpenAI from "openai";
+ import { LLM, LLMResponse } from "./base";
+ import { LLMConfig, Message } from "../types";
+ import { z } from "zod";
+
+ export class OpenAILLM implements LLM {
+   private openai: OpenAI;
+   private model: string;
+
+   constructor(config: LLMConfig) {
+     this.openai = new OpenAI({ apiKey: config.apiKey });
+     this.model = config.model || "gpt-4.1-mini";
+   }
+
+   async generateResponse(
+     messages: Message[],
+     responseFormat?: any,
+     tools?: any[], structuredOutput: boolean = false, model?: string,
+   ): Promise<string | LLMResponse> {
+     const msg = messages.map((msg) => {
+       const role = msg.role as "system" | "user" | "assistant";
+       return {
+         role,
+         content:
+           typeof msg.content === "string"
+             ? msg.content
+             : JSON.stringify(msg.content),
+       };
+     });
+
+     // DEPRECATED: use structured output parsing with `await openai.beta.chat.completions.parse(...)`
+     // https://platform.openai.com/docs/guides/structured-outputs?api-mode=chat&lang=javascript
+     // ⚠️ SyntaxError: Unexpected non-whitespace character after JSON at position 13
+     const fnCall = structuredOutput ?
+       this.openai.beta.chat.completions.parse.bind(this.openai.beta.chat.completions) : this.openai.chat.completions.create.bind(this.openai.chat.completions);
+     const completion = await fnCall({
+       messages: msg,
+       model: model || this.model,
+       top_p: 0.8, temperature: 0.2,
+       ...(structuredOutput && { logit_bias: { "1734": -100 } }),
+       response_format: responseFormat,
+       ...(tools && { tools, tool_choice: "auto" }),
+     });
+
+     const response: any = completion.choices[0].message;
+
+     if (response.tool_calls) {
+       return {
+         content: response.content || "",
+         role: response.role,
+         toolCalls: response.tool_calls.map((call: any) => ({
+           name: call.function.name,
+           arguments: call.function.arguments,
+         })),
+       };
+     }
+     const result = structuredOutput ? response.parsed : response.content;
+     // const result = response.content;
+     // console.log("--- DBG query:", msg);
+     // console.log("\n\n--- DBG result:", result);
+
+     return result || "";
+   }
+
+   async generateChat(messages: Message[]): Promise<LLMResponse> {
+     const completion = await this.openai.chat.completions.create({
+       messages: messages.map((msg) => {
+         const role = msg.role as "system" | "user" | "assistant";
+         return {
+           role,
+           content:
+             typeof msg.content === "string"
+               ? msg.content
+               : JSON.stringify(msg.content),
+         };
+       }),
+       model: this.model,
+     });
+     const response = completion.choices[0].message;
+     return {
+       content: response.content || "",
+       role: response.role,
+     };
+   }
+ }
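
A sketch of the two call paths in OpenAILLM above; building responseFormat with zodResponseFormat from the openai SDK's zod helpers is one plausible way to feed the structuredOutput branch (an assumption, since the diff never shows a caller):

import { z } from "zod";
import { zodResponseFormat } from "openai/helpers/zod";
import { OpenAILLM } from "./openai";

async function demo() {
  const llm = new OpenAILLM({ apiKey: process.env.OPENAI_API_KEY! });

  // Default path: structuredOutput is false, so chat.completions.create
  // runs and the raw message content string comes back.
  const text = await llm.generateResponse([
    { role: "user", content: "Say hello in one sentence." },
  ]);

  // Structured path: beta.chat.completions.parse runs and response.parsed
  // (the schema-validated object) comes back instead of a string.
  const Facts = z.object({ facts: z.array(z.string()) });
  const parsed = await llm.generateResponse(
    [{ role: "user", content: "I live in Lisbon and I own a cat." }],
    zodResponseFormat(Facts, "facts"),
    undefined, // no tools
    true, // structuredOutput
  );
  console.log(text, parsed);
}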
package/src/llms/openai_structured.ts
@@ -0,0 +1,82 @@
+ import OpenAI from "openai";
+ import { LLM, LLMResponse } from "./base";
+ import { LLMConfig, Message } from "../types";
+
+ export class OpenAIStructuredLLM implements LLM {
+   private openai: OpenAI;
+   private model: string;
+
+   constructor(config: LLMConfig) {
+     this.openai = new OpenAI({ apiKey: config.apiKey });
+     this.model = config.model || "gpt-4-turbo-preview";
+   }
+
+   async generateResponse(
+     messages: Message[],
+     responseFormat?: { type: string } | null,
+     tools?: any[],
+   ): Promise<string | LLMResponse> {
+     const completion = await this.openai.chat.completions.create({
+       messages: messages.map((msg) => ({
+         role: msg.role as "system" | "user" | "assistant",
+         content:
+           typeof msg.content === "string"
+             ? msg.content
+             : JSON.stringify(msg.content),
+       })),
+       model: this.model,
+       ...(tools
+         ? {
+             tools: tools.map((tool) => ({
+               type: "function",
+               function: {
+                 name: tool.function.name,
+                 description: tool.function.description,
+                 parameters: tool.function.parameters,
+               },
+             })),
+             tool_choice: "auto" as const,
+           }
+         : responseFormat
+           ? {
+               response_format: {
+                 type: responseFormat.type as "text" | "json_object",
+               },
+             }
+           : {}),
+     });
+
+     const response = completion.choices[0].message;
+
+     if (response.tool_calls) {
+       return {
+         content: response.content || "",
+         role: response.role,
+         toolCalls: response.tool_calls.map((call) => ({
+           name: call.function.name,
+           arguments: call.function.arguments,
+         })),
+       };
+     }
+
+     return response.content || "";
+   }
+
+   async generateChat(messages: Message[]): Promise<LLMResponse> {
+     const completion = await this.openai.chat.completions.create({
+       messages: messages.map((msg) => ({
+         role: msg.role as "system" | "user" | "assistant",
+         content:
+           typeof msg.content === "string"
+             ? msg.content
+             : JSON.stringify(msg.content),
+       })),
+       model: this.model,
+     });
+     const response = completion.choices[0].message;
+     return {
+       content: response.content || "",
+       role: response.role,
+     };
+   }
+ }
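
Finally, a sketch of exercising the tool-calling branch of OpenAIStructuredLLM; the save_memory tool is hypothetical, but it matches the { function: { name, description, parameters } } shape that generateResponse re-wraps with type: "function":

import { OpenAIStructuredLLM } from "./openai_structured";

async function demo() {
  const llm = new OpenAIStructuredLLM({ apiKey: process.env.OPENAI_API_KEY! });

  // Hypothetical tool definition in the shape generateResponse expects.
  const tools = [
    {
      function: {
        name: "save_memory",
        description: "Persist a user fact for later recall",
        parameters: {
          type: "object",
          properties: { fact: { type: "string" } },
          required: ["fact"],
        },
      },
    },
  ];

  const result = await llm.generateResponse(
    [{ role: "user", content: "Remember that I am vegetarian." }],
    null,
    tools,
  );
  // If the model called a tool, result.toolCalls carries the function name
  // and its raw JSON argument string; otherwise result is plain text.
  console.log(result);
}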