@ai.ntellect/core 0.5.0 → 0.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (131)
  1. package/.mocharc.json +1 -1
  2. package/README.md +311 -272
  3. package/create-llm-to-select-multiple-graph copy.ts +243 -0
  4. package/create-llm-to-select-multiple-graph.ts +148 -0
  5. package/dist/graph/controller.js +63 -0
  6. package/dist/graph/engine.js +563 -0
  7. package/dist/index.js +6 -6
  8. package/dist/memory/adapters/meilisearch/index.js +249 -0
  9. package/dist/memory/adapters/redis/index.js +96 -0
  10. package/dist/memory/index.js +9 -0
  11. package/dist/services/agenda.js +115 -0
  12. package/dist/services/embedding.js +40 -0
  13. package/dist/services/queue.js +99 -103
  14. package/dist/test/graph/controller.test.js +170 -0
  15. package/dist/test/graph/engine.test.js +465 -0
  16. package/dist/test/memory/adapters/meilisearch.test.js +250 -0
  17. package/dist/test/memory/adapters/redis.test.js +143 -0
  18. package/dist/test/memory/base.test.js +209 -0
  19. package/dist/test/services/agenda.test.js +230 -0
  20. package/dist/test/services/queue.test.js +258 -0
  21. package/dist/types/index.js +2 -0
  22. package/dist/utils/generate-object.js +32 -11
  23. package/dist/utils/inject-actions.js +2 -2
  24. package/dist/utils/queue-item-transformer.js +2 -2
  25. package/dist/utils/state-manager.js +20 -0
  26. package/graph/controller.ts +64 -0
  27. package/graph/engine.ts +790 -0
  28. package/index copy.ts +81 -0
  29. package/index.ts +7 -7
  30. package/interfaces/index.ts +119 -0
  31. package/memory/adapters/meilisearch/index.ts +286 -0
  32. package/memory/adapters/redis/index.ts +103 -0
  33. package/memory/index.ts +22 -0
  34. package/package.json +7 -2
  35. package/services/agenda.ts +48 -43
  36. package/services/embedding.ts +26 -0
  37. package/services/queue.ts +2 -29
  38. package/test/.env.test +4 -0
  39. package/test/graph/controller.test.ts +186 -0
  40. package/test/graph/engine.test.ts +546 -0
  41. package/test/memory/adapters/meilisearch.test.ts +297 -0
  42. package/test/memory/adapters/redis.test.ts +160 -0
  43. package/test/memory/base.test.ts +229 -0
  44. package/test/services/agenda.test.ts +280 -0
  45. package/test/services/queue.test.ts +286 -44
  46. package/tsconfig.json +10 -10
  47. package/types/index.ts +278 -0
  48. package/utils/queue-item-transformer.ts +8 -11
  49. package/utils/setup-graphs.ts +45 -0
  50. package/utils/stringifiy-zod-schema.ts +45 -0
  51. package/.nvmrc +0 -1
  52. package/README.FR.md +0 -916
  53. package/agent/index.ts +0 -151
  54. package/agent/workflow/conditions.ts +0 -16
  55. package/agent/workflow/handlers/interpreter.handler.ts +0 -48
  56. package/agent/workflow/handlers/memory.handler.ts +0 -106
  57. package/agent/workflow/handlers/orchestrator.handler.ts +0 -23
  58. package/agent/workflow/handlers/queue.handler.ts +0 -34
  59. package/agent/workflow/handlers/scheduler.handler.ts +0 -61
  60. package/agent/workflow/index.ts +0 -62
  61. package/dist/agent/index.d.ts +0 -38
  62. package/dist/agent/index.js +0 -143
  63. package/dist/agent/tools/get-rss.d.ts +0 -16
  64. package/dist/agent/tools/get-rss.js +0 -62
  65. package/dist/bull.d.ts +0 -1
  66. package/dist/bull.js +0 -9
  67. package/dist/examples/index.d.ts +0 -2
  68. package/dist/examples/index.js +0 -89
  69. package/dist/index.d.ts +0 -7
  70. package/dist/llm/interpreter/context.d.ts +0 -15
  71. package/dist/llm/interpreter/context.js +0 -89
  72. package/dist/llm/interpreter/index.d.ts +0 -21
  73. package/dist/llm/interpreter/index.js +0 -87
  74. package/dist/llm/memory-manager/context.d.ts +0 -2
  75. package/dist/llm/memory-manager/context.js +0 -22
  76. package/dist/llm/memory-manager/index.d.ts +0 -17
  77. package/dist/llm/memory-manager/index.js +0 -107
  78. package/dist/llm/orchestrator/context.d.ts +0 -2
  79. package/dist/llm/orchestrator/context.js +0 -23
  80. package/dist/llm/orchestrator/index.d.ts +0 -44
  81. package/dist/llm/orchestrator/index.js +0 -139
  82. package/dist/llm/orchestrator/types.d.ts +0 -12
  83. package/dist/memory/cache.d.ts +0 -22
  84. package/dist/memory/cache.js +0 -165
  85. package/dist/memory/persistent.d.ts +0 -57
  86. package/dist/memory/persistent.js +0 -189
  87. package/dist/services/queue.d.ts +0 -13
  88. package/dist/services/redis-cache.d.ts +0 -37
  89. package/dist/services/redis-cache.js +0 -93
  90. package/dist/services/scheduler.d.ts +0 -40
  91. package/dist/services/scheduler.js +0 -99
  92. package/dist/services/telegram-monitor.d.ts +0 -0
  93. package/dist/services/telegram-monitor.js +0 -118
  94. package/dist/t.d.ts +0 -46
  95. package/dist/t.js +0 -102
  96. package/dist/test.d.ts +0 -0
  97. package/dist/test.js +0 -438
  98. package/dist/types.d.ts +0 -258
  99. package/dist/types.js +0 -22
  100. package/dist/utils/generate-object.d.ts +0 -12
  101. package/dist/utils/header-builder.d.ts +0 -11
  102. package/dist/utils/inject-actions.d.ts +0 -2
  103. package/dist/utils/queue-item-transformer.d.ts +0 -7
  104. package/dist/utils/sanitize-results.d.ts +0 -17
  105. package/dist/utils/schema-generator.d.ts +0 -16
  106. package/examples/actions/get-rss.ts +0 -71
  107. package/examples/index.ts +0 -98
  108. package/index.html +0 -42
  109. package/llm/dynamic-condition/example.ts +0 -36
  110. package/llm/dynamic-condition/index.ts +0 -108
  111. package/llm/interpreter/context.ts +0 -94
  112. package/llm/interpreter/index.ts +0 -140
  113. package/llm/memory-manager/context.ts +0 -19
  114. package/llm/memory-manager/index.ts +0 -115
  115. package/llm/orchestrator/context.ts +0 -19
  116. package/llm/orchestrator/index.ts +0 -192
  117. package/llm/orchestrator/types.ts +0 -14
  118. package/memory/cache.ts +0 -221
  119. package/memory/persistent.ts +0 -265
  120. package/script.js +0 -167
  121. package/services/cache.ts +0 -298
  122. package/services/telegram-monitor.ts +0 -138
  123. package/services/workflow.ts +0 -491
  124. package/t.py +0 -79
  125. package/t.ts +0 -25
  126. package/test/llm/orchestrator.test.ts +0 -47
  127. package/test/llm/synthesizer.test.ts +0 -31
  128. package/types.ts +0 -367
  129. package/utils/schema-generator.ts +0 -73
  130. package/utils/state-manager.ts +0 -25
  131. package/dist/{llm/orchestrator/types.js → interfaces/index.js} +0 -0
package/llm/memory-manager/context.ts DELETED
@@ -1,19 +0,0 @@
- import { Character } from "../interpreter/context";
-
- export const memoryManagerInstructions: Character = {
-   role: "You are the memory curator. Your role is to extract relevant memories from interactions.",
-   language: "same_as_request",
-   guidelines: {
-     important: [
-       "Generate query for requested data as the user could ask for it later (Eg: 'What is the price of Bitcoin today?')s",
-       "Short-term memories need to be necessary and reusable",
-       "Only store as long-term: User information, User preferences, Important facts that don't change often, Historical milestones",
-       "Set appropriate TTL based on data volatility",
-     ],
-     warnings: [
-       "Never store data that is not provided by the results",
-       "Never store data that is not relevant to the user request",
-     ],
-   },
-   examplesMessages: [],
- };
package/llm/memory-manager/index.ts DELETED
@@ -1,115 +0,0 @@
- import { LanguageModelV1 } from "ai";
- import { z } from "zod";
- import { CacheMemory } from "../../memory/cache";
- import { PersistentMemory } from "../../memory/persistent";
- import { MyContext, SharedState } from "../../types";
- import { generateObject } from "../../utils/generate-object";
- import { LLMHeaderBuilder } from "../../utils/header-builder";
- import { memoryManagerInstructions } from "./context";
-
- interface MemoryResponse {
-   memories: Array<{
-     data: string;
-     type: "short-term" | "long-term";
-     category:
-       | "user_information"
-       | "user_preference"
-       | "task"
-       | "current_goal"
-       | "news"
-       | "fact"
-       | "other";
-     queryForMemory: string;
-     tags: string[];
-     ttl: number;
-   }>;
- }
- export class MemoryManager {
-   private readonly model: LanguageModelV1;
-   public readonly memory?: {
-     cache?: CacheMemory;
-     persistent?: PersistentMemory;
-   };
-
-   constructor(config: {
-     model: LanguageModelV1;
-     memory?: {
-       cache?: CacheMemory;
-       persistent?: PersistentMemory;
-     };
-   }) {
-     this.model = config.model;
-     this.memory = config.memory;
-   }
-
-   buildContext() {
-     const context = LLMHeaderBuilder.create()
-       .addHeader("ROLE", memoryManagerInstructions.role)
-       .addHeader("LANGUAGE", memoryManagerInstructions.language)
-       .addHeader("IMPORTANT", memoryManagerInstructions.guidelines.important)
-       .addHeader("WARNINGS", memoryManagerInstructions.guidelines.warnings);
-     return context.toString();
-   }
-
-   async process(
-     state: SharedState<MyContext>,
-     callbacks?: {
-       onMemoriesGenerated?: (event: any) => void;
-     }
-   ) {
-     const context = this.buildContext();
-     let prompt = LLMHeaderBuilder.create();
-     if (state.messages) {
-       prompt.addHeader(
-         "REQUEST",
-         state.messages[state.messages.length - 2].content.toString()
-       );
-     }
-     if (state.messages && state.messages.length > 0) {
-       prompt.addHeader("RECENT_MESSAGES", JSON.stringify(state.messages));
-     }
-
-     if (state.context.actions) {
-       prompt.addHeader(
-         "PREVIOUS_ACTIONS",
-         JSON.stringify(state.context.actions)
-       );
-     }
-
-     const memories = await generateObject<MemoryResponse>({
-       model: this.model,
-       schema: z.object({
-         memories: z.array(
-           z.object({
-             data: z.string(),
-             type: z.enum(["short-term", "long-term"]),
-             category: z.enum([
-               "user_information",
-               "user_preference",
-               "task",
-               "current_goal",
-               "news",
-               "fact",
-               "other",
-             ]),
-             queryForData: z.string(),
-             tags: z.array(z.string()),
-             ttl: z.number(),
-           })
-         ),
-       }),
-       system: context.toString(),
-       temperature: 1,
-       prompt: prompt.toString(),
-     });
-
-     if (!this.memory) {
-       return;
-     }
-
-     if (callbacks?.onMemoriesGenerated)
-       callbacks.onMemoriesGenerated(memories.object);
-
-     return memories.object;
-   }
- }
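For context on the removed 0.5.0 API, here is a minimal driver sketch based only on the constructor and `process()` signature shown above. The import path, model id, message contents, and the `as any` cast over the only partially visible `SharedState<MyContext>` shape are illustrative assumptions, not part of the diff.

```ts
import { openai } from "@ai-sdk/openai";
import { MemoryManager } from "./llm/memory-manager"; // assumed path within the 0.5.0 package

void (async () => {
  const manager = new MemoryManager({
    model: openai("gpt-4o-mini"), // any LanguageModelV1 would do here
  });

  // Only the fields process() actually reads (messages, context.actions) are filled in;
  // the full SharedState<MyContext> type lived in the removed types.ts.
  const result = await manager.process(
    {
      messages: [
        { role: "user", content: "What is the price of Bitcoin today?" },
        { role: "assistant", content: "Bitcoin is trading around $42,000." },
      ],
      context: { actions: [] },
    } as any,
    { onMemoriesGenerated: (event) => console.log(event.memories) }
  );

  // Per the guard in process(), the callback fires and memories are returned only
  // when a memory backend (cache/persistent) is configured; here result is undefined.
  console.log(result);
})();
```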
package/llm/orchestrator/context.ts DELETED
@@ -1,19 +0,0 @@
- import { Character } from "../interpreter/context";
-
- export const orchestratorInstructions: Character = {
-   role: "Your role is to evaluate the current state and determine next actions.",
-   language: "same_as_request",
-   guidelines: {
-     important: [
-       "If no actions are needed, just answer",
-       "If required, you can schedule actions in cron expression to be executed later",
-       "If required, you choose one interpreter to interpret the results when you have a complete picture of the goal",
-     ],
-     warnings: [
-       "Never use a tool if it's not related to the user request",
-       "Never schedule actions that are not related to the user request",
-       "Never repeat the same action if it's not required to achieve the goal",
-       "Never repeat scheduled actions if not required to achieve the goal",
-     ],
-   },
- };
package/llm/orchestrator/index.ts DELETED
@@ -1,192 +0,0 @@
- import { generateObject, LanguageModelV1 } from "ai";
- import { z } from "zod";
- import { CacheMemory } from "../../memory/cache";
- import { PersistentMemory } from "../../memory/persistent";
- import { ActionSchema, MemoryScope, MyContext, SharedState } from "../../types";
- import { LLMHeaderBuilder } from "../../utils/header-builder";
- import { injectActions } from "../../utils/inject-actions";
- import { Interpreter } from "../interpreter";
- import { orchestratorInstructions } from "./context";
-
- export class Orchestrator {
-   private readonly model: LanguageModelV1;
-   private readonly tools: ActionSchema[];
-   private readonly interpreters: Interpreter[];
-   private memory?: {
-     persistent?: PersistentMemory;
-     cache?: CacheMemory;
-   };
-
-   constructor(
-     model: LanguageModelV1,
-     tools: ActionSchema[],
-     interpreters: Interpreter[],
-     memory?: {
-       persistent?: PersistentMemory;
-       cache?: CacheMemory;
-     }
-   ) {
-     this.model = model;
-     this.tools = tools;
-     this.interpreters = interpreters;
-     this.memory = memory;
-   }
-
-   private async buildContext(state: SharedState<MyContext>): Promise<string> {
-     console.log("🧠 Building context with RAG and CAG...");
-     const context = LLMHeaderBuilder.create();
-
-     // Add orchestrator instructions
-     context.addHeader("ROLE", orchestratorInstructions.role);
-     context.addHeader("LANGUAGE", orchestratorInstructions.language);
-     context.addHeader(
-       "IMPORTANT",
-       orchestratorInstructions.guidelines.important
-     );
-     context.addHeader("WARNINGS", orchestratorInstructions.guidelines.warnings);
-     // Add tools to context
-     context.addHeader("TOOLS", injectActions(this.tools));
-
-     // Get recent similar actions (CAG)
-     if (this.memory?.cache && state.messages) {
-       const cacheMemories = await this.memory.cache.findSimilarActions(
-         state.messages[state.messages.length - 1].content.toString(),
-         {
-           similarityThreshold: 80,
-           maxResults: 3,
-           scope: MemoryScope.GLOBAL,
-         }
-       );
-
-       if (cacheMemories.length > 0) {
-         context.addHeader("RECENT_ACTIONS", JSON.stringify(cacheMemories));
-       }
-     }
-
-     // Get relevant knowledge (RAG)
-     if (this.memory?.persistent && state.messages) {
-       const persistentMemory =
-         await this.memory.persistent.findRelevantDocuments(
-           state.messages[state.messages.length - 1].content.toString(),
-           {
-             similarityThreshold: 80,
-           }
-         );
-
-       if (persistentMemory.length > 0) {
-         context.addHeader(
-           "RELEVANT_KNOWLEDGE",
-           JSON.stringify(persistentMemory)
-         );
-       }
-     }
-
-     // Add available interpreters
-     context.addHeader(
-       "INTERPRETERS (choose one)",
-       JSON.stringify(this.interpreters.map((i) => i.name))
-         .replace("[", "")
-         .replace("]", "")
-     );
-     return context.toString();
-   }
-
-   async process(
-     state: SharedState<MyContext>,
-     callbacks?: {
-       onStart?: () => void;
-       onFinish?: (event: any) => void;
-     }
-   ): Promise<{
-     processing: {
-       stop: boolean;
-       stopReason?: string;
-     };
-     actions: Array<{
-       name: string;
-       parameters: Array<{
-         name: string;
-         value: any;
-       }>;
-       scheduler?: {
-         isScheduled: boolean;
-         cronExpression?: string;
-         reason?: string;
-       };
-     }>;
-     response: string;
-     interpreter?: string | null;
-     results?: string;
-   }> {
-     if (callbacks?.onStart) callbacks.onStart();
-
-     const context = await this.buildContext(state);
-     let prompt = LLMHeaderBuilder.create();
-     if (state.messages) {
-       prompt.addHeader(
-         "REQUEST",
-         state.messages[state.messages.length - 1].content.toString()
-       );
-
-       if (state.messages.length > 1) {
-         prompt.addHeader("RECENT_MESSAGES", JSON.stringify(state.messages));
-       }
-     }
-     if (state.context.results) {
-       prompt.addHeader("ACTIONS_DONE", JSON.stringify(state.context.results));
-     }
-
-     console.log("\n🧠 Generating response from Orchestrator...");
-     const response = await generateObject({
-       model: this.model,
-       schema: z.object({
-         processing: z.object({
-           stop: z.boolean(),
-           reason: z.string(),
-         }),
-         actions: z.array(
-           z.object({
-             name: z.string(),
-             parameters: z.array(
-               z.object({
-                 name: z.string(),
-                 value: z.any(),
-               })
-             ),
-             scheduler: z.object({
-               isScheduled: z.boolean(),
-               cronExpression: z.string(),
-               reason: z.string(),
-             }),
-           })
-         ),
-         response: z.string(),
-         interpreter: z.string().or(z.null()),
-       }),
-       system: context.toString(),
-       temperature: 0,
-       prompt: prompt.toString(),
-     });
-     console.log("🔄 Orchestrator response:");
-     console.dir(response.object, { depth: null });
-
-     // Force shouldContinue to false if no actions are planned
-     if (response.object.actions.length === 0) {
-       response.object.processing.stop = true;
-       console.log("⚠️ No actions planned, forcing isProcessing to false");
-     }
-
-     // Handle social interactions and actions in a single block
-     if (response.object.response) {
-       console.log("\n💬 Processing social response");
-       if (response.object.response) {
-         console.log("📢 Response:", response.object.response);
-         // Ensure all parameters have a value property
-       }
-     }
-
-     if (callbacks?.onFinish) callbacks.onFinish(response.object);
-
-     return response.object as any;
-   }
- }
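Likewise, a hedged sketch of driving the removed Orchestrator, inferred only from the constructor and `process()` signature above; the import path, model id, empty tools/interpreters arrays, and the `as any` state cast are assumptions for illustration.

```ts
import { openai } from "@ai-sdk/openai";
import { Orchestrator } from "./llm/orchestrator"; // assumed path within the 0.5.0 package

void (async () => {
  // With no tools, interpreters, or memory wired in, buildContext() skips the
  // CAG/RAG branches and the orchestrator can only answer directly.
  const orchestrator = new Orchestrator(openai("gpt-4o-mini"), [], []);

  const result = await orchestrator.process(
    {
      messages: [{ role: "user", content: "What is the price of Bitcoin?" }],
      context: {}, // results/actions would normally be filled in by earlier steps
    } as any, // SharedState<MyContext> is only partially visible in this diff
    {
      onStart: () => console.log("orchestrating..."),
      onFinish: (event) => console.dir(event, { depth: null }),
    }
  );

  console.log(result.response, result.actions, result.interpreter);
})();
```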
package/llm/orchestrator/types.ts DELETED
@@ -1,14 +0,0 @@
- import { CoreMessage } from "ai";
- import { QueueResult } from "../../types";
-
- export interface State {
-   currentContext: string;
-   previousActions: (string | QueueResult)[];
-   results?: string;
-   recentMessages: CoreMessage[];
- }
-
- export interface Action {
-   name: string;
-   parameters: Record<string, any>;
- }
package/memory/cache.ts DELETED
@@ -1,221 +0,0 @@
- import { openai } from "@ai-sdk/openai";
- import { cosineSimilarity, embed, EmbeddingModel } from "ai";
- import { createClient } from "redis";
- import {
-   CacheMemoryOptions,
-   CacheMemoryType,
-   CreateMemoryInput,
-   MatchOptions,
-   MemoryScope,
- } from "../types";
-
- export class CacheMemory {
-   private redis;
-   private readonly CACHE_PREFIX: string;
-   private readonly CACHE_TTL: number;
-   private readonly embeddingModel: EmbeddingModel<string>;
-
-   constructor(options: CacheMemoryOptions) {
-     this.embeddingModel = options.embeddingModel;
-     const ttlInHours = options.cacheTTL ?? 1;
-     this.CACHE_TTL = ttlInHours * 60 * 60;
-     this.CACHE_PREFIX = options.cachePrefix ?? "memory:";
-
-     this.redis = createClient({
-       url: options.redisUrl || process.env.REDIS_URL,
-       socket: {
-         tls: true,
-         rejectUnauthorized: true,
-       },
-     });
-     this.initRedis();
-   }
-
-   private async initRedis() {
-     this.redis.on("error", (err) => {
-       console.error("❌ Redis Client Error:", err);
-     });
-
-     try {
-       await this.redis.connect();
-       console.log("✅ Successfully connected to Redis");
-     } catch (error) {
-       console.error("❌ Failed to connect to Redis:", error);
-     }
-   }
-
-   private async storeMemory(memory: CacheMemoryType, ttl?: number) {
-     const prefix = this.CACHE_PREFIX;
-     const key = `${prefix}${memory.id}`;
-     const result = await this.redis.set(key, JSON.stringify(memory), {
-       EX: ttl || this.CACHE_TTL,
-     });
-     console.log("💾 Cache memory created:", result);
-   }
-
-   async findSimilarActions(
-     query: string,
-     options: MatchOptions & { userId?: string; scope?: MemoryScope } = {}
-   ): Promise<
-     {
-       data: any;
-       query: string;
-       createdAt: Date;
-     }[]
-   > {
-     console.log("\n🔍 Searching in cache");
-     console.log("Query:", query);
-     console.log("Options:", JSON.stringify(options, null, 2));
-
-     const { embedding } = await embed({
-       model: openai.embedding("text-embedding-3-small"),
-       value: query,
-     });
-
-     const memories = await this.getAllMemories();
-     console.log(`\n📚 Found ${memories.length} cached queries to compare`);
-
-     const matches = memories
-       .map((memory) => {
-         const similarity = cosineSimilarity(embedding, memory.embedding);
-         const similarityPercentage = (similarity + 1) * 50;
-         return {
-           data: memory.data,
-           query: memory.query,
-           similarityPercentage,
-           createdAt: memory.createdAt,
-         };
-       })
-       .filter(
-         (match) =>
-           match.similarityPercentage >= (options.similarityThreshold ?? 70)
-       )
-       .sort((a, b) => b.similarityPercentage - a.similarityPercentage);
-
-     const results = options.maxResults
-       ? matches.slice(0, options.maxResults)
-       : matches;
-
-     if (results.length > 0) {
-       console.log("\n✨ Similar queries found:");
-       console.log("─".repeat(50));
-
-       results.forEach((match, index) => {
-         console.log(`\n${index + 1}. Match Details:`);
-         console.log(` Query: ${match.query}`);
-         console.log(` Data: ${JSON.stringify(match.data)}`);
-         console.log(` Similarity: ${match.similarityPercentage.toFixed(2)}%`);
-         console.log("─".repeat(50));
-       });
-     } else {
-       console.log("\n❌ No similar queries found in cache");
-     }
-
-     return results.map((match) => {
-       return {
-         data: match.data,
-         query: match.query,
-         createdAt: match.createdAt,
-       };
-     });
-   }
-
-   async getAllMemories(): Promise<CacheMemoryType[]> {
-     const keys = await this.redis.keys(`${this.CACHE_PREFIX}*`);
-     const memories = await this.getMemoriesFromKeys(keys);
-
-     return memories;
-   }
-
-   private async getMemoriesFromKeys(
-     keys: string[]
-   ): Promise<CacheMemoryType[]> {
-     const memories: CacheMemoryType[] = [];
-     for (const key of keys) {
-       const data = await this.redis.get(key);
-       if (data) {
-         memories.push(JSON.parse(data));
-       }
-     }
-     return memories;
-   }
-
-   public async createMemory(
-     input: CreateMemoryInput
-   ): Promise<CacheMemoryType | undefined> {
-     console.log("\n📝 Processing new memory creation");
-     console.log("Content:", input.query);
-     console.log("TTL:", input.ttl ? `${input.ttl} seconds` : "default");
-
-     const existingPattern = await this.findSimilarActions(input.query, {
-       similarityThreshold: 95,
-       userId: input.userId,
-       scope: input.scope,
-     });
-
-     if (existingPattern.length > 0) {
-       console.log("\n🔄 Similar cache memory already exists");
-       console.log("─".repeat(50));
-       existingPattern.forEach((match, index) => {
-         console.log(`\n${index + 1}. Existing Match:`);
-         console.log(` Query: ${match.query}`);
-         console.log(` Data: ${JSON.stringify(match.data)}`);
-         console.log(` Created At: ${match.createdAt}`);
-       });
-       console.log("\n⏭️ Skipping creation of new memory");
-       return;
-     }
-
-     console.log("\n🆕 No similar memory found - creating new one");
-
-     const memory = await this.createSingleMemory({
-       id: crypto.randomUUID(),
-       query: input.query,
-       data: input.data,
-       userId: input.userId,
-       scope: input.scope,
-       ttl: input.ttl,
-     });
-
-     return memory;
-   }
-
-   private async createSingleMemory(params: {
-     id: string;
-     query: string;
-     data: any;
-     userId?: string;
-     scope?: MemoryScope;
-     ttl?: number;
-   }): Promise<CacheMemoryType> {
-     console.log("\n🏗️ Creating new cache memory");
-     console.log("ID:", params.id);
-     console.log("Content:", params.query);
-
-     console.log("\n🧮 Generating embedding...");
-     const { embedding } = await embed({
-       model: this.embeddingModel,
-       value: params.query,
-     });
-     console.log("✅ Embedding generated successfully");
-
-     const memory: CacheMemoryType = {
-       id: params.id,
-       data: params.data,
-       query: params.query,
-       embedding,
-       userId: params.userId,
-       scope:
-         params.scope || (params.userId ? MemoryScope.USER : MemoryScope.GLOBAL),
-       createdAt: new Date(),
-     };
-
-     await this.storeMemory(memory, params.ttl);
-     console.log("✅ Short-term memory created and stored successfully", {
-       ...memory,
-       ttl: params.ttl || this.CACHE_TTL,
-     });
-
-     return memory;
-   }
- }
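Finally, a minimal usage sketch for the removed CacheMemory, assuming a reachable Redis instance (the constructor above forces a TLS socket) and the same text-embedding-3-small model the class itself uses for queries. The import path and the `as any` casts are assumptions, since CacheMemoryOptions and CreateMemoryInput were defined in the removed types.ts.

```ts
import { openai } from "@ai-sdk/openai";
import { CacheMemory } from "./memory/cache"; // assumed path within the 0.5.0 package

void (async () => {
  const cache = new CacheMemory({
    embeddingModel: openai.embedding("text-embedding-3-small"),
    cacheTTL: 2, // hours; converted to seconds by the constructor above
    cachePrefix: "memory:",
    redisUrl: process.env.REDIS_URL,
  } as any); // CacheMemoryOptions lived in the removed types.ts

  // Stored under `memory:<uuid>` with a Redis EX of 3600 seconds, unless a
  // >=95% similar query is already cached (then creation is skipped).
  await cache.createMemory({
    query: "What is the price of Bitcoin today?",
    data: { price: 42000, currency: "USD" },
    ttl: 3600,
  } as any);

  // Embeds the query, compares it against every cached entry with cosine
  // similarity, and keeps matches at or above the 80% threshold.
  const similar = await cache.findSimilarActions("current BTC price", {
    similarityThreshold: 80,
    maxResults: 3,
  });
  console.log(similar);
})();
```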