@ai.ntellect/core 0.7.14 → 0.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. package/dist/graph/event-manager.d.ts +9 -1
  2. package/dist/graph/event-manager.d.ts.map +1 -1
  3. package/dist/graph/event-manager.js +115 -37
  4. package/dist/graph/event-manager.js.map +1 -1
  5. package/dist/graph/index.d.ts +20 -1
  6. package/dist/graph/index.d.ts.map +1 -1
  7. package/dist/graph/index.js +59 -10
  8. package/dist/graph/index.js.map +1 -1
  9. package/dist/graph/node.d.ts +0 -24
  10. package/dist/graph/node.d.ts.map +1 -1
  11. package/dist/graph/node.js +2 -71
  12. package/dist/graph/node.js.map +1 -1
  13. package/dist/graph/observer.d.ts +6 -87
  14. package/dist/graph/observer.d.ts.map +1 -1
  15. package/dist/graph/observer.js +3 -116
  16. package/dist/graph/observer.js.map +1 -1
  17. package/dist/index.d.ts +1 -6
  18. package/dist/index.d.ts.map +1 -1
  19. package/dist/index.js +1 -6
  20. package/dist/index.js.map +1 -1
  21. package/dist/interfaces/index.d.ts +10 -1
  22. package/dist/interfaces/index.d.ts.map +1 -1
  23. package/dist/interfaces/index.js.map +1 -1
  24. package/dist/modules/agent/agent.d.ts +35 -0
  25. package/dist/modules/agent/agent.d.ts.map +1 -0
  26. package/dist/modules/agent/agent.js +106 -0
  27. package/dist/modules/agent/agent.js.map +1 -0
  28. package/dist/modules/agent/base/executor.d.ts +51 -0
  29. package/dist/modules/agent/base/executor.d.ts.map +1 -0
  30. package/dist/modules/agent/base/executor.js +66 -0
  31. package/dist/modules/agent/base/executor.js.map +1 -0
  32. package/dist/modules/agent/base/index.d.ts +30 -0
  33. package/dist/modules/agent/base/index.d.ts.map +1 -0
  34. package/dist/modules/agent/base/index.js +100 -0
  35. package/dist/modules/agent/base/index.js.map +1 -0
  36. package/dist/modules/agent/generic-assistant.d.ts +20 -0
  37. package/dist/modules/agent/generic-assistant.d.ts.map +1 -0
  38. package/dist/modules/agent/generic-assistant.js +89 -0
  39. package/dist/modules/agent/generic-assistant.js.map +1 -0
  40. package/dist/modules/agent/generic-executor.d.ts +57 -0
  41. package/dist/modules/agent/generic-executor.d.ts.map +1 -0
  42. package/dist/modules/agent/generic-executor.js +200 -0
  43. package/dist/modules/agent/generic-executor.js.map +1 -0
  44. package/dist/modules/agent/llm-factory.d.ts +19 -0
  45. package/dist/modules/agent/llm-factory.d.ts.map +1 -0
  46. package/dist/modules/agent/llm-factory.js +56 -0
  47. package/dist/modules/agent/llm-factory.js.map +1 -0
  48. package/dist/modules/agent/prompt-builder.d.ts +35 -0
  49. package/dist/modules/agent/prompt-builder.d.ts.map +1 -0
  50. package/dist/modules/agent/prompt-builder.js +76 -0
  51. package/dist/modules/agent/prompt-builder.js.map +1 -0
  52. package/dist/modules/memory/adapters/in-memory/index.d.ts.map +1 -1
  53. package/dist/modules/memory/adapters/in-memory/index.js +4 -3
  54. package/dist/modules/memory/adapters/in-memory/index.js.map +1 -1
  55. package/dist/modules/memory/adapters/meilisearch/index.d.ts.map +1 -1
  56. package/dist/modules/memory/adapters/meilisearch/index.js +7 -4
  57. package/dist/modules/memory/adapters/meilisearch/index.js.map +1 -1
  58. package/dist/modules/memory/adapters/redis/index.d.ts.map +1 -1
  59. package/dist/modules/memory/adapters/redis/index.js +2 -1
  60. package/dist/modules/memory/adapters/redis/index.js.map +1 -1
  61. package/dist/modules/nlp/engine.d.ts +126 -0
  62. package/dist/modules/nlp/engine.d.ts.map +1 -0
  63. package/dist/modules/nlp/engine.js +300 -0
  64. package/dist/modules/nlp/engine.js.map +1 -0
  65. package/dist/modules/nlp/index.d.ts +27 -0
  66. package/dist/modules/nlp/index.d.ts.map +1 -0
  67. package/dist/modules/nlp/index.js +56 -0
  68. package/dist/modules/nlp/index.js.map +1 -0
  69. package/dist/types/agent.d.ts +233 -0
  70. package/dist/types/agent.d.ts.map +1 -0
  71. package/dist/types/agent.js +29 -0
  72. package/dist/types/agent.js.map +1 -0
  73. package/dist/types/index.d.ts +72 -18
  74. package/dist/types/index.d.ts.map +1 -1
  75. package/dist/utils/generate-action-schema.d.ts.map +1 -1
  76. package/graph/event-manager.ts +135 -42
  77. package/graph/index.ts +57 -4
  78. package/graph/node.ts +2 -104
  79. package/graph/observer.ts +9 -215
  80. package/index.ts +2 -7
  81. package/interfaces/index.ts +12 -0
  82. package/modules/agent/agent.ts +108 -0
  83. package/modules/agent/base/executor.ts +100 -0
  84. package/modules/agent/base/index.ts +99 -0
  85. package/modules/agent/generic-assistant.ts +90 -0
  86. package/modules/agent/generic-executor.ts +259 -0
  87. package/modules/agent/llm-factory.ts +47 -0
  88. package/modules/agent/prompt-builder.ts +78 -0
  89. package/modules/memory/adapters/in-memory/index.ts +4 -3
  90. package/modules/memory/adapters/meilisearch/index.ts +7 -4
  91. package/modules/memory/adapters/redis/index.ts +2 -1
  92. package/modules/nlp/engine.ts +325 -0
  93. package/modules/nlp/index.ts +45 -0
  94. package/package.json +5 -2
  95. package/test/graph/index.test.ts +6 -4
  96. package/test/graph/node.test.ts +18 -14
  97. package/types/agent.ts +174 -0
  98. package/types/index.ts +74 -18
@@ -0,0 +1,90 @@
1
+ import { GraphFlow } from "../../graph/index";
2
+ import {
3
+ AgentConfig,
4
+ AgentContext,
5
+ AgentContextSchema,
6
+ } from "../../types/agent";
7
+ import { BaseAgent } from "./base";
8
+ import { GenericExecutor } from "./generic-executor";
9
+
10
+ /**
11
+ * A generic assistant that can be configured with different roles, goals, and personalities
12
+ * @class Agent
13
+ * @example
14
+ * const assistant = new Agent({
15
+ * role: "Email Assistant",
16
+ * goal: "Help users send emails efficiently",
17
+ * backstory: "I am a professional and friendly assistant who specializes in email communication",
18
+ * llmConfig: { provider: "openai", model: "gpt-4" }
19
+ * });
20
+ */
21
+ export class Agent {
22
+ private executor: GenericExecutor;
23
+ private workflow: GraphFlow<typeof AgentContextSchema>;
24
+
25
+ constructor(config: AgentConfig) {
26
+ const agent = new BaseAgent({
27
+ role: config.role,
28
+ goal: config.goal,
29
+ backstory: config.backstory,
30
+ tools: config.tools,
31
+ memory: config.memory,
32
+ llmConfig: config.llmConfig,
33
+ });
34
+
35
+ this.executor = new GenericExecutor(agent, config.tools, {
36
+ llmConfig: config.llmConfig,
37
+ verbose: config.verbose,
38
+ });
39
+
40
+ this.workflow = this.setupWorkflow();
41
+ }
42
+
43
+ private setupWorkflow(): GraphFlow<typeof AgentContextSchema> {
44
+ return new GraphFlow("assistant", {
45
+ name: "assistant",
46
+ schema: AgentContextSchema,
47
+ context: {
48
+ input: { raw: "" },
49
+ actions: [],
50
+ response: "",
51
+ executedActions: [],
52
+ },
53
+ nodes: [
54
+ {
55
+ name: "process",
56
+ execute: async (context) => {
57
+ const agentContext = context as unknown as AgentContext;
58
+ const decision = await this.executor.makeDecision(agentContext);
59
+ context.actions = decision.actions;
60
+ context.response = decision.response;
61
+ },
62
+ next: (context) => (context.actions.length > 0 ? ["execute"] : []),
63
+ },
64
+ {
65
+ name: "execute",
66
+ execute: async (context) => {
67
+ console.log(`Executing actions:`);
68
+ console.log(context.actions);
69
+
70
+ await this.executor.executeActions(
71
+ context.actions,
72
+ context as unknown as AgentContext
73
+ );
74
+ },
75
+ next: ["process"],
76
+ },
77
+ ],
78
+ });
79
+ }
80
+
81
+ public async process(input: string): Promise<AgentContext> {
82
+ await this.workflow.execute("process", undefined, {
83
+ input: { raw: input },
84
+ actions: [],
85
+ response: "",
86
+ });
87
+
88
+ return this.workflow.getContext() as unknown as AgentContext;
89
+ }
90
+ }
@@ -0,0 +1,259 @@
1
+ import chalk from "chalk";
2
+ import { z } from "zod";
3
+ import { GraphFlow } from "../../graph/index";
4
+ import {
5
+ ActionSchema,
6
+ AgentContext,
7
+ DecisionOutput,
8
+ ExecutorConfig,
9
+ } from "../../types/agent";
10
+ import { BaseAgent } from "./base";
11
+ import { AgentExecutor } from "./base/executor";
12
+ import { LLMFactory } from "./llm-factory";
13
+
14
+ /**
15
+ * Generic executor that handles the interaction between the agent and LLM
16
+ * Uses a structured prompt format:
17
+ * - ROLE: The function/job the agent performs
18
+ * - GOAL: The specific objective to achieve
19
+ * - BACKSTORY: The personality and behavior traits
20
+ * - CONTEXT: Current knowledge and state
21
+ * - AVAILABLE ACTIONS: What the agent can do
22
+ * - INSTRUCTIONS: How to process and respond
23
+ * @class GenericExecutor
24
+ * @extends {AgentExecutor}
25
+ */
26
+ export class GenericExecutor extends AgentExecutor {
27
+ private verbose: boolean;
28
+ private llm: ReturnType<typeof LLMFactory.createLLM>;
29
+
30
+ /**
31
+ * Creates an instance of GenericExecutor
32
+ * @param {BaseAgent} agent - The agent instance this executor is tied to
33
+ * @param {GraphFlow<any>[]} graphs - Array of available graph flows
34
+ * @param {ExecutorConfig} config - Configuration for the executor
35
+ */
36
+ constructor(
37
+ agent: BaseAgent,
38
+ graphs: GraphFlow<any>[],
39
+ config: ExecutorConfig
40
+ ) {
41
+ super(agent, graphs);
42
+ this.verbose = config.verbose ?? true;
43
+ this.llm = LLMFactory.createLLM(config.llmConfig);
44
+ }
45
+
46
+ /**
47
+ * Logs a message with a specific type if verbose mode is enabled
48
+ * @private
49
+ * @param {"info" | "success" | "warning" | "error" | "thinking"} type - The type of log message
50
+ * @param {string} message - The message to log
51
+ */
52
+ private log(
53
+ type: "info" | "success" | "warning" | "error" | "thinking",
54
+ message: string
55
+ ) {
56
+ if (!this.verbose) return;
57
+
58
+ const prefix = {
59
+ info: chalk.blue("ℹ"),
60
+ success: chalk.green("✓"),
61
+ warning: chalk.yellow("⚠"),
62
+ error: chalk.red("✖"),
63
+ thinking: chalk.magenta("🤔"),
64
+ }[type];
65
+
66
+ console.log(`${prefix} ${message}`);
67
+ }
68
+
69
+ /**
70
+ * Generates a string representation of the available action schemas
71
+ * @private
72
+ * @returns {string} Formatted string containing all available actions and their parameters
73
+ */
74
+ protected generateActionSchema(): string {
75
+ return Array.from(this.availableGraphs.values())
76
+ .map((graph) => {
77
+ const schema = graph.getSchema();
78
+ const schemaDescription = Object.entries(schema.shape)
79
+ .map(([key, value]) => {
80
+ const zodValue = value as z.ZodTypeAny;
81
+ return ` - ${key}: ${
82
+ zodValue.description || zodValue._def.typeName
83
+ }`;
84
+ })
85
+ .join("\n");
86
+
87
+ return `${graph.name}:
88
+ Parameters:
89
+ ${schemaDescription}
90
+ Available Operations:
91
+ ${graph
92
+ .getNodes()
93
+ .map((n) => ` - ${n.name}`)
94
+ .join("\n")}`;
95
+ })
96
+ .join("\n\n");
97
+ }
98
+
99
+ /**
100
+ * Makes a decision based on the current context using the LLM
101
+ * @param {AgentContext} context - The context to base the decision on
102
+ * @returns {Promise<DecisionOutput>} The decision output containing actions and response
103
+ */
104
+ async makeDecision(context: AgentContext): Promise<DecisionOutput> {
105
+ this.log(
106
+ "thinking",
107
+ chalk.dim("Analyzing context and available actions...")
108
+ );
109
+
110
+ const memories = await this.agent.recall(context.input.raw);
111
+ if (memories.length > 0) {
112
+ this.log("info", chalk.dim("Retrieved relevant memories:"));
113
+ memories.forEach((m) => this.log("info", chalk.dim(`- ${m.content}`)));
114
+
115
+ context.knowledge =
116
+ (context.knowledge || "") +
117
+ "\n" +
118
+ memories.map((m) => m.content).join("\n");
119
+ }
120
+
121
+ const systemPrompt = `
122
+ ## ROLE
123
+ ${this.agent.getRole()}
124
+
125
+ ## GOAL
126
+ ${this.agent.getGoal()}
127
+
128
+ ## BACKSTORY
129
+ ${this.agent.getBackstory()}
130
+
131
+ ## RECENT ACTIONS
132
+ ${context.knowledge ? `${context.knowledge}\n` : "None"}
133
+
134
+ ## AVAILABLE ACTIONS
135
+ ${this.generateActionSchema()}
136
+
137
+ ## INSTRUCTIONS
138
+ - Analyze the user input and what you have done (if no action is needed, just return an empty array)
139
+ - Choose appropriate actions based on their parameters
140
+ - Structure parameters according to the action's schema
141
+ - Look at the goal and the actions you have done, if you have achieved the goal, STOP
142
+ `;
143
+
144
+ this.log("info", chalk.dim("Generating response..."));
145
+
146
+ const result = await this.llm.generate(
147
+ {
148
+ system: systemPrompt,
149
+ user: `User input: ${context.input.raw}
150
+ Actions you have already done: ${
151
+ context.executedActions
152
+ ?.map((a) => `\n- ${a.name} => ${JSON.stringify(a.result)}`)
153
+ .join("") || "None"
154
+ }`,
155
+ },
156
+ z.object({
157
+ actions: z.array(
158
+ z.object({
159
+ name: z.string(),
160
+ parameters: z.array(
161
+ z.object({
162
+ name: z.string(),
163
+ value: z.any(),
164
+ })
165
+ ),
166
+ })
167
+ ),
168
+ response: z.string(),
169
+ })
170
+ );
171
+
172
+ if (result.object.actions.length > 0) {
173
+ this.log("success", chalk.green("Decided to take actions:"));
174
+ result.object.actions.forEach(
175
+ (action: {
176
+ name: string;
177
+ parameters: Array<{ name: string; value: any }>;
178
+ }) => {
179
+ this.log("info", chalk.cyan(`Action: ${action.name}`));
180
+ action.parameters.forEach((param: { name: string; value: any }) => {
181
+ this.log(
182
+ "info",
183
+ chalk.dim(` - ${param.name}: ${JSON.stringify(param.value)}`)
184
+ );
185
+ });
186
+ }
187
+ );
188
+ } else {
189
+ this.log("info", chalk.yellow("No actions needed"));
190
+ }
191
+
192
+ this.log("success", chalk.green(`Response: ${result.object.response}`));
193
+
194
+ return {
195
+ actions: result.object.actions as unknown as ActionSchema[],
196
+ response: result.object.response,
197
+ };
198
+ }
199
+
200
+ /**
201
+ * Executes multiple workflows with their respective inputs
202
+ * @protected
203
+ * @param {GraphFlow<any>[]} workflows - Array of workflows to execute
204
+ * @param {string[]} startNodes - Array of starting node names for each workflow
205
+ * @param {any[]} inputs - Array of inputs for each workflow
206
+ * @param {AgentContext} context - The context in which to execute the workflows
207
+ * @returns {Promise<void>}
208
+ */
209
+ protected async executeWorkflows(
210
+ workflows: GraphFlow<any>[],
211
+ startNodes: string[],
212
+ inputs: any[],
213
+ context: AgentContext
214
+ ): Promise<void> {
215
+ this.log("info", chalk.cyan("Executing workflows:"));
216
+
217
+ for (let i = 0; i < workflows.length; i++) {
218
+ const workflow = workflows[i];
219
+ const startNode = startNodes[i];
220
+ const input = inputs[i];
221
+
222
+ this.log(
223
+ "info",
224
+ chalk.dim(
225
+ `Executing workflow ${workflow.name} starting at node ${startNode}`
226
+ )
227
+ );
228
+ this.log(
229
+ "info",
230
+ chalk.dim(`Input parameters: ${JSON.stringify(input, null, 2)}`)
231
+ );
232
+
233
+ // Initialize workflow context with input
234
+ const workflowContext = {
235
+ ...workflow.getContext(),
236
+ ...input,
237
+ };
238
+
239
+ // Execute with merged context
240
+ const result = await workflow.execute(
241
+ startNode,
242
+ undefined,
243
+ workflowContext
244
+ );
245
+
246
+ this.log("success", chalk.green(`Workflow ${workflow.name} completed`));
247
+ this.log("info", chalk.dim(`Result: ${JSON.stringify(result, null, 2)}`));
248
+
249
+ if (context.executedActions) {
250
+ context.executedActions.push({
251
+ name: workflow.name,
252
+ result: result,
253
+ timestamp: new Date().toISOString(),
254
+ isExecuted: true,
255
+ });
256
+ }
257
+ }
258
+ }
259
+ }
@@ -0,0 +1,47 @@
1
+ import { LLMConfig, PromptInput } from "@/types/agent";
2
+ import { openai } from "@ai-sdk/openai";
3
+ import { generateObject } from "ai";
4
+ import { z } from "zod";
5
+
6
+ /**
7
+ * Factory class for creating Language Model instances based on configuration
8
+ * @class LLMFactory
9
+ */
10
+ export class LLMFactory {
11
+ /**
12
+ * Creates an LLM instance based on the provided configuration
13
+ * @static
14
+ * @param {LLMConfig} config - Configuration for the LLM
15
+ * @returns {Object} An object with a generate method for interacting with the LLM
16
+ * @throws {Error} When an unsupported provider is specified or custom provider lacks required function
17
+ */
18
+ static createLLM(config: LLMConfig) {
19
+ switch (config.provider) {
20
+ case "openai":
21
+ return {
22
+ generate: async (
23
+ prompt: string | PromptInput,
24
+ schema: z.ZodType<any>
25
+ ) => {
26
+ return generateObject({
27
+ model: openai(config.model),
28
+ temperature: config.temperature ?? 0.7,
29
+ maxTokens: config.maxTokens,
30
+ prompt: typeof prompt === "string" ? prompt : prompt.user,
31
+ system: typeof prompt === "string" ? undefined : prompt.system,
32
+ schema,
33
+ });
34
+ },
35
+ };
36
+ case "custom":
37
+ if (!config.customCall) {
38
+ throw new Error("Custom LLM provider requires a customCall function");
39
+ }
40
+ return {
41
+ generate: config.customCall,
42
+ };
43
+ default:
44
+ throw new Error(`Unsupported LLM provider: ${config.provider}`);
45
+ }
46
+ }
47
+ }
@@ -0,0 +1,78 @@
1
+ import { AgentContext, PromptSection } from "../../types/agent";
2
+
3
+ /**
4
+ * Builder class for creating structured prompts with multiple sections
5
+ * @class PromptBuilder
6
+ */
7
+ export class PromptBuilder {
8
+ private sections: PromptSection[] = [];
9
+ private formatFn?: (
10
+ sections: PromptSection[],
11
+ context: AgentContext
12
+ ) => string | Promise<string>;
13
+
14
+ /**
15
+ * Adds a new section to the prompt
16
+ * @param {string} title - The title of the section
17
+ * @param {string | ((context: AgentContext) => string | Promise<string>)} content - The content or a function to generate content
18
+ * @returns {this} The builder instance for method chaining
19
+ */
20
+ addSection(
21
+ title: string,
22
+ content: string | ((context: AgentContext) => string | Promise<string>)
23
+ ): this {
24
+ this.sections.push({ title, content });
25
+ return this;
26
+ }
27
+
28
+ /**
29
+ * Sets a custom formatter function for the final prompt
30
+ * @param {(sections: PromptSection[], context: AgentContext) => string | Promise<string>} formatter - The formatter function
31
+ * @returns {this} The builder instance for method chaining
32
+ */
33
+ setFormatter(
34
+ formatter: (
35
+ sections: PromptSection[],
36
+ context: AgentContext
37
+ ) => string | Promise<string>
38
+ ): this {
39
+ this.formatFn = formatter;
40
+ return this;
41
+ }
42
+
43
+ /**
44
+ * Builds the final prompt by resolving all sections and applying formatting
45
+ * @param {AgentContext} context - The context to use when resolving dynamic content
46
+ * @returns {Promise<string>} The formatted prompt string
47
+ */
48
+ async build(context: AgentContext): Promise<string> {
49
+ const resolvedSections = await Promise.all(
50
+ this.sections.map(async (section) => ({
51
+ title: section.title,
52
+ content:
53
+ typeof section.content === "function"
54
+ ? await section.content(context)
55
+ : section.content,
56
+ }))
57
+ );
58
+
59
+ if (this.formatFn) {
60
+ return this.formatFn(resolvedSections, context);
61
+ }
62
+
63
+ return resolvedSections
64
+ .map((section) => `## ${section.title}\n${section.content}`)
65
+ .join("\n\n");
66
+ }
67
+ }
68
+
69
+ /**
70
+ * Default formatter function that formats sections with markdown-style headers
71
+ * @param {PromptSection[]} sections - The sections to format
72
+ * @returns {string} The formatted prompt string
73
+ */
74
+ export const defaultFormatter = (sections: PromptSection[]): string => {
75
+ return sections
76
+ .map((section) => `## ${section.title}\n${section.content}`)
77
+ .join("\n\n");
78
+ };
@@ -51,7 +51,7 @@ export class InMemoryAdapter implements IMemoryAdapter {
51
51
 
52
52
  // Check if memory already exists
53
53
  const memories = this.storage.get(input.roomId) || [];
54
- const existingMemory = memories.find((m) => m.data === input.data);
54
+ const existingMemory = memories.find((m) => m.content === input.content);
55
55
  if (existingMemory) {
56
56
  return existingMemory;
57
57
  }
@@ -59,7 +59,8 @@ export class InMemoryAdapter implements IMemoryAdapter {
59
59
  // Create new memory
60
60
  const memory: BaseMemoryType = {
61
61
  id: input.id || crypto.randomUUID(),
62
- data: input.data,
62
+ content: input.content,
63
+ metadata: input.metadata,
63
64
  embedding: input.embedding,
64
65
  roomId: input.roomId,
65
66
  createdAt: new Date(),
@@ -97,7 +98,7 @@ export class InMemoryAdapter implements IMemoryAdapter {
97
98
  options: { roomId: string; limit?: number }
98
99
  ): Promise<BaseMemoryType[]> {
99
100
  const memories = this.storage.get(options.roomId) || [];
100
- const filtered = memories.filter((m) => m.data.includes(query));
101
+ const filtered = memories.filter((m) => m.content.includes(query));
101
102
  return filtered.slice(0, options.limit || filtered.length);
102
103
  }
103
104
 
@@ -204,7 +204,7 @@ export class MeilisearchAdapter implements IMemoryAdapter {
204
204
  await this.initializeStorage(input.roomId);
205
205
 
206
206
  // Check if the memory already exists
207
- const existingMemory = await this.search(input.data, input.roomId, {
207
+ const existingMemory = await this.search(input.content, input.roomId, {
208
208
  limit: 1,
209
209
  });
210
210
  if (existingMemory.length > 0) {
@@ -214,7 +214,8 @@ export class MeilisearchAdapter implements IMemoryAdapter {
214
214
  // If not found, create new memory
215
215
  const memory: BaseMemoryType = {
216
216
  id: input.id || crypto.randomUUID(),
217
- data: input.data,
217
+ content: input.content,
218
+ metadata: input.metadata,
218
219
  embedding: input.embedding,
219
220
  roomId: input.roomId,
220
221
  createdAt: new Date(),
@@ -241,7 +242,8 @@ export class MeilisearchAdapter implements IMemoryAdapter {
241
242
  return result
242
243
  ? {
243
244
  id: result.id,
244
- data: result.data,
245
+ content: result.content,
246
+ metadata: result.metadata,
245
247
  embedding: result.embedding,
246
248
  roomId: result.roomId,
247
249
  createdAt: result.createdAt,
@@ -271,7 +273,8 @@ export class MeilisearchAdapter implements IMemoryAdapter {
271
273
  .filter((result) => result.document.roomId === options.roomId)
272
274
  .map((result) => ({
273
275
  id: result.document.id,
274
- data: result.document.data,
276
+ content: result.document.content,
277
+ metadata: result.document.metadata,
275
278
  embedding: result.document.embedding,
276
279
  roomId: result.document.roomId,
277
280
  createdAt: result.document.createdAt,
@@ -58,7 +58,8 @@ export class RedisAdapter implements IMemoryAdapter {
58
58
  ): Promise<BaseMemoryType | undefined> {
59
59
  const memory: BaseMemoryType = {
60
60
  id: input.id || crypto.randomUUID(),
61
- data: input.data,
61
+ content: input.content,
62
+ metadata: input.metadata,
62
63
  embedding: input.embedding,
63
64
  roomId: input.roomId,
64
65
  createdAt: new Date(),