@mastra/memory 0.14.3-alpha.1 → 0.14.3-alpha.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. package/CHANGELOG.md +10 -0
  2. package/package.json +16 -3
  3. package/.turbo/turbo-build.log +0 -8
  4. package/eslint.config.js +0 -12
  5. package/integration-tests-v5/.env.test +0 -5
  6. package/integration-tests-v5/CHANGELOG.md +0 -175
  7. package/integration-tests-v5/docker-compose.yml +0 -39
  8. package/integration-tests-v5/node_modules/.bin/next +0 -21
  9. package/integration-tests-v5/node_modules/.bin/tsc +0 -21
  10. package/integration-tests-v5/node_modules/.bin/tsserver +0 -21
  11. package/integration-tests-v5/node_modules/.bin/vitest +0 -21
  12. package/integration-tests-v5/package.json +0 -43
  13. package/integration-tests-v5/src/agent-memory.test.ts +0 -621
  14. package/integration-tests-v5/src/mastra/agents/weather.ts +0 -75
  15. package/integration-tests-v5/src/mastra/index.ts +0 -13
  16. package/integration-tests-v5/src/mastra/tools/weather.ts +0 -24
  17. package/integration-tests-v5/src/processors.test.ts +0 -604
  18. package/integration-tests-v5/src/streaming-memory.test.ts +0 -367
  19. package/integration-tests-v5/src/test-utils.ts +0 -147
  20. package/integration-tests-v5/src/working-memory.test.ts +0 -1064
  21. package/integration-tests-v5/tsconfig.json +0 -13
  22. package/integration-tests-v5/vitest.config.ts +0 -18
  23. package/src/index.ts +0 -1040
  24. package/src/processors/index.test.ts +0 -246
  25. package/src/processors/index.ts +0 -2
  26. package/src/processors/token-limiter.ts +0 -159
  27. package/src/processors/tool-call-filter.ts +0 -77
  28. package/src/tools/working-memory.ts +0 -154
  29. package/tsconfig.build.json +0 -9
  30. package/tsconfig.json +0 -5
  31. package/tsup.config.ts +0 -17
  32. package/vitest.config.ts +0 -11
package/src/processors/index.test.ts DELETED
@@ -1,246 +0,0 @@
- import { openai } from '@ai-sdk/openai';
- import { Agent } from '@mastra/core/agent';
- import type { MastraMessageV1 } from '@mastra/core/memory';
- import { createTool } from '@mastra/core/tools';
- import type { CoreMessage } from 'ai';
- import cl100k_base from 'js-tiktoken/ranks/cl100k_base';
- import { describe, it, expect, vi } from 'vitest';
- import { z } from 'zod';
- import { generateConversationHistory } from '../../integration-tests/src/test-utils';
- import { TokenLimiter, ToolCallFilter } from './index';
-
- vi.setConfig({ testTimeout: 20_000, hookTimeout: 20_000 });
-
- describe('TokenLimiter', () => {
-   it('should limit messages to the specified token count', () => {
-     // Create messages with predictable token counts (approximately 25 tokens each)
-     const { fakeCore } = generateConversationHistory({
-       threadId: '1',
-       messageCount: 5,
-       toolNames: [],
-       toolFrequency: 0,
-     });
-
-     const limiter = new TokenLimiter(200);
-     const result = limiter.process(fakeCore) as MastraMessageV1[];
-
-     // Should prioritize newest messages (higher ids)
-     expect(result.length).toBe(2);
-     expect(result[0].id).toBe('message-8');
-     expect(result[1].id).toBe('message-9');
-   });
-
-   it('should handle empty messages array', () => {
-     const limiter = new TokenLimiter(1000);
-     const result = limiter.process([]);
-     expect(result).toEqual([]);
-   });
-
-   it('should use different encodings based on configuration', () => {
-     const { fakeCore } = generateConversationHistory({
-       threadId: '6',
-       messageCount: 1,
-       toolNames: [],
-       toolFrequency: 0,
-     });
-
-     // Create limiters with different encoding settings
-     const defaultLimiter = new TokenLimiter(1000);
-     const customLimiter = new TokenLimiter({
-       limit: 1000,
-       encoding: cl100k_base,
-     });
-
-     // All should process fakeCore successfully but potentially with different token counts
-     const defaultResult = defaultLimiter.process(fakeCore);
-     const customResult = customLimiter.process(fakeCore);
-
-     // Each should return the same fakeCore but with potentially different token counts
-     expect(defaultResult.length).toBe(fakeCore.length);
-     expect(customResult.length).toBe(fakeCore.length);
-   });
-
-   function estimateTokens(messages: MastraMessageV1[]) {
-     // Create a TokenLimiter just for counting tokens
-     const testLimiter = new TokenLimiter(Infinity);
-
-     let estimatedTokens = testLimiter.TOKENS_PER_CONVERSATION;
-
-     // Count tokens for each message including all overheads
-     for (const message of messages) {
-       // Base token count from the countTokens method
-       estimatedTokens += testLimiter.countTokens(message as CoreMessage); // TODO: this is really actually a MastraMessageV1 but in previous implementations we were casting V1 to CoreMessage which is almost the same but not exactly
-     }
-
-     return Number(estimatedTokens.toFixed(2));
-   }
-
-   function percentDifference(a: number, b: number) {
-     const difference = Number(((Math.abs(a - b) / b) * 100).toFixed(2));
-     console.log(`${a} and ${b} are ${difference}% different`);
-     return difference;
-   }
-
-   async function expectTokenEstimate(config: Parameters<typeof generateConversationHistory>[0], agent: Agent) {
-     const { messages, fakeCore, counts } = generateConversationHistory(config);
-
-     const estimate = estimateTokens(messages);
-     const used = (await agent.generate(fakeCore)).usage.promptTokens;
-
-     console.log(`Estimated ${estimate} tokens, used ${used} tokens.\n`, counts);
-
-     // Check if within 2% margin
-     expect(percentDifference(estimate, used)).toBeLessThanOrEqual(2);
-   }
-
-   const calculatorTool = createTool({
-     id: 'calculator',
-     description: 'Perform a simple calculation',
-     inputSchema: z.object({
-       expression: z.string().describe('The mathematical expression to calculate'),
-     }),
-     execute: async ({ context: { expression } }) => {
-       // Don't actually eval the expression. The model is dumb and sometimes passes "banana" as the expression because that's one of the sample tokens we're using in input messages lmao
-       return `The result of ${expression} is 10`;
-     },
-   });
-
-   const agent = new Agent({
-     name: 'token estimate agent',
-     model: openai('gpt-4o-mini'),
-     instructions: ``,
-     tools: { calculatorTool },
-   });
-
-   describe.concurrent(`98% accuracy`, () => {
-     it(`20 messages, no tools`, async () => {
-       await expectTokenEstimate(
-         {
-           messageCount: 10,
-           toolFrequency: 0,
-           threadId: '2',
-         },
-         agent,
-       );
-     });
-
-     it(`60 messages, no tools`, async () => {
-       await expectTokenEstimate(
-         {
-           messageCount: 30,
-           toolFrequency: 0,
-           threadId: '3',
-         },
-         agent,
-       );
-     });
-
-     it(`20 messages, 0 tools`, async () => {
-       await expectTokenEstimate(
-         {
-           messageCount: 10,
-           toolFrequency: 0,
-           threadId: '3',
-         },
-         agent,
-       );
-     });
-
-     it(`20 messages, 2 tool messages`, async () => {
-       await expectTokenEstimate(
-         {
-           messageCount: 10,
-           toolFrequency: 5,
-           threadId: '3',
-         },
-         agent,
-       );
-     });
-
-     it(`40 messages, 6 tool messages`, async () => {
-       await expectTokenEstimate(
-         {
-           messageCount: 20,
-           toolFrequency: 5,
-           threadId: '4',
-         },
-         agent,
-       );
-     });
-
-     it(`100 messages, 24 tool messages`, async () => {
-       await expectTokenEstimate(
-         {
-           messageCount: 50,
-           toolFrequency: 4,
-           threadId: '5',
-         },
-         agent,
-       );
-     });
-
-     it(
-       `101 messages, 49 tool calls`,
-       async () => {
-         await expectTokenEstimate(
-           {
-             messageCount: 50,
-             toolFrequency: 1,
-             threadId: '5',
-           },
-           agent,
-         );
-       },
-       {
-         // for some reason AI SDK randomly returns 2x token count here
-         retry: 3,
-       },
-     );
-   });
- });
-
- describe.concurrent('ToolCallFilter', () => {
-   it('should exclude all tool calls when created with no arguments', () => {
-     const { fakeCore } = generateConversationHistory({
-       threadId: '3',
-       toolNames: ['weather', 'calculator', 'search'],
-       messageCount: 1,
-     });
-     const filter = new ToolCallFilter();
-     const result = filter.process(fakeCore) as MastraMessageV1[];
-
-     // Should only keep the text message and assistant res
-     expect(result.length).toBe(2);
-     expect(result[0].id).toBe('message-0');
-   });
-
-   it('should exclude specific tool calls by name', () => {
-     const { fakeCore } = generateConversationHistory({
-       threadId: '4',
-       toolNames: ['weather', 'calculator'],
-       messageCount: 2,
-     });
-     const filter = new ToolCallFilter({ exclude: ['weather'] });
-     const result = filter.process(fakeCore) as MastraMessageV1[];
-
-     // Should keep text message, assistant reply, calculator tool call, and calculator result
-     expect(result.length).toBe(4);
-     expect(result[0].id).toBe('message-0');
-     expect(result[1].id).toBe('message-1');
-     expect(result[2].id).toBe('message-2');
-     expect(result[3].id).toBe('message-3');
-   });
-
-   it('should keep all messages when exclude list is empty', () => {
-     const { fakeCore } = generateConversationHistory({
-       threadId: '5',
-       toolNames: ['weather', 'calculator'],
-     });
-
-     const filter = new ToolCallFilter({ exclude: [] });
-     const result = filter.process(fakeCore);
-
-     // Should keep all messages
-     expect(result.length).toBe(fakeCore.length);
-   });
- });
package/src/processors/index.ts DELETED
@@ -1,2 +0,0 @@
- export * from './token-limiter';
- export * from './tool-call-filter';
package/src/processors/token-limiter.ts DELETED
@@ -1,159 +0,0 @@
- import type { CoreMessage } from '@mastra/core/llm';
- import { MemoryProcessor } from '@mastra/core/memory';
- import type { MemoryProcessorOpts } from '@mastra/core/memory';
-
- import { Tiktoken } from 'js-tiktoken/lite';
- import type { TiktokenBPE } from 'js-tiktoken/lite';
- import o200k_base from 'js-tiktoken/ranks/o200k_base';
-
- /**
-  * Configuration options for TokenLimiter
-  */
- interface TokenLimiterOptions {
-   /** Maximum number of tokens to allow */
-   limit: number;
-   /** Optional encoding to use (defaults to o200k_base which is used by gpt-4o) */
-   encoding?: TiktokenBPE;
- }
-
- /**
-  * Limits the total number of tokens in the messages.
-  * Uses js-tiktoken with o200k_base encoding by default for accurate token counting with modern models.
-  */
- export class TokenLimiter extends MemoryProcessor {
-   private encoder: Tiktoken;
-   private maxTokens: number;
-
-   // Token overheads per OpenAI's documentation
-   // See: https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken#6-counting-tokens-for-chat-completions-api-calls
-   // Every message follows <|start|>{role/name}\n{content}<|end|>
-   public TOKENS_PER_MESSAGE = 3.8; // tokens added for each message (start & end tokens)
-   public TOKENS_PER_CONVERSATION = 24; // fixed overhead for the conversation
-
-   /**
-    * Create a token limiter for messages.
-    * @param options Either a number (token limit) or a configuration object
-    */
-   constructor(options: number | TokenLimiterOptions) {
-     super({
-       name: 'TokenLimiter',
-     });
-
-     if (typeof options === 'number') {
-       // Simple number format - just the token limit with default encoding
-       this.maxTokens = options;
-       this.encoder = new Tiktoken(o200k_base);
-     } else {
-       // Object format with limit and optional encoding
-       this.maxTokens = options.limit;
-       this.encoder = new Tiktoken(options.encoding || o200k_base);
-     }
-   }
-
-   process(
-     messages: CoreMessage[],
-     { systemMessage, memorySystemMessage, newMessages }: MemoryProcessorOpts = {},
-   ): CoreMessage[] {
-     // Messages are already chronologically ordered - take most recent ones up to the token limit
-     let totalTokens = 0;
-
-     // Start with the conversation overhead
-     totalTokens += this.TOKENS_PER_CONVERSATION;
-
-     if (systemMessage) {
-       totalTokens += this.countTokens(systemMessage);
-       totalTokens += this.TOKENS_PER_MESSAGE; // Add message overhead for system message
-     }
-
-     if (memorySystemMessage) {
-       totalTokens += this.countTokens(memorySystemMessage);
-       totalTokens += this.TOKENS_PER_MESSAGE; // Add message overhead for memory system message
-     }
-
-     const allMessages = [...messages, ...(newMessages || [])];
-
-     const result: CoreMessage[] = [];
-
-     // Process messages in reverse (newest first) so that we stop estimating tokens on old messages. Once we get to our limit of tokens there's no reason to keep processing older messages
-     for (let i = allMessages.length - 1; i >= 0; i--) {
-       const message = allMessages[i];
-
-       // Skip undefined messages (shouldn't happen, but TypeScript is concerned)
-       if (!message) continue;
-
-       const messageTokens = this.countTokens(message);
-
-       if (totalTokens + messageTokens <= this.maxTokens) {
-         // Insert at the beginning to maintain chronological order, but only if it's not a new message
-         if (i < messages.length) {
-           // less than messages.length because we're iterating in reverse. If the index is greater than messages.length it's a new message
-           result.unshift(message);
-         }
-         totalTokens += messageTokens;
-       } else {
-         this.logger.info(
-           `filtering ${allMessages.length - result.length}/${allMessages.length} messages, token limit of ${this.maxTokens} exceeded`,
-         );
-         // If we can't fit the message, we stop
-         break;
-       }
-     }
-
-     return result;
-   }
-
-   public countTokens(message: string | CoreMessage): number {
-     if (typeof message === `string`) {
-       return this.encoder.encode(message).length;
-     }
-
-     let tokenString = message.role;
-     let overhead = 0;
-
-     if (typeof message.content === 'string' && message.content) {
-       tokenString += message.content;
-     } else if (Array.isArray(message.content)) {
-       // Calculate tokens for each content part
-       for (const part of message.content) {
-         if (part.type === 'text') {
-           tokenString += part.text;
-         } else if (part.type === 'tool-call' || part.type === `tool-result`) {
-           if (`args` in part && part.args && part.type === `tool-call`) {
-             tokenString += part.toolName as any;
-             if (typeof part.args === 'string') {
-               tokenString += part.args;
-             } else {
-               tokenString += JSON.stringify(part.args);
-               // minus some tokens for JSON
-               overhead -= 12;
-             }
-           }
-           // Token cost for result if present
-           if (`result` in part && part.result !== undefined && part.type === `tool-result`) {
-             if (typeof part.result === 'string') {
-               tokenString += part.result;
-             } else {
-               tokenString += JSON.stringify(part.result);
-               // minus some tokens for JSON
-               overhead -= 12;
-             }
-           }
-         } else {
-           tokenString += JSON.stringify(part);
-         }
-       }
-     }
-
-     if (
-       typeof message.content === `string` ||
-       // if the message included non-tool parts, add our message overhead
-       message.content.some(p => p.type !== `tool-call` && p.type !== `tool-result`)
-     ) {
-       // Ensure we account for message formatting tokens
-       // See: https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken#6-counting-tokens-for-chat-completions-api-calls
-       overhead += this.TOKENS_PER_MESSAGE;
-     }
-
-     return this.encoder.encode(tokenString).length + overhead;
-   }
- }
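
For context on what is being removed here: TokenLimiter was one of this package's memory processors. Below is a minimal usage sketch. It assumes the Memory class from @mastra/memory accepts a processors option and that the processors module is published as the '@mastra/memory/processors' subpath (suggested by the 'src/processors/index.ts' tsup entry deleted further down); treat both as assumptions rather than confirmed API.

import { Memory } from '@mastra/memory';
import { TokenLimiter } from '@mastra/memory/processors'; // assumed subpath export built from src/processors/index.ts
import cl100k_base from 'js-tiktoken/ranks/cl100k_base';

// Number form: cap recalled history at roughly 127k tokens using the default o200k_base encoding.
const memory = new Memory({
  processors: [new TokenLimiter(127000)],
});

// Object form: same limit, but count tokens with a different encoding (e.g. cl100k_base for GPT-4/GPT-3.5-era models).
const memoryWithCustomEncoding = new Memory({
  processors: [new TokenLimiter({ limit: 127000, encoding: cl100k_base })],
});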
package/src/processors/tool-call-filter.ts DELETED
@@ -1,77 +0,0 @@
- import type { CoreMessage } from '@mastra/core/llm';
- import { MemoryProcessor } from '@mastra/core/memory';
-
- /**
-  * Filters out tool calls and results from messages.
-  * By default (with no arguments), excludes all tool calls and their results.
-  * Can be configured to exclude only specific tools by name.
-  */
- export class ToolCallFilter extends MemoryProcessor {
-   private exclude: string[] | 'all';
-
-   /**
-    * Create a filter for tool calls and results.
-    * @param options Configuration options
-    * @param options.exclude List of specific tool names to exclude. If not provided, all tool calls are excluded.
-    */
-   constructor(options: { exclude?: string[] } = {}) {
-     super({ name: 'ToolCallFilter' });
-     // If no options or exclude is provided, exclude all tools
-     if (!options || !options.exclude) {
-       this.exclude = 'all'; // Exclude all tools
-     } else {
-       // Exclude specific tools
-       this.exclude = Array.isArray(options.exclude) ? options.exclude : [];
-     }
-   }
-
-   process(messages: CoreMessage[]): CoreMessage[] {
-     // Case 1: Exclude all tool calls and tool results
-     if (this.exclude === 'all') {
-       return messages.filter(message => {
-         if (Array.isArray(message.content)) {
-           return !message.content.some(part => part.type === 'tool-call' || part.type === 'tool-result');
-         }
-         return true;
-       });
-     }
-
-     // Case 2: Exclude specific tools by name
-     if (this.exclude.length > 0) {
-       // Single pass approach - track excluded tool call IDs while filtering
-       const excludedToolCallIds = new Set<string>();
-
-       return messages.filter(message => {
-         if (!Array.isArray(message.content)) return true;
-
-         // For assistant messages, check for excluded tool calls and track their IDs
-         if (message.role === 'assistant') {
-           let shouldExclude = false;
-
-           for (const part of message.content) {
-             if (part.type === 'tool-call' && this.exclude.includes(part.toolName)) {
-               excludedToolCallIds.add(part.toolCallId);
-               shouldExclude = true;
-             }
-           }
-
-           return !shouldExclude;
-         }
-
-         // For tool messages, filter out results for excluded tool calls
-         if (message.role === 'tool') {
-           const shouldExclude = message.content.some(
-             part => part.type === 'tool-result' && excludedToolCallIds.has(part.toolCallId),
-           );
-
-           return !shouldExclude;
-         }
-
-         return true;
-       });
-     }
-
-     // Case 3: Empty exclude array, return original messages
-     return messages;
-   }
- }
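
Likewise for ToolCallFilter, a hedged sketch of combining it with TokenLimiter (same assumptions about the Memory processors option and the '@mastra/memory/processors' import path as above). Processors run in order, so placing the token limit last counts only the messages that survive filtering.

import { Memory } from '@mastra/memory';
import { TokenLimiter, ToolCallFilter } from '@mastra/memory/processors'; // assumed subpath export

const memory = new Memory({
  processors: [
    // No arguments: drop every tool call and its tool result from recalled messages.
    new ToolCallFilter(),
    // Or exclude only named tools and keep the rest:
    // new ToolCallFilter({ exclude: ['weather'] }),
    new TokenLimiter(127000),
  ],
});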
package/src/tools/working-memory.ts DELETED
@@ -1,154 +0,0 @@
- import type { MemoryConfig } from '@mastra/core/memory';
- import { createTool } from '@mastra/core/tools';
- import { convertSchemaToZod } from '@mastra/schema-compat';
- import type { Schema } from 'ai';
- import { z, ZodObject } from 'zod';
- import type { ZodType } from 'zod';
-
- export const updateWorkingMemoryTool = (memoryConfig?: MemoryConfig) => {
-   const schema = memoryConfig?.workingMemory?.schema;
-
-   let inputSchema: ZodType = z.object({
-     memory: z
-       .string()
-       .describe(`The Markdown formatted working memory content to store. This MUST be a string. Never pass an object.`),
-   });
-
-   if (schema) {
-     inputSchema = z.object({
-       memory:
-         schema instanceof ZodObject
-           ? schema
-           : (convertSchemaToZod({ jsonSchema: schema } as Schema).describe(
-               `The JSON formatted working memory content to store.`,
-             ) as ZodObject<any>),
-     });
-   }
-
-   return createTool({
-     id: 'update-working-memory',
-     description: `Update the working memory with new information. Any data not included will be overwritten.${schema ? ' Always pass data as string to the memory field. Never pass an object.' : ''}`,
-     inputSchema,
-     execute: async params => {
-       const { context, threadId, memory, resourceId } = params;
-       if (!threadId || !memory || !resourceId) {
-         throw new Error('Thread ID, Memory instance, and resourceId are required for working memory updates');
-       }
-
-       let thread = await memory.getThreadById({ threadId });
-
-       if (!thread) {
-         thread = await memory.createThread({
-           threadId,
-           resourceId,
-           memoryConfig,
-         });
-       }
-
-       if (thread.resourceId && thread.resourceId !== resourceId) {
-         throw new Error(`Thread with id ${threadId} resourceId does not match the current resourceId ${resourceId}`);
-       }
-
-       const workingMemory = typeof context.memory === 'string' ? context.memory : JSON.stringify(context.memory);
-
-       // Use the new updateWorkingMemory method which handles both thread and resource scope
-       await memory.updateWorkingMemory({
-         threadId,
-         resourceId,
-         workingMemory,
-         memoryConfig,
-       });
-
-       return { success: true };
-     },
-   });
- };
-
- export const __experimental_updateWorkingMemoryToolVNext = (config: MemoryConfig) => {
-   return createTool({
-     id: 'update-working-memory',
-     description: 'Update the working memory with new information.',
-     inputSchema: z.object({
-       newMemory: z
-         .string()
-         .optional()
-         .describe(
-           `The ${config.workingMemory?.schema ? 'JSON' : 'Markdown'} formatted working memory content to store`,
-         ),
-       searchString: z
-         .string()
-         .optional()
-         .describe(
-           "The working memory string to find. Will be replaced with the newMemory string. If this is omitted or doesn't exist, the newMemory string will be appended to the end of your working memory. Replacing single lines at a time is encouraged for greater accuracy. If updateReason is not 'append-new-memory', this search string must be provided or the tool call will be rejected.",
-         ),
-       updateReason: z
-         .enum(['append-new-memory', 'clarify-existing-memory', 'replace-irrelevant-memory'])
-         .optional()
-         .describe(
-           "The reason you're updating working memory. Passing any value other than 'append-new-memory' requires a searchString to be provided. Defaults to append-new-memory",
-         ),
-     }),
-     execute: async params => {
-       const { context, threadId, memory, resourceId } = params;
-       if (!threadId || !memory || !resourceId) {
-         throw new Error('Thread ID, Memory instance, and resourceId are required for working memory updates');
-       }
-
-       let thread = await memory.getThreadById({ threadId });
-
-       if (!thread) {
-         thread = await memory.createThread({
-           threadId,
-           resourceId,
-           memoryConfig: config,
-         });
-       }
-
-       if (thread.resourceId && thread.resourceId !== resourceId) {
-         throw new Error(`Thread with id ${threadId} resourceId does not match the current resourceId ${resourceId}`);
-       }
-
-       const workingMemory = context.newMemory || '';
-       if (!context.updateReason) context.updateReason = `append-new-memory`;
-
-       if (
-         context.searchString &&
-         config.workingMemory?.scope === `resource` &&
-         context.updateReason === `replace-irrelevant-memory`
-       ) {
-         // don't allow replacements due to something not being relevant to the current conversation
-         // if there's no searchString, then we will append.
-         context.searchString = undefined;
-       }
-
-       if (context.updateReason === `append-new-memory` && context.searchString) {
-         // do not find/replace when append-new-memory is selected
-         // some models get confused and pass a search string even when they don't want to replace it.
-         // TODO: maybe they're trying to add new info after the search string?
-         context.searchString = undefined;
-       }
-
-       if (context.updateReason !== `append-new-memory` && !context.searchString) {
-         return {
-           success: false,
-           reason: `updateReason was ${context.updateReason} but no searchString was provided. Unable to replace undefined with "${context.newMemory}"`,
-         };
-       }
-
-       // Use the new updateWorkingMemory method which handles both thread and resource scope
-       const result = await memory.__experimental_updateWorkingMemoryVNext({
-         threadId,
-         resourceId,
-         workingMemory: workingMemory,
-         searchString: context.searchString,
-         memoryConfig: config,
-       });
-
-       if (result) {
-         return result;
-       }
-
-       return { success: true };
-     },
-   });
- };
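
The working-memory tool factory above derives its input schema from the memory configuration: with a Zod (or JSON) schema configured it expects structured data in the memory field, otherwise a Markdown string. A small illustrative sketch; the exact MemoryConfig.workingMemory shape (including the enabled flag) is assumed here, and updateWorkingMemoryTool is imported relatively because it was an internal module of this package.

import { z } from 'zod';
import type { MemoryConfig } from '@mastra/core/memory';
import { updateWorkingMemoryTool } from './working-memory'; // internal module shown in the hunk above

// Hypothetical config: working memory constrained by a Zod schema.
const memoryConfig: MemoryConfig = {
  workingMemory: {
    enabled: true, // assumed field; not shown in the deleted source
    schema: z.object({
      name: z.string(),
      preferences: z.array(z.string()),
    }),
  },
};

// The returned tool's inputSchema wraps the Zod schema; without a schema it asks for a Markdown string instead.
const tool = updateWorkingMemoryTool(memoryConfig);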
package/tsconfig.build.json DELETED
@@ -1,9 +0,0 @@
- {
-   "extends": ["./tsconfig.json", "../../tsconfig.build.json"],
-   "compilerOptions": {
-     "outDir": "./dist",
-     "rootDir": "./src"
-   },
-   "include": ["src/**/*"],
-   "exclude": ["node_modules", "**/*.test.ts", "src/**/*.mock.ts"]
- }
package/tsconfig.json DELETED
@@ -1,5 +0,0 @@
- {
-   "extends": "../../tsconfig.node.json",
-   "include": ["src/**/*", "tsup.config.ts"],
-   "exclude": ["node_modules", "**/*.test.ts"]
- }
package/tsup.config.ts DELETED
@@ -1,17 +0,0 @@
- import { generateTypes } from '@internal/types-builder';
- import { defineConfig } from 'tsup';
-
- export default defineConfig({
-   entry: ['src/index.ts', 'src/processors/index.ts'],
-   format: ['esm', 'cjs'],
-   clean: true,
-   dts: false,
-   splitting: true,
-   treeshake: {
-     preset: 'smallest',
-   },
-   sourcemap: true,
-   onSuccess: async () => {
-     await generateTypes(process.cwd());
-   },
- });
package/vitest.config.ts DELETED
@@ -1,11 +0,0 @@
- import { defineConfig } from 'vitest/config';
-
- export default defineConfig({
-   test: {
-     environment: 'node',
-     include: ['src/**/*.test.ts'],
-     // smaller output to save token space when LLMs run tests
-     reporters: 'dot',
-     bail: 1,
-   },
- });