@mastra/memory 0.3.2-alpha.5 → 0.3.2-alpha.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,29 +1,29 @@
 
-> @mastra/memory@0.3.2-alpha.5 build /home/runner/work/mastra/mastra/packages/memory
+> @mastra/memory@0.3.2-alpha.7 build /home/runner/work/mastra/mastra/packages/memory
 
 > pnpm run check && tsup src/index.ts src/processors/index.ts --format esm,cjs --experimental-dts --clean --treeshake=smallest --splitting
 
 
-> @mastra/memory@0.3.2-alpha.5 check /home/runner/work/mastra/mastra/packages/memory
+> @mastra/memory@0.3.2-alpha.7 check /home/runner/work/mastra/mastra/packages/memory
 
 > tsc --noEmit
 
 CLI Building entry: src/index.ts, src/processors/index.ts
 CLI Using tsconfig: tsconfig.json
 CLI tsup v8.4.0
 TSC Build start
-TSC ⚡️ Build success in 8312ms
+TSC ⚡️ Build success in 9292ms
 DTS Build start
 CLI Target: es2022
 Analysis will use the bundled TypeScript version 5.8.3
 Writing package typings: /home/runner/work/mastra/mastra/packages/memory/dist/_tsup-dts-rollup.d.ts
 Analysis will use the bundled TypeScript version 5.8.3
 Writing package typings: /home/runner/work/mastra/mastra/packages/memory/dist/_tsup-dts-rollup.d.cts
-DTS ⚡️ Build success in 6581ms
+DTS ⚡️ Build success in 11931ms
 CLI Cleaning output folder
 ESM Build start
 CJS Build start
-ESM dist/index.js 18.27 KB
 ESM dist/processors/index.js 5.38 KB
-ESM ⚡️ Build success in 393ms
-CJS dist/index.cjs 18.45 KB
+ESM dist/index.js 18.68 KB
+ESM ⚡️ Build success in 1200ms
 CJS dist/processors/index.cjs 5.59 KB
-CJS ⚡️ Build success in 394ms
+CJS dist/index.cjs 18.85 KB
+CJS ⚡️ Build success in 1201ms
package/CHANGELOG.md CHANGED
@@ -1,5 +1,22 @@
 # @mastra/memory
 
+## 0.3.2-alpha.7
+
+### Patch Changes
+
+- 67e14dd: Allow for textpart message content to be embedded into vectors
+- Updated dependencies [6052aa6]
+- Updated dependencies [7d8b7c7]
+- Updated dependencies [3a5f1e1]
+- Updated dependencies [8398d89]
+  - @mastra/core@0.9.2-alpha.6
+
+## 0.3.2-alpha.6
+
+### Patch Changes
+
+- 544767d: Improved token estimation in TokenLimiter from 96% accuracy back to 99%
+
 ## 0.3.2-alpha.5
 
 ### Patch Changes
package/dist/index.cjs CHANGED
@@ -301,8 +301,15 @@ var Memory = class extends memory.MastraMemory {
     let indexName;
     await Promise.all(
       updatedMessages.map(async (message) => {
-        if (typeof message.content !== `string` || message.content === "") return;
-        const { embeddings, chunks, dimension } = await this.embedMessageContent(message.content);
+        let textForEmbedding = null;
+        if (typeof message.content === "string" && message.content.trim() !== "") {
+          textForEmbedding = message.content;
+        } else if (Array.isArray(message.content)) {
+          const joined = message.content.filter((part) => part && part.type === "text" && typeof part.text === "string").map((part) => part.text).join(" ").trim();
+          if (joined) textForEmbedding = joined;
+        }
+        if (!textForEmbedding) return;
+        const { embeddings, chunks, dimension } = await this.embedMessageContent(textForEmbedding);
         if (typeof indexName === `undefined`) {
           indexName = this.createEmbeddingIndex(dimension).then((result2) => result2.indexName);
         }
package/dist/index.js CHANGED
@@ -295,8 +295,15 @@ var Memory = class extends MastraMemory {
     let indexName;
     await Promise.all(
       updatedMessages.map(async (message) => {
-        if (typeof message.content !== `string` || message.content === "") return;
-        const { embeddings, chunks, dimension } = await this.embedMessageContent(message.content);
+        let textForEmbedding = null;
+        if (typeof message.content === "string" && message.content.trim() !== "") {
+          textForEmbedding = message.content;
+        } else if (Array.isArray(message.content)) {
+          const joined = message.content.filter((part) => part && part.type === "text" && typeof part.text === "string").map((part) => part.text).join(" ").trim();
+          if (joined) textForEmbedding = joined;
+        }
+        if (!textForEmbedding) return;
+        const { embeddings, chunks, dimension } = await this.embedMessageContent(textForEmbedding);
         if (typeof indexName === `undefined`) {
           indexName = this.createEmbeddingIndex(dimension).then((result2) => result2.indexName);
         }
package/dist/processors/index.cjs CHANGED
@@ -16,9 +16,9 @@ var TokenLimiter = class extends memory.MemoryProcessor {
   // Token overheads per OpenAI's documentation
   // See: https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken#6-counting-tokens-for-chat-completions-api-calls
   // Every message follows <|start|>{role/name}\n{content}<|end|>
-  TOKENS_PER_MESSAGE = 3;
+  TOKENS_PER_MESSAGE = 3.8;
   // tokens added for each message (start & end tokens)
-  TOKENS_PER_TOOL = 2;
+  TOKENS_PER_TOOL = 2.2;
   // empirical adjustment for tool calls
   TOKENS_PER_CONVERSATION = 25;
   // fixed overhead for the conversation
package/dist/processors/index.js CHANGED
@@ -10,9 +10,9 @@ var TokenLimiter = class extends MemoryProcessor {
   // Token overheads per OpenAI's documentation
   // See: https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken#6-counting-tokens-for-chat-completions-api-calls
   // Every message follows <|start|>{role/name}\n{content}<|end|>
-  TOKENS_PER_MESSAGE = 3;
+  TOKENS_PER_MESSAGE = 3.8;
   // tokens added for each message (start & end tokens)
-  TOKENS_PER_TOOL = 2;
+  TOKENS_PER_TOOL = 2.2;
   // empirical adjustment for tool calls
   TOKENS_PER_CONVERSATION = 25;
   // fixed overhead for the conversation
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@mastra/memory",
-  "version": "0.3.2-alpha.5",
+  "version": "0.3.2-alpha.7",
   "description": "",
   "type": "module",
   "main": "./dist/index.js",
@@ -41,7 +41,7 @@
     "redis": "^4.7.0",
     "xxhash-wasm": "^1.1.0",
     "zod": "^3.24.2",
-    "@mastra/core": "^0.9.2-alpha.5"
+    "@mastra/core": "^0.9.2-alpha.6"
   },
   "devDependencies": {
     "@ai-sdk/openai": "^1.3.3",
package/src/index.ts CHANGED
@@ -4,6 +4,7 @@ import { MastraMemory } from '@mastra/core/memory';
 import type { MessageType, MemoryConfig, SharedMemoryConfig, StorageThreadType } from '@mastra/core/memory';
 import type { StorageGetMessagesArg } from '@mastra/core/storage';
 import { embedMany } from 'ai';
+import type { TextPart } from 'ai';
 
 import xxhash from 'xxhash-wasm';
 import { updateWorkingMemoryTool } from './tools/working-memory';
@@ -337,9 +338,23 @@ export class Memory extends MastraMemory {
       let indexName: Promise<string>;
       await Promise.all(
         updatedMessages.map(async message => {
-          if (typeof message.content !== `string` || message.content === '') return;
+          let textForEmbedding: string | null = null;
+
+          if (typeof message.content === 'string' && message.content.trim() !== '') {
+            textForEmbedding = message.content;
+          } else if (Array.isArray(message.content)) {
+            // Extract text from all text parts, concatenate
+            const joined = message.content
+              .filter(part => part && part.type === 'text' && typeof part.text === 'string')
+              .map(part => (part as TextPart).text)
+              .join(' ')
+              .trim();
+            if (joined) textForEmbedding = joined;
+          }
+
+          if (!textForEmbedding) return;
 
-          const { embeddings, chunks, dimension } = await this.embedMessageContent(message.content);
+          const { embeddings, chunks, dimension } = await this.embedMessageContent(textForEmbedding);
 
           if (typeof indexName === `undefined`) {
             indexName = this.createEmbeddingIndex(dimension).then(result => result.indexName);
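
This source change is the substance of the 67e14dd changelog entry: messages whose content is an array of AI SDK parts previously never reached the embedding step, because only plain-string content passed the old guard. A minimal sketch of the new selection logic in isolation, using hypothetical stand-in types rather than the package's actual MessageType:

// Hypothetical stand-ins for the content shapes handled above; the real
// types come from @mastra/core/memory and the ai package.
type TextPart = { type: 'text'; text: string };
type Part = TextPart | { type: string };
type MessageContent = string | Part[];

// Mirrors the embedding-text selection introduced in this diff.
function extractTextForEmbedding(content: MessageContent): string | null {
  if (typeof content === 'string' && content.trim() !== '') return content;
  if (Array.isArray(content)) {
    const joined = content
      .filter((part): part is TextPart => !!part && part.type === 'text' && typeof (part as TextPart).text === 'string')
      .map(part => part.text)
      .join(' ')
      .trim();
    if (joined) return joined;
  }
  return null;
}

// Plain-string content embeds as before:
extractTextForEmbedding('hello world'); // => "hello world"

// Multi-part content now contributes its text parts, joined with spaces:
extractTextForEmbedding([
  { type: 'text', text: 'hello' },
  { type: 'tool-call' }, // non-text parts are ignored
  { type: 'text', text: 'world' },
]); // => "hello world"

// Messages with no usable text are still skipped:
extractTextForEmbedding([{ type: 'image' }]); // => null

Joining the text parts into one string keeps a single embedding path per message, so tool-call or image parts no longer prevent the surrounding text from being recalled.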
@@ -72,7 +72,7 @@ describe('TokenLimiter', () => {
       estimatedTokens += testLimiter.countTokens(message as CoreMessage);
     }
 
-    return estimatedTokens;
+    return Number(estimatedTokens.toFixed(2));
   }
 
   function percentDifference(a: number, b: number) {
@@ -90,7 +90,7 @@ describe('TokenLimiter', () => {
     console.log(`Estimated ${estimate} tokens, used ${used} tokens.\n`, counts);
 
     // Check if within 2% margin
-    expect(percentDifference(estimate, used)).toBeLessThanOrEqual(4);
+    expect(percentDifference(estimate, used)).toBeLessThanOrEqual(2);
   }
 
   const calculatorTool = createTool({
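
The tightened assertion above halves the tolerated estimation error. Only the signature of percentDifference appears in these hunks; assuming it returns the absolute difference as a percentage of actual usage, the check behaves like this sketch:

// Assumed implementation; the real body lies outside the hunks shown here.
function percentDifference(a: number, b: number): number {
  return Math.abs((a - b) / b) * 100;
}

// An estimate of 102 tokens against 100 actually used is 2% off, which
// exactly meets the new threshold; 104 (4% off) passed before but fails now.
percentDifference(102, 100); // => 2
percentDifference(104, 100); // => 4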
@@ -111,7 +111,7 @@ describe('TokenLimiter', () => {
     tools: { calculatorTool },
   });
 
-  describe.concurrent(`96% accuracy`, () => {
+  describe.concurrent(`98% accuracy`, () => {
    it(`20 messages, no tools`, async () => {
      await expectTokenEstimate(
        {
@@ -134,10 +134,10 @@ describe('TokenLimiter', () => {
      );
    });
 
-    it(`4 messages, 0 tools`, async () => {
+    it(`20 messages, 0 tools`, async () => {
      await expectTokenEstimate(
        {
-          messageCount: 2,
+          messageCount: 10,
          toolFrequency: 0,
          threadId: '3',
        },
@@ -25,8 +25,8 @@ export class TokenLimiter extends MemoryProcessor {
   // Token overheads per OpenAI's documentation
   // See: https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken#6-counting-tokens-for-chat-completions-api-calls
   // Every message follows <|start|>{role/name}\n{content}<|end|>
-  public TOKENS_PER_MESSAGE = 3; // tokens added for each message (start & end tokens)
-  public TOKENS_PER_TOOL = 2; // empirical adjustment for tool calls
+  public TOKENS_PER_MESSAGE = 3.8; // tokens added for each message (start & end tokens)
+  public TOKENS_PER_TOOL = 2.2; // empirical adjustment for tool calls
   public TOKENS_PER_CONVERSATION = 25; // fixed overhead for the conversation
 
   /**
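
These constant bumps (3 → 3.8 per message, 2 → 2.2 per tool call) are the 544767d change: empirical corrections that bring the estimate within the tightened 2% test margin. A simplified sketch of how overhead constants like these combine into a total estimate; the real accounting lives in TokenLimiter.countTokens, which this diff does not show, so the helper below is illustrative only:

// Illustrative only. Assumes contentTokens is a tokenizer count
// (e.g. from js-tiktoken) of each message's text content.
function estimateConversationTokens(messages: { contentTokens: number; isToolCall: boolean }[]): number {
  const TOKENS_PER_MESSAGE = 3.8; // per-message start/end overhead
  const TOKENS_PER_TOOL = 2.2; // extra overhead for tool-related messages
  const TOKENS_PER_CONVERSATION = 25; // fixed overhead per conversation

  let total = TOKENS_PER_CONVERSATION;
  for (const m of messages) {
    total += m.contentTokens + TOKENS_PER_MESSAGE + (m.isToolCall ? TOKENS_PER_TOOL : 0);
  }
  return total;
}

// 10 plain ~50-token messages: 25 + 10 * (50 + 3.8) = 563 estimated tokens.
estimateConversationTokens(Array.from({ length: 10 }, () => ({ contentTokens: 50, isToolCall: false })));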