@mastra/memory 0.3.2-alpha.5 → 0.3.2-alpha.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-build.log +8 -8
- package/CHANGELOG.md +17 -0
- package/dist/index.cjs +9 -2
- package/dist/index.js +9 -2
- package/dist/processors/index.cjs +2 -2
- package/dist/processors/index.js +2 -2
- package/package.json +2 -2
- package/src/index.ts +17 -2
- package/src/processors/index.test.ts +5 -5
- package/src/processors/token-limiter.ts +2 -2
package/.turbo/turbo-build.log
CHANGED
@@ -1,29 +1,29 @@
 
-> @mastra/memory@0.3.2-alpha.5 build /home/runner/work/mastra/mastra/packages/memory
+> @mastra/memory@0.3.2-alpha.7 build /home/runner/work/mastra/mastra/packages/memory
 > pnpm run check && tsup src/index.ts src/processors/index.ts --format esm,cjs --experimental-dts --clean --treeshake=smallest --splitting
 
 
-> @mastra/memory@0.3.2-alpha.5 check /home/runner/work/mastra/mastra/packages/memory
+> @mastra/memory@0.3.2-alpha.7 check /home/runner/work/mastra/mastra/packages/memory
 > tsc --noEmit
 
 CLI Building entry: src/index.ts, src/processors/index.ts
 CLI Using tsconfig: tsconfig.json
 CLI tsup v8.4.0
 TSC Build start
-TSC ⚡️ Build success in
+TSC ⚡️ Build success in 9292ms
 DTS Build start
 CLI Target: es2022
 Analysis will use the bundled TypeScript version 5.8.3
 Writing package typings: /home/runner/work/mastra/mastra/packages/memory/dist/_tsup-dts-rollup.d.ts
 Analysis will use the bundled TypeScript version 5.8.3
 Writing package typings: /home/runner/work/mastra/mastra/packages/memory/dist/_tsup-dts-rollup.d.cts
-DTS ⚡️ Build success in
+DTS ⚡️ Build success in 11931ms
 CLI Cleaning output folder
 ESM Build start
 CJS Build start
-ESM dist/index.js 18.27 KB
 ESM dist/processors/index.js 5.38 KB
-ESM
-
+ESM dist/index.js 18.68 KB
+ESM ⚡️ Build success in 1200ms
 CJS dist/processors/index.cjs 5.59 KB
-CJS
+CJS dist/index.cjs 18.85 KB
+CJS ⚡️ Build success in 1201ms
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,22 @@
 # @mastra/memory
 
+## 0.3.2-alpha.7
+
+### Patch Changes
+
+- 67e14dd: Allow for textpart message content to be embedded into vectors
+- Updated dependencies [6052aa6]
+- Updated dependencies [7d8b7c7]
+- Updated dependencies [3a5f1e1]
+- Updated dependencies [8398d89]
+  - @mastra/core@0.9.2-alpha.6
+
+## 0.3.2-alpha.6
+
+### Patch Changes
+
+- 544767d: Improved token estimation in TokenLimiter from 96% accuracy back to 99%
+
 ## 0.3.2-alpha.5
 
 ### Patch Changes
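
The headline change in alpha.7 (changeset 67e14dd) is that messages whose content is an array of parts, rather than a plain string, can now have their text embedded for semantic recall. A minimal sketch of the kind of message this affects, using the CoreMessage shape from the ai SDK this package already depends on (the sample text is illustrative):

import type { CoreMessage } from 'ai';

// Before alpha.7, only plain string content reached the embedder; a
// parts-array message like this one was skipped during vectorization.
const message: CoreMessage = {
  role: 'user',
  content: [
    { type: 'text', text: 'What is the capital of France?' },
    { type: 'text', text: 'Answer in one word.' },
  ],
};
// After alpha.7, the text parts are joined with a space and embedded:
// "What is the capital of France? Answer in one word."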
package/dist/index.cjs
CHANGED
@@ -301,8 +301,15 @@ var Memory = class extends memory.MastraMemory {
     let indexName;
     await Promise.all(
       updatedMessages.map(async (message) => {
-
-
+        let textForEmbedding = null;
+        if (typeof message.content === "string" && message.content.trim() !== "") {
+          textForEmbedding = message.content;
+        } else if (Array.isArray(message.content)) {
+          const joined = message.content.filter((part) => part && part.type === "text" && typeof part.text === "string").map((part) => part.text).join(" ").trim();
+          if (joined) textForEmbedding = joined;
+        }
+        if (!textForEmbedding) return;
+        const { embeddings, chunks, dimension } = await this.embedMessageContent(textForEmbedding);
         if (typeof indexName === `undefined`) {
           indexName = this.createEmbeddingIndex(dimension).then((result2) => result2.indexName);
         }
package/dist/index.js
CHANGED
@@ -295,8 +295,15 @@ var Memory = class extends MastraMemory {
     let indexName;
     await Promise.all(
       updatedMessages.map(async (message) => {
-
-
+        let textForEmbedding = null;
+        if (typeof message.content === "string" && message.content.trim() !== "") {
+          textForEmbedding = message.content;
+        } else if (Array.isArray(message.content)) {
+          const joined = message.content.filter((part) => part && part.type === "text" && typeof part.text === "string").map((part) => part.text).join(" ").trim();
+          if (joined) textForEmbedding = joined;
+        }
+        if (!textForEmbedding) return;
+        const { embeddings, chunks, dimension } = await this.embedMessageContent(textForEmbedding);
         if (typeof indexName === `undefined`) {
           indexName = this.createEmbeddingIndex(dimension).then((result2) => result2.indexName);
         }
package/dist/processors/index.cjs
CHANGED

@@ -16,9 +16,9 @@ var TokenLimiter = class extends memory.MemoryProcessor {
   // Token overheads per OpenAI's documentation
   // See: https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken#6-counting-tokens-for-chat-completions-api-calls
   // Every message follows <|start|>{role/name}\n{content}<|end|>
-  TOKENS_PER_MESSAGE = 3;
+  TOKENS_PER_MESSAGE = 3.8;
   // tokens added for each message (start & end tokens)
-  TOKENS_PER_TOOL = 2;
+  TOKENS_PER_TOOL = 2.2;
   // empirical adjustment for tool calls
   TOKENS_PER_CONVERSATION = 25;
   // fixed overhead for the conversation
package/dist/processors/index.js
CHANGED
@@ -10,9 +10,9 @@ var TokenLimiter = class extends MemoryProcessor {
   // Token overheads per OpenAI's documentation
   // See: https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken#6-counting-tokens-for-chat-completions-api-calls
   // Every message follows <|start|>{role/name}\n{content}<|end|>
-  TOKENS_PER_MESSAGE = 3;
+  TOKENS_PER_MESSAGE = 3.8;
   // tokens added for each message (start & end tokens)
-  TOKENS_PER_TOOL = 2;
+  TOKENS_PER_TOOL = 2.2;
   // empirical adjustment for tool calls
   TOKENS_PER_CONVERSATION = 25;
   // fixed overhead for the conversation
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@mastra/memory",
-  "version": "0.3.2-alpha.5",
+  "version": "0.3.2-alpha.7",
   "description": "",
   "type": "module",
   "main": "./dist/index.js",
@@ -41,7 +41,7 @@
     "redis": "^4.7.0",
     "xxhash-wasm": "^1.1.0",
     "zod": "^3.24.2",
-    "@mastra/core": "^0.9.2-alpha.
+    "@mastra/core": "^0.9.2-alpha.6"
   },
   "devDependencies": {
     "@ai-sdk/openai": "^1.3.3",
package/src/index.ts
CHANGED
@@ -4,6 +4,7 @@ import { MastraMemory } from '@mastra/core/memory';
 import type { MessageType, MemoryConfig, SharedMemoryConfig, StorageThreadType } from '@mastra/core/memory';
 import type { StorageGetMessagesArg } from '@mastra/core/storage';
 import { embedMany } from 'ai';
+import type { TextPart } from 'ai';
 
 import xxhash from 'xxhash-wasm';
 import { updateWorkingMemoryTool } from './tools/working-memory';
@@ -337,9 +338,23 @@ export class Memory extends MastraMemory {
     let indexName: Promise<string>;
     await Promise.all(
       updatedMessages.map(async message => {
-
+        let textForEmbedding: string | null = null;
+
+        if (typeof message.content === 'string' && message.content.trim() !== '') {
+          textForEmbedding = message.content;
+        } else if (Array.isArray(message.content)) {
+          // Extract text from all text parts, concatenate
+          const joined = message.content
+            .filter(part => part && part.type === 'text' && typeof part.text === 'string')
+            .map(part => (part as TextPart).text)
+            .join(' ')
+            .trim();
+          if (joined) textForEmbedding = joined;
+        }
+
+        if (!textForEmbedding) return;
 
-        const { embeddings, chunks, dimension } = await this.embedMessageContent(
+        const { embeddings, chunks, dimension } = await this.embedMessageContent(textForEmbedding);
 
         if (typeof indexName === `undefined`) {
           indexName = this.createEmbeddingIndex(dimension).then(result => result.indexName);
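
For readability, the new branching in the hunk above can be distilled into a standalone function. This is an illustrative sketch only; extractTextForEmbedding and MessagePart are hypothetical names, not exports of @mastra/memory:

type MessagePart = { type: string; text?: string };

// Mirrors the new logic: plain string content is embedded as-is; for parts
// arrays, every text part is joined with a single space; anything else
// (image-only or tool-call-only messages) yields null and is not embedded.
function extractTextForEmbedding(content: string | MessagePart[]): string | null {
  if (typeof content === 'string' && content.trim() !== '') {
    return content;
  }
  if (Array.isArray(content)) {
    const joined = content
      .filter(part => part && part.type === 'text' && typeof part.text === 'string')
      .map(part => part.text as string)
      .join(' ')
      .trim();
    if (joined) return joined;
  }
  return null;
}

Returning null rather than an empty string is what lets the caller bail out early (`if (!textForEmbedding) return;`) before any embedding or index-creation work is scheduled for that message.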
package/src/processors/index.test.ts
CHANGED

@@ -72,7 +72,7 @@ describe('TokenLimiter', () => {
       estimatedTokens += testLimiter.countTokens(message as CoreMessage);
     }
 
-    return estimatedTokens;
+    return Number(estimatedTokens.toFixed(2));
   }
 
   function percentDifference(a: number, b: number) {
@@ -90,7 +90,7 @@ describe('TokenLimiter', () => {
     console.log(`Estimated ${estimate} tokens, used ${used} tokens.\n`, counts);
 
     // Check if within 2% margin
-    expect(percentDifference(estimate, used)).toBeLessThanOrEqual(
+    expect(percentDifference(estimate, used)).toBeLessThanOrEqual(2);
   }
 
   const calculatorTool = createTool({
@@ -111,7 +111,7 @@ describe('TokenLimiter', () => {
     tools: { calculatorTool },
   });
 
-  describe.concurrent(`
+  describe.concurrent(`98% accuracy`, () => {
     it(`20 messages, no tools`, async () => {
       await expectTokenEstimate(
         {
@@ -134,10 +134,10 @@ describe('TokenLimiter', () => {
       );
     });
 
-    it(`
+    it(`20 messages, 0 tools`, async () => {
       await expectTokenEstimate(
         {
-          messageCount:
+          messageCount: 10,
           toolFrequency: 0,
           threadId: '3',
         },
package/src/processors/token-limiter.ts
CHANGED

@@ -25,8 +25,8 @@ export class TokenLimiter extends MemoryProcessor {
   // Token overheads per OpenAI's documentation
   // See: https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken#6-counting-tokens-for-chat-completions-api-calls
   // Every message follows <|start|>{role/name}\n{content}<|end|>
-  public TOKENS_PER_MESSAGE = 3; // tokens added for each message (start & end tokens)
-  public TOKENS_PER_TOOL = 2; // empirical adjustment for tool calls
+  public TOKENS_PER_MESSAGE = 3.8; // tokens added for each message (start & end tokens)
+  public TOKENS_PER_TOOL = 2.2; // empirical adjustment for tool calls
   public TOKENS_PER_CONVERSATION = 25; // fixed overhead for the conversation
 
   /**
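
countTokens itself is outside this diff, so the following is a hypothetical sketch of how the retuned constants plausibly combine, following the OpenAI cookbook formula the comments cite. encodeLength and estimateTokens are illustrative names, standing in for the real tokenizer call (e.g. a tiktoken encode(...).length) and the real method:

declare function encodeLength(text: string): number; // stand-in for a real tokenizer call

type Msg = { content: string; isToolCall?: boolean };

function estimateTokens(
  messages: Msg[],
  limiter: {
    TOKENS_PER_MESSAGE: number;      // 3.8 as of alpha.6 (was 3)
    TOKENS_PER_TOOL: number;         // 2.2 as of alpha.6 (was 2)
    TOKENS_PER_CONVERSATION: number; // 25, unchanged
  },
): number {
  let total = limiter.TOKENS_PER_CONVERSATION; // fixed per-conversation overhead
  for (const message of messages) {
    total += limiter.TOKENS_PER_MESSAGE; // <|start|>{role/name}\n{content}<|end|> framing
    if (message.isToolCall) total += limiter.TOKENS_PER_TOOL; // empirical tool-call adjustment
    total += encodeLength(message.content); // actual encoded content length
  }
  return total;
}

Note that the per-message and per-tool overheads are now fractional, so the running total is no longer an integer; that is why the test above rounds the result with Number(estimatedTokens.toFixed(2)) before comparing against real usage.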