@mastra/memory 0.3.2-alpha.4 → 0.3.2-alpha.6
This diff compares the contents of publicly released versions of this package as published to a supported registry. It is provided for informational purposes only and reflects the package versions exactly as they appear in their public registry.
package/.turbo/turbo-build.log CHANGED

@@ -1,29 +1,29 @@
 
-> @mastra/memory@0.3.2-alpha.4 build /home/runner/work/mastra/mastra/packages/memory
+> @mastra/memory@0.3.2-alpha.6 build /home/runner/work/mastra/mastra/packages/memory
 > pnpm run check && tsup src/index.ts src/processors/index.ts --format esm,cjs --experimental-dts --clean --treeshake=smallest --splitting
 
 
-> @mastra/memory@0.3.2-alpha.4 check /home/runner/work/mastra/mastra/packages/memory
+> @mastra/memory@0.3.2-alpha.6 check /home/runner/work/mastra/mastra/packages/memory
 > tsc --noEmit
 
 CLI Building entry: src/index.ts, src/processors/index.ts
 CLI Using tsconfig: tsconfig.json
 CLI tsup v8.4.0
 TSC Build start
-TSC ⚡️ Build success in
+TSC ⚡️ Build success in 9693ms
 DTS Build start
 CLI Target: es2022
 Analysis will use the bundled TypeScript version 5.8.3
 Writing package typings: /home/runner/work/mastra/mastra/packages/memory/dist/_tsup-dts-rollup.d.ts
 Analysis will use the bundled TypeScript version 5.8.3
 Writing package typings: /home/runner/work/mastra/mastra/packages/memory/dist/_tsup-dts-rollup.d.cts
-DTS ⚡️ Build success in
+DTS ⚡️ Build success in 11004ms
 CLI Cleaning output folder
 ESM Build start
 CJS Build start
 ESM dist/index.js 18.27 KB
 ESM dist/processors/index.js 5.38 KB
-ESM ⚡️ Build success in
-CJS dist/index.cjs 18.45 KB
+ESM ⚡️ Build success in 718ms
 CJS dist/processors/index.cjs 5.59 KB
-CJS
+CJS dist/index.cjs 18.45 KB
+CJS ⚡️ Build success in 719ms
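The two commands echoed above come straight from the package's npm scripts. A minimal sketch of the scripts section implied by this log, reconstructed from the echoed commands (the real package.json may contain more entries):

  "scripts": {
    "check": "tsc --noEmit",
    "build": "pnpm run check && tsup src/index.ts src/processors/index.ts --format esm,cjs --experimental-dts --clean --treeshake=smallest --splitting"
  }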
package/CHANGELOG.md CHANGED

@@ -1,5 +1,22 @@
 # @mastra/memory
 
+## 0.3.2-alpha.6
+
+### Patch Changes
+
+- 544767d: Improved token estimation in TokenLimiter from 96% accuracy back to 99%
+
+## 0.3.2-alpha.5
+
+### Patch Changes
+
+- Updated dependencies [3d2fb5c]
+- Updated dependencies [7eeb2bc]
+- Updated dependencies [8607972]
+- Updated dependencies [7eeb2bc]
+- Updated dependencies [fba031f]
+  - @mastra/core@0.9.2-alpha.5
+
 ## 0.3.2-alpha.4
 
 ### Patch Changes
@@ -16,9 +16,9 @@ var TokenLimiter = class extends memory.MemoryProcessor {
   // Token overheads per OpenAI's documentation
   // See: https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken#6-counting-tokens-for-chat-completions-api-calls
   // Every message follows <|start|>{role/name}\n{content}<|end|>
-  TOKENS_PER_MESSAGE = 3;
+  TOKENS_PER_MESSAGE = 3.8;
   // tokens added for each message (start & end tokens)
-  TOKENS_PER_TOOL = 2;
+  TOKENS_PER_TOOL = 2.2;
   // empirical adjustment for tool calls
   TOKENS_PER_CONVERSATION = 25;
   // fixed overhead for the conversation
package/dist/processors/index.js CHANGED

@@ -10,9 +10,9 @@ var TokenLimiter = class extends MemoryProcessor {
   // Token overheads per OpenAI's documentation
   // See: https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken#6-counting-tokens-for-chat-completions-api-calls
   // Every message follows <|start|>{role/name}\n{content}<|end|>
-  TOKENS_PER_MESSAGE = 3;
+  TOKENS_PER_MESSAGE = 3.8;
   // tokens added for each message (start & end tokens)
-  TOKENS_PER_TOOL = 2;
+  TOKENS_PER_TOOL = 2.2;
   // empirical adjustment for tool calls
   TOKENS_PER_CONVERSATION = 25;
   // fixed overhead for the conversation
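Both the CJS and ESM bundles carry the same tuning: the per-message overhead moves from 3 to 3.8 tokens and the per-tool adjustment from 2 to 2.2, matching the changelog's "96% back to 99%" claim. As a rough sketch of how constants like these feed an estimate (the message shape and encode helper are assumptions for illustration, not the package's actual internals):

  // Hypothetical illustration of the overhead model the comments describe;
  // TokenLimiter's real countTokens() differs in detail.
  type SketchMessage = { content: string; toolCallCount: number };

  function estimateTokens(
    messages: SketchMessage[],
    encode: (text: string) => number[], // e.g. a js-tiktoken encoder
  ): number {
    let total = 25; // TOKENS_PER_CONVERSATION: fixed per-conversation overhead
    for (const m of messages) {
      total += encode(m.content).length; // tokens in the message body itself
      total += 3.8; // TOKENS_PER_MESSAGE: <|start|>{role}\n{content}<|end|> framing
      total += m.toolCallCount * 2.2; // TOKENS_PER_TOOL: empirical tool-call adjustment
    }
    return total;
  }

Note that with fractional constants the running total is no longer a whole number, which is why the test change below rounds the estimate with toFixed(2).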
package/package.json CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@mastra/memory",
-  "version": "0.3.2-alpha.4",
+  "version": "0.3.2-alpha.6",
   "description": "",
   "type": "module",
   "main": "./dist/index.js",
@@ -41,7 +41,7 @@
     "redis": "^4.7.0",
     "xxhash-wasm": "^1.1.0",
     "zod": "^3.24.2",
-    "@mastra/core": "^0.9.2-alpha.
+    "@mastra/core": "^0.9.2-alpha.5"
   },
   "devDependencies": {
     "@ai-sdk/openai": "^1.3.3",
@@ -72,7 +72,7 @@ describe('TokenLimiter', () => {
       estimatedTokens += testLimiter.countTokens(message as CoreMessage);
     }
 
-    return estimatedTokens;
+    return Number(estimatedTokens.toFixed(2));
   }
 
   function percentDifference(a: number, b: number) {
@@ -90,7 +90,7 @@ describe('TokenLimiter', () => {
     console.log(`Estimated ${estimate} tokens, used ${used} tokens.\n`, counts);
 
     // Check if within 2% margin
-    expect(percentDifference(estimate, used)).toBeLessThanOrEqual(
+    expect(percentDifference(estimate, used)).toBeLessThanOrEqual(2);
  }
 
  const calculatorTool = createTool({
@@ -111,7 +111,7 @@ describe('TokenLimiter', () => {
     tools: { calculatorTool },
   });
 
-  describe.concurrent(`
+  describe.concurrent(`98% accuracy`, () => {
     it(`20 messages, no tools`, async () => {
       await expectTokenEstimate(
         {
@@ -134,10 +134,10 @@ describe('TokenLimiter', () => {
       );
     });
 
-    it(`
+    it(`20 messages, 0 tools`, async () => {
       await expectTokenEstimate(
         {
-          messageCount:
+          messageCount: 10,
           toolFrequency: 0,
           threadId: '3',
         },
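The test helper asserts the estimate stays within a 2% margin of the tokens the API actually counted, and the new toFixed(2) call keeps the now-fractional estimates readable in that comparison. percentDifference itself is outside this diff; one plausible definition consistent with the "within 2% margin" comment (an assumption, not necessarily the file's actual code):

  // Symmetric percent difference: |a - b| relative to the mean of a and b.
  function percentDifference(a: number, b: number): number {
    return (Math.abs(a - b) / ((a + b) / 2)) * 100;
  }

  // Example: a 99-token estimate against 100 real tokens differs by about 1%,
  // so expect(percentDifference(99, 100)).toBeLessThanOrEqual(2) passes.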
@@ -25,8 +25,8 @@ export class TokenLimiter extends MemoryProcessor {
   // Token overheads per OpenAI's documentation
   // See: https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken#6-counting-tokens-for-chat-completions-api-calls
   // Every message follows <|start|>{role/name}\n{content}<|end|>
-  public TOKENS_PER_MESSAGE = 3; // tokens added for each message (start & end tokens)
-  public TOKENS_PER_TOOL = 2; // empirical adjustment for tool calls
+  public TOKENS_PER_MESSAGE = 3.8; // tokens added for each message (start & end tokens)
+  public TOKENS_PER_TOOL = 2.2; // empirical adjustment for tool calls
   public TOKENS_PER_CONVERSATION = 25; // fixed overhead for the conversation
 
   /**
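For context, TokenLimiter is a memory processor that trims recalled history to a token budget, so the accuracy of these constants determines how close it can safely run to a model's context window. A minimal usage sketch based on the package's public entry points (the 127000 limit is an arbitrary example):

  import { Memory } from "@mastra/memory";
  import { TokenLimiter } from "@mastra/memory/processors";

  // Keep recalled messages under roughly 127k estimated tokens; with the
  // retuned constants the estimate should track real usage within ~1-2%.
  const memory = new Memory({
    processors: [new TokenLimiter(127000)],
  });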