@mastra/memory 0.3.2-alpha.5 → 0.3.2-alpha.6

This diff compares the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
@@ -1,29 +1,29 @@
 
-> @mastra/memory@0.3.2-alpha.5 build /home/runner/work/mastra/mastra/packages/memory
+> @mastra/memory@0.3.2-alpha.6 build /home/runner/work/mastra/mastra/packages/memory
 > pnpm run check && tsup src/index.ts src/processors/index.ts --format esm,cjs --experimental-dts --clean --treeshake=smallest --splitting
 
 
-> @mastra/memory@0.3.2-alpha.5 check /home/runner/work/mastra/mastra/packages/memory
+> @mastra/memory@0.3.2-alpha.6 check /home/runner/work/mastra/mastra/packages/memory
 > tsc --noEmit
 
 CLI Building entry: src/index.ts, src/processors/index.ts
 CLI Using tsconfig: tsconfig.json
 CLI tsup v8.4.0
 TSC Build start
-TSC ⚡️ Build success in 8312ms
+TSC ⚡️ Build success in 9693ms
 DTS Build start
 CLI Target: es2022
 Analysis will use the bundled TypeScript version 5.8.3
 Writing package typings: /home/runner/work/mastra/mastra/packages/memory/dist/_tsup-dts-rollup.d.ts
 Analysis will use the bundled TypeScript version 5.8.3
 Writing package typings: /home/runner/work/mastra/mastra/packages/memory/dist/_tsup-dts-rollup.d.cts
-DTS ⚡️ Build success in 6581ms
+DTS ⚡️ Build success in 11004ms
 CLI Cleaning output folder
 ESM Build start
 CJS Build start
 ESM dist/index.js 18.27 KB
 ESM dist/processors/index.js 5.38 KB
-ESM ⚡️ Build success in 393ms
-CJS dist/index.cjs 18.45 KB
+ESM ⚡️ Build success in 718ms
 CJS dist/processors/index.cjs 5.59 KB
-CJS ⚡️ Build success in 394ms
+CJS dist/index.cjs 18.45 KB
+CJS ⚡️ Build success in 719ms
package/CHANGELOG.md CHANGED
@@ -1,5 +1,11 @@
 # @mastra/memory
 
+## 0.3.2-alpha.6
+
+### Patch Changes
+
+- 544767d: Improved token estimation in TokenLimiter from 96% accuracy back to 99%
+
 ## 0.3.2-alpha.5
 
 ### Patch Changes
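
The changelog entry above refers to the retuned overhead constants in the `TokenLimiter` processor (see the hunks below). A minimal sketch of the estimation model those constants plug into, following the OpenAI cookbook formula cited in the code; the `encode` tokenizer callback and the `Msg` shape are assumptions for illustration, not the package's actual `countTokens` signature:

```ts
// Sketch of the per-message overhead model; constants match this release.
type Msg = { content: string; isToolCall?: boolean };

function estimateTokens(messages: Msg[], encode: (s: string) => number[]): number {
  const TOKENS_PER_MESSAGE = 3.8; // start/end tokens per message (was 3)
  const TOKENS_PER_TOOL = 2.2; // empirical adjustment for tool calls (was 2)
  const TOKENS_PER_CONVERSATION = 25; // fixed overhead for the conversation

  let total = TOKENS_PER_CONVERSATION;
  for (const message of messages) {
    total += TOKENS_PER_MESSAGE + encode(message.content).length;
    if (message.isToolCall) total += TOKENS_PER_TOOL;
  }
  return total;
}
```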
@@ -16,9 +16,9 @@ var TokenLimiter = class extends memory.MemoryProcessor {
   // Token overheads per OpenAI's documentation
   // See: https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken#6-counting-tokens-for-chat-completions-api-calls
   // Every message follows <|start|>{role/name}\n{content}<|end|>
-  TOKENS_PER_MESSAGE = 3;
+  TOKENS_PER_MESSAGE = 3.8;
   // tokens added for each message (start & end tokens)
-  TOKENS_PER_TOOL = 2;
+  TOKENS_PER_TOOL = 2.2;
   // empirical adjustment for tool calls
   TOKENS_PER_CONVERSATION = 25;
   // fixed overhead for the conversation
@@ -10,9 +10,9 @@ var TokenLimiter = class extends MemoryProcessor {
   // Token overheads per OpenAI's documentation
   // See: https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken#6-counting-tokens-for-chat-completions-api-calls
   // Every message follows <|start|>{role/name}\n{content}<|end|>
-  TOKENS_PER_MESSAGE = 3;
+  TOKENS_PER_MESSAGE = 3.8;
   // tokens added for each message (start & end tokens)
-  TOKENS_PER_TOOL = 2;
+  TOKENS_PER_TOOL = 2.2;
   // empirical adjustment for tool calls
   TOKENS_PER_CONVERSATION = 25;
   // fixed overhead for the conversation
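
The same constant change lands in both compiled bundles (CJS above, ESM here), since both are built from the one TypeScript source. Because the overheads are simple additive terms, the size of the correction is easy to work out; a small illustrative calculation (the message and tool counts are made up, not taken from the test suite):

```ts
// Fixed overhead for n messages, t of which are tool calls.
function overhead(n: number, t: number, perMessage: number, perTool: number): number {
  return 25 + n * perMessage + t * perTool; // 25 = TOKENS_PER_CONVERSATION
}

const before = overhead(20, 5, 3, 2); // 95 tokens with the old constants
const after = overhead(20, 5, 3.8, 2.2); // 112 tokens with the new ones
// The retuning adds 17 tokens here, correcting a systematic underestimate.
```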
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@mastra/memory",
-  "version": "0.3.2-alpha.5",
+  "version": "0.3.2-alpha.6",
   "description": "",
   "type": "module",
   "main": "./dist/index.js",
@@ -72,7 +72,7 @@ describe('TokenLimiter', () => {
       estimatedTokens += testLimiter.countTokens(message as CoreMessage);
     }
 
-    return estimatedTokens;
+    return Number(estimatedTokens.toFixed(2));
   }
 
   function percentDifference(a: number, b: number) {
@@ -90,7 +90,7 @@ describe('TokenLimiter', () => {
     console.log(`Estimated ${estimate} tokens, used ${used} tokens.\n`, counts);
 
     // Check if within 2% margin
-    expect(percentDifference(estimate, used)).toBeLessThanOrEqual(4);
+    expect(percentDifference(estimate, used)).toBeLessThanOrEqual(2);
   }
 
   const calculatorTool = createTool({
@@ -111,7 +111,7 @@ describe('TokenLimiter', () => {
     tools: { calculatorTool },
   });
 
-  describe.concurrent(`96% accuracy`, () => {
+  describe.concurrent(`98% accuracy`, () => {
     it(`20 messages, no tools`, async () => {
       await expectTokenEstimate(
         {
@@ -134,10 +134,10 @@ describe('TokenLimiter', () => {
       );
     });
 
-    it(`4 messages, 0 tools`, async () => {
+    it(`20 messages, 0 tools`, async () => {
       await expectTokenEstimate(
         {
-          messageCount: 2,
+          messageCount: 10,
           toolFrequency: 0,
           threadId: '3',
         },
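
The tightened assertion leans on the `percentDifference` helper, whose body sits outside these hunks. One plausible implementation consistent with a symmetric 2% margin (the exact formula is an assumption, not shown in this diff):

```ts
// Symmetric percent difference between two token counts, relative to their mean.
function percentDifference(a: number, b: number): number {
  return (Math.abs(a - b) / ((a + b) / 2)) * 100;
}

// e.g. percentDifference(102, 100) ≈ 1.98, which passes the new 2% bound;
// the old 4% bound would also have accepted estimates near 96 or 104.
```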
@@ -25,8 +25,8 @@ export class TokenLimiter extends MemoryProcessor {
   // Token overheads per OpenAI's documentation
   // See: https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken#6-counting-tokens-for-chat-completions-api-calls
   // Every message follows <|start|>{role/name}\n{content}<|end|>
-  public TOKENS_PER_MESSAGE = 3; // tokens added for each message (start & end tokens)
-  public TOKENS_PER_TOOL = 2; // empirical adjustment for tool calls
+  public TOKENS_PER_MESSAGE = 3.8; // tokens added for each message (start & end tokens)
+  public TOKENS_PER_TOOL = 2.2; // empirical adjustment for tool calls
   public TOKENS_PER_CONVERSATION = 25; // fixed overhead for the conversation
 
   /**
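
For context, this is roughly how the patched processor is used; the `Memory({ processors })` option, the `@mastra/memory/processors` subpath (matching the `dist/processors` entry built above), and the numeric token-limit argument follow Mastra's typical usage and are not confirmed by this diff:

```ts
import { Memory } from '@mastra/memory';
import { TokenLimiter } from '@mastra/memory/processors';

// Trim recalled history to roughly 127k tokens using the retuned estimator.
const memory = new Memory({
  processors: [new TokenLimiter(127000)],
});
```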