@mastra/memory 1.0.0-beta.1 → 1.0.0-beta.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +355 -0
- package/dist/_types/@internal_ai-sdk-v4/dist/index.d.ts +7549 -0
- package/dist/chunk-DGUM43GV.js +10 -0
- package/dist/chunk-DGUM43GV.js.map +1 -0
- package/dist/chunk-JEQ2X3Z6.cjs +12 -0
- package/dist/chunk-JEQ2X3Z6.cjs.map +1 -0
- package/dist/chunk-KMQS2YEC.js +79 -0
- package/dist/chunk-KMQS2YEC.js.map +1 -0
- package/dist/chunk-MMUHFOCG.js +79 -0
- package/dist/chunk-MMUHFOCG.js.map +1 -0
- package/dist/chunk-QY6BZOPJ.js +250 -0
- package/dist/chunk-QY6BZOPJ.js.map +1 -0
- package/dist/chunk-SG3GRV3O.cjs +84 -0
- package/dist/chunk-SG3GRV3O.cjs.map +1 -0
- package/dist/chunk-W72AYUIF.cjs +252 -0
- package/dist/chunk-W72AYUIF.cjs.map +1 -0
- package/dist/chunk-WC4XBMZT.js +250 -0
- package/dist/chunk-WC4XBMZT.js.map +1 -0
- package/dist/chunk-YMNW6DEN.cjs +252 -0
- package/dist/chunk-YMNW6DEN.cjs.map +1 -0
- package/dist/chunk-ZUQPUTTO.cjs +84 -0
- package/dist/chunk-ZUQPUTTO.cjs.map +1 -0
- package/dist/docs/README.md +36 -0
- package/dist/docs/SKILL.md +42 -0
- package/dist/docs/SOURCE_MAP.json +31 -0
- package/dist/docs/agents/01-agent-memory.md +160 -0
- package/dist/docs/agents/02-networks.md +236 -0
- package/dist/docs/agents/03-agent-approval.md +317 -0
- package/dist/docs/core/01-reference.md +114 -0
- package/dist/docs/memory/01-overview.md +76 -0
- package/dist/docs/memory/02-storage.md +181 -0
- package/dist/docs/memory/03-working-memory.md +386 -0
- package/dist/docs/memory/04-semantic-recall.md +235 -0
- package/dist/docs/memory/05-memory-processors.md +319 -0
- package/dist/docs/memory/06-reference.md +617 -0
- package/dist/docs/processors/01-reference.md +81 -0
- package/dist/docs/storage/01-reference.md +972 -0
- package/dist/docs/vectors/01-reference.md +929 -0
- package/dist/index.cjs +14845 -115
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.ts +145 -5
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +14807 -119
- package/dist/index.js.map +1 -1
- package/dist/token-6GSAFR2W-JV3TZR4M.cjs +63 -0
- package/dist/token-6GSAFR2W-JV3TZR4M.cjs.map +1 -0
- package/dist/token-6GSAFR2W-K2BTU23I.js +61 -0
- package/dist/token-6GSAFR2W-K2BTU23I.js.map +1 -0
- package/dist/token-6GSAFR2W-VLY2XUPA.js +61 -0
- package/dist/token-6GSAFR2W-VLY2XUPA.js.map +1 -0
- package/dist/token-6GSAFR2W-YCB5SK2Z.cjs +63 -0
- package/dist/token-6GSAFR2W-YCB5SK2Z.cjs.map +1 -0
- package/dist/token-util-NEHG7TUY-7IL6JUVY.cjs +10 -0
- package/dist/token-util-NEHG7TUY-7IL6JUVY.cjs.map +1 -0
- package/dist/token-util-NEHG7TUY-HF7KBP2H.cjs +10 -0
- package/dist/token-util-NEHG7TUY-HF7KBP2H.cjs.map +1 -0
- package/dist/token-util-NEHG7TUY-KSXDO2NO.js +8 -0
- package/dist/token-util-NEHG7TUY-KSXDO2NO.js.map +1 -0
- package/dist/token-util-NEHG7TUY-TIJ3LMSH.js +8 -0
- package/dist/token-util-NEHG7TUY-TIJ3LMSH.js.map +1 -0
- package/dist/tools/working-memory.d.ts +10 -2
- package/dist/tools/working-memory.d.ts.map +1 -1
- package/package.json +19 -25
- package/dist/processors/index.cjs +0 -165
- package/dist/processors/index.cjs.map +0 -1
- package/dist/processors/index.d.ts +0 -3
- package/dist/processors/index.d.ts.map +0 -1
- package/dist/processors/index.js +0 -158
- package/dist/processors/index.js.map +0 -1
- package/dist/processors/token-limiter.d.ts +0 -32
- package/dist/processors/token-limiter.d.ts.map +0 -1
- package/dist/processors/tool-call-filter.d.ts +0 -20
- package/dist/processors/tool-call-filter.d.ts.map +0 -1

package/dist/processors/index.cjs.map
DELETED
@@ -1 +0,0 @@
{"version":3,"sources":["../../src/processors/token-limiter.ts","../../src/processors/tool-call-filter.ts"],"names":["MemoryProcessor","Tiktoken","o200k_base"],"mappings":";;;;;;;;;;;AAsBO,IAAM,YAAA,GAAN,cAA2BA,sBAAA,CAAgB;AAAA,EACxC,OAAA;AAAA,EACA,SAAA;AAAA;AAAA;AAAA;AAAA,EAKD,kBAAA,GAAqB,GAAA;AAAA;AAAA,EACrB,uBAAA,GAA0B,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAMjC,YAAY,OAAA,EAAuC;AACjD,IAAA,KAAA,CAAM;AAAA,MACJ,IAAA,EAAM;AAAA,KACP,CAAA;AAED,IAAA,IAAI,OAAO,YAAY,QAAA,EAAU;AAE/B,MAAA,IAAA,CAAK,SAAA,GAAY,OAAA;AACjB,MAAA,IAAA,CAAK,OAAA,GAAU,IAAIC,aAAA,CAASC,2BAAU,CAAA;AAAA,IACxC,CAAA,MAAO;AAEL,MAAA,IAAA,CAAK,YAAY,OAAA,CAAQ,KAAA;AACzB,MAAA,IAAA,CAAK,OAAA,GAAU,IAAID,aAAA,CAAS,OAAA,CAAQ,YAAYC,2BAAU,CAAA;AAAA,IAC5D;AAAA,EACF;AAAA,EAEA,OAAA,CACE,UACA,EAAE,aAAA,EAAe,qBAAqB,WAAA,EAAY,GAAyB,EAAC,EAC7D;AAEf,IAAA,IAAI,WAAA,GAAc,CAAA;AAGlB,IAAA,WAAA,IAAe,IAAA,CAAK,uBAAA;AAEpB,IAAA,IAAI,aAAA,EAAe;AACjB,MAAA,WAAA,IAAe,IAAA,CAAK,YAAY,aAAa,CAAA;AAC7C,MAAA,WAAA,IAAe,IAAA,CAAK,kBAAA;AAAA,IACtB;AAEA,IAAA,IAAI,mBAAA,EAAqB;AACvB,MAAA,WAAA,IAAe,IAAA,CAAK,YAAY,mBAAmB,CAAA;AACnD,MAAA,WAAA,IAAe,IAAA,CAAK,kBAAA;AAAA,IACtB;AAEA,IAAA,MAAM,cAAc,CAAC,GAAG,UAAU,GAAI,WAAA,IAAe,EAAG,CAAA;AAExD,IAAA,MAAM,SAAwB,EAAC;AAG/B,IAAA,KAAA,IAAS,IAAI,WAAA,CAAY,MAAA,GAAS,CAAA,EAAG,CAAA,IAAK,GAAG,CAAA,EAAA,EAAK;AAChD,MAAA,MAAM,OAAA,GAAU,YAAY,CAAC,CAAA;AAG7B,MAAA,IAAI,CAAC,OAAA,EAAS;AAEd,MAAA,MAAM,aAAA,GAAgB,IAAA,CAAK,WAAA,CAAY,OAAO,CAAA;AAE9C,MAAA,IAAI,WAAA,GAAc,aAAA,IAAiB,IAAA,CAAK,SAAA,EAAW;AAEjD,QAAA,IAAI,CAAA,GAAI,SAAS,MAAA,EAAQ;AAEvB,UAAA,MAAA,CAAO,QAAQ,OAAO,CAAA;AAAA,QACxB;AACA,QAAA,WAAA,IAAe,aAAA;AAAA,MACjB,CAAA,MAAO;AACL,QAAA,IAAA,CAAK,MAAA,CAAO,IAAA;AAAA,UACV,CAAA,UAAA,EAAa,WAAA,CAAY,MAAA,GAAS,MAAA,CAAO,MAAM,IAAI,WAAA,CAAY,MAAM,CAAA,0BAAA,EAA6B,IAAA,CAAK,SAAS,CAAA,SAAA;AAAA,SAClH;AAEA,QAAA;AAAA,MACF;AAAA,IACF;AAEA,IAAA,OAAO,MAAA;AAAA,EACT;AAAA,EAEO,YAAY,OAAA,EAAuC;AACxD,IAAA,IAAI,OAAO,YAAY,CAAA,MAAA,CAAA,EAAU;AAC/B,MAAA,OAAO,IAAA,CAAK,OAAA,CAAQ,MAAA,CAAO,OAAO,CAAA,CAAE,MAAA;AAAA,IACtC;AAEA,IAAA,IAAI,cAAc,OAAA,CAAQ,IAAA;AAC1B,IAAA,IAAI,QAAA,GAAW,CAAA;AAEf,IAAA,IAAI,OAAO,OAAA,CAAQ,OAAA,KAAY,QAAA,IAAY,QAAQ,OAAA,EAAS;AAC1D,MAAA,WAAA,IAAe,OAAA,CAAQ,OAAA;AAAA,IACzB,CAAA,MAAA,IAAW,KAAA,CAAM,OAAA,CAAQ,OAAA,CAAQ,OAAO,CAAA,EAAG;AAEzC,MAAA,KAAA,MAAW,IAAA,IAAQ,QAAQ,OAAA,EAAS;AAClC,QAAA,IAAI,IAAA,CAAK,SAAS,MAAA,EAAQ;AACxB,UAAA,WAAA,IAAe,IAAA,CAAK,IAAA;AAAA,QACtB,WAAW,IAAA,CAAK,IAAA,KAAS,WAAA,IAAe,IAAA,CAAK,SAAS,CAAA,WAAA,CAAA,EAAe;AACnE,UAAA,IAAI,UAAU,IAAA,IAAQ,IAAA,CAAK,IAAA,IAAQ,IAAA,CAAK,SAAS,CAAA,SAAA,CAAA,EAAa;AAC5D,YAAA,WAAA,IAAe,IAAA,CAAK,QAAA;AACpB,YAAA,IAAI,OAAO,IAAA,CAAK,IAAA,KAAS,QAAA,EAAU;AACjC,cAAA,WAAA,IAAe,IAAA,CAAK,IAAA;AAAA,YACtB,CAAA,MAAO;AACL,cAAA,WAAA,IAAe,IAAA,CAAK,SAAA,CAAU,IAAA,CAAK,IAAI,CAAA;AAEvC,cAAA,QAAA,IAAY,EAAA;AAAA,YACd;AAAA,UACF;AAEA,UAAA,IAAI,YAAY,IAAA,IAAQ,IAAA,CAAK,WAAW,MAAA,IAAa,IAAA,CAAK,SAAS,CAAA,WAAA,CAAA,EAAe;AAChF,YAAA,IAAI,OAAO,IAAA,CAAK,MAAA,KAAW,QAAA,EAAU;AACnC,cAAA,WAAA,IAAe,IAAA,CAAK,MAAA;AAAA,YACtB,CAAA,MAAO;AACL,cAAA,WAAA,IAAe,IAAA,CAAK,SAAA,CAAU,IAAA,CAAK,MAAM,CAAA;AAEzC,cAAA,QAAA,IAAY,EAAA;AAAA,YACd;AAAA,UACF;AAAA,QACF,CAAA,MAAO;AACL,UAAA,WAAA,IAAe,IAAA,CAAK,UAAU,IAAI,CAAA;AAAA,QACpC;AAAA,MACF;AAAA,IACF;AAEA,IAAA,IACE,OAAO,QAAQ,OAAA,KAAY,CAAA,MAAA,CAAA;AAAA,IAE3B,OAAA,CAAQ,OAAA,CAAQ,IAAA,CAAK,CAAA,CAAA,KAAK,CAAA,CAAE,SAAS,CAAA,SAAA,CAAA,IAAe,CAAA,CAAE,IAAA,KAAS,CAAA,WAAA,CAAa,CAAA,EAC5E;AAGA,MAAA,QAAA,IAAY,IAAA,CAAK,kBAAA;AAAA,IACnB;AAEA,IAAA,OAAO,IAAA,CAAK,OAAA,CAAQ,MAAA,CAAO,WAAW,EAAE,MAAA,GAAS,QAAA;AAAA,EACnD;AACF;ACtJO,IAAM,cAAA,GAAN,cAA6BF,sBAAAA,CAAgB;AAAA,EAC1C,OAAA;AAAA;AAAA;AAAA;AAAA;AAAA
;AAAA,EAOR,WAAA,CAAY,OAAA,GAAkC,EAAC,EAAG;AAChD,IAAA,KAAA,CAAM,EAAE,IAAA,EAAM,gBAAA,EAAkB,CAAA;AAEhC,IAAA,IAAI,CAAC,OAAA,IAAW,CAAC,OAAA,CAAQ,OAAA,EAAS;AAChC,MAAA,IAAA,CAAK,OAAA,GAAU,KAAA;AAAA,IACjB,CAAA,MAAO;AAEL,MAAA,IAAA,CAAK,OAAA,GAAU,MAAM,OAAA,CAAQ,OAAA,CAAQ,OAAO,CAAA,GAAI,OAAA,CAAQ,UAAU,EAAC;AAAA,IACrE;AAAA,EACF;AAAA,EAEA,QAAQ,QAAA,EAAwC;AAE9C,IAAA,IAAI,IAAA,CAAK,YAAY,KAAA,EAAO;AAC1B,MAAA,OAAO,QAAA,CAAS,OAAO,CAAA,OAAA,KAAW;AAChC,QAAA,IAAI,KAAA,CAAM,OAAA,CAAQ,OAAA,CAAQ,OAAO,CAAA,EAAG;AAClC,UAAA,OAAO,CAAC,OAAA,CAAQ,OAAA,CAAQ,IAAA,CAAK,CAAA,IAAA,KAAQ,KAAK,IAAA,KAAS,WAAA,IAAe,IAAA,CAAK,IAAA,KAAS,aAAa,CAAA;AAAA,QAC/F;AACA,QAAA,OAAO,IAAA;AAAA,MACT,CAAC,CAAA;AAAA,IACH;AAGA,IAAA,IAAI,IAAA,CAAK,OAAA,CAAQ,MAAA,GAAS,CAAA,EAAG;AAE3B,MAAA,MAAM,mBAAA,uBAA0B,GAAA,EAAY;AAE5C,MAAA,OAAO,QAAA,CAAS,OAAO,CAAA,OAAA,KAAW;AAChC,QAAA,IAAI,CAAC,KAAA,CAAM,OAAA,CAAQ,OAAA,CAAQ,OAAO,GAAG,OAAO,IAAA;AAG5C,QAAA,IAAI,OAAA,CAAQ,SAAS,WAAA,EAAa;AAChC,UAAA,IAAI,aAAA,GAAgB,KAAA;AAEpB,UAAA,KAAA,MAAW,IAAA,IAAQ,QAAQ,OAAA,EAAS;AAClC,YAAA,IAAI,IAAA,CAAK,SAAS,WAAA,IAAe,IAAA,CAAK,QAAQ,QAAA,CAAS,IAAA,CAAK,QAAQ,CAAA,EAAG;AACrE,cAAA,mBAAA,CAAoB,GAAA,CAAI,KAAK,UAAU,CAAA;AACvC,cAAA,aAAA,GAAgB,IAAA;AAAA,YAClB;AAAA,UACF;AAEA,UAAA,OAAO,CAAC,aAAA;AAAA,QACV;AAGA,QAAA,IAAI,OAAA,CAAQ,SAAS,MAAA,EAAQ;AAC3B,UAAA,MAAM,aAAA,GAAgB,QAAQ,OAAA,CAAQ,IAAA;AAAA,YACpC,UAAQ,IAAA,CAAK,IAAA,KAAS,iBAAiB,mBAAA,CAAoB,GAAA,CAAI,KAAK,UAAU;AAAA,WAChF;AAEA,UAAA,OAAO,CAAC,aAAA;AAAA,QACV;AAEA,QAAA,OAAO,IAAA;AAAA,MACT,CAAC,CAAA;AAAA,IACH;AAGA,IAAA,OAAO,QAAA;AAAA,EACT;AACF","file":"index.cjs","sourcesContent":["import type { CoreMessage } from '@mastra/core/llm';\nimport { MemoryProcessor } from '@mastra/core/memory';\nimport type { MemoryProcessorOpts } from '@mastra/core/memory';\n\nimport { Tiktoken } from 'js-tiktoken/lite';\nimport type { TiktokenBPE } from 'js-tiktoken/lite';\nimport o200k_base from 'js-tiktoken/ranks/o200k_base';\n\n/**\n * Configuration options for TokenLimiter\n */\ninterface TokenLimiterOptions {\n /** Maximum number of tokens to allow */\n limit: number;\n /** Optional encoding to use (defaults to o200k_base which is used by gpt-4o) */\n encoding?: TiktokenBPE;\n}\n\n/**\n * Limits the total number of tokens in the messages.\n * Uses js-tiktoken with o200k_base encoding by default for accurate token counting with modern models.\n */\nexport class TokenLimiter extends MemoryProcessor {\n private encoder: Tiktoken;\n private maxTokens: number;\n\n // Token overheads per OpenAI's documentation\n // See: https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken#6-counting-tokens-for-chat-completions-api-calls\n // Every message follows <|start|>{role/name}\\n{content}<|end|>\n public TOKENS_PER_MESSAGE = 3.8; // tokens added for each message (start & end tokens)\n public TOKENS_PER_CONVERSATION = 24; // fixed overhead for the conversation\n\n /**\n * Create a token limiter for messages.\n * @param options Either a number (token limit) or a configuration object\n */\n constructor(options: number | TokenLimiterOptions) {\n super({\n name: 'TokenLimiter',\n });\n\n if (typeof options === 'number') {\n // Simple number format - just the token limit with default encoding\n this.maxTokens = options;\n this.encoder = new Tiktoken(o200k_base);\n } else {\n // Object format with limit and optional encoding\n this.maxTokens = options.limit;\n this.encoder = new Tiktoken(options.encoding || o200k_base);\n }\n }\n\n process(\n messages: CoreMessage[],\n { systemMessage, memorySystemMessage, newMessages }: MemoryProcessorOpts = {},\n ): 
CoreMessage[] {\n // Messages are already chronologically ordered - take most recent ones up to the token limit\n let totalTokens = 0;\n\n // Start with the conversation overhead\n totalTokens += this.TOKENS_PER_CONVERSATION;\n\n if (systemMessage) {\n totalTokens += this.countTokens(systemMessage);\n totalTokens += this.TOKENS_PER_MESSAGE; // Add message overhead for system message\n }\n\n if (memorySystemMessage) {\n totalTokens += this.countTokens(memorySystemMessage);\n totalTokens += this.TOKENS_PER_MESSAGE; // Add message overhead for memory system message\n }\n\n const allMessages = [...messages, ...(newMessages || [])];\n\n const result: CoreMessage[] = [];\n\n // Process messages in reverse (newest first) so that we stop estimating tokens on old messages. Once we get to our limit of tokens there's no reason to keep processing older messages\n for (let i = allMessages.length - 1; i >= 0; i--) {\n const message = allMessages[i];\n\n // Skip undefined messages (shouldn't happen, but TypeScript is concerned)\n if (!message) continue;\n\n const messageTokens = this.countTokens(message);\n\n if (totalTokens + messageTokens <= this.maxTokens) {\n // Insert at the beginning to maintain chronological order, but only if it's not a new message\n if (i < messages.length) {\n // less than messages.length because we're iterating in reverse. If the index is greater than messages.length it's a new message\n result.unshift(message);\n }\n totalTokens += messageTokens;\n } else {\n this.logger.info(\n `filtering ${allMessages.length - result.length}/${allMessages.length} messages, token limit of ${this.maxTokens} exceeded`,\n );\n // If we can't fit the message, we stop\n break;\n }\n }\n\n return result;\n }\n\n public countTokens(message: string | CoreMessage): number {\n if (typeof message === `string`) {\n return this.encoder.encode(message).length;\n }\n\n let tokenString = message.role;\n let overhead = 0;\n\n if (typeof message.content === 'string' && message.content) {\n tokenString += message.content;\n } else if (Array.isArray(message.content)) {\n // Calculate tokens for each content part\n for (const part of message.content) {\n if (part.type === 'text') {\n tokenString += part.text;\n } else if (part.type === 'tool-call' || part.type === `tool-result`) {\n if (`args` in part && part.args && part.type === `tool-call`) {\n tokenString += part.toolName as any;\n if (typeof part.args === 'string') {\n tokenString += part.args;\n } else {\n tokenString += JSON.stringify(part.args);\n // minus some tokens for JSON\n overhead -= 12;\n }\n }\n // Token cost for result if present\n if (`result` in part && part.result !== undefined && part.type === `tool-result`) {\n if (typeof part.result === 'string') {\n tokenString += part.result;\n } else {\n tokenString += JSON.stringify(part.result);\n // minus some tokens for JSON\n overhead -= 12;\n }\n }\n } else {\n tokenString += JSON.stringify(part);\n }\n }\n }\n\n if (\n typeof message.content === `string` ||\n // if the message included non-tool parts, add our message overhead\n message.content.some(p => p.type !== `tool-call` && p.type !== `tool-result`)\n ) {\n // Ensure we account for message formatting tokens\n // See: https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken#6-counting-tokens-for-chat-completions-api-calls\n overhead += this.TOKENS_PER_MESSAGE;\n }\n\n return this.encoder.encode(tokenString).length + overhead;\n }\n}\n","import type { CoreMessage } from '@mastra/core/llm';\nimport { MemoryProcessor } from 
'@mastra/core/memory';\n\n/**\n * Filters out tool calls and results from messages.\n * By default (with no arguments), excludes all tool calls and their results.\n * Can be configured to exclude only specific tools by name.\n */\nexport class ToolCallFilter extends MemoryProcessor {\n private exclude: string[] | 'all';\n\n /**\n * Create a filter for tool calls and results.\n * @param options Configuration options\n * @param options.exclude List of specific tool names to exclude. If not provided, all tool calls are excluded.\n */\n constructor(options: { exclude?: string[] } = {}) {\n super({ name: 'ToolCallFilter' });\n // If no options or exclude is provided, exclude all tools\n if (!options || !options.exclude) {\n this.exclude = 'all'; // Exclude all tools\n } else {\n // Exclude specific tools\n this.exclude = Array.isArray(options.exclude) ? options.exclude : [];\n }\n }\n\n process(messages: CoreMessage[]): CoreMessage[] {\n // Case 1: Exclude all tool calls and tool results\n if (this.exclude === 'all') {\n return messages.filter(message => {\n if (Array.isArray(message.content)) {\n return !message.content.some(part => part.type === 'tool-call' || part.type === 'tool-result');\n }\n return true;\n });\n }\n\n // Case 2: Exclude specific tools by name\n if (this.exclude.length > 0) {\n // Single pass approach - track excluded tool call IDs while filtering\n const excludedToolCallIds = new Set<string>();\n\n return messages.filter(message => {\n if (!Array.isArray(message.content)) return true;\n\n // For assistant messages, check for excluded tool calls and track their IDs\n if (message.role === 'assistant') {\n let shouldExclude = false;\n\n for (const part of message.content) {\n if (part.type === 'tool-call' && this.exclude.includes(part.toolName)) {\n excludedToolCallIds.add(part.toolCallId);\n shouldExclude = true;\n }\n }\n\n return !shouldExclude;\n }\n\n // For tool messages, filter out results for excluded tool calls\n if (message.role === 'tool') {\n const shouldExclude = message.content.some(\n part => part.type === 'tool-result' && excludedToolCallIds.has(part.toolCallId),\n );\n\n return !shouldExclude;\n }\n\n return true;\n });\n }\n\n // Case 3: Empty exclude array, return original messages\n return messages;\n }\n}\n"]}

package/dist/processors/index.d.ts.map
DELETED
@@ -1 +0,0 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/processors/index.ts"],"names":[],"mappings":"AAAA,cAAc,iBAAiB,CAAC;AAChC,cAAc,oBAAoB,CAAC"}
package/dist/processors/index.js
DELETED
@@ -1,158 +0,0 @@
import { MemoryProcessor } from '@mastra/core/memory';
import { Tiktoken } from 'js-tiktoken/lite';
import o200k_base from 'js-tiktoken/ranks/o200k_base';

// src/processors/token-limiter.ts
var TokenLimiter = class extends MemoryProcessor {
  encoder;
  maxTokens;
  // Token overheads per OpenAI's documentation
  // See: https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken#6-counting-tokens-for-chat-completions-api-calls
  // Every message follows <|start|>{role/name}\n{content}<|end|>
  TOKENS_PER_MESSAGE = 3.8;
  // tokens added for each message (start & end tokens)
  TOKENS_PER_CONVERSATION = 24;
  // fixed overhead for the conversation
  /**
   * Create a token limiter for messages.
   * @param options Either a number (token limit) or a configuration object
   */
  constructor(options) {
    super({
      name: "TokenLimiter"
    });
    if (typeof options === "number") {
      this.maxTokens = options;
      this.encoder = new Tiktoken(o200k_base);
    } else {
      this.maxTokens = options.limit;
      this.encoder = new Tiktoken(options.encoding || o200k_base);
    }
  }
  process(messages, { systemMessage, memorySystemMessage, newMessages } = {}) {
    let totalTokens = 0;
    totalTokens += this.TOKENS_PER_CONVERSATION;
    if (systemMessage) {
      totalTokens += this.countTokens(systemMessage);
      totalTokens += this.TOKENS_PER_MESSAGE;
    }
    if (memorySystemMessage) {
      totalTokens += this.countTokens(memorySystemMessage);
      totalTokens += this.TOKENS_PER_MESSAGE;
    }
    const allMessages = [...messages, ...newMessages || []];
    const result = [];
    for (let i = allMessages.length - 1; i >= 0; i--) {
      const message = allMessages[i];
      if (!message) continue;
      const messageTokens = this.countTokens(message);
      if (totalTokens + messageTokens <= this.maxTokens) {
        if (i < messages.length) {
          result.unshift(message);
        }
        totalTokens += messageTokens;
      } else {
        this.logger.info(
          `filtering ${allMessages.length - result.length}/${allMessages.length} messages, token limit of ${this.maxTokens} exceeded`
        );
        break;
      }
    }
    return result;
  }
  countTokens(message) {
    if (typeof message === `string`) {
      return this.encoder.encode(message).length;
    }
    let tokenString = message.role;
    let overhead = 0;
    if (typeof message.content === "string" && message.content) {
      tokenString += message.content;
    } else if (Array.isArray(message.content)) {
      for (const part of message.content) {
        if (part.type === "text") {
          tokenString += part.text;
        } else if (part.type === "tool-call" || part.type === `tool-result`) {
          if (`args` in part && part.args && part.type === `tool-call`) {
            tokenString += part.toolName;
            if (typeof part.args === "string") {
              tokenString += part.args;
            } else {
              tokenString += JSON.stringify(part.args);
              overhead -= 12;
            }
          }
          if (`result` in part && part.result !== void 0 && part.type === `tool-result`) {
            if (typeof part.result === "string") {
              tokenString += part.result;
            } else {
              tokenString += JSON.stringify(part.result);
              overhead -= 12;
            }
          }
        } else {
          tokenString += JSON.stringify(part);
        }
      }
    }
    if (typeof message.content === `string` || // if the message included non-tool parts, add our message overhead
    message.content.some((p) => p.type !== `tool-call` && p.type !== `tool-result`)) {
      overhead += this.TOKENS_PER_MESSAGE;
    }
    return this.encoder.encode(tokenString).length + overhead;
  }
};
var ToolCallFilter = class extends MemoryProcessor {
  exclude;
  /**
   * Create a filter for tool calls and results.
   * @param options Configuration options
   * @param options.exclude List of specific tool names to exclude. If not provided, all tool calls are excluded.
   */
  constructor(options = {}) {
    super({ name: "ToolCallFilter" });
    if (!options || !options.exclude) {
      this.exclude = "all";
    } else {
      this.exclude = Array.isArray(options.exclude) ? options.exclude : [];
    }
  }
  process(messages) {
    if (this.exclude === "all") {
      return messages.filter((message) => {
        if (Array.isArray(message.content)) {
          return !message.content.some((part) => part.type === "tool-call" || part.type === "tool-result");
        }
        return true;
      });
    }
    if (this.exclude.length > 0) {
      const excludedToolCallIds = /* @__PURE__ */ new Set();
      return messages.filter((message) => {
        if (!Array.isArray(message.content)) return true;
        if (message.role === "assistant") {
          let shouldExclude = false;
          for (const part of message.content) {
            if (part.type === "tool-call" && this.exclude.includes(part.toolName)) {
              excludedToolCallIds.add(part.toolCallId);
              shouldExclude = true;
            }
          }
          return !shouldExclude;
        }
        if (message.role === "tool") {
          const shouldExclude = message.content.some(
            (part) => part.type === "tool-result" && excludedToolCallIds.has(part.toolCallId)
          );
          return !shouldExclude;
        }
        return true;
      });
    }
    return messages;
  }
};

export { TokenLimiter, ToolCallFilter };
//# sourceMappingURL=index.js.map
//# sourceMappingURL=index.js.map
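
For context, here is a minimal sketch of how the two removed classes were typically attached to memory in pre-1.0 setups. It assumes the `processors` option on the `Memory` constructor and the `@mastra/memory/processors` entry point that this release drops; `generateImageTool` is a hypothetical tool name.

```ts
// Sketch only: relies on the pre-1.0 `processors` option and the
// `@mastra/memory/processors` subpath removed in this version.
import { Memory } from '@mastra/memory';
import { TokenLimiter, ToolCallFilter } from '@mastra/memory/processors';

const memory = new Memory({
  processors: [
    // Drop calls/results for a (hypothetical) noisy tool before recall.
    new ToolCallFilter({ exclude: ['generateImageTool'] }),
    // Keep the most recent messages that fit in roughly 127k tokens,
    // using the number form of the constructor shown in the deleted index.js above.
    new TokenLimiter(127000),
  ],
});
```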

package/dist/processors/index.js.map
DELETED
@@ -1 +0,0 @@
{"version":3,"sources":["../../src/processors/token-limiter.ts","../../src/processors/tool-call-filter.ts"],"names":["MemoryProcessor"],"mappings":";;;;;AAsBO,IAAM,YAAA,GAAN,cAA2B,eAAA,CAAgB;AAAA,EACxC,OAAA;AAAA,EACA,SAAA;AAAA;AAAA;AAAA;AAAA,EAKD,kBAAA,GAAqB,GAAA;AAAA;AAAA,EACrB,uBAAA,GAA0B,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAMjC,YAAY,OAAA,EAAuC;AACjD,IAAA,KAAA,CAAM;AAAA,MACJ,IAAA,EAAM;AAAA,KACP,CAAA;AAED,IAAA,IAAI,OAAO,YAAY,QAAA,EAAU;AAE/B,MAAA,IAAA,CAAK,SAAA,GAAY,OAAA;AACjB,MAAA,IAAA,CAAK,OAAA,GAAU,IAAI,QAAA,CAAS,UAAU,CAAA;AAAA,IACxC,CAAA,MAAO;AAEL,MAAA,IAAA,CAAK,YAAY,OAAA,CAAQ,KAAA;AACzB,MAAA,IAAA,CAAK,OAAA,GAAU,IAAI,QAAA,CAAS,OAAA,CAAQ,YAAY,UAAU,CAAA;AAAA,IAC5D;AAAA,EACF;AAAA,EAEA,OAAA,CACE,UACA,EAAE,aAAA,EAAe,qBAAqB,WAAA,EAAY,GAAyB,EAAC,EAC7D;AAEf,IAAA,IAAI,WAAA,GAAc,CAAA;AAGlB,IAAA,WAAA,IAAe,IAAA,CAAK,uBAAA;AAEpB,IAAA,IAAI,aAAA,EAAe;AACjB,MAAA,WAAA,IAAe,IAAA,CAAK,YAAY,aAAa,CAAA;AAC7C,MAAA,WAAA,IAAe,IAAA,CAAK,kBAAA;AAAA,IACtB;AAEA,IAAA,IAAI,mBAAA,EAAqB;AACvB,MAAA,WAAA,IAAe,IAAA,CAAK,YAAY,mBAAmB,CAAA;AACnD,MAAA,WAAA,IAAe,IAAA,CAAK,kBAAA;AAAA,IACtB;AAEA,IAAA,MAAM,cAAc,CAAC,GAAG,UAAU,GAAI,WAAA,IAAe,EAAG,CAAA;AAExD,IAAA,MAAM,SAAwB,EAAC;AAG/B,IAAA,KAAA,IAAS,IAAI,WAAA,CAAY,MAAA,GAAS,CAAA,EAAG,CAAA,IAAK,GAAG,CAAA,EAAA,EAAK;AAChD,MAAA,MAAM,OAAA,GAAU,YAAY,CAAC,CAAA;AAG7B,MAAA,IAAI,CAAC,OAAA,EAAS;AAEd,MAAA,MAAM,aAAA,GAAgB,IAAA,CAAK,WAAA,CAAY,OAAO,CAAA;AAE9C,MAAA,IAAI,WAAA,GAAc,aAAA,IAAiB,IAAA,CAAK,SAAA,EAAW;AAEjD,QAAA,IAAI,CAAA,GAAI,SAAS,MAAA,EAAQ;AAEvB,UAAA,MAAA,CAAO,QAAQ,OAAO,CAAA;AAAA,QACxB;AACA,QAAA,WAAA,IAAe,aAAA;AAAA,MACjB,CAAA,MAAO;AACL,QAAA,IAAA,CAAK,MAAA,CAAO,IAAA;AAAA,UACV,CAAA,UAAA,EAAa,WAAA,CAAY,MAAA,GAAS,MAAA,CAAO,MAAM,IAAI,WAAA,CAAY,MAAM,CAAA,0BAAA,EAA6B,IAAA,CAAK,SAAS,CAAA,SAAA;AAAA,SAClH;AAEA,QAAA;AAAA,MACF;AAAA,IACF;AAEA,IAAA,OAAO,MAAA;AAAA,EACT;AAAA,EAEO,YAAY,OAAA,EAAuC;AACxD,IAAA,IAAI,OAAO,YAAY,CAAA,MAAA,CAAA,EAAU;AAC/B,MAAA,OAAO,IAAA,CAAK,OAAA,CAAQ,MAAA,CAAO,OAAO,CAAA,CAAE,MAAA;AAAA,IACtC;AAEA,IAAA,IAAI,cAAc,OAAA,CAAQ,IAAA;AAC1B,IAAA,IAAI,QAAA,GAAW,CAAA;AAEf,IAAA,IAAI,OAAO,OAAA,CAAQ,OAAA,KAAY,QAAA,IAAY,QAAQ,OAAA,EAAS;AAC1D,MAAA,WAAA,IAAe,OAAA,CAAQ,OAAA;AAAA,IACzB,CAAA,MAAA,IAAW,KAAA,CAAM,OAAA,CAAQ,OAAA,CAAQ,OAAO,CAAA,EAAG;AAEzC,MAAA,KAAA,MAAW,IAAA,IAAQ,QAAQ,OAAA,EAAS;AAClC,QAAA,IAAI,IAAA,CAAK,SAAS,MAAA,EAAQ;AACxB,UAAA,WAAA,IAAe,IAAA,CAAK,IAAA;AAAA,QACtB,WAAW,IAAA,CAAK,IAAA,KAAS,WAAA,IAAe,IAAA,CAAK,SAAS,CAAA,WAAA,CAAA,EAAe;AACnE,UAAA,IAAI,UAAU,IAAA,IAAQ,IAAA,CAAK,IAAA,IAAQ,IAAA,CAAK,SAAS,CAAA,SAAA,CAAA,EAAa;AAC5D,YAAA,WAAA,IAAe,IAAA,CAAK,QAAA;AACpB,YAAA,IAAI,OAAO,IAAA,CAAK,IAAA,KAAS,QAAA,EAAU;AACjC,cAAA,WAAA,IAAe,IAAA,CAAK,IAAA;AAAA,YACtB,CAAA,MAAO;AACL,cAAA,WAAA,IAAe,IAAA,CAAK,SAAA,CAAU,IAAA,CAAK,IAAI,CAAA;AAEvC,cAAA,QAAA,IAAY,EAAA;AAAA,YACd;AAAA,UACF;AAEA,UAAA,IAAI,YAAY,IAAA,IAAQ,IAAA,CAAK,WAAW,MAAA,IAAa,IAAA,CAAK,SAAS,CAAA,WAAA,CAAA,EAAe;AAChF,YAAA,IAAI,OAAO,IAAA,CAAK,MAAA,KAAW,QAAA,EAAU;AACnC,cAAA,WAAA,IAAe,IAAA,CAAK,MAAA;AAAA,YACtB,CAAA,MAAO;AACL,cAAA,WAAA,IAAe,IAAA,CAAK,SAAA,CAAU,IAAA,CAAK,MAAM,CAAA;AAEzC,cAAA,QAAA,IAAY,EAAA;AAAA,YACd;AAAA,UACF;AAAA,QACF,CAAA,MAAO;AACL,UAAA,WAAA,IAAe,IAAA,CAAK,UAAU,IAAI,CAAA;AAAA,QACpC;AAAA,MACF;AAAA,IACF;AAEA,IAAA,IACE,OAAO,QAAQ,OAAA,KAAY,CAAA,MAAA,CAAA;AAAA,IAE3B,OAAA,CAAQ,OAAA,CAAQ,IAAA,CAAK,CAAA,CAAA,KAAK,CAAA,CAAE,SAAS,CAAA,SAAA,CAAA,IAAe,CAAA,CAAE,IAAA,KAAS,CAAA,WAAA,CAAa,CAAA,EAC5E;AAGA,MAAA,QAAA,IAAY,IAAA,CAAK,kBAAA;AAAA,IACnB;AAEA,IAAA,OAAO,IAAA,CAAK,OAAA,CAAQ,MAAA,CAAO,WAAW,EAAE,MAAA,GAAS,QAAA;AAAA,EACnD;AACF;ACtJO,IAAM,cAAA,GAAN,cAA6BA,eAAAA,CAAgB;AAAA,EAC1C,OAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOR,WAAA,CAAY,OAAA,GAAkC,EAAC,EA
AG;AAChD,IAAA,KAAA,CAAM,EAAE,IAAA,EAAM,gBAAA,EAAkB,CAAA;AAEhC,IAAA,IAAI,CAAC,OAAA,IAAW,CAAC,OAAA,CAAQ,OAAA,EAAS;AAChC,MAAA,IAAA,CAAK,OAAA,GAAU,KAAA;AAAA,IACjB,CAAA,MAAO;AAEL,MAAA,IAAA,CAAK,OAAA,GAAU,MAAM,OAAA,CAAQ,OAAA,CAAQ,OAAO,CAAA,GAAI,OAAA,CAAQ,UAAU,EAAC;AAAA,IACrE;AAAA,EACF;AAAA,EAEA,QAAQ,QAAA,EAAwC;AAE9C,IAAA,IAAI,IAAA,CAAK,YAAY,KAAA,EAAO;AAC1B,MAAA,OAAO,QAAA,CAAS,OAAO,CAAA,OAAA,KAAW;AAChC,QAAA,IAAI,KAAA,CAAM,OAAA,CAAQ,OAAA,CAAQ,OAAO,CAAA,EAAG;AAClC,UAAA,OAAO,CAAC,OAAA,CAAQ,OAAA,CAAQ,IAAA,CAAK,CAAA,IAAA,KAAQ,KAAK,IAAA,KAAS,WAAA,IAAe,IAAA,CAAK,IAAA,KAAS,aAAa,CAAA;AAAA,QAC/F;AACA,QAAA,OAAO,IAAA;AAAA,MACT,CAAC,CAAA;AAAA,IACH;AAGA,IAAA,IAAI,IAAA,CAAK,OAAA,CAAQ,MAAA,GAAS,CAAA,EAAG;AAE3B,MAAA,MAAM,mBAAA,uBAA0B,GAAA,EAAY;AAE5C,MAAA,OAAO,QAAA,CAAS,OAAO,CAAA,OAAA,KAAW;AAChC,QAAA,IAAI,CAAC,KAAA,CAAM,OAAA,CAAQ,OAAA,CAAQ,OAAO,GAAG,OAAO,IAAA;AAG5C,QAAA,IAAI,OAAA,CAAQ,SAAS,WAAA,EAAa;AAChC,UAAA,IAAI,aAAA,GAAgB,KAAA;AAEpB,UAAA,KAAA,MAAW,IAAA,IAAQ,QAAQ,OAAA,EAAS;AAClC,YAAA,IAAI,IAAA,CAAK,SAAS,WAAA,IAAe,IAAA,CAAK,QAAQ,QAAA,CAAS,IAAA,CAAK,QAAQ,CAAA,EAAG;AACrE,cAAA,mBAAA,CAAoB,GAAA,CAAI,KAAK,UAAU,CAAA;AACvC,cAAA,aAAA,GAAgB,IAAA;AAAA,YAClB;AAAA,UACF;AAEA,UAAA,OAAO,CAAC,aAAA;AAAA,QACV;AAGA,QAAA,IAAI,OAAA,CAAQ,SAAS,MAAA,EAAQ;AAC3B,UAAA,MAAM,aAAA,GAAgB,QAAQ,OAAA,CAAQ,IAAA;AAAA,YACpC,UAAQ,IAAA,CAAK,IAAA,KAAS,iBAAiB,mBAAA,CAAoB,GAAA,CAAI,KAAK,UAAU;AAAA,WAChF;AAEA,UAAA,OAAO,CAAC,aAAA;AAAA,QACV;AAEA,QAAA,OAAO,IAAA;AAAA,MACT,CAAC,CAAA;AAAA,IACH;AAGA,IAAA,OAAO,QAAA;AAAA,EACT;AACF","file":"index.js","sourcesContent":["import type { CoreMessage } from '@mastra/core/llm';\nimport { MemoryProcessor } from '@mastra/core/memory';\nimport type { MemoryProcessorOpts } from '@mastra/core/memory';\n\nimport { Tiktoken } from 'js-tiktoken/lite';\nimport type { TiktokenBPE } from 'js-tiktoken/lite';\nimport o200k_base from 'js-tiktoken/ranks/o200k_base';\n\n/**\n * Configuration options for TokenLimiter\n */\ninterface TokenLimiterOptions {\n /** Maximum number of tokens to allow */\n limit: number;\n /** Optional encoding to use (defaults to o200k_base which is used by gpt-4o) */\n encoding?: TiktokenBPE;\n}\n\n/**\n * Limits the total number of tokens in the messages.\n * Uses js-tiktoken with o200k_base encoding by default for accurate token counting with modern models.\n */\nexport class TokenLimiter extends MemoryProcessor {\n private encoder: Tiktoken;\n private maxTokens: number;\n\n // Token overheads per OpenAI's documentation\n // See: https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken#6-counting-tokens-for-chat-completions-api-calls\n // Every message follows <|start|>{role/name}\\n{content}<|end|>\n public TOKENS_PER_MESSAGE = 3.8; // tokens added for each message (start & end tokens)\n public TOKENS_PER_CONVERSATION = 24; // fixed overhead for the conversation\n\n /**\n * Create a token limiter for messages.\n * @param options Either a number (token limit) or a configuration object\n */\n constructor(options: number | TokenLimiterOptions) {\n super({\n name: 'TokenLimiter',\n });\n\n if (typeof options === 'number') {\n // Simple number format - just the token limit with default encoding\n this.maxTokens = options;\n this.encoder = new Tiktoken(o200k_base);\n } else {\n // Object format with limit and optional encoding\n this.maxTokens = options.limit;\n this.encoder = new Tiktoken(options.encoding || o200k_base);\n }\n }\n\n process(\n messages: CoreMessage[],\n { systemMessage, memorySystemMessage, newMessages }: MemoryProcessorOpts = {},\n ): CoreMessage[] {\n // Messages are already 
chronologically ordered - take most recent ones up to the token limit\n let totalTokens = 0;\n\n // Start with the conversation overhead\n totalTokens += this.TOKENS_PER_CONVERSATION;\n\n if (systemMessage) {\n totalTokens += this.countTokens(systemMessage);\n totalTokens += this.TOKENS_PER_MESSAGE; // Add message overhead for system message\n }\n\n if (memorySystemMessage) {\n totalTokens += this.countTokens(memorySystemMessage);\n totalTokens += this.TOKENS_PER_MESSAGE; // Add message overhead for memory system message\n }\n\n const allMessages = [...messages, ...(newMessages || [])];\n\n const result: CoreMessage[] = [];\n\n // Process messages in reverse (newest first) so that we stop estimating tokens on old messages. Once we get to our limit of tokens there's no reason to keep processing older messages\n for (let i = allMessages.length - 1; i >= 0; i--) {\n const message = allMessages[i];\n\n // Skip undefined messages (shouldn't happen, but TypeScript is concerned)\n if (!message) continue;\n\n const messageTokens = this.countTokens(message);\n\n if (totalTokens + messageTokens <= this.maxTokens) {\n // Insert at the beginning to maintain chronological order, but only if it's not a new message\n if (i < messages.length) {\n // less than messages.length because we're iterating in reverse. If the index is greater than messages.length it's a new message\n result.unshift(message);\n }\n totalTokens += messageTokens;\n } else {\n this.logger.info(\n `filtering ${allMessages.length - result.length}/${allMessages.length} messages, token limit of ${this.maxTokens} exceeded`,\n );\n // If we can't fit the message, we stop\n break;\n }\n }\n\n return result;\n }\n\n public countTokens(message: string | CoreMessage): number {\n if (typeof message === `string`) {\n return this.encoder.encode(message).length;\n }\n\n let tokenString = message.role;\n let overhead = 0;\n\n if (typeof message.content === 'string' && message.content) {\n tokenString += message.content;\n } else if (Array.isArray(message.content)) {\n // Calculate tokens for each content part\n for (const part of message.content) {\n if (part.type === 'text') {\n tokenString += part.text;\n } else if (part.type === 'tool-call' || part.type === `tool-result`) {\n if (`args` in part && part.args && part.type === `tool-call`) {\n tokenString += part.toolName as any;\n if (typeof part.args === 'string') {\n tokenString += part.args;\n } else {\n tokenString += JSON.stringify(part.args);\n // minus some tokens for JSON\n overhead -= 12;\n }\n }\n // Token cost for result if present\n if (`result` in part && part.result !== undefined && part.type === `tool-result`) {\n if (typeof part.result === 'string') {\n tokenString += part.result;\n } else {\n tokenString += JSON.stringify(part.result);\n // minus some tokens for JSON\n overhead -= 12;\n }\n }\n } else {\n tokenString += JSON.stringify(part);\n }\n }\n }\n\n if (\n typeof message.content === `string` ||\n // if the message included non-tool parts, add our message overhead\n message.content.some(p => p.type !== `tool-call` && p.type !== `tool-result`)\n ) {\n // Ensure we account for message formatting tokens\n // See: https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken#6-counting-tokens-for-chat-completions-api-calls\n overhead += this.TOKENS_PER_MESSAGE;\n }\n\n return this.encoder.encode(tokenString).length + overhead;\n }\n}\n","import type { CoreMessage } from '@mastra/core/llm';\nimport { MemoryProcessor } from '@mastra/core/memory';\n\n/**\n * Filters out 
tool calls and results from messages.\n * By default (with no arguments), excludes all tool calls and their results.\n * Can be configured to exclude only specific tools by name.\n */\nexport class ToolCallFilter extends MemoryProcessor {\n private exclude: string[] | 'all';\n\n /**\n * Create a filter for tool calls and results.\n * @param options Configuration options\n * @param options.exclude List of specific tool names to exclude. If not provided, all tool calls are excluded.\n */\n constructor(options: { exclude?: string[] } = {}) {\n super({ name: 'ToolCallFilter' });\n // If no options or exclude is provided, exclude all tools\n if (!options || !options.exclude) {\n this.exclude = 'all'; // Exclude all tools\n } else {\n // Exclude specific tools\n this.exclude = Array.isArray(options.exclude) ? options.exclude : [];\n }\n }\n\n process(messages: CoreMessage[]): CoreMessage[] {\n // Case 1: Exclude all tool calls and tool results\n if (this.exclude === 'all') {\n return messages.filter(message => {\n if (Array.isArray(message.content)) {\n return !message.content.some(part => part.type === 'tool-call' || part.type === 'tool-result');\n }\n return true;\n });\n }\n\n // Case 2: Exclude specific tools by name\n if (this.exclude.length > 0) {\n // Single pass approach - track excluded tool call IDs while filtering\n const excludedToolCallIds = new Set<string>();\n\n return messages.filter(message => {\n if (!Array.isArray(message.content)) return true;\n\n // For assistant messages, check for excluded tool calls and track their IDs\n if (message.role === 'assistant') {\n let shouldExclude = false;\n\n for (const part of message.content) {\n if (part.type === 'tool-call' && this.exclude.includes(part.toolName)) {\n excludedToolCallIds.add(part.toolCallId);\n shouldExclude = true;\n }\n }\n\n return !shouldExclude;\n }\n\n // For tool messages, filter out results for excluded tool calls\n if (message.role === 'tool') {\n const shouldExclude = message.content.some(\n part => part.type === 'tool-result' && excludedToolCallIds.has(part.toolCallId),\n );\n\n return !shouldExclude;\n }\n\n return true;\n });\n }\n\n // Case 3: Empty exclude array, return original messages\n return messages;\n }\n}\n"]}

package/dist/processors/token-limiter.d.ts
DELETED
@@ -1,32 +0,0 @@
import type { CoreMessage } from '@mastra/core/llm';
import { MemoryProcessor } from '@mastra/core/memory';
import type { MemoryProcessorOpts } from '@mastra/core/memory';
import type { TiktokenBPE } from 'js-tiktoken/lite';
/**
 * Configuration options for TokenLimiter
 */
interface TokenLimiterOptions {
    /** Maximum number of tokens to allow */
    limit: number;
    /** Optional encoding to use (defaults to o200k_base which is used by gpt-4o) */
    encoding?: TiktokenBPE;
}
/**
 * Limits the total number of tokens in the messages.
 * Uses js-tiktoken with o200k_base encoding by default for accurate token counting with modern models.
 */
export declare class TokenLimiter extends MemoryProcessor {
    private encoder;
    private maxTokens;
    TOKENS_PER_MESSAGE: number;
    TOKENS_PER_CONVERSATION: number;
    /**
     * Create a token limiter for messages.
     * @param options Either a number (token limit) or a configuration object
     */
    constructor(options: number | TokenLimiterOptions);
    process(messages: CoreMessage[], { systemMessage, memorySystemMessage, newMessages }?: MemoryProcessorOpts): CoreMessage[];
    countTokens(message: string | CoreMessage): number;
}
export {};
//# sourceMappingURL=token-limiter.d.ts.map
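
The declaration above also shows the object form of the constructor. Below is a small sketch of that form with a non-default encoding, assuming js-tiktoken's `cl100k_base` rank export and the removed `@mastra/memory/processors` entry point; the 4000-token limit is arbitrary.

```ts
// Sketch only: object form of the TokenLimiter constructor with a custom encoding.
import cl100k_base from 'js-tiktoken/ranks/cl100k_base';
import { TokenLimiter } from '@mastra/memory/processors';

const limiter = new TokenLimiter({ limit: 4000, encoding: cl100k_base });

// countTokens() is public, so the estimate can be inspected directly. Per the
// implementation above, non-tool messages also get TOKENS_PER_MESSAGE (3.8) added.
const estimate = limiter.countTokens({ role: 'user', content: 'hello world' });
```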

package/dist/processors/token-limiter.d.ts.map
DELETED
@@ -1 +0,0 @@
{"version":3,"file":"token-limiter.d.ts","sourceRoot":"","sources":["../../src/processors/token-limiter.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,WAAW,EAAE,MAAM,kBAAkB,CAAC;AACpD,OAAO,EAAE,eAAe,EAAE,MAAM,qBAAqB,CAAC;AACtD,OAAO,KAAK,EAAE,mBAAmB,EAAE,MAAM,qBAAqB,CAAC;AAG/D,OAAO,KAAK,EAAE,WAAW,EAAE,MAAM,kBAAkB,CAAC;AAGpD;;GAEG;AACH,UAAU,mBAAmB;IAC3B,wCAAwC;IACxC,KAAK,EAAE,MAAM,CAAC;IACd,gFAAgF;IAChF,QAAQ,CAAC,EAAE,WAAW,CAAC;CACxB;AAED;;;GAGG;AACH,qBAAa,YAAa,SAAQ,eAAe;IAC/C,OAAO,CAAC,OAAO,CAAW;IAC1B,OAAO,CAAC,SAAS,CAAS;IAKnB,kBAAkB,SAAO;IACzB,uBAAuB,SAAM;IAEpC;;;OAGG;gBACS,OAAO,EAAE,MAAM,GAAG,mBAAmB;IAgBjD,OAAO,CACL,QAAQ,EAAE,WAAW,EAAE,EACvB,EAAE,aAAa,EAAE,mBAAmB,EAAE,WAAW,EAAE,GAAE,mBAAwB,GAC5E,WAAW,EAAE;IAiDT,WAAW,CAAC,OAAO,EAAE,MAAM,GAAG,WAAW,GAAG,MAAM;CAsD1D"}

package/dist/processors/tool-call-filter.d.ts
DELETED
@@ -1,20 +0,0 @@
import type { CoreMessage } from '@mastra/core/llm';
import { MemoryProcessor } from '@mastra/core/memory';
/**
 * Filters out tool calls and results from messages.
 * By default (with no arguments), excludes all tool calls and their results.
 * Can be configured to exclude only specific tools by name.
 */
export declare class ToolCallFilter extends MemoryProcessor {
    private exclude;
    /**
     * Create a filter for tool calls and results.
     * @param options Configuration options
     * @param options.exclude List of specific tool names to exclude. If not provided, all tool calls are excluded.
     */
    constructor(options?: {
        exclude?: string[];
    });
    process(messages: CoreMessage[]): CoreMessage[];
}
//# sourceMappingURL=tool-call-filter.d.ts.map
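
Based on the `process()` implementation in the deleted `index.js` above, here is a sketch of what the default (exclude-all) filter keeps from a short tool exchange. Message shapes follow the AI SDK v4 `CoreMessage` format; `weatherTool` is a hypothetical tool name, and the import assumes the removed `@mastra/memory/processors` entry point.

```ts
// Sketch only: exclude-all filtering of a user/assistant/tool exchange.
import { ToolCallFilter } from '@mastra/memory/processors';

const filter = new ToolCallFilter(); // no options, so every tool interaction is excluded

const kept = filter.process([
  { role: 'user', content: 'What is the weather in Berlin?' },
  {
    role: 'assistant',
    content: [{ type: 'tool-call', toolCallId: 'call_1', toolName: 'weatherTool', args: { city: 'Berlin' } }],
  },
  {
    role: 'tool',
    content: [{ type: 'tool-result', toolCallId: 'call_1', toolName: 'weatherTool', result: '18°C' }],
  },
  { role: 'assistant', content: 'It is 18°C in Berlin.' },
]);
// kept -> only the plain-text user and assistant messages remain.
```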

package/dist/processors/tool-call-filter.d.ts.map
DELETED
@@ -1 +0,0 @@
{"version":3,"file":"tool-call-filter.d.ts","sourceRoot":"","sources":["../../src/processors/tool-call-filter.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,WAAW,EAAE,MAAM,kBAAkB,CAAC;AACpD,OAAO,EAAE,eAAe,EAAE,MAAM,qBAAqB,CAAC;AAEtD;;;;GAIG;AACH,qBAAa,cAAe,SAAQ,eAAe;IACjD,OAAO,CAAC,OAAO,CAAmB;IAElC;;;;OAIG;gBACS,OAAO,GAAE;QAAE,OAAO,CAAC,EAAE,MAAM,EAAE,CAAA;KAAO;IAWhD,OAAO,CAAC,QAAQ,EAAE,WAAW,EAAE,GAAG,WAAW,EAAE;CAiDhD"}