@townco/agent 0.1.121 → 0.1.123

@@ -24,4 +24,4 @@ export interface ContextSize {
  * @param mcpOverheadTokens - Pre-calculated MCP tool overhead (MCP tool definitions)
  * @param modelContextWindow - Model's max context window size
  */
-export declare function calculateContextSize(messages: SessionMessage[], systemPrompt?: string, toolOverheadTokens?: number, mcpOverheadTokens?: number, modelContextWindow?: number): ContextSize;
+export declare function calculateContextSize(messages: SessionMessage[], systemPrompt?: string, toolOverheadTokens?: number, mcpOverheadTokens?: number, modelContextWindow?: number): Promise<ContextSize>;
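
In 0.1.123 calculateContextSize is async, so every call site must now await it. A minimal migration sketch (the messages and systemPrompt values here are illustrative, not from the package):

    // Before (0.1.121): synchronous
    // const size = calculateContextSize(messages, systemPrompt);
    // After (0.1.123): returns Promise<ContextSize>
    const size = await calculateContextSize(messages, systemPrompt);
    console.log(size.totalEstimated);
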
@@ -6,10 +6,10 @@ import { countTokens, countToolResultTokens } from "./token-counter.js";
 /**
  * Extract and count tokens from a content block based on its type
  */
-function countContentBlock(block) {
+async function countContentBlock(block) {
     if (block.type === "text") {
         return {
-            textTokens: countTokens(block.text),
+            textTokens: await countTokens(block.text),
             toolInputTokens: 0,
             toolResultTokens: 0,
         };
@@ -18,10 +18,10 @@ function countContentBlock(block) {
         return {
             textTokens: 0,
             toolInputTokens: block.rawInput
-                ? countToolResultTokens(block.rawInput)
+                ? await countToolResultTokens(block.rawInput)
                 : 0,
             toolResultTokens: block.rawOutput
-                ? countToolResultTokens(block.rawOutput)
+                ? await countToolResultTokens(block.rawOutput)
                 : 0,
         };
     }
@@ -37,8 +37,8 @@ function countContentBlock(block) {
  * @param mcpOverheadTokens - Pre-calculated MCP tool overhead (MCP tool definitions)
  * @param modelContextWindow - Model's max context window size
  */
-export function calculateContextSize(messages, systemPrompt, toolOverheadTokens, mcpOverheadTokens, modelContextWindow) {
-    const systemPromptTokens = systemPrompt ? countTokens(systemPrompt) : 0;
+export async function calculateContextSize(messages, systemPrompt, toolOverheadTokens, mcpOverheadTokens, modelContextWindow) {
+    const systemPromptTokens = systemPrompt ? await countTokens(systemPrompt) : 0;
     const toolOverheadTokensEstimate = toolOverheadTokens ?? 0;
     const mcpOverheadTokensEstimate = mcpOverheadTokens ?? 0;
     let userMessagesTokens = 0;
@@ -48,7 +48,7 @@ export function calculateContextSize(messages, systemPrompt, toolOverheadTokens,
     // Go through ALL messages in this context snapshot
     for (const message of messages) {
         for (const block of message.content) {
-            const counts = countContentBlock(block);
+            const counts = await countContentBlock(block);
             // Accumulate based on message role
             if (message.role === "user") {
                 userMessagesTokens += counts.textTokens;
@@ -66,6 +66,8 @@ export function calculateContextSize(messages, systemPrompt, toolOverheadTokens,
         assistantMessagesTokens,
         toolInputTokens,
         toolResultsTokens,
+        toolOverheadTokens: toolOverheadTokensEstimate,
+        mcpOverheadTokens: mcpOverheadTokensEstimate,
         totalEstimated: systemPromptTokens +
             toolOverheadTokensEstimate +
             mcpOverheadTokensEstimate +
@@ -74,13 +76,6 @@ export function calculateContextSize(messages, systemPrompt, toolOverheadTokens,
             toolInputTokens +
             toolResultsTokens,
     };
-    // Only include optional fields if they have values
-    if (toolOverheadTokensEstimate > 0) {
-        result.toolOverheadTokens = toolOverheadTokensEstimate;
-    }
-    if (mcpOverheadTokensEstimate > 0) {
-        result.mcpOverheadTokens = mcpOverheadTokensEstimate;
-    }
     if (modelContextWindow !== undefined) {
         result.modelContextWindow = modelContextWindow;
     }
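
Behavioral note: toolOverheadTokens and mcpOverheadTokens are now always set on the result (defaulting to 0) instead of being attached only when positive. A hedged sketch of what this means for consumers (whether the ContextSize type itself still marks these fields optional is not visible in this diff):

    // 0.1.121: fields could be absent, so callers had to guard
    // const overhead = (size.toolOverheadTokens ?? 0) + (size.mcpOverheadTokens ?? 0);
    // 0.1.123: both fields are always populated at runtime
    const overhead = size.toolOverheadTokens + size.mcpOverheadTokens;
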

@@ -1,19 +1,21 @@
 /**
  * Token counting utilities for tracking context size
- * Uses Anthropic's tokenizer for rough approximation
+ * Uses ai-tokenizer for accurate local estimation (97-99% accuracy for Claude)
+ * Source: https://github.com/coder/ai-tokenizer
  */
 /**
- * Count tokens in a string using Anthropic's tokenizer
- * Note: This is a rough approximation for Claude 3+ models
- * For exact counts, use the API's usage_metadata where available
+ * Count tokens in a string using ai-tokenizer
+ * This provides fast local estimation with 97-99% accuracy for Claude models
+ * @param text - The text to count tokens for
+ * @returns Promise resolving to the token count
  */
-export declare function countTokens(text: string): number;
+export declare function countTokens(text: string): Promise<number>;
 /**
  * Count tokens in a tool result (rawOutput)
  * Handles various data types that might be in tool outputs
  */
-export declare function countToolResultTokens(rawOutput: Record<string, unknown> | undefined): number;
+export declare function countToolResultTokens(rawOutput: Record<string, unknown> | undefined): Promise<number>;
 /**
  * Count tokens in multiple tool results
  */
-export declare function countMultipleToolResults(results: Array<Record<string, unknown> | undefined>): number;
+export declare function countMultipleToolResults(results: Array<Record<string, unknown> | undefined>): Promise<number>;

@@ -1,16 +1,34 @@
 /**
  * Token counting utilities for tracking context size
- * Uses Anthropic's tokenizer for rough approximation
+ * Uses ai-tokenizer for accurate local estimation (97-99% accuracy for Claude)
+ * Source: https://github.com/coder/ai-tokenizer
  */
-import { countTokens as anthropicCountTokens } from "@anthropic-ai/tokenizer";
+import { Tokenizer } from "ai-tokenizer";
+import * as claudeEncoding from "ai-tokenizer/encoding/claude";
+// Create a singleton tokenizer instance for Claude
+let _claudeTokenizer = null;
+function getClaudeTokenizer() {
+    if (!_claudeTokenizer) {
+        _claudeTokenizer = new Tokenizer(claudeEncoding);
+    }
+    return _claudeTokenizer;
+}
 /**
- * Count tokens in a string using Anthropic's tokenizer
- * Note: This is a rough approximation for Claude 3+ models
- * For exact counts, use the API's usage_metadata where available
+ * Count tokens in a string using ai-tokenizer
+ * This provides fast local estimation with 97-99% accuracy for Claude models
+ * @param text - The text to count tokens for
+ * @returns Promise resolving to the token count
  */
-export function countTokens(text) {
+export async function countTokens(text) {
+    if (!text || text.length === 0) {
+        return 0;
+    }
     try {
-        return anthropicCountTokens(text);
+        // Use ai-tokenizer with Claude encoding
+        // Achieves 98.48-99.79% accuracy depending on text length
+        const tokenizer = getClaudeTokenizer();
+        const tokens = tokenizer.encode(text);
+        return tokens.length;
     }
     catch (_error) {
         // Fallback to rough estimation if tokenizer fails
@@ -22,14 +40,14 @@ export function countTokens(text) {
  * Count tokens in a tool result (rawOutput)
  * Handles various data types that might be in tool outputs
  */
-export function countToolResultTokens(rawOutput) {
+export async function countToolResultTokens(rawOutput) {
     if (!rawOutput) {
         return 0;
     }
     try {
         // Convert to string representation (as it would be sent to the LLM)
         const text = JSON.stringify(rawOutput);
-        return countTokens(text);
+        return await countTokens(text);
     }
     catch (_error) {
         // If JSON.stringify fails, return 0
@@ -39,6 +57,7 @@ export function countToolResultTokens(rawOutput) {
 /**
  * Count tokens in multiple tool results
  */
-export function countMultipleToolResults(results) {
-    return results.reduce((total, result) => total + countToolResultTokens(result), 0);
+export async function countMultipleToolResults(results) {
+    const counts = await Promise.all(results.map((result) => countToolResultTokens(result)));
+    return counts.reduce((total, count) => total + count, 0);
 }
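
The tokenizer is now built lazily once and reused, and every counter returns a Promise. A brief usage sketch against the exports above (the sample inputs are illustrative):

    import { countTokens, countMultipleToolResults } from "./token-counter.js";

    // Every counter is async in 0.1.123
    const textTokens = await countTokens("How many tokens is this?");
    // Tool results are JSON-stringified, counted concurrently, and summed
    const resultTokens = await countMultipleToolResults([{ ok: true }, undefined]);
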

@@ -14,11 +14,11 @@ export interface ToolMetadata {
  * Note: Different LLM providers (Anthropic, OpenAI, Gemini) serialize tools
  * slightly differently. This is a rough approximation based on the general format.
  */
-export declare function estimateToolDefinitionTokens(tool: ToolMetadata): number;
+export declare function estimateToolDefinitionTokens(tool: ToolMetadata): Promise<number>;
 /**
  * Estimate total tokens for all tool definitions
  */
-export declare function estimateAllToolsOverhead(tools: ToolMetadata[]): number;
+export declare function estimateAllToolsOverhead(tools: ToolMetadata[]): Promise<number>;
 /**
  * Extract metadata from LangChain tools
  * LangChain tools have .name, .description, and .schema properties

@@ -10,7 +10,7 @@ import { countTokens } from "./token-counter.js";
  * Note: Different LLM providers (Anthropic, OpenAI, Gemini) serialize tools
  * slightly differently. This is a rough approximation based on the general format.
  */
-export function estimateToolDefinitionTokens(tool) {
+export async function estimateToolDefinitionTokens(tool) {
     // Rough serialization of how tools are sent to APIs:
     // {"name": "tool_name", "description": "...", "input_schema": {...}}
     const approximateJson = JSON.stringify({
@@ -18,13 +18,14 @@ export function estimateToolDefinitionTokens(tool) {
         description: tool.description,
         input_schema: tool.schema,
     });
-    return countTokens(approximateJson);
+    return await countTokens(approximateJson);
 }
 /**
  * Estimate total tokens for all tool definitions
  */
-export function estimateAllToolsOverhead(tools) {
-    return tools.reduce((total, tool) => total + estimateToolDefinitionTokens(tool), 0);
+export async function estimateAllToolsOverhead(tools) {
+    const tokens = await Promise.all(tools.map((tool) => estimateToolDefinitionTokens(tool)));
+    return tokens.reduce((total, count) => total + count, 0);
 }
 /**
  * Extract metadata from LangChain tools
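
As with countMultipleToolResults, the sequential reduce becomes Promise.all plus a sum, so per-tool estimates run concurrently. A short usage sketch with a hypothetical ToolMetadata entry (name, description, and schema per the interface above):

    // Hypothetical tool definition for illustration only
    const tools = [
      { name: "web_search", description: "Search the web", schema: { type: "object" } },
    ];
    const toolOverheadTokens = await estimateAllToolsOverhead(tools);
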

package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@townco/agent",
-  "version": "0.1.121",
+  "version": "0.1.123",
   "type": "module",
   "module": "index.ts",
   "files": [
@@ -83,12 +83,13 @@
     "@opentelemetry/sdk-trace-base": "^1.28.0",
     "@opentelemetry/sdk-trace-node": "^1.28.0",
     "@opentelemetry/semantic-conventions": "^1.28.0",
-    "@townco/apiclient": "0.0.33",
-    "@townco/core": "0.0.91",
-    "@townco/gui-template": "0.1.110",
-    "@townco/tsconfig": "0.1.110",
-    "@townco/tui-template": "0.1.110",
-    "@townco/ui": "0.1.113",
+    "@townco/apiclient": "0.0.35",
+    "@townco/core": "0.0.93",
+    "@townco/gui-template": "0.1.112",
+    "@townco/tsconfig": "0.1.112",
+    "@townco/tui-template": "0.1.112",
+    "@townco/ui": "0.1.115",
+    "ai-tokenizer": "^1.0.6",
     "exa-js": "^2.0.0",
     "hono": "^4.10.4",
     "langchain": "^1.0.3",