@townco/agent 0.1.51 → 0.1.53
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/acp-server/adapter.d.ts +2 -0
- package/dist/acp-server/adapter.js +28 -3
- package/dist/acp-server/index.js +5 -0
- package/dist/acp-server/session-storage.d.ts +2 -0
- package/dist/acp-server/session-storage.js +2 -0
- package/dist/runner/agent-runner.d.ts +4 -0
- package/dist/runner/langchain/index.d.ts +0 -1
- package/dist/runner/langchain/index.js +88 -20
- package/dist/runner/langchain/otel-callbacks.js +67 -1
- package/dist/telemetry/setup.d.ts +3 -1
- package/dist/telemetry/setup.js +33 -3
- package/dist/tsconfig.tsbuildinfo +1 -1
- package/dist/utils/__tests__/tool-overhead-calculator.test.d.ts +1 -0
- package/dist/utils/__tests__/tool-overhead-calculator.test.js +153 -0
- package/dist/utils/context-size-calculator.d.ts +9 -4
- package/dist/utils/context-size-calculator.js +23 -6
- package/dist/utils/tool-overhead-calculator.d.ts +30 -0
- package/dist/utils/tool-overhead-calculator.js +54 -0
- package/package.json +6 -6
package/dist/utils/__tests__/tool-overhead-calculator.test.d.ts
ADDED
@@ -0,0 +1 @@
+export {};
package/dist/utils/__tests__/tool-overhead-calculator.test.js
ADDED
@@ -0,0 +1,153 @@
+import { describe, expect, test } from "bun:test";
+import { estimateAllToolsOverhead, estimateToolDefinitionTokens, extractToolMetadata, } from "../tool-overhead-calculator.js";
+describe("tool-overhead-calculator", () => {
+    describe("estimateToolDefinitionTokens", () => {
+        test("estimates tokens for a simple tool", () => {
+            const tool = {
+                name: "simple_tool",
+                description: "A simple tool",
+                schema: {
+                    type: "object",
+                    properties: {
+                        param1: { type: "string" },
+                    },
+                },
+            };
+            const tokens = estimateToolDefinitionTokens(tool);
+            // Should return a positive number
+            expect(tokens).toBeGreaterThan(0);
+            // Rough sanity check: a simple tool should be at least 20 tokens
+            expect(tokens).toBeGreaterThanOrEqual(20);
+        });
+        test("estimates more tokens for a complex tool", () => {
+            const simpleTool = {
+                name: "simple",
+                description: "Simple",
+                schema: { type: "object" },
+            };
+            const complexTool = {
+                name: "complex_tool_with_long_name",
+                description: "A very complex tool with a long description that explains what it does in great detail and provides usage notes and examples.",
+                schema: {
+                    type: "object",
+                    properties: {
+                        param1: {
+                            type: "string",
+                            description: "First parameter description",
+                        },
+                        param2: {
+                            type: "number",
+                            description: "Second parameter description",
+                        },
+                        param3: {
+                            type: "array",
+                            items: { type: "string" },
+                            description: "Third parameter description",
+                        },
+                    },
+                    required: ["param1", "param2"],
+                },
+            };
+            const simpleTokens = estimateToolDefinitionTokens(simpleTool);
+            const complexTokens = estimateToolDefinitionTokens(complexTool);
+            expect(complexTokens).toBeGreaterThan(simpleTokens);
+        });
+        test("handles empty description", () => {
+            const tool = {
+                name: "test",
+                description: "",
+                schema: {},
+            };
+            const tokens = estimateToolDefinitionTokens(tool);
+            expect(tokens).toBeGreaterThan(0);
+        });
+    });
+    describe("estimateAllToolsOverhead", () => {
+        test("returns 0 for empty array", () => {
+            const tools = [];
+            const tokens = estimateAllToolsOverhead(tools);
+            expect(tokens).toBe(0);
+        });
+        test("sums tokens for multiple tools", () => {
+            const tools = [
+                {
+                    name: "tool1",
+                    description: "First tool",
+                    schema: { type: "object" },
+                },
+                {
+                    name: "tool2",
+                    description: "Second tool",
+                    schema: { type: "object" },
+                },
+            ];
+            const totalTokens = estimateAllToolsOverhead(tools);
+            const tool1Tokens = estimateToolDefinitionTokens(tools[0]);
+            const tool2Tokens = estimateToolDefinitionTokens(tools[1]);
+            expect(totalTokens).toBe(tool1Tokens + tool2Tokens);
+        });
+        test("handles large tool collections", () => {
+            const tools = Array.from({ length: 10 }, (_, i) => ({
+                name: `tool_${i}`,
+                description: `Tool number ${i} with description`,
+                schema: {
+                    type: "object",
+                    properties: {
+                        param: { type: "string" },
+                    },
+                },
+            }));
+            const totalTokens = estimateAllToolsOverhead(tools);
+            // With 10 tools, should be substantial
+            expect(totalTokens).toBeGreaterThan(100);
+        });
+    });
+    describe("extractToolMetadata", () => {
+        test("extracts metadata from LangChain tool object", () => {
+            const langchainTool = {
+                name: "test_tool",
+                description: "Test description",
+                schema: {
+                    type: "object",
+                    properties: { x: { type: "number" } },
+                },
+            };
+            const metadata = extractToolMetadata(langchainTool);
+            expect(metadata.name).toBe("test_tool");
+            expect(metadata.description).toBe("Test description");
+            expect(metadata.schema).toEqual({
+                type: "object",
+                properties: { x: { type: "number" } },
+            });
+        });
+        test("handles missing description", () => {
+            const langchainTool = {
+                name: "test_tool",
+                schema: { type: "object" },
+            };
+            const metadata = extractToolMetadata(langchainTool);
+            expect(metadata.name).toBe("test_tool");
+            expect(metadata.description).toBe("");
+            expect(metadata.schema).toEqual({ type: "object" });
+        });
+        test("handles missing schema", () => {
+            const langchainTool = {
+                name: "test_tool",
+                description: "Test",
+            };
+            const metadata = extractToolMetadata(langchainTool);
+            expect(metadata.name).toBe("test_tool");
+            expect(metadata.description).toBe("Test");
+            expect(metadata.schema).toEqual({});
+        });
+        test("handles missing description and schema", () => {
+            const langchainTool = {
+                name: "test_tool",
+            };
+            const metadata = extractToolMetadata(langchainTool);
+            expect(metadata.name).toBe("test_tool");
+            expect(metadata.description).toBe("");
+            expect(metadata.schema).toEqual({});
+        });
+    });
+});
package/dist/utils/context-size-calculator.d.ts
CHANGED
@@ -5,6 +5,8 @@
 import type { SessionMessage } from "../acp-server/session-storage.js";
 export interface ContextSize {
     systemPromptTokens: number;
+    toolOverheadTokens?: number;
+    mcpOverheadTokens?: number;
     userMessagesTokens: number;
     assistantMessagesTokens: number;
     toolInputTokens: number;
@@ -22,8 +24,11 @@ export interface ContextSize {
  * all messages, and all tool results
  * - We pass this as `llmReportedTokens` for comparison with our estimate
  * - This helps us validate the accuracy of our tokenizer estimates
- *
- *
+ *
+ * @param messages - Resolved messages from context entry
+ * @param systemPrompt - Base system prompt (without TODO instructions)
+ * @param llmReportedTokens - From API usage_metadata.input_tokens
+ * @param toolOverheadTokens - Pre-calculated tool overhead (built-in/custom/filesystem tool definitions + TODO instructions)
+ * @param mcpOverheadTokens - Pre-calculated MCP tool overhead (MCP tool definitions)
  */
-export declare function calculateContextSize(messages: SessionMessage[],
-systemPrompt?: string, llmReportedTokens?: number): ContextSize;
+export declare function calculateContextSize(messages: SessionMessage[], systemPrompt?: string, llmReportedTokens?: number, toolOverheadTokens?: number, mcpOverheadTokens?: number): ContextSize;
package/dist/utils/context-size-calculator.js
CHANGED
@@ -37,12 +37,17 @@ function countContentBlock(block) {
  * all messages, and all tool results
  * - We pass this as `llmReportedTokens` for comparison with our estimate
  * - This helps us validate the accuracy of our tokenizer estimates
- *
- *
+ *
+ * @param messages - Resolved messages from context entry
+ * @param systemPrompt - Base system prompt (without TODO instructions)
+ * @param llmReportedTokens - From API usage_metadata.input_tokens
+ * @param toolOverheadTokens - Pre-calculated tool overhead (built-in/custom/filesystem tool definitions + TODO instructions)
+ * @param mcpOverheadTokens - Pre-calculated MCP tool overhead (MCP tool definitions)
  */
-export function calculateContextSize(messages,
-systemPrompt, llmReportedTokens) {
+export function calculateContextSize(messages, systemPrompt, llmReportedTokens, toolOverheadTokens, mcpOverheadTokens) {
     const systemPromptTokens = systemPrompt ? countTokens(systemPrompt) : 0;
+    const toolOverheadTokensEstimate = toolOverheadTokens ?? 0;
+    const mcpOverheadTokensEstimate = mcpOverheadTokens ?? 0;
     let userMessagesTokens = 0;
     let assistantMessagesTokens = 0;
     let toolInputTokens = 0;
@@ -62,17 +67,29 @@ systemPrompt, llmReportedTokens) {
             }
         }
     }
-    return {
+    const result = {
         systemPromptTokens,
         userMessagesTokens,
         assistantMessagesTokens,
         toolInputTokens,
         toolResultsTokens,
         totalEstimated: systemPromptTokens +
+            toolOverheadTokensEstimate +
+            mcpOverheadTokensEstimate +
             userMessagesTokens +
             assistantMessagesTokens +
             toolInputTokens +
             toolResultsTokens,
-        llmReportedInputTokens: llmReportedTokens,
     };
+    // Only include optional fields if they have values
+    if (toolOverheadTokensEstimate > 0) {
+        result.toolOverheadTokens = toolOverheadTokensEstimate;
+    }
+    if (mcpOverheadTokensEstimate > 0) {
+        result.mcpOverheadTokens = mcpOverheadTokensEstimate;
+    }
+    if (llmReportedTokens !== undefined) {
+        result.llmReportedInputTokens = llmReportedTokens;
+    }
+    return result;
 }
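For orientation, a minimal usage sketch of the extended signature follows (not part of the package): the import paths are assumptions based on the dist layout listed above, and the overhead values are placeholders that would in practice come from the tool-overhead calculator added below.

// Usage sketch only; paths and sample values are assumptions, not package documentation.
import { calculateContextSize } from "./dist/utils/context-size-calculator.js";
import type { SessionMessage } from "./dist/acp-server/session-storage.js";

const messages: SessionMessage[] = []; // normally resolved from session storage
const systemPrompt = "You are a helpful agent."; // placeholder prompt

// Placeholder overheads; see estimateAllToolsOverhead below for how they could be computed.
const toolOverheadTokens = 850;
const mcpOverheadTokens = 320;

const size = calculateContextSize(messages, systemPrompt, undefined, toolOverheadTokens, mcpOverheadTokens);
// totalEstimated now includes both overheads; toolOverheadTokens/mcpOverheadTokens are only
// set when > 0, and llmReportedInputTokens is omitted because no llmReportedTokens was passed.
console.log(size.totalEstimated, size.toolOverheadTokens, size.mcpOverheadTokens);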
package/dist/utils/tool-overhead-calculator.d.ts
ADDED
@@ -0,0 +1,30 @@
+/**
+ * Tool overhead calculation utilities
+ * Estimates token overhead from tool definitions sent to LLM APIs
+ */
+export interface ToolMetadata {
+    name: string;
+    description: string;
+    schema: Record<string, unknown>;
+}
+/**
+ * Estimate tokens for a single tool definition
+ * LLMs receive tools as JSON with name, description, and parameters schema
+ *
+ * Note: Different LLM providers (Anthropic, OpenAI, Gemini) serialize tools
+ * slightly differently. This is a rough approximation based on the general format.
+ */
+export declare function estimateToolDefinitionTokens(tool: ToolMetadata): number;
+/**
+ * Estimate total tokens for all tool definitions
+ */
+export declare function estimateAllToolsOverhead(tools: ToolMetadata[]): number;
+/**
+ * Extract metadata from LangChain tools
+ * LangChain tools have .name, .description, and .schema properties
+ */
+export declare function extractToolMetadata(langchainTool: {
+    name: string;
+    description?: string;
+    schema?: unknown;
+}): ToolMetadata;
package/dist/utils/tool-overhead-calculator.js
ADDED
@@ -0,0 +1,54 @@
+/**
+ * Tool overhead calculation utilities
+ * Estimates token overhead from tool definitions sent to LLM APIs
+ */
+import { countTokens } from "./token-counter.js";
+/**
+ * Estimate tokens for a single tool definition
+ * LLMs receive tools as JSON with name, description, and parameters schema
+ *
+ * Note: Different LLM providers (Anthropic, OpenAI, Gemini) serialize tools
+ * slightly differently. This is a rough approximation based on the general format.
+ */
+export function estimateToolDefinitionTokens(tool) {
+    // Rough serialization of how tools are sent to APIs:
+    // {"name": "tool_name", "description": "...", "input_schema": {...}}
+    const approximateJson = JSON.stringify({
+        name: tool.name,
+        description: tool.description,
+        input_schema: tool.schema,
+    });
+    return countTokens(approximateJson);
+}
+/**
+ * Estimate total tokens for all tool definitions
+ */
+export function estimateAllToolsOverhead(tools) {
+    return tools.reduce((total, tool) => total + estimateToolDefinitionTokens(tool), 0);
+}
+/**
+ * Extract metadata from LangChain tools
+ * LangChain tools have .name, .description, and .schema properties
+ */
+export function extractToolMetadata(langchainTool) {
+    // LangChain tools may have Zod schemas - serialize them to JSON
+    let schemaObject = {};
+    if (langchainTool.schema) {
+        // If it's a Zod schema, it might have a _def property
+        if (typeof langchainTool.schema === "object" &&
+            langchainTool.schema !== null) {
+            // Try to serialize to JSON, fallback to empty object
+            try {
+                schemaObject = JSON.parse(JSON.stringify(langchainTool.schema));
+            }
+            catch {
+                schemaObject = {};
+            }
+        }
+    }
+    return {
+        name: langchainTool.name,
+        description: langchainTool.description || "",
+        schema: schemaObject,
+    };
+}
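As a rough illustration of how these helpers compose (a sketch under assumptions, not package documentation): tool metadata can be extracted from LangChain-style tool objects and summed into a single overhead figure. The tool definitions below are invented for the example, and the import path again assumes the dist layout listed above.

// Illustrative sketch only; the tools here are made up.
import { estimateAllToolsOverhead, extractToolMetadata } from "./dist/utils/tool-overhead-calculator.js";

const langchainStyleTools = [
    {
        name: "web_search",
        description: "Search the web and return result snippets",
        schema: {
            type: "object",
            properties: { query: { type: "string" } },
            required: ["query"],
        },
    },
    // Missing description/schema are tolerated and default to "" / {}.
    { name: "read_file" },
];

const toolOverheadTokens = estimateAllToolsOverhead(langchainStyleTools.map(extractToolMetadata));
// This figure is what calculateContextSize accepts as its toolOverheadTokens argument.
console.log(`Estimated tool definition overhead: ${toolOverheadTokens} tokens`);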
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@townco/agent",
-  "version": "0.1.51",
+  "version": "0.1.53",
   "type": "module",
   "module": "index.ts",
   "files": [
@@ -73,11 +73,11 @@
     "@opentelemetry/sdk-trace-base": "^1.28.0",
     "@opentelemetry/sdk-trace-node": "^1.28.0",
     "@opentelemetry/semantic-conventions": "^1.28.0",
-    "@townco/core": "0.0.
-    "@townco/gui-template": "0.1.
-    "@townco/tsconfig": "0.1.
-    "@townco/tui-template": "0.1.
-    "@townco/ui": "0.1.
+    "@townco/core": "0.0.26",
+    "@townco/gui-template": "0.1.45",
+    "@townco/tsconfig": "0.1.45",
+    "@townco/tui-template": "0.1.45",
+    "@townco/ui": "0.1.48",
     "exa-js": "^2.0.0",
     "hono": "^4.10.4",
     "langchain": "^1.0.3",