@townco/agent 0.1.82 → 0.1.84

Files changed (38)
  1. package/dist/acp-server/adapter.js +150 -49
  2. package/dist/acp-server/http.js +56 -1
  3. package/dist/acp-server/session-storage.d.ts +44 -12
  4. package/dist/acp-server/session-storage.js +153 -59
  5. package/dist/definition/index.d.ts +2 -2
  6. package/dist/definition/index.js +1 -1
  7. package/dist/runner/agent-runner.d.ts +4 -2
  8. package/dist/runner/hooks/executor.d.ts +1 -0
  9. package/dist/runner/hooks/executor.js +18 -2
  10. package/dist/runner/hooks/predefined/compaction-tool.js +3 -2
  11. package/dist/runner/hooks/predefined/tool-response-compactor.d.ts +0 -4
  12. package/dist/runner/hooks/predefined/tool-response-compactor.js +30 -16
  13. package/dist/runner/hooks/types.d.ts +4 -5
  14. package/dist/runner/langchain/index.d.ts +1 -0
  15. package/dist/runner/langchain/index.js +156 -33
  16. package/dist/runner/langchain/tools/artifacts.d.ts +68 -0
  17. package/dist/runner/langchain/tools/artifacts.js +466 -0
  18. package/dist/runner/langchain/tools/browser.js +15 -3
  19. package/dist/runner/langchain/tools/filesystem.d.ts +8 -4
  20. package/dist/runner/langchain/tools/filesystem.js +118 -82
  21. package/dist/runner/langchain/tools/generate_image.d.ts +19 -0
  22. package/dist/runner/langchain/tools/generate_image.js +54 -14
  23. package/dist/runner/langchain/tools/subagent.js +2 -2
  24. package/dist/runner/langchain/tools/todo.js +3 -0
  25. package/dist/runner/langchain/tools/web_search.js +6 -0
  26. package/dist/runner/session-context.d.ts +40 -0
  27. package/dist/runner/session-context.js +69 -0
  28. package/dist/runner/tools.d.ts +2 -2
  29. package/dist/runner/tools.js +2 -0
  30. package/dist/scaffold/project-scaffold.js +7 -3
  31. package/dist/telemetry/setup.js +1 -1
  32. package/dist/templates/index.d.ts +1 -1
  33. package/dist/tsconfig.tsbuildinfo +1 -1
  34. package/dist/utils/context-size-calculator.d.ts +1 -10
  35. package/dist/utils/context-size-calculator.js +1 -12
  36. package/dist/utils/token-counter.js +2 -2
  37. package/package.json +10 -10
  38. package/templates/index.ts +1 -1
package/dist/utils/context-size-calculator.d.ts CHANGED
@@ -12,25 +12,16 @@ export interface ContextSize {
     toolInputTokens: number;
     toolResultsTokens: number;
     totalEstimated: number;
-    llmReportedInputTokens?: number | undefined;
     modelContextWindow?: number;
 }
 /**
  * Calculate the full context size by counting ALL tokens in the provided messages.
  * This should be called every time a new context entry is created.
  *
- * How LLM-reported tokens work:
- * - The LLM API returns `usage_metadata.input_tokens` which is the ACTUAL token
- *   count for EVERYTHING sent to the API: system prompt, tool declarations,
- *   all messages, and all tool results
- * - We pass this as `llmReportedTokens` for comparison with our estimate
- * - This helps us validate the accuracy of our tokenizer estimates
- *
  * @param messages - Resolved messages from context entry
  * @param systemPrompt - Base system prompt (without TODO instructions)
- * @param llmReportedTokens - From API usage_metadata.input_tokens
  * @param toolOverheadTokens - Pre-calculated tool overhead (built-in/custom/filesystem tool definitions + TODO instructions)
  * @param mcpOverheadTokens - Pre-calculated MCP tool overhead (MCP tool definitions)
  * @param modelContextWindow - Model's max context window size
  */
-export declare function calculateContextSize(messages: SessionMessage[], systemPrompt?: string, llmReportedTokens?: number, toolOverheadTokens?: number, mcpOverheadTokens?: number, modelContextWindow?: number): ContextSize;
+export declare function calculateContextSize(messages: SessionMessage[], systemPrompt?: string, toolOverheadTokens?: number, mcpOverheadTokens?: number, modelContextWindow?: number): ContextSize;
package/dist/utils/context-size-calculator.js CHANGED
@@ -31,21 +31,13 @@ function countContentBlock(block) {
  * Calculate the full context size by counting ALL tokens in the provided messages.
  * This should be called every time a new context entry is created.
  *
- * How LLM-reported tokens work:
- * - The LLM API returns `usage_metadata.input_tokens` which is the ACTUAL token
- *   count for EVERYTHING sent to the API: system prompt, tool declarations,
- *   all messages, and all tool results
- * - We pass this as `llmReportedTokens` for comparison with our estimate
- * - This helps us validate the accuracy of our tokenizer estimates
- *
  * @param messages - Resolved messages from context entry
  * @param systemPrompt - Base system prompt (without TODO instructions)
- * @param llmReportedTokens - From API usage_metadata.input_tokens
  * @param toolOverheadTokens - Pre-calculated tool overhead (built-in/custom/filesystem tool definitions + TODO instructions)
  * @param mcpOverheadTokens - Pre-calculated MCP tool overhead (MCP tool definitions)
  * @param modelContextWindow - Model's max context window size
  */
-export function calculateContextSize(messages, systemPrompt, llmReportedTokens, toolOverheadTokens, mcpOverheadTokens, modelContextWindow) {
+export function calculateContextSize(messages, systemPrompt, toolOverheadTokens, mcpOverheadTokens, modelContextWindow) {
     const systemPromptTokens = systemPrompt ? countTokens(systemPrompt) : 0;
     const toolOverheadTokensEstimate = toolOverheadTokens ?? 0;
     const mcpOverheadTokensEstimate = mcpOverheadTokens ?? 0;
@@ -89,9 +81,6 @@ export function calculateContextSize(messages, systemPrompt, llmReportedTokens,
     if (mcpOverheadTokensEstimate > 0) {
         result.mcpOverheadTokens = mcpOverheadTokensEstimate;
     }
-    if (llmReportedTokens !== undefined) {
-        result.llmReportedInputTokens = llmReportedTokens;
-    }
     if (modelContextWindow !== undefined) {
         result.modelContextWindow = modelContextWindow;
     }
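Since llmReportedTokens sat in the middle of the positional parameter list, any call site that passed toolOverheadTokens or later arguments by position has to shift them left by one when upgrading; code that read llmReportedInputTokens off the result object also needs to go. A minimal migration sketch in TypeScript, under stated assumptions: the import path is guessed from the dist layout, and the declared inputs are hypothetical, so only the calculateContextSize signatures come from the diff above.

// Import path assumed from the dist layout; adjust to however the package exposes it.
import { calculateContextSize } from "@townco/agent/dist/utils/context-size-calculator";

// Hypothetical inputs, typed off the function itself so the sketch stays self-contained.
declare const messages: Parameters<typeof calculateContextSize>[0]; // SessionMessage[]
declare const systemPrompt: string;
declare const toolOverhead: number;
declare const mcpOverhead: number;
declare const contextWindow: number;

// 0.1.82: llmReportedTokens (usage_metadata.input_tokens) was the third positional argument:
// const size = calculateContextSize(messages, systemPrompt, usage.input_tokens, toolOverhead, mcpOverhead, contextWindow);

// 0.1.84: the parameter is gone, so every argument after systemPrompt moves up one slot.
const size = calculateContextSize(messages, systemPrompt, toolOverhead, mcpOverhead, contextWindow);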
package/dist/utils/token-counter.js CHANGED
@@ -12,7 +12,7 @@ export function countTokens(text) {
     try {
         return anthropicCountTokens(text);
     }
-    catch (error) {
+    catch (_error) {
         // Fallback to rough estimation if tokenizer fails
         // Approximate: ~4 characters per token
         return Math.ceil(text.length / 4);
@@ -31,7 +31,7 @@ export function countToolResultTokens(rawOutput) {
         const text = JSON.stringify(rawOutput);
         return countTokens(text);
     }
-    catch (error) {
+    catch (_error) {
         // If JSON.stringify fails, return 0
         return 0;
     }
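Renaming the catch parameters to _error is cosmetic (it satisfies unused-variable lint rules); the fallback behavior is untouched. For reference, a standalone sketch of the fallback arithmetic the catch branch uses, taken directly from the diff; this is not the package's export, just the same ~4 characters per token estimate:

// Rough estimate used when the Anthropic tokenizer throws: ceil(length / 4).
function fallbackCountTokens(text: string): number {
  return Math.ceil(text.length / 4);
}

fallbackCountTokens("The quick brown fox"); // 19 chars -> ceil(19 / 4) = 5 tokens
fallbackCountTokens("");                    // 0 chars  -> 0 tokens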
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@townco/agent",
-  "version": "0.1.82",
+  "version": "0.1.84",
   "type": "module",
   "module": "index.ts",
   "files": [
@@ -59,35 +59,35 @@
     "@agentclientprotocol/sdk": "^0.5.1",
     "@anthropic-ai/sandbox-runtime": "^0.0.2",
     "@anthropic-ai/sdk": "^0.70.0",
-    "@google/genai": "^0.14.1",
     "@anthropic-ai/tokenizer": "^0.0.4",
-    "@onkernel/sdk": "^0.16.0",
     "@electric-sql/pglite": "^0.2.15",
+    "@google/genai": "^0.14.1",
     "@langchain/anthropic": "1.0.1",
     "@langchain/core": "^1.0.3",
     "@langchain/google-genai": "^1.0.3",
     "@langchain/google-vertexai": "^1.0.3",
     "@langchain/mcp-adapters": "^1.0.0",
+    "@onkernel/sdk": "^0.16.0",
     "@opentelemetry/api": "^1.9.0",
     "@opentelemetry/api-logs": "^0.56.0",
     "@opentelemetry/core": "^1.28.0",
     "@opentelemetry/exporter-logs-otlp-http": "^0.56.0",
     "@opentelemetry/exporter-trace-otlp-http": "^0.56.0",
-    "@opentelemetry/instrumentation": "^0.56.0",
     "@opentelemetry/resources": "^1.28.0",
     "@opentelemetry/sdk-logs": "^0.56.0",
     "@opentelemetry/sdk-trace-base": "^1.28.0",
     "@opentelemetry/sdk-trace-node": "^1.28.0",
     "@opentelemetry/semantic-conventions": "^1.28.0",
-    "@townco/core": "0.0.55",
-    "@townco/gui-template": "0.1.74",
-    "@townco/tsconfig": "0.1.74",
-    "@townco/tui-template": "0.1.74",
-    "@townco/ui": "0.1.77",
+    "@townco/core": "0.0.57",
+    "@townco/gui-template": "0.1.76",
+    "@townco/supabase": "workspace:*",
+    "@townco/tsconfig": "0.1.76",
+    "@townco/tui-template": "0.1.76",
+    "@townco/ui": "0.1.79",
     "exa-js": "^2.0.0",
     "hono": "^4.10.4",
     "langchain": "^1.0.3",
-    "prettier": "^3.6.2",
+    "prettier": "^3.7.4",
     "zod": "^4.1.12"
   },
   "devDependencies": {
package/dist/templates/index.d.ts CHANGED
@@ -35,7 +35,7 @@ export interface TemplateVars {
     type: "tool_response";
     setting?:
         | {
-              maxContextThreshold?: number | undefined;
+              maxTokensSize?: number | undefined;
               responseTruncationThreshold?: number | undefined;
           }
         | undefined;
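Any agent definition that configured tool-response compaction under the old key needs the matching rename. A hedged sketch of the updated setting object; the numeric values are placeholders, and only the two field names and the "tool_response" type come from the hunk above:

const toolResponseCompaction = {
  type: "tool_response" as const,
  setting: {
    maxTokensSize: 100_000,             // was maxContextThreshold in 0.1.82
    responseTruncationThreshold: 4_000, // unchanged across versions
  },
};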