@townco/agent 0.1.49 → 0.1.51

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/dist/acp-server/adapter.d.ts +15 -0
  2. package/dist/acp-server/adapter.js +445 -67
  3. package/dist/acp-server/http.js +8 -1
  4. package/dist/acp-server/session-storage.d.ts +19 -0
  5. package/dist/acp-server/session-storage.js +9 -0
  6. package/dist/definition/index.d.ts +16 -4
  7. package/dist/definition/index.js +17 -4
  8. package/dist/index.d.ts +2 -1
  9. package/dist/index.js +10 -1
  10. package/dist/runner/agent-runner.d.ts +13 -2
  11. package/dist/runner/agent-runner.js +4 -0
  12. package/dist/runner/hooks/executor.d.ts +18 -1
  13. package/dist/runner/hooks/executor.js +74 -62
  14. package/dist/runner/hooks/predefined/compaction-tool.js +19 -3
  15. package/dist/runner/hooks/predefined/tool-response-compactor.d.ts +6 -0
  16. package/dist/runner/hooks/predefined/tool-response-compactor.js +461 -0
  17. package/dist/runner/hooks/registry.js +2 -0
  18. package/dist/runner/hooks/types.d.ts +39 -3
  19. package/dist/runner/hooks/types.js +9 -1
  20. package/dist/runner/langchain/index.d.ts +1 -0
  21. package/dist/runner/langchain/index.js +523 -321
  22. package/dist/runner/langchain/model-factory.js +1 -1
  23. package/dist/runner/langchain/otel-callbacks.d.ts +18 -0
  24. package/dist/runner/langchain/otel-callbacks.js +123 -0
  25. package/dist/runner/langchain/tools/subagent.js +21 -1
  26. package/dist/scaffold/link-local.d.ts +1 -0
  27. package/dist/scaffold/link-local.js +54 -0
  28. package/dist/scaffold/project-scaffold.js +1 -0
  29. package/dist/telemetry/index.d.ts +83 -0
  30. package/dist/telemetry/index.js +172 -0
  31. package/dist/telemetry/setup.d.ts +22 -0
  32. package/dist/telemetry/setup.js +141 -0
  33. package/dist/templates/index.d.ts +7 -0
  34. package/dist/tsconfig.tsbuildinfo +1 -1
  35. package/dist/utils/context-size-calculator.d.ts +29 -0
  36. package/dist/utils/context-size-calculator.js +78 -0
  37. package/dist/utils/index.d.ts +2 -0
  38. package/dist/utils/index.js +2 -0
  39. package/dist/utils/token-counter.d.ts +19 -0
  40. package/dist/utils/token-counter.js +44 -0
  41. package/index.ts +16 -1
  42. package/package.json +24 -7
  43. package/templates/index.ts +18 -6
@@ -0,0 +1,29 @@
1
+ /**
2
+ * Context size calculation utilities
3
+ * Calculates full context size by counting ALL tokens in messages
4
+ */
5
+ import type { SessionMessage } from "../acp-server/session-storage.js";
6
+ export interface ContextSize {
7
+ systemPromptTokens: number;
8
+ userMessagesTokens: number;
9
+ assistantMessagesTokens: number;
10
+ toolInputTokens: number;
11
+ toolResultsTokens: number;
12
+ totalEstimated: number;
13
+ llmReportedInputTokens?: number | undefined;
14
+ }
15
+ /**
16
+ * Calculate the full context size by counting ALL tokens in the provided messages.
17
+ * This should be called every time a new context entry is created.
18
+ *
19
+ * How LLM-reported tokens work:
20
+ * - The LLM API returns `usage_metadata.input_tokens` which is the ACTUAL token
21
+ * count for EVERYTHING sent to the API: system prompt, tool declarations,
22
+ * all messages, and all tool results
23
+ * - We pass this as `llmReportedTokens` for comparison with our estimate
24
+ * - This helps us validate the accuracy of our tokenizer estimates
25
+ * - Tool declarations are NOT counted separately in our estimate since they're
26
+ * included in the LLM-reported value
27
+ */
28
+ export declare function calculateContextSize(messages: SessionMessage[], // Resolved messages from context entry
29
+ systemPrompt?: string, llmReportedTokens?: number): ContextSize;
@@ -0,0 +1,78 @@
1
+ /**
2
+ * Context size calculation utilities
3
+ * Calculates full context size by counting ALL tokens in messages
4
+ */
5
+ import { countTokens, countToolResultTokens } from "./token-counter.js";
6
+ /**
7
+ * Extract and count tokens from a content block based on its type
8
+ */
9
+ function countContentBlock(block) {
10
+ if (block.type === "text") {
11
+ return {
12
+ textTokens: countTokens(block.text),
13
+ toolInputTokens: 0,
14
+ toolResultTokens: 0,
15
+ };
16
+ }
17
+ if (block.type === "tool_call") {
18
+ return {
19
+ textTokens: 0,
20
+ toolInputTokens: block.rawInput
21
+ ? countToolResultTokens(block.rawInput)
22
+ : 0,
23
+ toolResultTokens: block.rawOutput
24
+ ? countToolResultTokens(block.rawOutput)
25
+ : 0,
26
+ };
27
+ }
28
+ return { textTokens: 0, toolInputTokens: 0, toolResultTokens: 0 };
29
+ }
30
+ /**
31
+ * Calculate the full context size by counting ALL tokens in the provided messages.
32
+ * This should be called every time a new context entry is created.
33
+ *
34
+ * How LLM-reported tokens work:
35
+ * - The LLM API returns `usage_metadata.input_tokens` which is the ACTUAL token
36
+ * count for EVERYTHING sent to the API: system prompt, tool declarations,
37
+ * all messages, and all tool results
38
+ * - We pass this as `llmReportedTokens` for comparison with our estimate
39
+ * - This helps us validate the accuracy of our tokenizer estimates
40
+ * - Tool declarations are NOT counted separately in our estimate since they're
41
+ * included in the LLM-reported value
42
+ */
43
+ export function calculateContextSize(messages, // Resolved messages from context entry
44
+ systemPrompt, llmReportedTokens) {
45
+ const systemPromptTokens = systemPrompt ? countTokens(systemPrompt) : 0;
46
+ let userMessagesTokens = 0;
47
+ let assistantMessagesTokens = 0;
48
+ let toolInputTokens = 0;
49
+ let toolResultsTokens = 0;
50
+ // Go through ALL messages in this context snapshot
51
+ for (const message of messages) {
52
+ for (const block of message.content) {
53
+ const counts = countContentBlock(block);
54
+ // Accumulate based on message role
55
+ if (message.role === "user") {
56
+ userMessagesTokens += counts.textTokens;
57
+ }
58
+ else if (message.role === "assistant") {
59
+ assistantMessagesTokens += counts.textTokens;
60
+ toolInputTokens += counts.toolInputTokens;
61
+ toolResultsTokens += counts.toolResultTokens;
62
+ }
63
+ }
64
+ }
65
+ return {
66
+ systemPromptTokens,
67
+ userMessagesTokens,
68
+ assistantMessagesTokens,
69
+ toolInputTokens,
70
+ toolResultsTokens,
71
+ totalEstimated: systemPromptTokens +
72
+ userMessagesTokens +
73
+ assistantMessagesTokens +
74
+ toolInputTokens +
75
+ toolResultsTokens,
76
+ llmReportedInputTokens: llmReportedTokens,
77
+ };
78
+ }
@@ -1,2 +1,4 @@
1
1
  export { makeSubagentsTool } from "../runner/langchain/tools/subagent.js";
2
+ export * from "./context-size-calculator.js";
3
+ export * from "./token-counter.js";
2
4
  export * from "./tool.js";
@@ -1,2 +1,4 @@
1
1
  export { makeSubagentsTool } from "../runner/langchain/tools/subagent.js";
2
+ export * from "./context-size-calculator.js";
3
+ export * from "./token-counter.js";
2
4
  export * from "./tool.js";
@@ -0,0 +1,19 @@
1
+ /**
2
+ * Token counting utilities for tracking context size
3
+ * Uses Anthropic's tokenizer for rough approximation
4
+ */
5
+ /**
6
+ * Count tokens in a string using Anthropic's tokenizer
7
+ * Note: This is a rough approximation for Claude 3+ models
8
+ * For exact counts, use the API's usage_metadata where available
9
+ */
10
+ export declare function countTokens(text: string): number;
11
+ /**
12
+ * Count tokens in a tool result (rawOutput)
13
+ * Handles various data types that might be in tool outputs
14
+ */
15
+ export declare function countToolResultTokens(rawOutput: Record<string, unknown> | undefined): number;
16
+ /**
17
+ * Count tokens in multiple tool results
18
+ */
19
+ export declare function countMultipleToolResults(results: Array<Record<string, unknown> | undefined>): number;
@@ -0,0 +1,44 @@
1
+ /**
2
+ * Token counting utilities for tracking context size
3
+ * Uses Anthropic's tokenizer for rough approximation
4
+ */
5
+ import { countTokens as anthropicCountTokens } from "@anthropic-ai/tokenizer";
6
+ /**
7
+ * Count tokens in a string using Anthropic's tokenizer
8
+ * Note: This is a rough approximation for Claude 3+ models
9
+ * For exact counts, use the API's usage_metadata where available
10
+ */
11
+ export function countTokens(text) {
12
+ try {
13
+ return anthropicCountTokens(text);
14
+ }
15
+ catch (error) {
16
+ // Fallback to rough estimation if tokenizer fails
17
+ // Approximate: ~4 characters per token
18
+ return Math.ceil(text.length / 4);
19
+ }
20
+ }
21
+ /**
22
+ * Count tokens in a tool result (rawOutput)
23
+ * Handles various data types that might be in tool outputs
24
+ */
25
+ export function countToolResultTokens(rawOutput) {
26
+ if (!rawOutput) {
27
+ return 0;
28
+ }
29
+ try {
30
+ // Convert to string representation (as it would be sent to the LLM)
31
+ const text = JSON.stringify(rawOutput);
32
+ return countTokens(text);
33
+ }
34
+ catch (error) {
35
+ // If JSON.stringify fails, return 0
36
+ return 0;
37
+ }
38
+ }
39
+ /**
40
+ * Count tokens in multiple tool results
41
+ */
42
+ export function countMultipleToolResults(results) {
43
+ return results.reduce((total, result) => total + countToolResultTokens(result), 0);
44
+ }
package/index.ts CHANGED
@@ -2,8 +2,23 @@ import { basename } from "node:path";
2
2
  import { createLogger } from "@townco/core";
3
3
  import { makeHttpTransport, makeStdioTransport } from "./acp-server";
4
4
  import type { AgentDefinition } from "./definition";
5
+ import { initializeOpenTelemetryFromEnv } from "./telemetry/setup.js";
5
6
  import { makeSubagentsTool } from "./utils";
6
7
 
8
+ // Re-export telemetry configuration for library users
9
+ export { configureTelemetry, type TelemetryConfig } from "./telemetry/index.js";
10
+ export {
11
+ initializeOpenTelemetry,
12
+ initializeOpenTelemetryFromEnv,
13
+ type TelemetrySetupOptions,
14
+ } from "./telemetry/setup.js";
15
+
16
+ // Configure OpenTelemetry if enabled via environment variable
17
+ // Example: ENABLE_TELEMETRY=true bun run index.ts stdio
18
+ if (process.env.ENABLE_TELEMETRY === "true") {
19
+ initializeOpenTelemetryFromEnv();
20
+ }
21
+
7
22
  const logger = createLogger("agent-index");
8
23
 
9
24
  const exampleAgent: AgentDefinition = {
@@ -30,7 +45,7 @@ const exampleAgent: AgentDefinition = {
30
45
  {
31
46
  type: "context_size",
32
47
  setting: {
33
- threshold: 95,
48
+ threshold: 80,
34
49
  },
35
50
  callback: "compaction_tool",
36
51
  },
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@townco/agent",
3
- "version": "0.1.49",
3
+ "version": "0.1.51",
4
4
  "type": "module",
5
5
  "module": "index.ts",
6
6
  "files": [
@@ -35,6 +35,10 @@
35
35
  "import": "./dist/storage/index.js",
36
36
  "types": "./dist/storage/index.d.ts"
37
37
  },
38
+ "./telemetry": {
39
+ "import": "./dist/telemetry/index.js",
40
+ "types": "./dist/telemetry/index.d.ts"
41
+ },
38
42
  "./utils": {
39
43
  "import": "./dist/utils/index.js",
40
44
  "types": "./dist/utils/index.d.ts"
@@ -44,23 +48,36 @@
44
48
  "build": "tsc && cp -r scaffold/templates dist/scaffold/",
45
49
  "check": "tsc --noEmit",
46
50
  "start": "bun index.ts stdio",
47
- "start-http": "PORT=3100 bun index.ts http"
51
+ "start-http": "PORT=3100 bun index.ts http",
52
+ "start-http:telemetry": "ENABLE_TELEMETRY=true PORT=3100 bun index.ts http"
48
53
  },
49
54
  "dependencies": {
50
55
  "@agentclientprotocol/sdk": "^0.5.1",
51
56
  "@anthropic-ai/sandbox-runtime": "^0.0.2",
52
57
  "@anthropic-ai/sdk": "^0.70.0",
58
+ "@anthropic-ai/tokenizer": "^0.0.4",
53
59
  "@electric-sql/pglite": "^0.2.15",
54
60
  "@langchain/anthropic": "1.0.1",
55
61
  "@langchain/core": "^1.0.3",
56
62
  "@langchain/google-genai": "^1.0.3",
57
63
  "@langchain/google-vertexai": "^1.0.3",
58
64
  "@langchain/mcp-adapters": "^1.0.0",
59
- "@townco/core": "0.0.22",
60
- "@townco/gui-template": "0.1.41",
61
- "@townco/tui-template": "0.1.41",
62
- "@townco/tsconfig": "0.1.41",
63
- "@townco/ui": "0.1.44",
65
+ "@opentelemetry/api": "^1.9.0",
66
+ "@opentelemetry/api-logs": "^0.56.0",
67
+ "@opentelemetry/core": "^1.28.0",
68
+ "@opentelemetry/exporter-logs-otlp-http": "^0.56.0",
69
+ "@opentelemetry/exporter-trace-otlp-http": "^0.56.0",
70
+ "@opentelemetry/instrumentation": "^0.56.0",
71
+ "@opentelemetry/resources": "^1.28.0",
72
+ "@opentelemetry/sdk-logs": "^0.56.0",
73
+ "@opentelemetry/sdk-trace-base": "^1.28.0",
74
+ "@opentelemetry/sdk-trace-node": "^1.28.0",
75
+ "@opentelemetry/semantic-conventions": "^1.28.0",
76
+ "@townco/core": "0.0.24",
77
+ "@townco/gui-template": "0.1.43",
78
+ "@townco/tsconfig": "0.1.43",
79
+ "@townco/tui-template": "0.1.43",
80
+ "@townco/ui": "0.1.46",
64
81
  "exa-js": "^2.0.0",
65
82
  "hono": "^4.10.4",
66
83
  "langchain": "^1.0.3",
@@ -20,11 +20,23 @@ export interface TemplateVars {
20
20
  systemPrompt: string | null;
21
21
  hasWebSearch: boolean;
22
22
  hooks?:
23
- | Array<{
24
- type: "context_size";
25
- setting?: { threshold: number } | undefined;
26
- callback: string;
27
- }>
23
+ | Array<
24
+ | {
25
+ type: "context_size";
26
+ setting?: { threshold: number } | undefined;
27
+ callback: string;
28
+ }
29
+ | {
30
+ type: "tool_response";
31
+ setting?:
32
+ | {
33
+ maxContextThreshold?: number | undefined;
34
+ responseTruncationThreshold?: number | undefined;
35
+ }
36
+ | undefined;
37
+ callback: string;
38
+ }
39
+ >
28
40
  | undefined;
29
41
  }
30
42
 
@@ -41,7 +53,7 @@ export function getTemplateVars(
41
53
  hasWebSearch: tools.some(
42
54
  (tool) => typeof tool === "string" && tool === "web_search",
43
55
  ),
44
- hooks: definition.hooks,
56
+ hooks: definition.hooks as TemplateVars["hooks"],
45
57
  };
46
58
  }
47
59