@townco/agent 0.1.52 → 0.1.53

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/dist/acp-server/adapter.d.ts +2 -0
  2. package/dist/acp-server/adapter.js +28 -3
  3. package/dist/acp-server/cli.d.ts +3 -1
  4. package/dist/acp-server/session-storage.d.ts +2 -0
  5. package/dist/acp-server/session-storage.js +2 -0
  6. package/dist/bin.js +0 -0
  7. package/dist/definition/mcp.d.ts +0 -0
  8. package/dist/definition/mcp.js +0 -0
  9. package/dist/definition/tools/todo.d.ts +49 -0
  10. package/dist/definition/tools/todo.js +80 -0
  11. package/dist/definition/tools/web_search.d.ts +4 -0
  12. package/dist/definition/tools/web_search.js +26 -0
  13. package/dist/dev-agent/index.d.ts +2 -0
  14. package/dist/dev-agent/index.js +18 -0
  15. package/dist/example.d.ts +2 -0
  16. package/dist/example.js +19 -0
  17. package/dist/runner/agent-runner.d.ts +4 -0
  18. package/dist/runner/index.d.ts +3 -1
  19. package/dist/runner/langchain/index.d.ts +0 -1
  20. package/dist/runner/langchain/index.js +88 -27
  21. package/dist/tsconfig.tsbuildinfo +1 -1
  22. package/dist/utils/__tests__/tool-overhead-calculator.test.d.ts +1 -0
  23. package/dist/utils/__tests__/tool-overhead-calculator.test.js +153 -0
  24. package/dist/utils/context-size-calculator.d.ts +9 -4
  25. package/dist/utils/context-size-calculator.js +23 -6
  26. package/dist/utils/tool-overhead-calculator.d.ts +30 -0
  27. package/dist/utils/tool-overhead-calculator.js +54 -0
  28. package/package.json +6 -6
  29. package/dist/check-jaeger.d.ts +0 -5
  30. package/dist/check-jaeger.js +0 -82
  31. package/dist/run-subagents.d.ts +0 -9
  32. package/dist/run-subagents.js +0 -110
  33. package/dist/runner/langchain/custom-stream-types.d.ts +0 -36
  34. package/dist/runner/langchain/custom-stream-types.js +0 -23
  35. package/dist/runner/langchain/tools/bash.d.ts +0 -14
  36. package/dist/runner/langchain/tools/bash.js +0 -135
  37. package/dist/test-telemetry.d.ts +0 -5
  38. package/dist/test-telemetry.js +0 -88
package/dist/acp-server/adapter.d.ts CHANGED
@@ -24,6 +24,8 @@ export declare class AgentAcpAdapter implements acp.Agent {
  private agentVersion;
  private agentDescription;
  private agentSuggestedPrompts;
+ private currentToolOverheadTokens;
+ private currentMcpOverheadTokens;
  constructor(agent: AgentRunner, connection: acp.AgentSideConnection, agentDir?: string, agentName?: string);
  /**
  * Helper to save session to disk
package/dist/acp-server/adapter.js CHANGED
@@ -103,6 +103,8 @@ export class AgentAcpAdapter {
  agentVersion;
  agentDescription;
  agentSuggestedPrompts;
+ currentToolOverheadTokens = 0; // Track tool overhead for current turn
+ currentMcpOverheadTokens = 0; // Track MCP overhead for current turn
  constructor(agent, connection, agentDir, agentName) {
  this.connection = connection;
  this.sessions = new Map();
@@ -338,6 +340,9 @@ export class AgentAcpAdapter {
  }
  session.pendingPrompt?.abort();
  session.pendingPrompt = new AbortController();
+ // Reset tool overhead for new turn (will be set by harness)
+ this.currentToolOverheadTokens = 0;
+ this.currentMcpOverheadTokens = 0;
  // Generate a unique messageId for this assistant response
  const messageId = Math.random().toString(36).substring(2);
  // Extract and store the user message
@@ -397,7 +402,9 @@ export class AgentAcpAdapter {
  }
  }
  // Calculate context size - no LLM call yet, so only estimated values
- const context_size = calculateContextSize(contextMessages, this.agent.definition.systemPrompt ?? undefined, undefined);
+ const context_size = calculateContextSize(contextMessages, this.agent.definition.systemPrompt ?? undefined, undefined, // No LLM-reported tokens yet
+ this.currentToolOverheadTokens, // Include tool overhead
+ this.currentMcpOverheadTokens);
  const contextSnapshot = createContextSnapshot(session.messages.length, new Date().toISOString(), previousContext, context_size);
  session.context.push(contextSnapshot);
  await this.saveSessionToDisk(params.sessionId, session);
@@ -461,6 +468,20 @@ export class AgentAcpAdapter {
  let iterResult = await generator.next();
  while (!iterResult.done) {
  const msg = iterResult.value;
+ // Capture tool overhead info if provided by harness
+ if ("sessionUpdate" in msg &&
+ msg.sessionUpdate === "tool_overhead_info") {
+ const overheadInfo = msg;
+ this.currentToolOverheadTokens = overheadInfo.toolOverheadTokens;
+ this.currentMcpOverheadTokens = overheadInfo.mcpOverheadTokens;
+ logger.debug("Received tool overhead info from harness", {
+ toolOverheadTokens: this.currentToolOverheadTokens,
+ mcpOverheadTokens: this.currentMcpOverheadTokens,
+ });
+ // Don't send this update to client, it's internal metadata
+ iterResult = await generator.next();
+ continue;
+ }
  // Extract and accumulate token usage from message chunks
  if ("sessionUpdate" in msg &&
  msg.sessionUpdate === "agent_message_chunk" &&
@@ -659,7 +680,9 @@ export class AgentAcpAdapter {
  }
  }
  // Calculate context size - tool result is now in the message, but hasn't been sent to LLM yet
- const context_size = calculateContextSize(contextMessages, this.agent.definition.systemPrompt ?? undefined, undefined);
+ const context_size = calculateContextSize(contextMessages, this.agent.definition.systemPrompt ?? undefined, undefined, // Tool result hasn't been sent to LLM yet, so no new LLM-reported tokens
+ this.currentToolOverheadTokens, // Include tool overhead
+ this.currentMcpOverheadTokens);
  // Create snapshot with a pointer to the partial message (not a full copy!)
  const midTurnSnapshot = {
  timestamp: new Date().toISOString(),
@@ -779,7 +802,9 @@ export class AgentAcpAdapter {
  }
  }
  // Calculate context size with LLM-reported tokens from this turn
- const context_size = calculateContextSize(contextMessages, this.agent.definition.systemPrompt ?? undefined, turnTokenUsage.inputTokens);
+ const context_size = calculateContextSize(contextMessages, this.agent.definition.systemPrompt ?? undefined, turnTokenUsage.inputTokens, // Final LLM-reported tokens from this turn
+ this.currentToolOverheadTokens, // Include tool overhead
+ this.currentMcpOverheadTokens);
  const contextSnapshot = createContextSnapshot(session.messages.length, new Date().toISOString(), previousContext, context_size);
  session.context.push(contextSnapshot);
  await this.saveSessionToDisk(params.sessionId, session);
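Note: the updated declaration of calculateContextSize (package/dist/utils/context-size-calculator.d.ts, +9 -4 in the file list) is not included in this diff. Judging only from the three call sites above and the new session-storage fields below, the function appears to have gained two trailing overhead parameters. The following TypeScript sketch is an inference about that shape, not the actual declaration:

// Hypothetical signature, inferred from the adapter.js call sites above; the
// real declaration in context-size-calculator.d.ts may use different names/types.
export declare function calculateContextSize(
  messages: unknown[],              // contextMessages built by the adapter
  systemPrompt?: string,            // this.agent.definition.systemPrompt ?? undefined
  llmReportedInputTokens?: number,  // undefined until the LLM reports usage for the turn
  toolOverheadTokens?: number,      // new: non-MCP tool definitions (+ todo instructions)
  mcpOverheadTokens?: number,       // new: MCP tool definitions
): ContextEntry["context_size"];    // return type assumed from session-storage.d.ts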
package/dist/acp-server/cli.d.ts CHANGED
@@ -1,3 +1,5 @@
  import type { AgentDefinition } from "../definition";
  import { type AgentRunner } from "../runner";
- export declare function makeStdioTransport(agent: AgentRunner | AgentDefinition): void;
+ export declare function makeStdioTransport(
+ agent: AgentRunner | AgentDefinition,
+ ): void;
package/dist/acp-server/session-storage.d.ts CHANGED
@@ -62,6 +62,8 @@ export interface ContextEntry {
  */
  context_size: {
  systemPromptTokens: number;
+ toolOverheadTokens?: number | undefined;
+ mcpOverheadTokens?: number | undefined;
  userMessagesTokens: number;
  assistantMessagesTokens: number;
  toolInputTokens: number;
package/dist/acp-server/session-storage.js CHANGED
@@ -58,6 +58,8 @@ const contextEntrySchema = z.object({
  compactedUpTo: z.number().optional(),
  context_size: z.object({
  systemPromptTokens: z.number(),
+ toolOverheadTokens: z.number().optional(),
+ mcpOverheadTokens: z.number().optional(),
  userMessagesTokens: z.number(),
  assistantMessagesTokens: z.number(),
  toolInputTokens: z.number(),
package/dist/bin.js CHANGED
File without changes
package/dist/definition/mcp.d.ts CHANGED
File without changes
package/dist/definition/mcp.js CHANGED
File without changes
package/dist/definition/tools/todo.d.ts ADDED
@@ -0,0 +1,49 @@
+ import { z } from "zod";
+ export declare const todoItemSchema: z.ZodObject<
+ {
+ content: z.ZodString;
+ status: z.ZodEnum<{
+ pending: "pending";
+ in_progress: "in_progress";
+ completed: "completed";
+ }>;
+ activeForm: z.ZodString;
+ },
+ z.core.$strip
+ >;
+ export declare const todoWrite: import("langchain").DynamicStructuredTool<
+ z.ZodObject<
+ {
+ todos: z.ZodArray<
+ z.ZodObject<
+ {
+ content: z.ZodString;
+ status: z.ZodEnum<{
+ pending: "pending";
+ in_progress: "in_progress";
+ completed: "completed";
+ }>;
+ activeForm: z.ZodString;
+ },
+ z.core.$strip
+ >
+ >;
+ },
+ z.core.$strip
+ >,
+ {
+ todos: {
+ content: string;
+ status: "pending" | "in_progress" | "completed";
+ activeForm: string;
+ }[];
+ },
+ {
+ todos: {
+ content: string;
+ status: "pending" | "in_progress" | "completed";
+ activeForm: string;
+ }[];
+ },
+ string
+ >;
package/dist/definition/tools/todo.js ADDED
@@ -0,0 +1,80 @@
+ import { tool } from "langchain";
+ import { z } from "zod";
+ export const todoItemSchema = z.object({
+ content: z.string().min(1),
+ status: z.enum(["pending", "in_progress", "completed"]),
+ activeForm: z.string().min(1),
+ });
+ export const todoWrite = tool(
+ ({ todos }) => {
+ // Simple implementation that confirms the todos were written
+ return `Successfully updated todo list with ${todos.length} items`;
+ },
+ {
+ name: "todo_write",
+ description: `Use this tool to create and manage a structured task list for your current coding session. This helps you track progress, organize complex tasks, and demonstrate thoroughness to the user.
+ It also helps the user understand the progress of the task and overall progress of their requests.
+
+ ## When to Use This Tool
+ Use this tool proactively in these scenarios:
+
+ 1. Complex multi-step tasks - When a task requires 3 or more distinct steps or actions
+ 2. Non-trivial and complex tasks - Tasks that require careful planning or multiple operations
+ 3. User explicitly requests todo list - When the user directly asks you to use the todo list
+ 4. User provides multiple tasks - When users provide a list of things to be done (numbered or comma-separated)
+ 5. After receiving new instructions - Immediately capture user requirements as todos
+ 6. When you start working on a task - Mark it as in_progress BEFORE beginning work. Ideally you should only have one todo as in_progress at a time
+ 7. After completing a task - Mark it as completed and add any new follow-up tasks discovered during implementation
+
+ ## When NOT to Use This Tool
+
+ Skip using this tool when:
+ 1. There is only a single, straightforward task
+ 2. The task is trivial and tracking it provides no organizational benefit
+ 3. The task can be completed in less than 3 trivial steps
+ 4. The task is purely conversational or informational
+
+ NOTE that you should not use this tool if there is only one trivial task to do. In this case you are better off just doing the task directly.
+
+ ## Task States and Management
+
+ 1. **Task States**: Use these states to track progress:
+ - pending: Task not yet started
+ - in_progress: Currently working on (limit to ONE task at a time)
+ - completed: Task finished successfully
+
+ **IMPORTANT**: Task descriptions must have two forms:
+ - content: The imperative form describing what needs to be done (e.g., "Run tests", "Build the project")
+ - activeForm: The present continuous form shown during execution (e.g., "Running tests", "Building the project")
+
+ 2. **Task Management**:
+ - Update task status in real-time as you work
+ - Mark tasks complete IMMEDIATELY after finishing (don't batch completions)
+ - Exactly ONE task must be in_progress at any time (not less, not more)
+ - Complete current tasks before starting new ones
+ - Remove tasks that are no longer relevant from the list entirely
+
+ 3. **Task Completion Requirements**:
+ - ONLY mark a task as completed when you have FULLY accomplished it
+ - If you encounter errors, blockers, or cannot finish, keep the task as in_progress
+ - When blocked, create a new task describing what needs to be resolved
+ - Never mark a task as completed if:
+ - Tests are failing
+ - Implementation is partial
+ - You encountered unresolved errors
+ - You couldn't find necessary files or dependencies
+
+ 4. **Task Breakdown**:
+ - Create specific, actionable items
+ - Break complex tasks into smaller, manageable steps
+ - Use clear, descriptive task names
+ - Always provide both forms:
+ - content: "Fix authentication bug"
+ - activeForm: "Fixing authentication bug"
+
+ When in doubt, use this tool. Being proactive with task management demonstrates attentiveness and ensures you complete all requirements successfully.`,
+ schema: z.object({
+ todos: z.array(todoItemSchema),
+ }),
+ },
+ );
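Since todo_write is a regular LangChain tool created with tool(), it can be exercised directly; a minimal usage sketch based on the schema above (the import path assumes the compiled dist layout):

// Usage sketch for the todo_write tool defined above.
import { todoWrite } from "./todo.js";

const result = await todoWrite.invoke({
  todos: [
    { content: "Fix authentication bug", status: "in_progress", activeForm: "Fixing authentication bug" },
    { content: "Run tests", status: "pending", activeForm: "Running tests" },
  ],
});
console.log(result); // "Successfully updated todo list with 2 items"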
package/dist/definition/tools/web_search.d.ts ADDED
@@ -0,0 +1,4 @@
+ import { ExaSearchResults } from "@langchain/exa";
+ export declare function makeWebSearchTool(): ExaSearchResults<{
+ text: true;
+ }>;
package/dist/definition/tools/web_search.js ADDED
@@ -0,0 +1,26 @@
+ import { ExaSearchResults } from "@langchain/exa";
+ import Exa from "exa-js";
+
+ let _webSearchInstance = null;
+ export function makeWebSearchTool() {
+ if (_webSearchInstance) {
+ return _webSearchInstance;
+ }
+ const apiKey = process.env.EXA_API_KEY;
+ if (!apiKey) {
+ throw new Error(
+ "EXA_API_KEY environment variable is required to use the web_search tool. " +
+ "Please set it to your Exa API key from https://exa.ai",
+ );
+ }
+ const client = new Exa(apiKey);
+ _webSearchInstance = new ExaSearchResults({
+ client,
+ searchArgs: {
+ numResults: 5,
+ type: "auto",
+ text: true,
+ },
+ });
+ return _webSearchInstance;
+ }
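makeWebSearchTool caches a single ExaSearchResults instance and fails fast when EXA_API_KEY is missing. A minimal usage sketch; the plain string query follows the usual LangChain Tool interface, but verify against the installed @langchain/exa version if it expects structured input:

// Usage sketch, assuming EXA_API_KEY is set in the environment.
import { makeWebSearchTool } from "./web_search.js";

const webSearch = makeWebSearchTool(); // subsequent calls return the cached instance
const results = await webSearch.invoke("LangChain agent harness token accounting");
console.log(results); // serialized Exa results (up to 5, with page text)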
package/dist/dev-agent/index.d.ts ADDED
@@ -0,0 +1,2 @@
+ #!/usr/bin/env bun
+ export {};
package/dist/dev-agent/index.js ADDED
@@ -0,0 +1,18 @@
+ #!/usr/bin/env bun
+ import { readFileSync } from "node:fs";
+ import { join } from "node:path";
+ import { makeHttpTransport, makeStdioTransport } from "../acp-server/index";
+ // Load agent definition from JSON file
+ const configPath = join(import.meta.dir, "agent.json");
+ const agent = JSON.parse(readFileSync(configPath, "utf-8"));
+ const transport = process.argv[2] || "stdio";
+ if (transport === "http") {
+ makeHttpTransport(agent);
+ }
+ else if (transport === "stdio") {
+ makeStdioTransport(agent);
+ }
+ else {
+ console.error(`Invalid transport: ${transport}`);
+ process.exit(1);
+ }
package/dist/example.d.ts ADDED
@@ -0,0 +1,2 @@
+ #!/usr/bin/env bun
+ export {};
package/dist/example.js ADDED
@@ -0,0 +1,19 @@
+ #!/usr/bin/env bun
+ import { makeHttpTransport, makeStdioTransport } from "./acp-server/index.js";
+
+ const exampleAgent = {
+ model: "claude-sonnet-4-5-20250929",
+ systemPrompt: "You are a helpful assistant.",
+ tools: ["todo_write", "get_weather", "web_search"],
+ };
+ // Parse transport type from command line argument
+ const transport = process.argv[2] || "stdio";
+ if (transport === "http") {
+ makeHttpTransport(exampleAgent);
+ } else if (transport === "stdio") {
+ makeStdioTransport(exampleAgent);
+ } else {
+ console.error(`Invalid transport: ${transport}`);
+ console.error("Usage: bun run example.ts [stdio|http]");
+ process.exit(1);
+ }
package/dist/runner/agent-runner.d.ts CHANGED
@@ -109,6 +109,10 @@ export type ExtendedSessionUpdate = (SessionNotification["update"] & {
  contextInputTokens?: number;
  [key: string]: unknown;
  };
+ } | {
+ sessionUpdate: "tool_overhead_info";
+ toolOverheadTokens: number;
+ mcpOverheadTokens: number;
  } | AgentMessageChunkWithTokens | HookNotificationUpdate;
  /** Describes an object that can run an agent definition */
  export interface AgentRunner {
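tool_overhead_info is a new internal variant of ExtendedSessionUpdate: the LangChain runner yields it once near the start of a turn, and the ACP adapter records it without forwarding it to the client (see the adapter.js hunk above). A minimal consumer sketch; runner, req, and recordOverhead are hypothetical stand-ins, not part of this package:

// Sketch: narrowing on the sessionUpdate discriminant while draining a runner.
for await (const update of runner.invoke(req)) {
  if ("sessionUpdate" in update && update.sessionUpdate === "tool_overhead_info") {
    // Internal metadata only: record it, don't send it to the client.
    // recordOverhead is a placeholder for however the host tracks these numbers.
    recordOverhead(update.toolOverheadTokens, update.mcpOverheadTokens);
    continue;
  }
  // ...handle/forward every other session update as before
}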
package/dist/runner/index.d.ts CHANGED
@@ -1,4 +1,6 @@
  import type { AgentDefinition } from "../definition";
  import { type AgentRunner } from "./agent-runner";
  export type { AgentRunner };
- export declare const makeRunnerFromDefinition: (definition: AgentDefinition) => AgentRunner;
+ export declare const makeRunnerFromDefinition: (
+ definition: AgentDefinition,
+ ) => AgentRunner;
package/dist/runner/langchain/index.d.ts CHANGED
@@ -10,7 +10,6 @@ type MakeLazy<T> = T extends LangchainTool ? () => T : never;
  export declare const TOOL_REGISTRY: Record<BuiltInToolType, LangchainTool | LazyLangchainTool | LazyLangchainTools>;
  export declare class LangchainAgent implements AgentRunner {
  definition: CreateAgentRunnerParams;
- private toolSpans;
  constructor(params: CreateAgentRunnerParams);
  invoke(req: InvokeRequest): AsyncGenerator<ExtendedSessionUpdate, PromptResponse, undefined>;
  }
package/dist/runner/langchain/index.js CHANGED
@@ -54,7 +54,6 @@ async function loadCustomTools(modulePaths) {
  }
  export class LangchainAgent {
  definition;
- toolSpans = new Map();
  constructor(params) {
  this.definition = params;
  }
@@ -157,10 +156,41 @@ export class LangchainAgent {
  const customTools = await loadCustomTools(customToolPaths);
  enabledTools.push(...customTools);
  }
- // MCP tools
+ // Calculate tool overhead tokens for non-MCP tools
+ const { countTokens } = await import("../../utils/token-counter.js");
+ const { extractToolMetadata, estimateAllToolsOverhead } = await import("../../utils/tool-overhead-calculator.js");
+ // Calculate overhead for non-MCP tools (built-in, custom, filesystem)
+ const nonMcpToolMetadata = enabledTools.map(extractToolMetadata);
+ const nonMcpToolDefinitionsTokens = estimateAllToolsOverhead(nonMcpToolMetadata);
+ // Calculate TODO_WRITE_INSTRUCTIONS overhead if applicable
+ const hasTodoWriteTool = builtInNames.includes("todo_write");
+ const todoInstructionsTokens = hasTodoWriteTool
+ ? countTokens(TODO_WRITE_INSTRUCTIONS)
+ : 0;
+ // Total non-MCP tool overhead: tool definitions + TODO instructions
+ const toolOverheadTokens = nonMcpToolDefinitionsTokens + todoInstructionsTokens;
+ // MCP tools - calculate overhead separately
+ let mcpOverheadTokens = 0;
  if ((this.definition.mcps?.length ?? 0) > 0) {
- enabledTools.push(...(await makeMcpToolsClient(this.definition.mcps).getTools()));
+ const mcpTools = await makeMcpToolsClient(this.definition.mcps).getTools();
+ const mcpToolMetadata = mcpTools.map(extractToolMetadata);
+ mcpOverheadTokens = estimateAllToolsOverhead(mcpToolMetadata);
+ enabledTools.push(...mcpTools);
  }
+ _logger.debug("Calculated tool overhead for context sizing", {
+ enabledToolCount: enabledTools.length,
+ nonMcpToolDefinitionsTokens,
+ mcpToolDefinitionsTokens: mcpOverheadTokens,
+ todoInstructionsTokens,
+ totalNonMcpOverheadTokens: toolOverheadTokens,
+ totalMcpOverheadTokens: mcpOverheadTokens,
+ });
+ // Yield tool overhead info to adapter early in the turn
+ yield {
+ sessionUpdate: "tool_overhead_info",
+ toolOverheadTokens,
+ mcpOverheadTokens,
+ };
  // Wrap tools with response compaction if hook is configured
  const hooks = this.definition.hooks ?? [];
  const hasToolResponseHook = hooks.some((h) => h.type === "tool_response");
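extractToolMetadata and estimateAllToolsOverhead come from the new package/dist/utils/tool-overhead-calculator.js (+54 lines, with tests in the file list, but not shown in this diff). Only their rough interface can be inferred from these call sites; the sketch below is an assumption about that interface, not the actual module:

// Inferred-only sketch of tool-overhead-calculator's interface; the real field
// names and token-estimation strategy may differ.
interface ToolMetadata {
  name: string;
  description: string;
  schema: unknown; // the input schema the LLM sees for the tool
}
declare function extractToolMetadata(tool: { name: string; description: string; schema: unknown }): ToolMetadata;
declare function estimateAllToolsOverhead(tools: ToolMetadata[]): number; // estimated prompt tokens for all tool definitions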
@@ -255,9 +285,12 @@ export class LangchainAgent {
  }
  // Filter tools if running in subagent mode
  const isSubagent = req.sessionMeta?.[SUBAGENT_MODE_KEY] === true;
- const finalTools = isSubagent
+ const filteredTools = isSubagent
  ? wrappedTools.filter((t) => t.name !== TODO_WRITE_TOOL_NAME && t.name !== TASK_TOOL_NAME)
  : wrappedTools;
+ // Wrap tools with tracing so each tool executes within its own span context.
+ // This ensures subagent spans are children of the Task tool span.
+ const finalTools = filteredTools.map((t) => wrapToolWithTracing(t, req.sessionId));
  // Create the model instance using the factory
  // This detects the provider from the model string:
  // - "gemini-2.0-flash" → Google Generative AI
@@ -362,16 +395,6 @@ export class LangchainAgent {
  if (toolCall.id == null) {
  throw new Error(`Tool call is missing id: ${JSON.stringify(toolCall)}`);
  }
- // Create tool span within the invocation context
- // This makes the tool span a child of the invocation span
- const toolInputJson = JSON.stringify(toolCall.args);
- const toolSpan = context.with(invocationContext, () => telemetry.startSpan("agent.tool_call", {
- "tool.name": toolCall.name,
- "tool.id": toolCall.id,
- "tool.input": toolInputJson,
- "agent.session_id": req.sessionId,
- }));
- this.toolSpans.set(toolCall.id, toolSpan);
  telemetry.log("info", `Tool call started: ${toolCall.name}`, {
  toolCallId: toolCall.id,
  toolName: toolCall.name,
@@ -553,19 +576,9 @@ export class LangchainAgent {
  // Skip tool_call_update for todo_write tools
  continue;
  }
- // End telemetry span for this tool call
- const toolSpan = this.toolSpans.get(aiMessage.tool_call_id);
- if (toolSpan) {
- // Add tool output to span before ending
- telemetry.setSpanAttributes(toolSpan, {
- "tool.output": aiMessage.content,
- });
- telemetry.log("info", "Tool call completed", {
- toolCallId: aiMessage.tool_call_id,
- });
- telemetry.endSpan(toolSpan);
- this.toolSpans.delete(aiMessage.tool_call_id);
- }
+ telemetry.log("info", "Tool call completed", {
+ toolCallId: aiMessage.tool_call_id,
+ });
  // Send status update (metadata only, no content)
  yield {
  sessionUpdate: "tool_call_update",
@@ -715,3 +728,51 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
  `.trim();
  // Re-export subagent tool utility
  export { makeSubagentsTool } from "./tools/subagent.js";
+ /**
+ * Wraps a LangChain tool with OpenTelemetry tracing.
+ * This ensures the tool executes within its own span context,
+ * so any child operations (like subagent spawning) become children
+ * of the tool span rather than the parent invocation span.
+ */
+ function wrapToolWithTracing(originalTool, sessionId) {
+ const wrappedFunc = async (input) => {
+ const toolInputJson = JSON.stringify(input);
+ const toolSpan = telemetry.startSpan("agent.tool_call", {
+ "tool.name": originalTool.name,
+ "tool.input": toolInputJson,
+ "agent.session_id": sessionId,
+ });
+ // Create a context with the tool span as active
+ const spanContext = toolSpan
+ ? trace.setSpan(context.active(), toolSpan)
+ : context.active();
+ try {
+ // Execute within the tool span's context
+ const result = await context.with(spanContext, () => originalTool.invoke(input));
+ const resultStr = typeof result === "string" ? result : JSON.stringify(result);
+ if (toolSpan) {
+ telemetry.setSpanAttributes(toolSpan, {
+ "tool.output": resultStr,
+ });
+ telemetry.endSpan(toolSpan);
+ }
+ return result;
+ }
+ catch (error) {
+ if (toolSpan) {
+ telemetry.endSpan(toolSpan, error);
+ }
+ throw error;
+ }
+ };
+ // Create new tool with wrapped function
+ const wrappedTool = tool(wrappedFunc, {
+ name: originalTool.name,
+ description: originalTool.description,
+ schema: originalTool.schema,
+ });
+ // Preserve metadata
+ wrappedTool.prettyName = originalTool.prettyName;
+ wrappedTool.icon = originalTool.icon;
+ return wrappedTool;
+ }