@townco/agent 0.1.50 → 0.1.52

Files changed (60)
  1. package/dist/acp-server/adapter.d.ts +10 -0
  2. package/dist/acp-server/adapter.js +287 -80
  3. package/dist/acp-server/cli.d.ts +1 -3
  4. package/dist/acp-server/http.js +8 -1
  5. package/dist/acp-server/index.js +5 -0
  6. package/dist/acp-server/session-storage.d.ts +17 -3
  7. package/dist/acp-server/session-storage.js +9 -0
  8. package/dist/bin.js +0 -0
  9. package/dist/check-jaeger.d.ts +5 -0
  10. package/dist/check-jaeger.js +82 -0
  11. package/dist/definition/index.d.ts +16 -4
  12. package/dist/definition/index.js +17 -4
  13. package/dist/index.js +1 -1
  14. package/dist/run-subagents.d.ts +9 -0
  15. package/dist/run-subagents.js +110 -0
  16. package/dist/runner/agent-runner.d.ts +10 -2
  17. package/dist/runner/agent-runner.js +4 -0
  18. package/dist/runner/hooks/executor.d.ts +17 -0
  19. package/dist/runner/hooks/executor.js +66 -0
  20. package/dist/runner/hooks/predefined/compaction-tool.js +9 -1
  21. package/dist/runner/hooks/predefined/tool-response-compactor.d.ts +6 -0
  22. package/dist/runner/hooks/predefined/tool-response-compactor.js +461 -0
  23. package/dist/runner/hooks/registry.js +2 -0
  24. package/dist/runner/hooks/types.d.ts +39 -3
  25. package/dist/runner/hooks/types.js +9 -4
  26. package/dist/runner/index.d.ts +1 -3
  27. package/dist/runner/langchain/custom-stream-types.d.ts +36 -0
  28. package/dist/runner/langchain/custom-stream-types.js +23 -0
  29. package/dist/runner/langchain/index.js +102 -76
  30. package/dist/runner/langchain/otel-callbacks.js +67 -1
  31. package/dist/runner/langchain/tools/bash.d.ts +14 -0
  32. package/dist/runner/langchain/tools/bash.js +135 -0
  33. package/dist/scaffold/link-local.d.ts +1 -0
  34. package/dist/scaffold/link-local.js +54 -0
  35. package/dist/scaffold/project-scaffold.js +1 -0
  36. package/dist/telemetry/setup.d.ts +3 -1
  37. package/dist/telemetry/setup.js +33 -3
  38. package/dist/templates/index.d.ts +7 -0
  39. package/dist/test-telemetry.d.ts +5 -0
  40. package/dist/test-telemetry.js +88 -0
  41. package/dist/tsconfig.tsbuildinfo +1 -1
  42. package/dist/utils/context-size-calculator.d.ts +29 -0
  43. package/dist/utils/context-size-calculator.js +78 -0
  44. package/dist/utils/index.d.ts +2 -0
  45. package/dist/utils/index.js +2 -0
  46. package/dist/utils/token-counter.d.ts +19 -0
  47. package/dist/utils/token-counter.js +44 -0
  48. package/index.ts +1 -1
  49. package/package.json +7 -6
  50. package/templates/index.ts +18 -6
  51. package/dist/definition/mcp.d.ts +0 -0
  52. package/dist/definition/mcp.js +0 -0
  53. package/dist/definition/tools/todo.d.ts +0 -49
  54. package/dist/definition/tools/todo.js +0 -80
  55. package/dist/definition/tools/web_search.d.ts +0 -4
  56. package/dist/definition/tools/web_search.js +0 -26
  57. package/dist/dev-agent/index.d.ts +0 -2
  58. package/dist/dev-agent/index.js +0 -18
  59. package/dist/example.d.ts +0 -2
  60. package/dist/example.js +0 -19

package/dist/runner/langchain/index.js
@@ -161,11 +161,103 @@ export class LangchainAgent {
  if ((this.definition.mcps?.length ?? 0) > 0) {
  enabledTools.push(...(await makeMcpToolsClient(this.definition.mcps).getTools()));
  }
+ // Wrap tools with response compaction if hook is configured
+ const hooks = this.definition.hooks ?? [];
+ const hasToolResponseHook = hooks.some((h) => h.type === "tool_response");
+ const noSession = req.sessionMeta?.[SUBAGENT_MODE_KEY] === true; // Subagents don't have session storage
+ // Track cumulative tool output tokens in this turn for proper context calculation
+ let cumulativeToolOutputTokens = 0;
+ let wrappedTools = enabledTools;
+ if (hasToolResponseHook && !noSession) {
+ const { countToolResultTokens } = await import("../../utils/token-counter.js");
+ const { toolResponseCompactor } = await import("../hooks/predefined/tool-response-compactor.js");
+ wrappedTools = enabledTools.map((originalTool) => {
+ const wrappedFunc = async (input) => {
+ // Execute the original tool
+ const result = await originalTool.invoke(input);
+ // Check if result should be compacted
+ const resultStr = typeof result === "string" ? result : JSON.stringify(result);
+ const rawOutput = { content: resultStr };
+ const outputTokens = countToolResultTokens(rawOutput);
+ // Skip compaction for small results (under 10k tokens)
+ if (outputTokens < 10000) {
+ // Still track this in cumulative total
+ cumulativeToolOutputTokens += outputTokens;
+ return result;
+ }
+ _logger.info("Tool wrapper: compacting large tool result", {
+ toolName: originalTool.name,
+ originalTokens: outputTokens,
+ cumulativeToolOutputTokens,
+ });
+ // Calculate current context including all tool outputs so far in this turn
+ // This ensures we account for multiple large tool calls in the same turn
+ const baseContextTokens = turnTokenUsage.inputTokens || 10000;
+ const currentTokens = baseContextTokens + cumulativeToolOutputTokens;
+ const maxTokens = 200000; // Claude's limit
+ // Build proper hook context with all required fields
+ const hookContext = {
+ session: {
+ messages: req.contextMessages || [],
+ context: [],
+ requestParams: {
+ hookSettings: hooks.find((h) => h.type === "tool_response")
+ ?.setting,
+ },
+ },
+ currentTokens,
+ maxTokens,
+ percentage: (currentTokens / maxTokens) * 100,
+ model: this.definition.model,
+ toolResponse: {
+ toolCallId: "pending",
+ toolName: originalTool.name,
+ toolInput: input,
+ rawOutput,
+ outputTokens,
+ },
+ };
+ // Call the tool response compactor directly
+ const hookResult = await toolResponseCompactor(hookContext);
+ // Extract modified output from metadata
+ if (hookResult?.metadata?.modifiedOutput) {
+ const modifiedOutput = hookResult.metadata
+ .modifiedOutput;
+ const compactedTokens = countToolResultTokens(modifiedOutput);
+ // Update cumulative total with the compacted size (not original!)
+ cumulativeToolOutputTokens += compactedTokens;
+ _logger.info("Tool wrapper: compaction complete", {
+ toolName: originalTool.name,
+ originalTokens: outputTokens,
+ compactedTokens,
+ reduction: `${((1 - compactedTokens / outputTokens) * 100).toFixed(1)}%`,
+ totalCumulativeTokens: cumulativeToolOutputTokens,
+ });
+ return typeof result === "string"
+ ? modifiedOutput.content
+ : JSON.stringify(modifiedOutput);
+ }
+ // No compaction happened, count original size
+ cumulativeToolOutputTokens += outputTokens;
+ return result;
+ };
+ // Create new tool with wrapped function
+ const wrappedTool = tool(wrappedFunc, {
+ name: originalTool.name,
+ description: originalTool.description,
+ schema: originalTool.schema,
+ });
+ // Preserve metadata
+ wrappedTool.prettyName = originalTool.prettyName;
+ wrappedTool.icon = originalTool.icon;
+ return wrappedTool;
+ });
+ }
  // Filter tools if running in subagent mode
  const isSubagent = req.sessionMeta?.[SUBAGENT_MODE_KEY] === true;
  const finalTools = isSubagent
- ? enabledTools.filter((t) => t.name !== TODO_WRITE_TOOL_NAME && t.name !== TASK_TOOL_NAME)
- : enabledTools;
+ ? wrappedTools.filter((t) => t.name !== TODO_WRITE_TOOL_NAME && t.name !== TASK_TOOL_NAME)
+ : wrappedTools;
  // Create the model instance using the factory
  // This detects the provider from the model string:
  // - "gemini-2.0-flash" → Google Generative AI
@@ -187,79 +279,6 @@ export class LangchainAgent {
  const agent = createAgent(agentConfig);
  // Add logging callbacks for model requests
  const provider = detectProvider(this.definition.model);
- const loggingCallback = {
- handleChatModelStart: async (_llm, messages, runId, parentRunId, extraParams) => {
- _logger.info("Model request started", {
- provider,
- model: this.definition.model,
- runId,
- parentRunId,
- messageCount: messages.length,
- extraParams,
- });
- },
- handleLLMEnd: async (output, runId, parentRunId, tags, extraParams) => {
- // Extract token usage from output
- const llmResult = output;
- _logger.info("Model request completed", {
- provider,
- model: this.definition.model,
- runId,
- parentRunId,
- tags,
- tokenUsage: llmResult.llmOutput?.tokenUsage,
- generationCount: llmResult.generations?.length,
- extraParams,
- });
- },
- handleLLMError: async (error, runId, parentRunId, tags) => {
- _logger.error("Model request failed", {
- provider,
- model: this.definition.model,
- runId,
- parentRunId,
- tags,
- error: error.message,
- stack: error.stack,
- });
- },
- handleToolStart: async (_tool, input, runId, parentRunId, tags, metadata, runName) => {
- if (process.env.DEBUG_TELEMETRY === "true") {
- console.log(`[handleToolStart] runId=${runId}, runName=${runName}, parentRunId=${parentRunId}`);
- console.log(`[handleToolStart] Active context span:`, trace.getSpan(context.active())?.spanContext());
- }
- _logger.info("Tool started", {
- runId,
- parentRunId,
- runName,
- tags,
- metadata,
- input: input.substring(0, 200), // Truncate for logging
- });
- },
- handleToolEnd: async (_output, runId, parentRunId, tags) => {
- if (process.env.DEBUG_TELEMETRY === "true") {
- console.log(`[handleToolEnd] runId=${runId}, parentRunId=${parentRunId}`);
- }
- _logger.info("Tool completed", {
- runId,
- parentRunId,
- tags,
- });
- },
- handleToolError: async (error, runId, parentRunId, tags) => {
- if (process.env.DEBUG_TELEMETRY === "true") {
- console.log(`[handleToolError] runId=${runId}, error=${error.message}`);
- }
- _logger.error("Tool failed", {
- runId,
- parentRunId,
- tags,
- error: error.message,
- stack: error.stack,
- });
- },
- };
  // Build messages from context history if available, otherwise use just the prompt
  let messages;
  if (req.contextMessages && req.contextMessages.length > 0) {
@@ -303,7 +322,7 @@ export class LangchainAgent {
  const stream = context.with(invocationContext, () => agent.stream({ messages }, {
  streamMode: ["updates", "messages"],
  recursionLimit: 200,
- callbacks: [loggingCallback, otelCallbacks],
+ callbacks: [otelCallbacks],
  }));
  for await (const streamItem of await stream) {
  const [streamMode, chunk] = streamItem;
@@ -345,9 +364,12 @@ export class LangchainAgent {
  }
  // Create tool span within the invocation context
  // This makes the tool span a child of the invocation span
+ const toolInputJson = JSON.stringify(toolCall.args);
  const toolSpan = context.with(invocationContext, () => telemetry.startSpan("agent.tool_call", {
  "tool.name": toolCall.name,
  "tool.id": toolCall.id,
+ "tool.input": toolInputJson,
+ "agent.session_id": req.sessionId,
  }));
  this.toolSpans.set(toolCall.id, toolSpan);
  telemetry.log("info", `Tool call started: ${toolCall.name}`, {
@@ -534,6 +556,10 @@ export class LangchainAgent {
  // End telemetry span for this tool call
  const toolSpan = this.toolSpans.get(aiMessage.tool_call_id);
  if (toolSpan) {
+ // Add tool output to span before ending
+ telemetry.setSpanAttributes(toolSpan, {
+ "tool.output": aiMessage.content,
+ });
  telemetry.log("info", "Tool call completed", {
  toolCallId: aiMessage.tool_call_id,
  });

package/dist/runner/langchain/otel-callbacks.js
@@ -1,4 +1,4 @@
- import { context } from "@opentelemetry/api";
+ import { context, trace } from "@opentelemetry/api";
  import { telemetry } from "../../telemetry/index.js";
  /**
  * OpenTelemetry callback handler for LangChain LLM calls.
@@ -45,6 +45,38 @@ function extractSystemPrompt(messages) {
  return undefined;
  }
  }
+ /**
+ * Serializes LLM output to a string for logging.
+ * Preserves the raw provider format (content blocks, tool_calls, etc.)
+ */
+ function serializeOutput(output) {
+ try {
+ const generations = output.generations.flat();
+ const serialized = generations.map((gen) => {
+ // ChatGeneration has a message property with the full AIMessage
+ const chatGen = gen;
+ if (chatGen.message) {
+ const msg = chatGen.message;
+ const result = {
+ role: msg._getType?.() ?? "assistant",
+ content: msg.content, // Keep as-is: string or ContentBlock[]
+ };
+ // Include tool_calls if present (LangChain's normalized format)
+ const aiMsg = msg;
+ if (aiMsg.tool_calls && aiMsg.tool_calls.length > 0) {
+ result.tool_calls = aiMsg.tool_calls;
+ }
+ return result;
+ }
+ // Fallback for non-chat generations
+ return { text: gen.text };
+ });
+ return JSON.stringify(serialized);
+ }
+ catch (error) {
+ return `[Error serializing output: ${error}]`;
+ }
+ }
  /**
  * Creates OpenTelemetry callback handlers for LangChain LLM calls.
  * These handlers instrument model invocations with OTEL spans and record token usage.
@@ -84,6 +116,18 @@ export function makeOtelCallbacks(opts) {
  }));
  if (span) {
  spansByRunId.set(runId, span);
+ // Emit log for LLM request with trace context
+ const spanContext = span.spanContext();
+ telemetry.log("info", "LLM Request", {
+ "gen_ai.operation.name": "chat",
+ "gen_ai.provider.name": opts.provider,
+ "gen_ai.request.model": opts.model,
+ "gen_ai.input.messages": serializedMessages,
+ "langchain.run_id": runId,
+ // Include trace context for correlation
+ trace_id: spanContext.traceId,
+ span_id: spanContext.spanId,
+ });
  }
  },
  /**
@@ -105,6 +149,28 @@ export function makeOtelCallbacks(opts) {
  : 0);
  telemetry.recordTokenUsage(inputTokens, outputTokens, span);
  }
+ // Serialize output and attach to span
+ const serializedOutput = serializeOutput(output);
+ telemetry.setSpanAttributes(span, {
+ "gen_ai.output.messages": serializedOutput,
+ });
+ // Emit log for LLM response with trace context
+ const spanContext = span.spanContext();
+ telemetry.log("info", "LLM Response", {
+ "gen_ai.operation.name": "chat",
+ "gen_ai.output.messages": serializedOutput,
+ "langchain.run_id": runId,
+ // Include token usage in log
+ ...(tokenUsage
+ ? {
+ "gen_ai.usage.input_tokens": tokenUsage.inputTokens ?? 0,
+ "gen_ai.usage.output_tokens": tokenUsage.outputTokens ?? 0,
+ }
+ : {}),
+ // Include trace context for correlation
+ trace_id: spanContext.traceId,
+ span_id: spanContext.spanId,
+ });
  telemetry.endSpan(span);
  spansByRunId.delete(runId);
  },

package/dist/runner/langchain/tools/bash.d.ts
@@ -0,0 +1,14 @@
+ import { z } from "zod";
+ export declare function makeBashTool(workingDirectory?: string): import("langchain").DynamicStructuredTool<z.ZodObject<{
+ command: z.ZodString;
+ timeout: z.ZodOptional<z.ZodNumber>;
+ description: z.ZodOptional<z.ZodString>;
+ }, z.core.$strip>, {
+ command: string;
+ timeout?: number | undefined;
+ description?: string | undefined;
+ }, {
+ command: string;
+ timeout?: number | undefined;
+ description?: string | undefined;
+ }, unknown>;

package/dist/runner/langchain/tools/bash.js
@@ -0,0 +1,135 @@
+ import { spawn } from "node:child_process";
+ import { once } from "node:events";
+ import * as path from "node:path";
+ import { SandboxManager, } from "@anthropic-ai/sandbox-runtime";
+ import { tool } from "langchain";
+ import { z } from "zod";
+ /**
+ * Lazily initialize Sandbox Runtime with write access limited to workingDirectory.
+ */
+ let initialized = false;
+ async function ensureSandbox(workingDirectory) {
+ if (initialized)
+ return;
+ const cfg = {
+ network: {
+ // No outbound network needed for basic bash commands; block by default.
+ allowedDomains: [],
+ deniedDomains: [],
+ },
+ filesystem: {
+ // Allow writes only within the configured sandbox directory.
+ allowWrite: [workingDirectory],
+ denyWrite: [],
+ // Optional: harden reads a bit (deny common sensitive dirs)
+ denyRead: ["~/.ssh", "~/.gnupg", "/etc/ssh"],
+ },
+ };
+ await SandboxManager.initialize(cfg);
+ initialized = true;
+ }
+ /** Run a command string inside the sandbox, returning { stdout, stderr, code, timedOut }. */
+ async function runSandboxed(cmd, cwd, timeoutMs) {
+ const wrapped = await SandboxManager.wrapWithSandbox(cmd);
+ const child = spawn(wrapped, { shell: true, cwd });
+ const stdout = [];
+ const stderr = [];
+ child.stdout?.on("data", (d) => stdout.push(Buffer.from(d)));
+ child.stderr?.on("data", (d) => stderr.push(Buffer.from(d)));
+ let timedOut = false;
+ let timeoutHandle;
+ // Set up timeout if specified
+ if (timeoutMs !== undefined && timeoutMs > 0) {
+ timeoutHandle = setTimeout(() => {
+ timedOut = true;
+ child.kill("SIGTERM");
+ // If process doesn't exit after SIGTERM, force kill after 1 second
+ setTimeout(() => {
+ if (!child.killed) {
+ child.kill("SIGKILL");
+ }
+ }, 1000);
+ }, timeoutMs);
+ }
+ const [code] = (await once(child, "exit"));
+ if (timeoutHandle) {
+ clearTimeout(timeoutHandle);
+ }
+ return {
+ stdout: Buffer.concat(stdout),
+ stderr: Buffer.concat(stderr),
+ code: code ?? 1,
+ timedOut,
+ };
+ }
+ /** Truncate output if it exceeds the maximum length. */
+ function truncateOutput(output, maxLength = 30000) {
+ if (output.length <= maxLength) {
+ return output;
+ }
+ return `${output.slice(0, maxLength)}\n\n... (output truncated, ${output.length - maxLength} characters omitted)`;
+ }
+ export function makeBashTool(workingDirectory) {
+ const resolvedWd = path.resolve(workingDirectory ?? process.cwd());
+ const bash = tool(async ({ command, timeout, description }) => {
+ await ensureSandbox(resolvedWd);
+ // Validate timeout
+ const DEFAULT_TIMEOUT = 120000; // 2 minutes
+ const MAX_TIMEOUT = 600000; // 10 minutes
+ let timeoutMs = timeout ?? DEFAULT_TIMEOUT;
+ if (timeoutMs > MAX_TIMEOUT) {
+ throw new Error(`Timeout cannot exceed ${MAX_TIMEOUT}ms (10 minutes), got ${timeoutMs}ms`);
+ }
+ if (timeoutMs < 0) {
+ throw new Error(`Timeout must be positive, got ${timeoutMs}ms`);
+ }
+ // Execute the command
+ const startTime = Date.now();
+ const { stdout, stderr, code, timedOut } = await runSandboxed(command, resolvedWd, timeoutMs);
+ const executionTime = Date.now() - startTime;
+ // Convert buffers to strings
+ const stdoutStr = stdout.toString("utf8");
+ const stderrStr = stderr.toString("utf8");
+ // Truncate outputs if necessary
+ const truncatedStdout = truncateOutput(stdoutStr);
+ const truncatedStderr = truncateOutput(stderrStr);
+ // Build result message
+ let result = "";
+ if (timedOut) {
+ result += `Command timed out after ${timeoutMs}ms\n\n`;
+ }
+ if (truncatedStdout) {
+ result += truncatedStdout;
+ }
+ if (truncatedStderr) {
+ if (result)
+ result += "\n\n";
+ result += `STDERR:\n${truncatedStderr}`;
+ }
+ if (!truncatedStdout && !truncatedStderr) {
+ result = "(no output)";
+ }
+ // Add exit code info
+ result += `\n\nExit code: ${code}`;
+ if (description) {
+ result += `\nDescription: ${description}`;
+ }
+ result += `\nExecution time: ${executionTime}ms`;
+ return result;
+ }, {
+ name: "Bash",
+ description: 'Executes a given bash command in a persistent shell session with optional timeout, ensuring proper handling and security measures.\n\nBefore executing the command, please follow these steps:\n\n1. Directory Verification:\n - If the command will create new directories or files, first use `ls` to verify the parent directory exists and is the correct location\n - For example, before running "mkdir foo/bar", first use `ls foo` to check that "foo" exists and is the intended parent directory\n\n2. Command Execution:\n - Always quote file paths that contain spaces with double quotes (e.g., cd "path with spaces/file.txt")\n - Examples of proper quoting:\n - cd "/Users/name/My Documents" (correct)\n - cd /Users/name/My Documents (incorrect - will fail)\n - python "/path/with spaces/script.py" (correct)\n - python /path/with spaces/script.py (incorrect - will fail)\n - After ensuring proper quoting, execute the command.\n - Capture the output of the command.\n\nUsage notes:\n - The command argument is required.\n - You can specify an optional timeout in milliseconds (up to 600000ms / 10 minutes). If not specified, commands will timeout after 120000ms (2 minutes).\n - It is very helpful if you write a clear, concise description of what this command does in 5-10 words.\n - If the output exceeds 30000 characters, output will be truncated before being returned to you.\n - You can use the `run_in_background` parameter to run the command in the background, which allows you to continue working while the command runs. You can monitor the output using the Bash tool as it becomes available. Never use `run_in_background` to run \'sleep\' as it will return immediately. You do not need to use \'&\' at the end of the command when using this parameter.\n - VERY IMPORTANT: You MUST avoid using search commands like `find` and `grep`. Instead use Grep, Glob, or Task to search. You MUST avoid read tools like `cat`, `head`, and `tail`, and use Read to read files.\n - If you _still_ need to run `grep`, STOP. ALWAYS USE ripgrep at `rg` first, which all Claude Code users have pre-installed.\n - When issuing multiple commands, use the \';\' or \'&&\' operator to separate them. DO NOT use newlines (newlines are ok in quoted strings).\n - Try to maintain your current working directory throughout the session by using absolute paths and avoiding usage of `cd`. You may use `cd` if the User explicitly requests it.\n <good-example>\n pytest /foo/bar/tests\n </good-example>\n <bad-example>\n cd /foo/bar && pytest tests\n </bad-example>\n\n# Committing changes with git\n\nWhen the user asks you to create a new git commit, follow these steps carefully:\n\n1. You have the capability to call multiple tools in a single response. When multiple independent pieces of information are requested, batch your tool calls together for optimal performance. ALWAYS run the following bash commands in parallel, each using the Bash tool:\n - Run a git status command to see all untracked files.\n - Run a git diff command to see both staged and unstaged changes that will be committed.\n - Run a git log command to see recent commit messages, so that you can follow this repository\'s commit message style.\n2. Analyze all staged changes (both previously staged and newly added) and draft a commit message:\n - Summarize the nature of the changes (eg. new feature, enhancement to an existing feature, bug fix, refactoring, test, docs, etc.). Ensure the message accurately reflects the changes and their purpose (i.e. "add" means a wholly new feature, "update" means an enhancement to an existing feature, "fix" means a bug fix, etc.).\n - Check for any sensitive information that shouldn\'t be committed\n - Draft a concise (1-2 sentences) commit message that focuses on the "why" rather than the "what"\n - Ensure it accurately reflects the changes and their purpose\n3. You have the capability to call multiple tools in a single response. When multiple independent pieces of information are requested, batch your tool calls together for optimal performance. ALWAYS run the following commands in parallel:\n - Add relevant untracked files to the staging area.\n - Create the commit with a message ending with:\n 🤖 Generated with [Claude Code](https://claude.ai/code)\n\n Co-Authored-By: Claude <noreply@anthropic.com>\n - Run git status to make sure the commit succeeded.\n4. If the commit fails due to pre-commit hook changes, retry the commit ONCE to include these automated changes. If it fails again, it usually means a pre-commit hook is preventing the commit. If the commit succeeds but you notice that files were modified by the pre-commit hook, you MUST amend your commit to include them.\n\nImportant notes:\n- NEVER update the git config\n- NEVER run additional commands to read or explore code, besides git bash commands\n- NEVER use the TodoWrite or Task tools\n- DO NOT push to the remote repository unless the user explicitly asks you to do so\n- IMPORTANT: Never use git commands with the -i flag (like git rebase -i or git add -i) since they require interactive input which is not supported.\n- If there are no changes to commit (i.e., no untracked files and no modifications), do not create an empty commit\n- In order to ensure good formatting, ALWAYS pass the commit message via a HEREDOC, a la this example:\n<example>\ngit commit -m "$(cat <<\'EOF\'\n Commit message here.\n\n 🤖 Generated with [Claude Code](https://claude.ai/code)\n\n Co-Authored-By: Claude <noreply@anthropic.com>\n EOF\n )"\n</example>\n\n# Creating pull requests\nUse the gh command via the Bash tool for ALL GitHub-related tasks including working with issues, pull requests, checks, and releases. If given a Github URL use the gh command to get the information needed.\n\nIMPORTANT: When the user asks you to create a pull request, follow these steps carefully:\n\n1. You have the capability to call multiple tools in a single response. When multiple independent pieces of information are requested, batch your tool calls together for optimal performance. ALWAYS run the following bash commands in parallel using the Bash tool, in order to understand the current state of the branch since it diverged from the main branch:\n - Run a git status command to see all untracked files\n - Run a git diff command to see both staged and unstaged changes that will be committed\n - Check if the current branch tracks a remote branch and is up to date with the remote, so you know if you need to push to the remote\n - Run a git log command and `git diff [base-branch]...HEAD` to understand the full commit history for the current branch (from the time it diverged from the base branch)\n2. Analyze all changes that will be included in the pull request, making sure to look at all relevant commits (NOT just the latest commit, but ALL commits that will be included in the pull request!!!), and draft a pull request summary\n3. You have the capability to call multiple tools in a single response. When multiple independent pieces of information are requested, batch your tool calls together for optimal performance. ALWAYS run the following commands in parallel:\n - Create new branch if needed\n - Push to remote with -u flag if needed\n - Create PR using gh pr create with the format below. Use a HEREDOC to pass the body to ensure correct formatting.\n<example>\ngh pr create --title "the pr title" --body "$(cat <<\'EOF\'\n## Summary\n<1-3 bullet points>\n\n## Test plan\n[Checklist of TODOs for testing the pull request...]\n\n🤖 Generated with [Claude Code](https://claude.ai/code)\nEOF\n)"\n</example>\n\nImportant:\n- NEVER update the git config\n- DO NOT use the TodoWrite or Task tools\n- Return the PR URL when you\'re done, so the user can see it\n\n# Other common operations\n- View comments on a Github PR: gh api repos/foo/bar/pulls/123/comments',
+ schema: z.object({
+ command: z.string().describe("The command to execute"),
+ timeout: z
+ .number()
+ .optional()
+ .describe("Optional timeout in milliseconds (max 600000)"),
+ description: z
+ .string()
+ .optional()
+ .describe("Clear, concise description of what this command does in 5-10 words, in active voice. Examples:\nInput: ls\nOutput: List files in current directory\n\nInput: git status\nOutput: Show working tree status\n\nInput: npm install\nOutput: Install package dependencies\n\nInput: mkdir foo\nOutput: Create directory 'foo'"),
+ }),
+ });
+ return bash;
+ }

package/dist/scaffold/link-local.d.ts
@@ -0,0 +1 @@
+ export declare function linkLocalPackages(projectPath: string): Promise<void>;

package/dist/scaffold/link-local.js
@@ -0,0 +1,54 @@
+ import { exists } from "node:fs/promises";
+ import { join } from "node:path";
+ import { $ } from "bun";
+ const PACKAGE_PATHS = {
+ "@townco/ui": "packages/ui",
+ "@townco/core": "packages/core",
+ "@townco/tsconfig": "packages/tsconfig",
+ "@townco/tui-template": "apps/tui",
+ "@townco/gui-template": "apps/gui",
+ "@townco/secret": "packages/secret",
+ "@townco/agent": "packages/agent",
+ "@townco/cli": "apps/cli",
+ };
+ async function getMonorepoRoot() {
+ try {
+ // 1. Get git repo root
+ const result = await $ `git rev-parse --show-toplevel`.quiet();
+ const repoRoot = result.text().trim();
+ // 2. Check package.json name === "town"
+ const pkgJsonPath = join(repoRoot, "package.json");
+ const pkgJson = await Bun.file(pkgJsonPath).json();
+ if (pkgJson.name !== "town")
+ return null;
+ // 3. Check packages/agent and packages/ui exist
+ const agentExists = await exists(join(repoRoot, "packages/agent"));
+ const uiExists = await exists(join(repoRoot, "packages/ui"));
+ if (!agentExists || !uiExists)
+ return null;
+ return repoRoot;
+ }
+ catch {
+ return null;
+ }
+ }
+ export async function linkLocalPackages(projectPath) {
+ const repoRoot = await getMonorepoRoot();
+ if (!repoRoot)
+ return; // Not in monorepo, no-op
+ console.log("Detected town monorepo, linking local packages...");
+ // 1. Register each local package globally
+ for (const [, localPath] of Object.entries(PACKAGE_PATHS)) {
+ const pkgPath = join(repoRoot, localPath);
+ await $ `bun link`.cwd(pkgPath).quiet();
+ }
+ // 2. Parse project's package.json for @townco/* deps
+ const pkgJson = await Bun.file(join(projectPath, "package.json")).json();
+ const deps = { ...pkgJson.dependencies, ...pkgJson.devDependencies };
+ const towncoPackages = Object.keys(deps).filter((name) => name.startsWith("@townco/"));
+ // 3. Link each package in the project
+ for (const pkgName of towncoPackages) {
+ await $ `bun link ${pkgName}`.cwd(projectPath);
+ console.log(`Linked ${pkgName}`);
+ }
+ }

package/dist/scaffold/project-scaffold.js
@@ -21,6 +21,7 @@ function generateProjectPackageJson() {
  "@radix-ui/react-select": "^2.2.6",
  "@radix-ui/react-slot": "^1.2.4",
  "@radix-ui/react-tabs": "^1.1.13",
+ "@townco/core": "~0.0.23",
  "@townco/ui": "^0.1.0",
  "@townco/agent": "^0.1.20",
  "class-variance-authority": "^0.7.1",

package/dist/telemetry/setup.d.ts
@@ -1,7 +1,8 @@
  /**
  * OpenTelemetry provider setup for @townco/agent
- * Initializes the trace provider, exporter, and propagator
+ * Initializes the trace provider, log provider, exporters, and propagator
  */
+ import { LoggerProvider } from "@opentelemetry/sdk-logs";
  import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
  export interface TelemetrySetupOptions {
  serviceName?: string;
@@ -14,6 +15,7 @@ export interface TelemetrySetupOptions {
  */
  export declare function initializeOpenTelemetry(options?: TelemetrySetupOptions): {
  provider: NodeTracerProvider;
+ loggerProvider: LoggerProvider;
  shutdown: () => Promise<void>;
  };
  /**

package/dist/telemetry/setup.js
@@ -1,11 +1,14 @@
  /**
  * OpenTelemetry provider setup for @townco/agent
- * Initializes the trace provider, exporter, and propagator
+ * Initializes the trace provider, log provider, exporters, and propagator
  */
  import { propagation } from "@opentelemetry/api";
+ import { logs } from "@opentelemetry/api-logs";
  import { W3CTraceContextPropagator } from "@opentelemetry/core";
+ import { OTLPLogExporter } from "@opentelemetry/exporter-logs-otlp-http";
  import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-http";
  import { Resource } from "@opentelemetry/resources";
+ import { BatchLogRecordProcessor, LoggerProvider, } from "@opentelemetry/sdk-logs";
  import { BatchSpanProcessor } from "@opentelemetry/sdk-trace-base";
  import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
  import { ATTR_SERVICE_NAME } from "@opentelemetry/semantic-conventions";
@@ -83,13 +86,38 @@ export function initializeOpenTelemetry(options = {}) {
  const batchProcessor = new BatchSpanProcessor(loggingExporter, {
  maxQueueSize: 100,
  maxExportBatchSize: 10,
- scheduledDelayMillis: 5000, // Export every 5 seconds (default)
+ scheduledDelayMillis: 2000, // Export every 2 seconds
  });
  provider.addSpanProcessor(batchProcessor);
  // Register the provider globally
  provider.register();
  // Configure W3C Trace Context propagator for cross-process traces
  propagation.setGlobalPropagator(new W3CTraceContextPropagator());
+ // Set up LoggerProvider for OTLP log export
+ const logUrl = otlpEndpoint.endsWith("/")
+ ? `${otlpEndpoint}v1/logs`
+ : `${otlpEndpoint}/v1/logs`;
+ if (debug) {
+ console.log(`OTLP log URL: ${logUrl}`);
+ }
+ const logExporter = new OTLPLogExporter({
+ url: logUrl,
+ });
+ const loggerProvider = new LoggerProvider({
+ resource: new Resource({
+ [ATTR_SERVICE_NAME]: serviceName,
+ }),
+ });
+ loggerProvider.addLogRecordProcessor(new BatchLogRecordProcessor(logExporter, {
+ maxQueueSize: 100,
+ maxExportBatchSize: 10,
+ scheduledDelayMillis: 2000,
+ }));
+ // Register the logger provider globally
+ logs.setGlobalLoggerProvider(loggerProvider);
+ if (debug) {
+ console.log("✓ Log exporter configured");
+ }
  // Now configure our telemetry wrapper
  configureTelemetry({
  enabled: true,
@@ -106,6 +134,8 @@ export function initializeOpenTelemetry(options = {}) {
  try {
  await provider.forceFlush();
  await provider.shutdown();
+ await loggerProvider.forceFlush();
+ await loggerProvider.shutdown();
  if (debug) {
  console.log("✓ Telemetry flushed");
  }
@@ -114,7 +144,7 @@ export function initializeOpenTelemetry(options = {}) {
  console.error("Error flushing telemetry:", error);
  }
  };
- return { provider, shutdown };
+ return { provider, loggerProvider, shutdown };
  }
  /**
  * Initialize OpenTelemetry from environment variables and register shutdown handlers

package/dist/templates/index.d.ts
@@ -23,6 +23,13 @@ export interface TemplateVars {
  threshold: number;
  } | undefined;
  callback: string;
+ } | {
+ type: "tool_response";
+ setting?: {
+ maxContextThreshold?: number | undefined;
+ responseTruncationThreshold?: number | undefined;
+ } | undefined;
+ callback: string;
  }> | undefined;
  }
  export declare function getTemplateVars(name: string, definition: AgentDefinition): TemplateVars;

package/dist/test-telemetry.d.ts
@@ -0,0 +1,5 @@
+ /**
+ * Simple test script to verify OpenTelemetry is working
+ * Run with: ENABLE_TELEMETRY=true DEBUG_TELEMETRY=true bun test-telemetry.ts
+ */
+ export {};