@townco/agent 0.1.84 → 0.1.87

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/dist/acp-server/adapter.d.ts +0 -8
  2. package/dist/definition/index.d.ts +0 -2
  3. package/dist/definition/index.js +0 -1
  4. package/dist/runner/agent-runner.d.ts +1 -2
  5. package/dist/runner/hooks/executor.d.ts +4 -8
  6. package/dist/runner/hooks/executor.js +9 -1
  7. package/dist/runner/hooks/predefined/document-context-extractor/chunk-manager.d.ts +37 -0
  8. package/dist/runner/hooks/predefined/document-context-extractor/chunk-manager.js +134 -0
  9. package/dist/runner/hooks/predefined/document-context-extractor/content-extractor.d.ts +20 -0
  10. package/dist/runner/hooks/predefined/document-context-extractor/content-extractor.js +171 -0
  11. package/dist/runner/hooks/predefined/document-context-extractor/extraction-state.d.ts +57 -0
  12. package/dist/runner/hooks/predefined/document-context-extractor/extraction-state.js +126 -0
  13. package/dist/runner/hooks/predefined/document-context-extractor/index.d.ts +22 -0
  14. package/dist/runner/hooks/predefined/document-context-extractor/index.js +338 -0
  15. package/dist/runner/hooks/predefined/document-context-extractor/relevance-scorer.d.ts +19 -0
  16. package/dist/runner/hooks/predefined/document-context-extractor/relevance-scorer.js +156 -0
  17. package/dist/runner/hooks/predefined/document-context-extractor/types.d.ts +130 -0
  18. package/dist/runner/hooks/predefined/document-context-extractor/types.js +8 -0
  19. package/dist/runner/hooks/predefined/tool-response-compactor.js +77 -212
  20. package/dist/runner/hooks/types.d.ts +15 -8
  21. package/dist/runner/index.d.ts +51 -2
  22. package/dist/runner/langchain/index.js +27 -0
  23. package/dist/runner/langchain/tools/artifacts.d.ts +6 -6
  24. package/dist/runner/langchain/tools/artifacts.js +98 -93
  25. package/dist/runner/langchain/tools/browser.d.ts +9 -9
  26. package/dist/runner/langchain/tools/document_extract.d.ts +26 -0
  27. package/dist/runner/langchain/tools/document_extract.js +135 -0
  28. package/dist/runner/langchain/tools/filesystem.d.ts +3 -3
  29. package/dist/runner/langchain/tools/generate_image.d.ts +8 -8
  30. package/dist/runner/langchain/tools/todo.d.ts +10 -10
  31. package/dist/runner/tools.d.ts +2 -2
  32. package/dist/runner/tools.js +1 -0
  33. package/dist/scaffold/project-scaffold.js +4 -4
  34. package/dist/templates/index.d.ts +0 -1
  35. package/dist/tsconfig.tsbuildinfo +1 -1
  36. package/package.json +11 -13
  37. package/templates/index.ts +0 -1

package/dist/runner/hooks/predefined/tool-response-compactor.js

@@ -2,6 +2,7 @@ import Anthropic from "@anthropic-ai/sdk";
  import { createLogger } from "../../../logger.js";
  import { telemetry } from "../../../telemetry/index.js";
  import { countToolResultTokens } from "../../../utils/token-counter.js";
+ import { extractDocumentContext } from "./document-context-extractor/index.js";
  const logger = createLogger("tool-response-compactor");
  // Create Anthropic client directly (not using LangChain)
  // This ensures compaction LLM calls don't get captured by LangGraph's streaming
@@ -32,7 +33,6 @@ export const toolResponseCompactor = async (ctx) => {
  // Get settings from hook configuration
  const settings = ctx.session.requestParams.hookSettings;
  const maxTokensSize = settings?.maxTokensSize ?? 20000; // Default: 20000 tokens
- const responseTruncationThreshold = settings?.responseTruncationThreshold ?? 30;
  // Use maxTokensSize directly as it's now in tokens
  const maxAllowedResponseSize = maxTokensSize;
  // Calculate available space in context
@@ -41,7 +41,9 @@ export const toolResponseCompactor = async (ctx) => {
  const effectiveMaxResponseSize = availableSpace < maxAllowedResponseSize
  ? Math.floor(availableSpace * 0.9)
  : maxAllowedResponseSize;
- const compactionLimit = COMPACTION_MODEL_CONTEXT * (responseTruncationThreshold / 100);
+ // Calculate compaction limit: max response size that can fit in a single LLM compaction call
+ const COMPACTION_OVERHEAD = 10000;
+ const compactionLimit = Math.floor((COMPACTION_MODEL_CONTEXT - COMPACTION_OVERHEAD) * 0.9); // ~175K tokens
  logger.info("Tool response compaction hook triggered", {
  toolCallId,
  toolName,
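
For scale: the doc comment on the removed responseTruncationThreshold setting (see the types.d.ts hunk further down) puts the compaction model's context at 200K tokens (Haiku). Under that assumption the old and new limits work out roughly as below; the inline "~175K" comment rounds up a little.

    // Rough arithmetic, assuming COMPACTION_MODEL_CONTEXT = 200_000 (Haiku), per the removed setting's doc comment.
    const COMPACTION_MODEL_CONTEXT = 200_000;
    const COMPACTION_OVERHEAD = 10_000;
    // 0.1.84: percentage of the model context, default responseTruncationThreshold = 30
    const oldCompactionLimit = COMPACTION_MODEL_CONTEXT * (30 / 100); // 60,000 tokens
    // 0.1.87: full context minus a fixed overhead, with a 10% safety margin
    const newCompactionLimit = Math.floor((COMPACTION_MODEL_CONTEXT - COMPACTION_OVERHEAD) * 0.9); // 171,000 tokens
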
@@ -66,14 +68,12 @@ export const toolResponseCompactor = async (ctx) => {
  },
  };
  }
- // Response would exceed threshold, need to compact or truncate
- // Determine target size: use effectiveMaxResponseSize, but cap at compactionLimit for truncation
+ // Response would exceed threshold, need to compact or extract
+ // Target size is the effectiveMaxResponseSize (what we want the final output to be)
  // IMPORTANT: If context is already very full, availableSpace might be very small
  // In that case, use a minimum reasonable target size (e.g., 10% of the output or 1000 tokens)
  const minTargetSize = Math.max(Math.floor(outputTokens * 0.1), 1000);
- const targetSize = effectiveMaxResponseSize > 0
- ? Math.min(effectiveMaxResponseSize, compactionLimit)
- : minTargetSize;
+ const targetSize = effectiveMaxResponseSize > 0 ? effectiveMaxResponseSize : minTargetSize;
  logger.info("Calculated target size for compaction", {
  availableSpace,
  effectiveMaxResponseSize,
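
A quick worked example of the new target-size rule, with made-up numbers: the cap at compactionLimit is gone, so the target now tracks the available context space directly.

    // Hypothetical numbers, only to illustrate the 0.1.87 targetSize rule.
    const outputTokens = 50_000;            // raw tool response size
    const availableSpace = 15_000;          // free space left in the context window
    const maxAllowedResponseSize = 20_000;  // the maxTokensSize default
    const effectiveMaxResponseSize =
      availableSpace < maxAllowedResponseSize
        ? Math.floor(availableSpace * 0.9)  // 13,500
        : maxAllowedResponseSize;
    const minTargetSize = Math.max(Math.floor(outputTokens * 0.1), 1000); // 5,000
    // 0.1.84 used Math.min(effectiveMaxResponseSize, compactionLimit); 0.1.87 drops the cap:
    const targetSize = effectiveMaxResponseSize > 0 ? effectiveMaxResponseSize : minTargetSize; // 13,500
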
@@ -82,73 +82,79 @@ export const toolResponseCompactor = async (ctx) => {
  targetSize,
  contextAlreadyOverThreshold: availableSpace <= maxAllowedResponseSize,
  });
- // Case 2: Huge response, must truncate (too large for LLM compaction)
+ // Case 2: Huge response - use document context extractor (with truncation fallback)
  if (outputTokens >= compactionLimit) {
- logger.warn("Tool response exceeds compaction capacity, truncating", {
+ logger.info("Tool response exceeds compaction capacity, using document context extractor", {
  outputTokens,
  compactionLimit,
  targetSize,
  availableSpace,
  });
- const truncated = truncateToolResponse(rawOutput, targetSize);
- const finalTokens = countToolResultTokens(truncated);
- // Verify truncation stayed within boundaries
- if (finalTokens > targetSize) {
- logger.error("Truncation exceeded target size - this should not happen!", {
- finalTokens,
- targetSize,
- excess: finalTokens - targetSize,
- });
- // Try more aggressive truncation (70% of target as emergency measure)
- const emergencySize = Math.floor(targetSize * 0.7);
- const emergencyTruncated = truncateToolResponse(rawOutput, emergencySize);
- const emergencyTokens = countToolResultTokens(emergencyTruncated);
- // Final safety check - if emergency truncation STILL exceeded target, use ultra-conservative fallback
- if (emergencyTokens > targetSize) {
- logger.error("Emergency truncation STILL exceeded target - using ultra-conservative fallback", {
- emergencyTokens,
- targetSize,
- emergencySize,
+ // Build conversation context for extraction
+ const recentMessages = ctx.session.messages.slice(-5);
+ const conversationContext = recentMessages
+ .map((msg) => {
+ const text = msg.content
+ .filter((b) => b.type === "text")
+ .map((b) => (b.type === "text" ? b.text : ""))
+ .join("\n");
+ return `${msg.role}: ${text}`;
+ })
+ .join("\n\n");
+ // Try document context extraction
+ try {
+ const extractionResult = await extractDocumentContext(rawOutput, toolName, toolCallId, toolInput, conversationContext, targetSize, ctx.sessionId ?? "unknown", ctx.storage);
+ if (extractionResult.success && extractionResult.extractedData) {
+ logger.info("Document context extraction succeeded", {
+ originalTokens: outputTokens,
+ finalTokens: extractionResult.extractedTokens,
+ chunksProcessed: extractionResult.metadata.chunksProcessed,
+ chunksExtractedFrom: extractionResult.metadata.chunksExtractedFrom,
  });
- // Ultra-conservative: just return a simple error structure with the raw data sliced to 50% of target
- const ultraConservativeSize = Math.floor(targetSize * 0.5);
  return {
  newContextEntry: null,
  metadata: {
- action: "truncated",
+ action: "compacted",
  originalTokens: outputTokens,
- finalTokens: ultraConservativeSize, // Conservative estimate
- modifiedOutput: {
- _truncation_error: "Tool response was too large and could not be reliably truncated",
- _original_token_count: outputTokens,
- _target_token_count: targetSize,
- _partial_data: JSON.stringify(rawOutput).slice(0, ultraConservativeSize * 3),
- },
- truncationWarning: `Tool response was severely truncated from ${outputTokens.toLocaleString()} to ~${ultraConservativeSize.toLocaleString()} tokens (emergency truncation failed - data may be incomplete)`,
+ finalTokens: extractionResult.extractedTokens,
+ tokensSaved: outputTokens - (extractionResult.extractedTokens ?? 0),
+ modifiedOutput: extractionResult.extractedData,
+ compactionMethod: "document_context_extraction",
+ extractionMetadata: extractionResult.metadata,
  },
  };
  }
- return {
- newContextEntry: null,
- metadata: {
- action: "truncated",
- originalTokens: outputTokens,
- finalTokens: emergencyTokens,
- modifiedOutput: emergencyTruncated,
- truncationWarning: `Tool response was aggressively truncated from ${outputTokens.toLocaleString()} to ${emergencyTokens.toLocaleString()} tokens to fit within context limit (emergency truncation)`,
- },
- };
+ // Extraction failed - throw error to terminate agent loop
+ logger.error("Document context extraction failed", {
+ error: extractionResult.error,
+ phase: extractionResult.metadata.phase,
+ toolName,
+ toolCallId,
+ outputTokens,
+ });
+ throw new Error(`Document context extraction failed for tool "${toolName}": ${extractionResult.error}. ` +
+ `Original response was ${outputTokens.toLocaleString()} tokens. ` +
+ `Full response saved to artifacts.`);
+ }
+ catch (extractionError) {
+ // Re-throw if it's already our error
+ if (extractionError instanceof Error &&
+ extractionError.message.includes("Document context extraction failed")) {
+ throw extractionError;
+ }
+ // Extraction threw an unexpected error - terminate agent loop
+ logger.error("Document context extraction threw an error", {
+ error: extractionError instanceof Error
+ ? extractionError.message
+ : String(extractionError),
+ toolName,
+ toolCallId,
+ outputTokens,
+ });
+ throw new Error(`Document context extraction failed for tool "${toolName}": ${extractionError instanceof Error
+ ? extractionError.message
+ : String(extractionError)}. Original response was ${outputTokens.toLocaleString()} tokens.`);
  }
- return {
- newContextEntry: null,
- metadata: {
- action: "truncated",
- originalTokens: outputTokens,
- finalTokens,
- modifiedOutput: truncated,
- truncationWarning: `Tool response was truncated from ${outputTokens.toLocaleString()} to ${finalTokens.toLocaleString()} tokens to fit within max response size limit (max allowed: ${effectiveMaxResponseSize.toLocaleString()} tokens)`,
- },
- };
  }
  // Case 1: Medium response, intelligent compaction
  logger.info("Tool response requires intelligent compaction", {
@@ -171,28 +177,19 @@ export const toolResponseCompactor = async (ctx) => {
  })
  .join("\n\n");
  const compacted = await compactWithLLM(rawOutput, toolName, toolInput, conversationContext, targetSize);
- let finalTokens = countToolResultTokens(compacted);
+ const finalTokens = countToolResultTokens(compacted);
  // Verify compaction stayed within boundaries
  if (finalTokens > targetSize) {
- logger.warn("LLM compaction exceeded target, falling back to truncation", {
+ logger.error("LLM compaction exceeded target", {
  finalTokens,
  targetSize,
  excess: finalTokens - targetSize,
+ toolName,
+ toolCallId,
  });
- // Fallback to truncation
- const truncated = truncateToolResponse(compacted, targetSize);
- finalTokens = countToolResultTokens(truncated);
- return {
- newContextEntry: null,
- metadata: {
- action: "compacted_then_truncated",
- originalTokens: outputTokens,
- finalTokens,
- tokensSaved: outputTokens - finalTokens,
- modifiedOutput: truncated,
- truncationWarning: `Tool response was compacted then truncated from ${outputTokens.toLocaleString()} to ${finalTokens.toLocaleString()} tokens to fit within context limit`,
- },
- };
+ throw new Error(`LLM compaction for tool "${toolName}" exceeded target size. ` +
+ `Compacted to ${finalTokens.toLocaleString()} tokens but target was ${targetSize.toLocaleString()}. ` +
+ `Original response was ${outputTokens.toLocaleString()} tokens.`);
  }
  logger.info("Successfully compacted tool response", {
  originalTokens: outputTokens,
@@ -212,62 +209,13 @@ export const toolResponseCompactor = async (ctx) => {
  };
  }
  catch (error) {
- logger.error("Compaction failed, falling back to truncation", {
+ logger.error("Compaction failed", {
  error: error instanceof Error ? error.message : String(error),
+ toolName,
+ toolCallId,
+ outputTokens,
  });
- // Fallback to truncation with the same target size
- const truncated = truncateToolResponse(rawOutput, targetSize);
- let finalTokens = countToolResultTokens(truncated);
- // Verify truncation stayed within boundaries
- if (finalTokens > targetSize) {
- logger.error("Fallback truncation exceeded target, using emergency truncation", {
- finalTokens,
- targetSize,
- });
- const emergencySize = Math.floor(targetSize * 0.7);
- const emergencyTruncated = truncateToolResponse(rawOutput, emergencySize);
- finalTokens = countToolResultTokens(emergencyTruncated);
- // Final safety check
- if (finalTokens > targetSize) {
- logger.error("Emergency truncation STILL exceeded target - using ultra-conservative fallback");
- const ultraConservativeSize = Math.floor(targetSize * 0.5);
- return {
- newContextEntry: null,
- metadata: {
- action: "truncated",
- originalTokens: outputTokens,
- finalTokens: ultraConservativeSize,
- modifiedOutput: {
- _truncation_error: "Tool response was too large and could not be reliably truncated (compaction failed)",
- _original_token_count: outputTokens,
- _target_token_count: targetSize,
- _partial_data: JSON.stringify(rawOutput).slice(0, ultraConservativeSize * 3),
- },
- truncationWarning: `Tool response was severely truncated from ${outputTokens.toLocaleString()} to ~${ultraConservativeSize.toLocaleString()} tokens (compaction+emergency truncation failed)`,
- },
- };
- }
- return {
- newContextEntry: null,
- metadata: {
- action: "truncated",
- originalTokens: outputTokens,
- finalTokens,
- modifiedOutput: emergencyTruncated,
- truncationWarning: `Tool response was truncated from ${outputTokens.toLocaleString()} to ${finalTokens.toLocaleString()} tokens (compaction failed, emergency truncation applied)`,
- },
- };
- }
- return {
- newContextEntry: null,
- metadata: {
- action: "truncated",
- originalTokens: outputTokens,
- finalTokens,
- modifiedOutput: truncated,
- truncationWarning: `Tool response was truncated from ${outputTokens.toLocaleString()} to ${finalTokens.toLocaleString()} tokens (compaction failed)`,
- },
- };
+ throw new Error(`LLM compaction failed for tool "${toolName}": ${error instanceof Error ? error.message : String(error)}. Original response was ${outputTokens.toLocaleString()} tokens.`);
  }
  };
  /**
@@ -456,86 +404,3 @@ Return ONLY valid JSON (no explanation text).`;
  });
  return currentData;
  }
- /**
- * Truncate tool response to target token count
- * Uses iterative approach to ensure we stay under the target
- */
- function truncateToolResponse(rawOutput, targetTokens) {
- const currentTokens = countToolResultTokens(rawOutput);
- if (currentTokens <= targetTokens) {
- return rawOutput; // Already within limit
- }
- const outputString = JSON.stringify(rawOutput);
- // Start with 70% of target to leave significant room for closing braces and metadata
- let ratio = 0.7;
- let lastResult = null;
- // Iteratively truncate until we meet the target
- for (let attempt = 0; attempt < 15; attempt++) {
- // Calculate character limit based on ratio
- const targetChars = Math.floor((targetTokens * ratio * outputString.length) / currentTokens);
- // Truncate the JSON string
- let truncated = outputString.slice(0, targetChars);
- // Try to close any open JSON structures
- const openBraces = (truncated.match(/{/g) || []).length;
- const closeBraces = (truncated.match(/}/g) || []).length;
- const openBrackets = (truncated.match(/\[/g) || []).length;
- const closeBrackets = (truncated.match(/\]/g) || []).length;
- truncated += "}".repeat(Math.max(0, openBraces - closeBraces));
- truncated += "]".repeat(Math.max(0, openBrackets - closeBrackets));
- try {
- // Try to parse as valid JSON
- const parsed = JSON.parse(truncated);
- const parsedTokens = countToolResultTokens(parsed);
- // Store the result
- lastResult = { parsed, tokens: parsedTokens };
- if (parsedTokens <= targetTokens) {
- // Success! Add truncation notice
- return {
- ...parsed,
- _truncation_notice: "... [TRUNCATED - response exceeded size limit]",
- _original_token_count: currentTokens,
- _truncated_token_count: parsedTokens,
- };
- }
- // Still too large - calculate how much we need to reduce
- // If we overshot, reduce ratio proportionally to how much we exceeded
- const overshootRatio = parsedTokens / targetTokens; // e.g., 1.03 if we're 3% over
- ratio = (ratio / overshootRatio) * 0.95; // Reduce by overshoot amount plus 5% safety margin
- logger.debug("Truncation attempt resulted in overshoot, retrying", {
- attempt,
- targetTokens,
- parsedTokens,
- overshootRatio,
- newRatio: ratio,
- });
- }
- catch {
- // JSON parse failed, try more aggressive truncation
- ratio *= 0.85;
- }
- }
- // If we exhausted all attempts, return the last successful parse (if any)
- // or a very conservative fallback
- if (lastResult && lastResult.tokens <= targetTokens * 1.1) {
- // Within 10% of target - good enough
- logger.warn("Truncation reached attempt limit but result is close enough", {
- targetTokens,
- actualTokens: lastResult.tokens,
- });
- return {
- ...lastResult.parsed,
- _truncation_notice: "... [TRUNCATED - response exceeded size limit]",
- _original_token_count: currentTokens,
- _truncated_token_count: lastResult.tokens,
- };
- }
- // If all attempts failed, return a simple truncated structure
- const safeChars = Math.floor(targetTokens * 3); // Very conservative
- return {
- truncated: true,
- originalSize: currentTokens,
- targetSize: targetTokens,
- content: outputString.slice(0, safeChars),
- warning: "Response was truncated due to size constraints (JSON parsing failed)",
- };
- }

package/dist/runner/hooks/types.d.ts

@@ -1,5 +1,11 @@
  import type { ContextEntry } from "../../acp-server/session-storage";
  import type { SessionMessage } from "../agent-runner";
+ /**
+ * Storage interface for hooks that need to persist data
+ */
+ export interface HookStorageInterface {
+ getArtifactsDir(sessionId: string): string;
+ }
  /**
  * Hook types supported by the agent system
  */
@@ -19,17 +25,10 @@ export interface ContextSizeSettings {
  export interface ToolResponseSettings {
  /**
  * Maximum size of a tool response in tokens.
- * Tool responses larger than this will trigger compaction.
+ * Tool responses larger than this will trigger compaction/extraction.
  * Default: 20000
  */
  maxTokensSize?: number | undefined;
- /**
- * Maximum % of compaction model context (Haiku: 200k) that a tool response can be
- * to attempt LLM-based compaction. Larger responses are truncated instead.
- * The truncation limit is also this percentage.
- * Default: 30
- */
- responseTruncationThreshold?: number | undefined;
  }
  /**
  * Hook configuration in agent definition
@@ -90,6 +89,14 @@ export interface HookContext {
  * The model being used
  */
  model: string;
+ /**
+ * Session ID for the current session
+ */
+ sessionId?: string | undefined;
+ /**
+ * Storage interface for hooks that need to persist data
+ */
+ storage?: HookStorageInterface | undefined;
  /**
  * Tool response data (only for tool_response hooks)
  */
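
The hook context now optionally carries a sessionId and a storage handle whose single method is getArtifactsDir. A sketch of a hook persisting debug data through it; the file name and return shape are illustrative (the return mirrors the compactor hook above), not taken from the package:

    // Illustrative sketch of a hook using the new sessionId and storage fields.
    import { writeFile } from "node:fs/promises";
    import { join } from "node:path";

    const persistDebugInfo = async (ctx: any /* hook context */) => {
      const sessionId = ctx.sessionId ?? "unknown";
      const artifactsDir = ctx.storage?.getArtifactsDir(sessionId); // HookStorageInterface's only method
      if (artifactsDir) {
        await writeFile(
          join(artifactsDir, "hook-debug.json"), // hypothetical file name
          JSON.stringify({ sessionId, model: ctx.model }, null, 2),
        );
      }
      return { newContextEntry: null, metadata: {} };
    };
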

package/dist/runner/index.d.ts

@@ -1,4 +1,53 @@
- import type { AgentDefinition } from "../definition";
  import { type AgentRunner } from "./agent-runner";
  export type { AgentRunner };
- export declare const makeRunnerFromDefinition: (definition: AgentDefinition) => AgentRunner;
+ export declare const makeRunnerFromDefinition: (definition: {
+ displayName?: string | undefined;
+ version?: string | undefined;
+ description?: string | undefined;
+ suggestedPrompts?: string[] | undefined;
+ systemPrompt: string | null;
+ model: string;
+ tools?: (string | {
+ type: "custom";
+ modulePath: string;
+ } | {
+ type: "filesystem";
+ working_directory?: string | undefined;
+ } | {
+ type: "direct";
+ name: string;
+ description: string;
+ fn: import("zod/v4/core").$InferOuterFunctionType<import("zod/v4/core").$ZodFunctionArgs, import("zod/v4/core").$ZodFunctionOut>;
+ schema: any;
+ prettyName?: string | undefined;
+ icon?: string | undefined;
+ })[] | undefined;
+ mcps?: (string | {
+ name: string;
+ transport: "stdio";
+ command: string;
+ args?: string[] | undefined;
+ } | {
+ name: string;
+ transport: "http";
+ url: string;
+ headers?: Record<string, string> | undefined;
+ })[] | undefined;
+ harnessImplementation?: "langchain" | undefined;
+ hooks?: {
+ type: "context_size" | "tool_response";
+ setting?: {
+ threshold: number;
+ } | {
+ maxTokensSize?: number | undefined;
+ } | undefined;
+ callback: string;
+ }[] | undefined;
+ initialMessage?: {
+ enabled: boolean;
+ content: string;
+ } | undefined;
+ uiConfig?: {
+ hideTopBar?: boolean | undefined;
+ } | undefined;
+ }) => AgentRunner;

package/dist/runner/langchain/index.js

@@ -14,6 +14,7 @@ import { createModelFromString, detectProvider } from "./model-factory.js";
  import { makeOtelCallbacks } from "./otel-callbacks.js";
  import { makeArtifactsTools } from "./tools/artifacts";
  import { makeBrowserTools } from "./tools/browser";
+ import { makeDocumentExtractTool } from "./tools/document_extract";
  import { makeFilesystemTools } from "./tools/filesystem";
  import { makeGenerateImageTool, makeTownGenerateImageTool, } from "./tools/generate_image";
  import { SUBAGENT_TOOL_NAME } from "./tools/subagent";
@@ -42,6 +43,7 @@ export const TOOL_REGISTRY = {
  generate_image: () => makeGenerateImageTool(),
  town_generate_image: () => makeTownGenerateImageTool(),
  browser: () => makeBrowserTools(),
+ document_extract: () => makeDocumentExtractTool(),
  };
  // ============================================================================
  // Custom tool loading
@@ -530,6 +532,14 @@ export class LangchainAgent {
  if (hasTodoWrite) {
  agentConfig.systemPrompt = `${agentConfig.systemPrompt ?? ""}\n\n${TODO_WRITE_INSTRUCTIONS}`;
  }
+ // Process template variables in system prompt and inject current date/time
+ if (agentConfig.systemPrompt) {
+ const currentDateTime = getCurrentDateTimeString();
+ // Replace {{.CurrentDate}} template variable
+ agentConfig.systemPrompt = agentConfig.systemPrompt.replace(/\{\{\.CurrentDate\}\}/g, currentDateTime);
+ // Replace {{.CurrentDateTime}} template variable (alias)
+ agentConfig.systemPrompt = agentConfig.systemPrompt.replace(/\{\{\.CurrentDateTime\}\}/g, currentDateTime);
+ }
  const agent = createAgent(agentConfig);
  // Build messages from context history if available, otherwise use just the prompt
  // Type includes tool messages for sending tool results
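
An illustration of the new template substitution; the prompt text is made up, and the rendered date matches the example in getCurrentDateTimeString's doc comment in the next hunk. {{.CurrentDateTime}} is replaced the same way.

    // Illustrative only: how {{.CurrentDate}} is filled in before createAgent(agentConfig) runs.
    const systemPrompt = "You are a scheduling assistant. Today is {{.CurrentDate}}.";
    const rendered = systemPrompt.replace(/\{\{\.CurrentDate\}\}/g, getCurrentDateTimeString());
    // -> "You are a scheduling assistant. Today is Monday, December 9, 2024 at 2:30 PM PST."
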
@@ -1270,6 +1280,23 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
  [Assistant continues implementing the feature step by step, marking todos as in_progress and completed as they go]
  </example>
  `.trim();
+ /**
+ * Returns a human-readable string of the current date and time.
+ * Example: "Monday, December 9, 2024 at 2:30 PM PST"
+ */
+ const getCurrentDateTimeString = () => {
+ const now = new Date();
+ const options = {
+ weekday: "long",
+ year: "numeric",
+ month: "long",
+ day: "numeric",
+ hour: "numeric",
+ minute: "2-digit",
+ timeZoneName: "short",
+ };
+ return now.toLocaleString("en-US", options);
+ };
  // Re-export subagent tool utility
  export { makeSubagentsTool } from "./tools/subagent.js";
  /**

package/dist/runner/langchain/tools/artifacts.d.ts

@@ -19,19 +19,19 @@ export declare function makeArtifactsTools(): (import("langchain").DynamicStruct
  source: z.ZodString;
  destination: z.ZodString;
  direction: z.ZodEnum<{
- upload: "upload";
  download: "download";
+ upload: "upload";
  }>;
  }, z.core.$strip>, {
  session_id: string;
  source: string;
  destination: string;
- direction: "upload" | "download";
+ direction: "download" | "upload";
  }, {
+ session_id?: string | undefined;
  source: string;
  destination: string;
- direction: "upload" | "download";
- session_id?: string | undefined;
+ direction: "download" | "upload";
  }, string> | import("langchain").DynamicStructuredTool<z.ZodObject<{
  session_id: z.ZodOptional<z.ZodString>;
  path: z.ZodString;
@@ -39,8 +39,8 @@ export declare function makeArtifactsTools(): (import("langchain").DynamicStruct
  session_id: string;
  path: string;
  }, {
- path: string;
  session_id?: string | undefined;
+ path: string;
  }, string> | import("langchain").DynamicStructuredTool<z.ZodObject<{
  session_id: z.ZodOptional<z.ZodString>;
  path: z.ZodOptional<z.ZodString>;
@@ -62,7 +62,7 @@ export declare function makeArtifactsTools(): (import("langchain").DynamicStruct
  path: string;
  expires_in?: number;
  }, {
- path: string;
  session_id?: string | undefined;
+ path: string;
  expires_in?: number | undefined;
  }, string>)[];