@townco/agent 0.1.81 → 0.1.82

This diff shows the content of publicly available package versions that have been released to one of the supported registries. The information in this diff is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -602,8 +602,7 @@ export class AgentAcpAdapter {
602
602
  const context_size = calculateContextSize(contextMessages, this.agent.definition.systemPrompt ?? undefined, undefined, // No LLM-reported tokens yet
603
603
  this.currentToolOverheadTokens, // Include tool overhead
604
604
  this.currentMcpOverheadTokens, // Include MCP overhead
605
- getModelContextWindow(this.agent.definition.model), // Model context window for UI
606
- true);
605
+ getModelContextWindow(this.agent.definition.model));
607
606
  const contextSnapshot = createContextSnapshot(session.messages.length - 1, // Exclude the newly added user message (it will be passed separately via prompt)
608
607
  new Date().toISOString(), previousContext, context_size);
609
608
  session.context.push(contextSnapshot);
@@ -1215,13 +1214,10 @@ export class AgentAcpAdapter {
1215
1214
  }
1216
1215
  }
1217
1216
  // Calculate context size with LLM-reported tokens from this turn
1218
- // Exclude tool results - they're only sent during the turn they were received,
1219
- // not in subsequent turns (only messages are sent)
1220
1217
  const context_size = calculateContextSize(contextMessages, this.agent.definition.systemPrompt ?? undefined, turnTokenUsage.inputTokens, // Final LLM-reported tokens from this turn
1221
1218
  this.currentToolOverheadTokens, // Include tool overhead
1222
1219
  this.currentMcpOverheadTokens, // Include MCP overhead
1223
- getModelContextWindow(this.agent.definition.model), // Model context window for UI
1224
- true);
1220
+ getModelContextWindow(this.agent.definition.model));
1225
1221
  const contextSnapshot = createContextSnapshot(session.messages.length, new Date().toISOString(), previousContext, context_size);
1226
1222
  session.context.push(contextSnapshot);
1227
1223
  await this.saveSessionToDisk(params.sessionId, session);
@@ -24,14 +24,36 @@ export const compactionTool = async (ctx) => {
24
24
  });
25
25
  // Build the conversation history to compact
26
26
  const messagesToCompact = ctx.session.messages;
27
- // Convert session messages to text for context
27
+ // Convert session messages to text for context, including tool calls and results
28
28
  const conversationText = messagesToCompact
29
29
  .map((msg) => {
30
- const textContent = msg.content
31
- .filter((block) => block.type === "text")
32
- .map((block) => block.text)
33
- .join("\n");
34
- return `${msg.role.toUpperCase()}:\n${textContent}`;
30
+ const parts = [];
31
+ for (const block of msg.content) {
32
+ if (block.type === "text") {
33
+ parts.push(block.text);
34
+ }
35
+ else if (block.type === "tool_call") {
36
+ // Include tool call info
37
+ parts.push(`[Tool: ${block.title}]`);
38
+ if (block.rawInput) {
39
+ parts.push(`Input: ${JSON.stringify(block.rawInput, null, 2)}`);
40
+ }
41
+ if (block.rawOutput) {
42
+ // Summarize large outputs to avoid overwhelming the compaction LLM
43
+ const outputStr = JSON.stringify(block.rawOutput);
44
+ if (outputStr.length > 2000) {
45
+ parts.push(`Output: [Large output - ${outputStr.length} chars]`);
46
+ }
47
+ else {
48
+ parts.push(`Output: ${outputStr}`);
49
+ }
50
+ }
51
+ if (block.error) {
52
+ parts.push(`Error: ${block.error}`);
53
+ }
54
+ }
55
+ }
56
+ return `${msg.role.toUpperCase()}:\n${parts.join("\n")}`;
35
57
  })
36
58
  .join("\n\n---\n\n");
37
59
  // Create system prompt for compaction
@@ -472,6 +472,7 @@ export class LangchainAgent {
472
472
  }
473
473
  const agent = createAgent(agentConfig);
474
474
  // Build messages from context history if available, otherwise use just the prompt
475
+ // Type includes tool messages for sending tool results
475
476
  let messages;
476
477
  // Helper to convert content blocks to LangChain format
477
478
  // LangChain expects image_url type with data URL, not Claude's native image+source format
@@ -539,11 +540,62 @@ export class LangchainAgent {
539
540
  };
540
541
  if (req.contextMessages && req.contextMessages.length > 0) {
541
542
  // Use context messages (already resolved from context entries)
542
- // Convert to LangChain format
543
- messages = req.contextMessages.map((msg) => ({
544
- type: msg.role === "user" ? "human" : "ai",
545
- content: convertContentBlocks(msg.content),
546
- }));
543
+ // Convert to LangChain format, including tool calls and their results
544
+ messages = [];
545
+ for (const msg of req.contextMessages) {
546
+ if (msg.role === "user") {
547
+ messages.push({
548
+ type: "human",
549
+ content: convertContentBlocks(msg.content),
550
+ });
551
+ }
552
+ else if (msg.role === "assistant") {
553
+ // Check if message has tool calls
554
+ const toolCalls = msg.content.filter((block) => block.type === "tool_call");
555
+ const textBlocks = msg.content.filter((block) => block.type === "text" || block.type === "image");
556
+ if (toolCalls.length > 0) {
557
+ // Build AI message with tool_use blocks
558
+ const aiContent = [];
559
+ // Add any text content first
560
+ for (const block of textBlocks) {
561
+ if (block.type === "text") {
562
+ aiContent.push({ type: "text", text: block.text });
563
+ }
564
+ }
565
+ // Add tool_use blocks
566
+ for (const tc of toolCalls) {
567
+ if (tc.type === "tool_call") {
568
+ aiContent.push({
569
+ type: "tool_use",
570
+ id: tc.id,
571
+ name: tc.title,
572
+ input: tc.rawInput || {},
573
+ });
574
+ }
575
+ }
576
+ messages.push({ type: "ai", content: aiContent });
577
+ // Add tool result messages for each tool call that has output
578
+ for (const tc of toolCalls) {
579
+ if (tc.type === "tool_call" && tc.rawOutput) {
580
+ messages.push({
581
+ type: "tool",
582
+ tool_call_id: tc.id,
583
+ content: typeof tc.rawOutput === "string"
584
+ ? tc.rawOutput
585
+ : JSON.stringify(tc.rawOutput),
586
+ });
587
+ }
588
+ }
589
+ }
590
+ else {
591
+ // No tool calls - simple AI message
592
+ messages.push({
593
+ type: "ai",
594
+ content: convertContentBlocks(msg.content),
595
+ });
596
+ }
597
+ }
598
+ }
547
599
  // Add the current prompt as the final human message
548
600
  const promptContent = convertContentBlocks(req.prompt);
549
601
  messages.push({