@townco/agent 0.1.32 → 0.1.34

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -17,6 +17,7 @@ export declare class AgentAcpAdapter implements acp.Agent {
17
17
  private sessions;
18
18
  private agent;
19
19
  private storage;
20
+ private noSession;
20
21
  constructor(agent: AgentRunner, connection: acp.AgentSideConnection, agentDir?: string, agentName?: string);
21
22
  initialize(_params: acp.InitializeRequest): Promise<acp.InitializeResponse>;
22
23
  newSession(params: acp.NewSessionRequest): Promise<acp.NewSessionResponse>;
@@ -11,24 +11,29 @@ export class AgentAcpAdapter {
11
11
  sessions;
12
12
  agent;
13
13
  storage;
14
+ noSession;
14
15
  constructor(agent, connection, agentDir, agentName) {
15
16
  this.connection = connection;
16
17
  this.sessions = new Map();
17
18
  this.agent = agent;
19
+ this.noSession = process.env.TOWN_NO_SESSION === "true";
18
20
  this.storage =
19
- agentDir && agentName ? new SessionStorage(agentDir, agentName) : null;
21
+ agentDir && agentName && !this.noSession
22
+ ? new SessionStorage(agentDir, agentName)
23
+ : null;
20
24
  console.log("[adapter] Initialized with:", {
21
25
  agentDir,
22
26
  agentName,
27
+ noSession: this.noSession,
23
28
  hasStorage: this.storage !== null,
24
- sessionStoragePath: this.storage ? `${agentDir}/.sessions` : null
29
+ sessionStoragePath: this.storage ? `${agentDir}/.sessions` : null,
25
30
  });
26
31
  }
27
32
  async initialize(_params) {
28
33
  return {
29
34
  protocolVersion: acp.PROTOCOL_VERSION,
30
35
  agentCapabilities: {
31
- loadSession: this.storage !== null,
36
+ loadSession: !this.noSession && this.storage !== null,
32
37
  },
33
38
  };
34
39
  }
@@ -103,12 +108,15 @@ export class AgentAcpAdapter {
103
108
  .filter((p) => p.type === "text")
104
109
  .map((p) => p.text)
105
110
  .join("\n");
106
- const userMessage = {
107
- role: "user",
108
- content: userMessageContent,
109
- timestamp: new Date().toISOString(),
110
- };
111
- session.messages.push(userMessage);
111
+ // Only store messages if session persistence is enabled
112
+ if (!this.noSession) {
113
+ const userMessage = {
114
+ role: "user",
115
+ content: userMessageContent,
116
+ timestamp: new Date().toISOString(),
117
+ };
118
+ session.messages.push(userMessage);
119
+ }
112
120
  // Accumulate assistant response content
113
121
  let assistantContent = "";
114
122
  try {
@@ -116,7 +124,8 @@ export class AgentAcpAdapter {
116
124
  prompt: params.prompt,
117
125
  sessionId: params.sessionId,
118
126
  messageId,
119
- sessionMessages: session.messages, // Pass session history to agent
127
+ // Only pass session history if session persistence is enabled
128
+ sessionMessages: this.noSession ? [] : session.messages,
120
129
  };
121
130
  // Only add sessionMeta if it's defined
122
131
  if (session.requestParams._meta) {
@@ -157,7 +166,8 @@ export class AgentAcpAdapter {
157
166
  throw err;
158
167
  }
159
168
  // Store the complete assistant response in session messages
160
- if (assistantContent.length > 0) {
169
+ // Only store if session persistence is enabled
170
+ if (!this.noSession && assistantContent.length > 0) {
161
171
  const assistantMessage = {
162
172
  role: "assistant",
163
173
  content: assistantContent,
@@ -165,8 +175,8 @@ export class AgentAcpAdapter {
165
175
  };
166
176
  session.messages.push(assistantMessage);
167
177
  }
168
- // Save session to disk if storage is configured
169
- if (this.storage) {
178
+ // Save session to disk if storage is configured and session persistence is enabled
179
+ if (!this.noSession && this.storage) {
170
180
  try {
171
181
  await this.storage.saveSession(params.sessionId, session.messages);
172
182
  }
@@ -1,5 +1,5 @@
1
1
  import { existsSync, mkdirSync, readdirSync, readFileSync, unlinkSync, writeFileSync, } from "node:fs";
2
- import { dirname, join } from "node:path";
2
+ import { join } from "node:path";
3
3
  import { z } from "zod";
4
4
  /**
5
5
  * Zod schema for validating session files
@@ -4,6 +4,7 @@ import { AIMessageChunk, createAgent, ToolMessage, tool, } from "langchain";
4
4
  import { z } from "zod";
5
5
  import { SUBAGENT_MODE_KEY } from "../../acp-server/adapter";
6
6
  import { loadCustomToolModule, } from "../tool-loader.js";
7
+ import { createModelFromString } from "./model-factory.js";
7
8
  import { makeFilesystemTools } from "./tools/filesystem";
8
9
  import { TASK_TOOL_NAME } from "./tools/subagent";
9
10
  import { TODO_WRITE_TOOL_NAME, todoWrite } from "./tools/todo";
@@ -120,8 +121,14 @@ export class LangchainAgent {
120
121
  const finalTools = isSubagent
121
122
  ? enabledTools.filter((t) => t.name !== TODO_WRITE_TOOL_NAME && t.name !== TASK_TOOL_NAME)
122
123
  : enabledTools;
124
+ // Create the model instance using the factory
125
+ // This detects the provider from the model string:
126
+ // - "gemini-2.0-flash" → Google Generative AI
127
+ // - "vertex-gemini-2.0-flash" → Vertex AI (strips prefix)
128
+ // - "claude-sonnet-4-5-20250929" → Anthropic
129
+ const model = createModelFromString(this.definition.model);
123
130
  const agentConfig = {
124
- model: this.definition.model,
131
+ model,
125
132
  tools: finalTools,
126
133
  };
127
134
  // Inject system prompt with optional TodoWrite instructions
@@ -0,0 +1,20 @@
1
import type { BaseChatModel } from "@langchain/core/language_models/chat_models";
/**
 * Detects the provider from a model string and returns the appropriate
 * LangChain chat model instance.
 *
 * Detection logic:
 * - If model starts with "vertex-" → Google Vertex AI (strips prefix)
 * - If model contains "gemini" (unprefixed) → Google Generative AI
 * - If model contains "gpt" → OpenAI (future support)
 * - Otherwise → Anthropic (default for backward compatibility)
 *
 * Supported formats:
 * - Direct model name: "gemini-2.0-flash", "vertex-gemini-2.0-flash", "claude-sonnet-4-5-20250929"
 * - Provider prefix: "google_vertexai:gemini-2.0-flash", "google_genai:gemini-2.0-flash", "anthropic:claude-3-5-sonnet"
 */
export declare function createModelFromString(modelString: string): BaseChatModel;
/**
 * Helper function to detect if a model string is for a specific provider.
 * Returns a provider identifier string such as "google_vertexai",
 * "google_genai", "openai", or "anthropic" (the fallback default).
 */
export declare function detectProvider(modelString: string): string;
@@ -0,0 +1,113 @@
1
+ import { ChatAnthropic } from "@langchain/anthropic";
2
+ import { ChatGoogleGenerativeAI } from "@langchain/google-genai";
3
+ import { ChatVertexAI } from "@langchain/google-vertexai";
4
/**
 * Detects the provider from a model string and returns the appropriate
 * LangChain chat model instance.
 *
 * Detection logic:
 * - If model starts with "vertex-" → Google Vertex AI (strips prefix)
 * - If model contains "gemini" (unprefixed) → Google Generative AI
 * - If model contains "gpt" → OpenAI (future support)
 * - Otherwise → Anthropic (default for backward compatibility)
 *
 * Supported formats:
 * - Direct model name: "gemini-2.0-flash", "vertex-gemini-2.0-flash", "claude-sonnet-4-5-20250929"
 * - Provider prefix: "google_vertexai:gemini-2.0-flash", "google_genai:gemini-2.0-flash", "anthropic:claude-3-5-sonnet"
 */
export function createModelFromString(modelString) {
    // Split on the FIRST colon only. The previous implementation used
    // modelString.split(":", 2); in JavaScript the `limit` argument
    // TRUNCATES the result instead of preserving the remainder, so a
    // model name that itself contains a colon
    // (e.g. "anthropic:claude-3:custom") would silently lose its tail.
    const colonIdx = modelString.indexOf(":");
    let provider = null;
    let modelName = modelString;
    if (colonIdx !== -1 && colonIdx + 1 < modelString.length) {
        // Explicit "provider:model" form; keep everything after the
        // first colon as the model name.
        provider = modelString.slice(0, colonIdx).toLowerCase();
        modelName = modelString.slice(colonIdx + 1);
    }
    else {
        // Auto-detect provider from the bare model name.
        const lowerModel = modelString.toLowerCase();
        if (lowerModel.startsWith("vertex-")) {
            provider = "google_vertexai";
            modelName = modelString.substring("vertex-".length); // strip "vertex-" prefix
        }
        else if (lowerModel.includes("gemini")) {
            // Unprefixed gemini models use Google Generative AI.
            provider = "google_genai";
        }
        else if (lowerModel.includes("gpt")) {
            provider = "openai";
        }
        else {
            // "claude" models and anything unrecognized both default to
            // Anthropic for backward compatibility.
            provider = "anthropic";
        }
    }
    // Instantiate the chat model for the resolved provider.
    switch (provider) {
        case "google_vertexai":
        case "vertex":
        case "vertexai":
            return new ChatVertexAI({
                model: modelName,
                // Default to reasonable settings
                temperature: 0,
                location: "global",
            });
        case "google_genai":
        case "gemini":
            return new ChatGoogleGenerativeAI({
                model: modelName,
                // Default to reasonable settings
                temperature: 0,
            });
        case "openai":
            throw new Error("OpenAI provider is not yet supported. Please install @langchain/openai and add implementation.");
        case "anthropic":
        case "claude":
            return new ChatAnthropic({
                model: modelName,
                // Use default Anthropic settings
            });
        default:
            // Unknown explicit prefix: warn, then fall back to Anthropic
            // with the ORIGINAL string so the caller can see what was passed.
            console.warn(`Unknown provider "${provider}" in model string "${modelString}". Defaulting to Anthropic.`);
            return new ChatAnthropic({
                model: modelString,
            });
    }
}
87
/**
 * Helper function to detect if a model string is for a specific provider.
 * Mirrors the detection rules of createModelFromString but only reports
 * the provider identifier, never constructs a model.
 */
export function detectProvider(modelString) {
    // An explicit "provider:model" prefix wins outright.
    const [head, tail] = modelString.split(":", 2);
    if (head && tail) {
        return head.toLowerCase();
    }
    // Otherwise infer the provider from the bare model name.
    const normalized = modelString.toLowerCase();
    if (normalized.startsWith("vertex-")) {
        return "google_vertexai";
    }
    if (normalized.includes("gemini")) {
        // Unprefixed gemini models use Google Generative AI.
        return "google_genai";
    }
    if (normalized.includes("gpt")) {
        return "openai";
    }
    // "claude" models and anything unrecognized both map to Anthropic.
    return "anthropic";
}