@townco/agent 0.1.31 → 0.1.33
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/acp-server/adapter.d.ts +4 -1
- package/dist/acp-server/adapter.js +96 -4
- package/dist/acp-server/cli.d.ts +3 -1
- package/dist/acp-server/cli.js +9 -5
- package/dist/acp-server/http.d.ts +1 -1
- package/dist/acp-server/http.js +14 -3
- package/dist/acp-server/session-storage.d.ts +71 -0
- package/dist/acp-server/session-storage.js +155 -0
- package/dist/bin.js +0 -0
- package/dist/definition/mcp.d.ts +0 -0
- package/dist/definition/mcp.js +0 -0
- package/dist/definition/tools/todo.d.ts +49 -0
- package/dist/definition/tools/todo.js +80 -0
- package/dist/definition/tools/web_search.d.ts +4 -0
- package/dist/definition/tools/web_search.js +26 -0
- package/dist/dev-agent/index.d.ts +2 -0
- package/dist/dev-agent/index.js +18 -0
- package/dist/example.d.ts +2 -0
- package/dist/example.js +19 -0
- package/dist/index.js +5 -1
- package/dist/runner/agent-runner.d.ts +6 -0
- package/dist/runner/index.d.ts +3 -1
- package/dist/runner/index.js +18 -14
- package/dist/runner/langchain/index.js +41 -11
- package/dist/runner/langchain/model-factory.d.ts +20 -0
- package/dist/runner/langchain/model-factory.js +113 -0
- package/dist/templates/index.js +9 -1
- package/dist/tsconfig.tsbuildinfo +1 -1
- package/dist/utils/logger.d.ts +39 -0
- package/dist/utils/logger.js +175 -0
- package/index.ts +6 -1
- package/package.json +8 -6
- package/templates/index.ts +9 -1
package/dist/example.js
ADDED
@@ -0,0 +1,19 @@
+#!/usr/bin/env bun
+import { makeHttpTransport, makeStdioTransport } from "./acp-server/index.js";
+
+const exampleAgent = {
+  model: "claude-sonnet-4-5-20250929",
+  systemPrompt: "You are a helpful assistant.",
+  tools: ["todo_write", "get_weather", "web_search"],
+};
+// Parse transport type from command line argument
+const transport = process.argv[2] || "stdio";
+if (transport === "http") {
+  makeHttpTransport(exampleAgent);
+} else if (transport === "stdio") {
+  makeStdioTransport(exampleAgent);
+} else {
+  console.error(`Invalid transport: ${transport}`);
+  console.error("Usage: bun run example.ts [stdio|http]");
+  process.exit(1);
+}
package/dist/index.js
CHANGED
@@ -1,3 +1,4 @@
+import { basename } from "node:path";
 import { makeHttpTransport, makeStdioTransport } from "./acp-server";
 import { makeSubagentsTool } from "./utils";
 const exampleAgent = {
@@ -23,8 +24,11 @@ const exampleAgent = {
 };
 // Parse transport type from command line argument
 const transport = process.argv[2] || "stdio";
+// Get agent directory and name for session storage
+const agentDir = process.cwd();
+const agentName = basename(agentDir);
 if (transport === "http") {
-  makeHttpTransport(exampleAgent);
+  makeHttpTransport(exampleAgent, agentDir, agentName);
 }
 else if (transport === "stdio") {
   makeStdioTransport(exampleAgent);
package/dist/runner/agent-runner.d.ts
CHANGED
@@ -29,9 +29,15 @@ export declare const zAgentRunnerParams: z.ZodObject<{
 }, z.core.$strip>]>>>;
 }, z.core.$strip>;
 export type CreateAgentRunnerParams = z.infer<typeof zAgentRunnerParams>;
+export interface SessionMessage {
+  role: "user" | "assistant";
+  content: string;
+  timestamp: string;
+}
 export type InvokeRequest = Omit<PromptRequest, "_meta"> & {
   messageId: string;
   sessionMeta?: Record<string, unknown>;
+  sessionMessages?: SessionMessage[];
 };
 export interface TokenUsage {
   inputTokens?: number;
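
For orientation, a minimal sketch of the new session-history shape (the type is copied from the declaration above; the surrounding request fields are omitted and the literal values are illustrative only):

// Shape copied from the SessionMessage declaration above.
type SessionMessage = {
  role: "user" | "assistant";
  content: string;
  timestamp: string;
};

// Hypothetical prior turns a caller could attach as InvokeRequest.sessionMessages.
// Per the langchain runner later in this diff, the final entry is treated as the current prompt.
const history: SessionMessage[] = [
  { role: "user", content: "What's the weather in Paris?", timestamp: "2025-01-01T10:00:00Z" },
  { role: "assistant", content: "12°C and cloudy.", timestamp: "2025-01-01T10:00:05Z" },
  { role: "user", content: "And tomorrow?", timestamp: "2025-01-01T10:01:00Z" },
];
const invokeExtras = { sessionMessages: history };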
package/dist/runner/index.d.ts
CHANGED
@@ -1,4 +1,6 @@
 import type { AgentDefinition } from "../definition";
 import { type AgentRunner } from "./agent-runner";
 export type { AgentRunner };
-export declare const makeRunnerFromDefinition: (
+export declare const makeRunnerFromDefinition: (
+  definition: AgentDefinition,
+) => AgentRunner;
package/dist/runner/index.js
CHANGED
@@ -1,18 +1,22 @@
 import { zAgentRunnerParams } from "./agent-runner";
 import { LangchainAgent } from "./langchain";
 export const makeRunnerFromDefinition = (definition) => {
-  … (old lines 4-17 removed; their content is not rendered in this diff view)
+  const agentRunnerParams = zAgentRunnerParams.safeParse(definition);
+  if (!agentRunnerParams.success) {
+    throw new Error(
+      `Invalid agent definition: ${agentRunnerParams.error.message}`,
+    );
+  }
+  switch (definition.harnessImplementation) {
+    case undefined:
+    case "langchain": {
+      return new LangchainAgent(agentRunnerParams.data);
+    }
+    default: {
+      const _exhaustiveCheck = definition.harnessImplementation;
+      throw new Error(
+        `Unsupported harness implementation: ${definition.harnessImplementation}`,
+      );
+    }
+  }
 };
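
A hedged usage sketch of the rewritten factory (the definition fields mirror the example agent earlier in this diff; the relative import path matches the package's own dist files rather than a documented public entry point):

import { makeRunnerFromDefinition } from "./runner/index.js";

// "langchain" (or leaving harnessImplementation undefined) selects LangchainAgent;
// any other value now throws "Unsupported harness implementation: ...".
const runner = makeRunnerFromDefinition({
  model: "claude-sonnet-4-5-20250929",
  systemPrompt: "You are a helpful assistant.",
  tools: ["todo_write", "web_search"],
  harnessImplementation: "langchain",
});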
package/dist/runner/langchain/index.js
CHANGED
@@ -4,6 +4,7 @@ import { AIMessageChunk, createAgent, ToolMessage, tool, } from "langchain";
 import { z } from "zod";
 import { SUBAGENT_MODE_KEY } from "../../acp-server/adapter";
 import { loadCustomToolModule, } from "../tool-loader.js";
+import { createModelFromString } from "./model-factory.js";
 import { makeFilesystemTools } from "./tools/filesystem";
 import { TASK_TOOL_NAME } from "./tools/subagent";
 import { TODO_WRITE_TOOL_NAME, todoWrite } from "./tools/todo";
@@ -120,8 +121,14 @@ export class LangchainAgent {
 const finalTools = isSubagent
   ? enabledTools.filter((t) => t.name !== TODO_WRITE_TOOL_NAME && t.name !== TASK_TOOL_NAME)
   : enabledTools;
+// Create the model instance using the factory
+// This detects the provider from the model string:
+// - "gemini-2.0-flash" → Google Generative AI
+// - "vertex-gemini-2.0-flash" → Vertex AI (strips prefix)
+// - "claude-sonnet-4-5-20250929" → Anthropic
+const model = createModelFromString(this.definition.model);
 const agentConfig = {
-  model
+  model,
   tools: finalTools,
 };
 // Inject system prompt with optional TodoWrite instructions
@@ -130,12 +137,35 @@ export class LangchainAgent {
   agentConfig.systemPrompt = `${agentConfig.systemPrompt ?? ""}\n\n${TODO_WRITE_INSTRUCTIONS}`;
 }
 const agent = createAgent(agentConfig);
-… (old lines 133-138 removed; their content is not rendered in this diff view)
+// Build messages from session history if available, otherwise use just the prompt
+let messages;
+if (req.sessionMessages && req.sessionMessages.length > 0) {
+  // Use session message history - convert to LangChain format
+  // Only include messages BEFORE the current user message (exclude the last one since it's the current prompt)
+  const historyMessages = req.sessionMessages.slice(0, -1);
+  messages = historyMessages.map((msg) => ({
+    type: msg.role === "user" ? "human" : "ai",
+    content: msg.content,
+  }));
+  // Add the current prompt as the final human message
+  const currentPromptText = req.prompt
+    .filter((promptMsg) => promptMsg.type === "text")
+    .map((promptMsg) => promptMsg.text)
+    .join("\n");
+  messages.push({
+    type: "human",
+    content: currentPromptText,
+  });
+}
+else {
+  // Fallback: No session history, use just the prompt (legacy behavior)
+  messages = req.prompt
+    .filter((promptMsg) => promptMsg.type === "text")
+    .map((promptMsg) => ({
+      type: "human",
+      content: promptMsg.text,
+    }));
+}
 const stream = agent.stream({ messages }, {
   streamMode: ["updates", "messages"],
 });
@@ -230,7 +260,7 @@ export class LangchainAgent {
   : typeof aiMessage.content === "string"
     ? aiMessage.content.length
     : -1;
-console.
+console.log("DEBUG agent: messageTokenUsage:", JSON.stringify(messageTokenUsage), "contentType:", contentType, "isArray:", contentIsArray, "length:", contentLength);
 }
 // If we have tokenUsage but no content, send a token-only chunk
 if (messageTokenUsage &&
@@ -238,7 +268,7 @@ export class LangchainAgent {
   ? aiMessage.content === ""
   : Array.isArray(aiMessage.content) &&
     aiMessage.content.length === 0)) {
-console.
+console.log("DEBUG agent: sending token-only chunk:", JSON.stringify(messageTokenUsage));
 const msgToYield = {
   sessionUpdate: "agent_message_chunk",
   content: {
@@ -271,7 +301,7 @@ export class LangchainAgent {
   text: aiMessage.content,
 },
 };
-console.
+console.log("DEBUG agent: yielding message (string content):", JSON.stringify(msgToYield));
 yield msgToYield;
 }
 else if (Array.isArray(aiMessage.content)) {
@@ -295,7 +325,7 @@ export class LangchainAgent {
   text: part.text,
 },
 };
-console.
+console.log("DEBUG agent: yielding message (array content):", JSON.stringify(msgToYield));
 yield msgToYield;
 }
 else if (part.type === "tool_use") {
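
To make the history handling above easier to follow, here is the same mapping extracted into a standalone sketch (local names only, not exports of the package):

type SessionMessage = { role: "user" | "assistant"; content: string; timestamp: string };

// Mirrors the logic in the hunk above: prior turns become "human"/"ai" entries and the
// last session message is dropped, since it duplicates the current prompt text.
function toLangchainMessages(
  sessionMessages: SessionMessage[],
  promptText: string,
): Array<{ type: "human" | "ai"; content: string }> {
  const history = sessionMessages.slice(0, -1).map((msg) => ({
    type: msg.role === "user" ? ("human" as const) : ("ai" as const),
    content: msg.content,
  }));
  return [...history, { type: "human" as const, content: promptText }];
}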
package/dist/runner/langchain/model-factory.d.ts
ADDED
@@ -0,0 +1,20 @@
+import type { BaseChatModel } from "@langchain/core/language_models/chat_models";
+/**
+ * Detects the provider from a model string and returns the appropriate
+ * LangChain chat model instance.
+ *
+ * Detection logic:
+ * - If model starts with "vertex-" → Google Vertex AI (strips prefix)
+ * - If model contains "gemini" (unprefixed) → Google Generative AI
+ * - If model contains "gpt" → OpenAI (future support)
+ * - Otherwise → Anthropic (default for backward compatibility)
+ *
+ * Supported formats:
+ * - Direct model name: "gemini-2.0-flash", "vertex-gemini-2.0-flash", "claude-sonnet-4-5-20250929"
+ * - Provider prefix: "google_vertexai:gemini-2.0-flash", "google_genai:gemini-2.0-flash", "anthropic:claude-3-5-sonnet"
+ */
+export declare function createModelFromString(modelString: string): BaseChatModel;
+/**
+ * Helper function to detect if a model string is for a specific provider
+ */
+export declare function detectProvider(modelString: string): string;
package/dist/runner/langchain/model-factory.js
ADDED
@@ -0,0 +1,113 @@
+import { ChatAnthropic } from "@langchain/anthropic";
+import { ChatGoogleGenerativeAI } from "@langchain/google-genai";
+import { ChatVertexAI } from "@langchain/google-vertexai";
+/**
+ * Detects the provider from a model string and returns the appropriate
+ * LangChain chat model instance.
+ *
+ * Detection logic:
+ * - If model starts with "vertex-" → Google Vertex AI (strips prefix)
+ * - If model contains "gemini" (unprefixed) → Google Generative AI
+ * - If model contains "gpt" → OpenAI (future support)
+ * - Otherwise → Anthropic (default for backward compatibility)
+ *
+ * Supported formats:
+ * - Direct model name: "gemini-2.0-flash", "vertex-gemini-2.0-flash", "claude-sonnet-4-5-20250929"
+ * - Provider prefix: "google_vertexai:gemini-2.0-flash", "google_genai:gemini-2.0-flash", "anthropic:claude-3-5-sonnet"
+ */
+export function createModelFromString(modelString) {
+  // Check if the model string uses provider prefix format
+  const parts = modelString.split(":", 2);
+  const maybeProvider = parts[0];
+  const maybeModel = parts[1];
+  let provider = null;
+  let modelName = modelString;
+  // If there's a colon, treat the first part as the provider
+  if (maybeModel) {
+    provider = maybeProvider?.toLowerCase() ?? null;
+    modelName = maybeModel;
+  }
+  else {
+    // Auto-detect provider from model name
+    const lowerModel = modelString.toLowerCase();
+    // Check for vertex- prefix
+    if (lowerModel.startsWith("vertex-")) {
+      provider = "google_vertexai";
+      modelName = modelString.substring(7); // Strip "vertex-" prefix
+    }
+    else if (lowerModel.includes("gemini")) {
+      // Unprefixed gemini models use Google Generative AI
+      provider = "google_genai";
+    }
+    else if (lowerModel.includes("gpt")) {
+      provider = "openai";
+    }
+    else if (lowerModel.includes("claude")) {
+      provider = "anthropic";
+    }
+    else {
+      // Default to Anthropic for backward compatibility
+      provider = "anthropic";
+    }
+  }
+  // Create the appropriate model instance based on provider
+  switch (provider) {
+    case "google_vertexai":
+    case "vertex":
+    case "vertexai":
+      return new ChatVertexAI({
+        model: modelName,
+        // Default to reasonable settings
+        temperature: 0,
+        location: "global",
+      });
+    case "google_genai":
+    case "gemini":
+      return new ChatGoogleGenerativeAI({
+        model: modelName,
+        // Default to reasonable settings
+        temperature: 0,
+      });
+    case "openai":
+      throw new Error("OpenAI provider is not yet supported. Please install @langchain/openai and add implementation.");
+    case "anthropic":
+    case "claude":
+      return new ChatAnthropic({
+        model: modelName,
+        // Use default Anthropic settings
+      });
+    default:
+      // Fallback to Anthropic for unknown providers (backward compatibility)
+      console.warn(`Unknown provider "${provider}" in model string "${modelString}". Defaulting to Anthropic.`);
+      return new ChatAnthropic({
+        model: modelString,
+      });
+  }
+}
+/**
+ * Helper function to detect if a model string is for a specific provider
+ */
+export function detectProvider(modelString) {
+  const parts = modelString.split(":", 2);
+  const maybeProvider = parts[0];
+  const maybeModel = parts[1];
+  if (maybeModel && maybeProvider) {
+    return maybeProvider.toLowerCase();
+  }
+  const lowerModel = modelString.toLowerCase();
+  // Check for vertex- prefix
+  if (lowerModel.startsWith("vertex-")) {
+    return "google_vertexai";
+  }
+  else if (lowerModel.includes("gemini")) {
+    // Unprefixed gemini models use Google Generative AI
+    return "google_genai";
+  }
+  else if (lowerModel.includes("gpt")) {
+    return "openai";
+  }
+  else if (lowerModel.includes("claude")) {
+    return "anthropic";
+  }
+  return "anthropic"; // default
+}
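
A short sketch of the detection behavior, using the relative import as it appears in the dist files above (the expected values follow directly from the code):

import { createModelFromString, detectProvider } from "./model-factory.js";

detectProvider("gemini-2.0-flash");            // "google_genai"
detectProvider("vertex-gemini-2.0-flash");     // "google_vertexai"
detectProvider("anthropic:claude-3-5-sonnet"); // "anthropic" (explicit prefix wins)
detectProvider("gpt-4o");                      // "openai" (createModelFromString still throws for OpenAI)

// ChatVertexAI configured with model "gemini-2.0-flash", temperature 0, location "global".
const vertexModel = createModelFromString("vertex-gemini-2.0-flash");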
package/dist/templates/index.js
CHANGED
@@ -50,14 +50,21 @@ export async function generateIndexTs(vars) {
 };
 return prettier.format(`import { makeHttpTransport, makeStdioTransport } from "@townco/agent/acp-server";
 import type { AgentDefinition } from "@townco/agent/definition";
+import { basename } from "node:path";
 
 // Load agent definition from JSON file
 const agent: AgentDefinition = ${JSON.stringify(agentDef)};
 
 const transport = process.argv[2] || "stdio";
 
+// Get agent directory and name for session storage
+const agentDir = process.cwd();
+const agentName = basename(agentDir);
+
+console.log("[agent] Configuration:", { transport, agentDir, agentName });
+
 if (transport === "http") {
-  makeHttpTransport(agent);
+  makeHttpTransport(agent, agentDir, agentName);
 } else if (transport === "stdio") {
   makeStdioTransport(agent);
 } else {
@@ -75,6 +82,7 @@ export function generateGitignore() {
 return `node_modules
 dist
 .env
+.sessions
 `;
 }
 export function generateTsConfig() {