@tjamescouch/gro 1.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/ci.yml +20 -0
- package/README.md +218 -0
- package/_base.md +44 -0
- package/gro +198 -0
- package/owl/behaviors/agentic-turn.md +43 -0
- package/owl/components/cli.md +37 -0
- package/owl/components/drivers.md +29 -0
- package/owl/components/mcp.md +33 -0
- package/owl/components/memory.md +35 -0
- package/owl/components/session.md +35 -0
- package/owl/constraints.md +32 -0
- package/owl/product.md +28 -0
- package/owl/proposals/cooperative-scheduler.md +106 -0
- package/package.json +22 -0
- package/providers/claude.sh +50 -0
- package/providers/gemini.sh +36 -0
- package/providers/openai.py +85 -0
- package/src/drivers/anthropic.ts +215 -0
- package/src/drivers/index.ts +5 -0
- package/src/drivers/streaming-openai.ts +245 -0
- package/src/drivers/types.ts +33 -0
- package/src/errors.ts +97 -0
- package/src/logger.ts +28 -0
- package/src/main.ts +827 -0
- package/src/mcp/client.ts +147 -0
- package/src/mcp/index.ts +2 -0
- package/src/memory/advanced-memory.ts +263 -0
- package/src/memory/agent-memory.ts +61 -0
- package/src/memory/agenthnsw.ts +122 -0
- package/src/memory/index.ts +6 -0
- package/src/memory/simple-memory.ts +41 -0
- package/src/memory/vector-index.ts +30 -0
- package/src/session.ts +150 -0
- package/src/tools/agentpatch.ts +89 -0
- package/src/tools/bash.ts +61 -0
- package/src/utils/rate-limiter.ts +60 -0
- package/src/utils/retry.ts +32 -0
- package/src/utils/timed-fetch.ts +29 -0
- package/tests/errors.test.ts +246 -0
- package/tests/memory.test.ts +186 -0
- package/tests/rate-limiter.test.ts +76 -0
- package/tests/retry.test.ts +138 -0
- package/tests/timed-fetch.test.ts +104 -0
- package/tsconfig.json +13 -0
package/src/main.ts
ADDED
|
@@ -0,0 +1,827 @@
|
|
|
1
|
+
#!/usr/bin/env bun
|
|
2
|
+
/**
|
|
3
|
+
* gro — provider-agnostic LLM runtime with context management.
|
|
4
|
+
*
|
|
5
|
+
* Extracted from org. Single-agent, headless, no terminal UI.
|
|
6
|
+
* Reads prompt from argv or stdin, manages conversation state,
|
|
7
|
+
* outputs completion to stdout. Connects to MCP servers for tools.
|
|
8
|
+
*
|
|
9
|
+
* Supersets the claude CLI flags for drop-in compatibility.
|
|
10
|
+
*/
|
|
11
|
+
|
|
12
|
+
import { readFileSync, existsSync } from "node:fs";
|
|
13
|
+
import { join } from "node:path";
|
|
14
|
+
import { homedir } from "node:os";
|
|
15
|
+
import { Logger, C } from "./logger.js";
|
|
16
|
+
import { makeStreamingOpenAiDriver } from "./drivers/streaming-openai.js";
|
|
17
|
+
import { makeAnthropicDriver } from "./drivers/anthropic.js";
|
|
18
|
+
import { SimpleMemory } from "./memory/simple-memory.js";
|
|
19
|
+
import { AdvancedMemory } from "./memory/advanced-memory.js";
|
|
20
|
+
import { McpManager } from "./mcp/index.js";
|
|
21
|
+
import { newSessionId, findLatestSession, loadSession, ensureGroDir } from "./session.js";
|
|
22
|
+
import { groError, asError, isGroError, errorLogFields } from "./errors.js";
|
|
23
|
+
import type { McpServerConfig } from "./mcp/index.js";
|
|
24
|
+
import type { ChatDriver, ChatMessage, ChatOutput } from "./drivers/types.js";
|
|
25
|
+
import type { AgentMemory } from "./memory/agent-memory.js";
|
|
26
|
+
import { bashToolDefinition, executeBash } from "./tools/bash.js";
|
|
27
|
+
import { agentpatchToolDefinition, executeAgentpatch } from "./tools/agentpatch.js";
|
|
28
|
+
|
|
29
|
+
const VERSION = "0.3.1";
|
|
30
|
+
|
|
31
|
+
// Wake notes: a runner-global file that is prepended to the system prompt on process start
|
|
32
|
+
// so agents reliably see dev workflow + memory pointers on wake.
|
|
33
|
+
const WAKE_NOTES_DEFAULT_PATH = join(process.env.HOME || "", ".claude", "WAKE.md");
|
|
34
|
+
|
|
35
|
+
// ---------------------------------------------------------------------------
|
|
36
|
+
// Config
|
|
37
|
+
// ---------------------------------------------------------------------------
|
|
38
|
+
|
|
39
|
+
/**
 * Fully-resolved runtime configuration for a gro run.
 * Produced once by loadConfig() from CLI flags, environment variables,
 * and on-disk config files; treated as read-only thereafter.
 */
interface GroConfig {
  /** Backend family; "local" targets an OpenAI-compatible local server. */
  provider: "openai" | "anthropic" | "local";
  /** Model name sent to the provider (falls back to defaultModel()). */
  model: string;
  /** API base URL (falls back to defaultBaseUrl(); env-overridable). */
  baseUrl: string;
  /** Provider API key from the environment; "" in proxy/local modes. */
  apiKey: string;
  /** Final system prompt after wake notes + file/append resolution. */
  systemPrompt: string;
  /** Path to the wake-notes file prepended to the system prompt. */
  wakeNotes: string;
  /** False when --no-wake-notes was passed. */
  wakeNotesEnabled: boolean;
  /** Context window budget in tokens (--context-tokens, default 8192). */
  contextTokens: number;
  /** True for interactive REPL mode (-i, or TTY with no prompt given). */
  interactive: boolean;
  /** True for -p/--print: print response and exit (non-interactive). */
  print: boolean;
  /** Max agentic tool rounds per turn (--max-turns, default 10). */
  maxToolRounds: number;
  /** Persistent mode: nudge the model to keep using tools (--persistent). */
  persistent: boolean;
  /** Max consecutive idle nudges before giving up (default 10). */
  maxIdleNudges: number;
  /** Whether the built-in bash tool is enabled (--bash). */
  bash: boolean;
  /** Dedicated context-summarizer model, or null to reuse the main model. */
  summarizerModel: string | null;
  /** Encoding used for stdout output. */
  outputFormat: "text" | "json" | "stream-json";
  /** -c/--continue: resume the most recent session. */
  continueSession: boolean;
  /** -r/--resume: session id to resume, "latest", or null. */
  resumeSession: string | null;
  /** False when --no-session-persistence was passed (skip .gro/ saves). */
  sessionPersistence: boolean;
  /** Verbose/debug logging enabled. */
  verbose: boolean;
  /** MCP server definitions keyed by server name. */
  mcpServers: Record<string, McpServerConfig>;
}
|
|
62
|
+
|
|
63
|
+
function loadMcpServers(mcpConfigPaths: string[]): Record<string, McpServerConfig> {
|
|
64
|
+
// If explicit --mcp-config paths given, use those
|
|
65
|
+
if (mcpConfigPaths.length > 0) {
|
|
66
|
+
const merged: Record<string, McpServerConfig> = {};
|
|
67
|
+
for (const p of mcpConfigPaths) {
|
|
68
|
+
try {
|
|
69
|
+
let raw: string;
|
|
70
|
+
if (p.startsWith("{")) {
|
|
71
|
+
raw = p; // inline JSON
|
|
72
|
+
} else if (existsSync(p)) {
|
|
73
|
+
raw = readFileSync(p, "utf-8");
|
|
74
|
+
} else {
|
|
75
|
+
Logger.warn(`MCP config not found: ${p}`);
|
|
76
|
+
continue;
|
|
77
|
+
}
|
|
78
|
+
const parsed = JSON.parse(raw);
|
|
79
|
+
const servers = parsed.mcpServers || parsed;
|
|
80
|
+
if (typeof servers === "object") {
|
|
81
|
+
Object.assign(merged, servers);
|
|
82
|
+
}
|
|
83
|
+
} catch (e: unknown) {
|
|
84
|
+
const ge = groError("config_error", `Failed to parse MCP config ${p}: ${asError(e).message}`, { cause: e });
|
|
85
|
+
Logger.warn(ge.message, errorLogFields(ge));
|
|
86
|
+
}
|
|
87
|
+
}
|
|
88
|
+
return merged;
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
// Try Claude Code config locations
|
|
92
|
+
const candidates = [
|
|
93
|
+
join(process.cwd(), ".claude", "settings.json"),
|
|
94
|
+
join(process.env.HOME || "", ".claude", "settings.json"),
|
|
95
|
+
];
|
|
96
|
+
|
|
97
|
+
for (const path of candidates) {
|
|
98
|
+
if (existsSync(path)) {
|
|
99
|
+
try {
|
|
100
|
+
const raw = readFileSync(path, "utf-8");
|
|
101
|
+
const parsed = JSON.parse(raw);
|
|
102
|
+
if (parsed.mcpServers && typeof parsed.mcpServers === "object") {
|
|
103
|
+
Logger.debug(`Loaded MCP config from ${path}`);
|
|
104
|
+
return parsed.mcpServers;
|
|
105
|
+
}
|
|
106
|
+
} catch (e: unknown) {
|
|
107
|
+
const ge = groError("config_error", `Failed to parse ${path}: ${asError(e).message}`, { cause: e });
|
|
108
|
+
Logger.debug(ge.message, errorLogFields(ge));
|
|
109
|
+
}
|
|
110
|
+
}
|
|
111
|
+
}
|
|
112
|
+
return {};
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
// Flags that claude supports but we don't yet — accept gracefully
|
|
116
|
+
const UNSUPPORTED_VALUE_FLAGS = new Set([
|
|
117
|
+
"--effort", "--agent", "--agents", "--betas", "--fallback-model",
|
|
118
|
+
"--permission-prompt-tool", "--permission-mode", "--tools",
|
|
119
|
+
"--allowedTools", "--allowed-tools", "--disallowedTools", "--disallowed-tools",
|
|
120
|
+
"--add-dir", "--plugin-dir", "--settings", "--setting-sources",
|
|
121
|
+
"--json-schema", "--input-format", "--file",
|
|
122
|
+
"--resume-session-at", "--rewind-files", "--session-id",
|
|
123
|
+
"--debug-file", "--sdk-url",
|
|
124
|
+
]);
|
|
125
|
+
|
|
126
|
+
const UNSUPPORTED_BOOL_FLAGS = new Set([
|
|
127
|
+
"--include-partial-messages", "--replay-user-messages",
|
|
128
|
+
"--dangerously-skip-permissions", "--allow-dangerously-skip-permissions",
|
|
129
|
+
"--fork-session", "--from-pr", "--strict-mcp-config", "--mcp-debug",
|
|
130
|
+
"--ide", "--chrome", "--no-chrome", "--disable-slash-commands",
|
|
131
|
+
"--init", "--init-only", "--maintenance", "--enable-auth-status",
|
|
132
|
+
]);
|
|
133
|
+
|
|
134
|
+
function loadConfig(): GroConfig {
|
|
135
|
+
const args = process.argv.slice(2);
|
|
136
|
+
const flags: Record<string, string> = {};
|
|
137
|
+
const positional: string[] = [];
|
|
138
|
+
const mcpConfigPaths: string[] = [];
|
|
139
|
+
|
|
140
|
+
// Wake file: global startup instructions injected into the system prompt.
|
|
141
|
+
// This is intentionally runner-level (not per-repo) so agents reliably see
|
|
142
|
+
// the same rules on boot.
|
|
143
|
+
const defaultWakeFile = join(homedir(), ".claude", "WAKE.md");
|
|
144
|
+
let wakeFile: string | null = defaultWakeFile;
|
|
145
|
+
let disableWake = false;
|
|
146
|
+
|
|
147
|
+
for (let i = 0; i < args.length; i++) {
|
|
148
|
+
const arg = args[i];
|
|
149
|
+
|
|
150
|
+
// --- gro native flags ---
|
|
151
|
+
if (arg === "--provider" || arg === "-P") { flags.provider = args[++i]; }
|
|
152
|
+
else if (arg === "--model" || arg === "-m") { flags.model = args[++i]; }
|
|
153
|
+
else if (arg === "--base-url") { flags.baseUrl = args[++i]; }
|
|
154
|
+
else if (arg === "--system-prompt") { flags.systemPrompt = args[++i]; }
|
|
155
|
+
else if (arg === "--system-prompt-file") { flags.systemPromptFile = args[++i]; }
|
|
156
|
+
else if (arg === "--append-system-prompt") { flags.appendSystemPrompt = args[++i]; }
|
|
157
|
+
else if (arg === "--append-system-prompt-file") { flags.appendSystemPromptFile = args[++i]; }
|
|
158
|
+
else if (arg === "--wake-notes") { flags.wakeNotes = args[++i]; }
|
|
159
|
+
else if (arg === "--no-wake-notes") { flags.noWakeNotes = "true"; }
|
|
160
|
+
else if (arg === "--context-tokens") { flags.contextTokens = args[++i]; }
|
|
161
|
+
else if (arg === "--max-tool-rounds" || arg === "--max-turns") { flags.maxToolRounds = args[++i]; }
|
|
162
|
+
else if (arg === "--bash") { flags.bash = "true"; }
|
|
163
|
+
else if (arg === "--persistent" || arg === "--keep-alive") { flags.persistent = "true"; }
|
|
164
|
+
else if (arg === "--max-idle-nudges") { flags.maxIdleNudges = args[++i]; }
|
|
165
|
+
else if (arg === "--max-thinking-tokens") { flags.maxThinkingTokens = args[++i]; } // accepted, not used yet
|
|
166
|
+
else if (arg === "--max-budget-usd") { flags.maxBudgetUsd = args[++i]; } // accepted, not used yet
|
|
167
|
+
else if (arg === "--summarizer-model") { flags.summarizerModel = args[++i]; }
|
|
168
|
+
else if (arg === "--output-format") { flags.outputFormat = args[++i]; }
|
|
169
|
+
else if (arg === "--mcp-config") { mcpConfigPaths.push(args[++i]); }
|
|
170
|
+
else if (arg === "-i" || arg === "--interactive") { flags.interactive = "true"; }
|
|
171
|
+
else if (arg === "-p" || arg === "--print") { flags.print = "true"; }
|
|
172
|
+
else if (arg === "-c" || arg === "--continue") { flags.continue = "true"; }
|
|
173
|
+
else if (arg === "-r" || arg === "--resume") {
|
|
174
|
+
// --resume can have optional value
|
|
175
|
+
if (i + 1 < args.length && !args[i + 1].startsWith("-")) {
|
|
176
|
+
flags.resume = args[++i];
|
|
177
|
+
} else {
|
|
178
|
+
flags.resume = "latest";
|
|
179
|
+
}
|
|
180
|
+
}
|
|
181
|
+
else if (arg === "--no-mcp") { flags.noMcp = "true"; }
|
|
182
|
+
else if (arg === "--no-session-persistence") { flags.noSessionPersistence = "true"; }
|
|
183
|
+
else if (arg === "--verbose") { flags.verbose = "true"; }
|
|
184
|
+
else if (arg === "-d" || arg === "--debug" || arg === "-d2e" || arg === "--debug-to-stderr") {
|
|
185
|
+
flags.verbose = "true";
|
|
186
|
+
// --debug may have optional filter value
|
|
187
|
+
if (arg === "-d" || arg === "--debug") {
|
|
188
|
+
if (i + 1 < args.length && !args[i + 1].startsWith("-")) { i++; } // consume filter
|
|
189
|
+
}
|
|
190
|
+
}
|
|
191
|
+
else if (arg === "-V" || arg === "--version") { console.log(`gro ${VERSION}`); process.exit(0); }
|
|
192
|
+
else if (arg === "-h" || arg === "--help") { usage(); process.exit(0); }
|
|
193
|
+
// --- graceful degradation for unsupported claude flags ---
|
|
194
|
+
else if (UNSUPPORTED_VALUE_FLAGS.has(arg)) {
|
|
195
|
+
Logger.warn(`${arg} not yet supported, ignoring`);
|
|
196
|
+
if (i + 1 < args.length && !args[i + 1].startsWith("-")) i++; // skip value
|
|
197
|
+
}
|
|
198
|
+
else if (UNSUPPORTED_BOOL_FLAGS.has(arg)) {
|
|
199
|
+
Logger.warn(`${arg} not yet supported, ignoring`);
|
|
200
|
+
}
|
|
201
|
+
else if (!arg.startsWith("-")) { positional.push(arg); }
|
|
202
|
+
else { Logger.warn(`Unknown flag: ${arg}`); }
|
|
203
|
+
}
|
|
204
|
+
|
|
205
|
+
const provider = inferProvider(flags.provider, flags.model);
|
|
206
|
+
const apiKey = resolveApiKey(provider);
|
|
207
|
+
const noMcp = flags.noMcp === "true";
|
|
208
|
+
const mcpServers = noMcp ? {} : loadMcpServers(mcpConfigPaths);
|
|
209
|
+
|
|
210
|
+
// Resolve system prompt
|
|
211
|
+
let systemPrompt = flags.systemPrompt || "";
|
|
212
|
+
|
|
213
|
+
// Inject wake notes by default (runner-global), unless explicitly disabled.
|
|
214
|
+
// This ensures the model always sees workflow + memory pointers on wake.
|
|
215
|
+
const wakeNotesPath = flags.wakeNotes || WAKE_NOTES_DEFAULT_PATH;
|
|
216
|
+
const wakeNotesEnabled = flags.noWakeNotes !== "true";
|
|
217
|
+
if (wakeNotesEnabled && wakeNotesPath && existsSync(wakeNotesPath)) {
|
|
218
|
+
try {
|
|
219
|
+
const wake = readFileSync(wakeNotesPath, "utf-8").trim();
|
|
220
|
+
if (wake) systemPrompt = systemPrompt ? `${wake}
|
|
221
|
+
|
|
222
|
+
${systemPrompt}` : wake;
|
|
223
|
+
} catch (e) {
|
|
224
|
+
// Non-fatal: if wake notes can't be read, proceed without them.
|
|
225
|
+
Logger.warn(`Failed to read wake notes at ${wakeNotesPath}: ${asError(e).message}`);
|
|
226
|
+
}
|
|
227
|
+
}
|
|
228
|
+
if (flags.systemPromptFile) {
|
|
229
|
+
try {
|
|
230
|
+
systemPrompt = readFileSync(flags.systemPromptFile, "utf-8").trim();
|
|
231
|
+
} catch (e: unknown) {
|
|
232
|
+
const ge = groError("config_error", `Failed to read system prompt file: ${asError(e).message}`, { cause: e });
|
|
233
|
+
Logger.error(ge.message, errorLogFields(ge));
|
|
234
|
+
process.exit(1);
|
|
235
|
+
}
|
|
236
|
+
}
|
|
237
|
+
if (flags.appendSystemPrompt) {
|
|
238
|
+
systemPrompt = systemPrompt ? `${systemPrompt}\n\n${flags.appendSystemPrompt}` : flags.appendSystemPrompt;
|
|
239
|
+
}
|
|
240
|
+
if (flags.appendSystemPromptFile) {
|
|
241
|
+
try {
|
|
242
|
+
const extra = readFileSync(flags.appendSystemPromptFile, "utf-8").trim();
|
|
243
|
+
systemPrompt = systemPrompt ? `${systemPrompt}\n\n${extra}` : extra;
|
|
244
|
+
} catch (e: unknown) {
|
|
245
|
+
const ge = groError("config_error", `Failed to read append system prompt file: ${asError(e).message}`, { cause: e });
|
|
246
|
+
Logger.error(ge.message, errorLogFields(ge));
|
|
247
|
+
process.exit(1);
|
|
248
|
+
}
|
|
249
|
+
}
|
|
250
|
+
|
|
251
|
+
// Default wake injection: prepend runner-global WAKE.md unless explicitly disabled.
|
|
252
|
+
// Soft dependency: if missing, warn and continue.
|
|
253
|
+
if (!disableWake && wakeFile) {
|
|
254
|
+
try {
|
|
255
|
+
const wake = readFileSync(wakeFile, "utf-8").trim();
|
|
256
|
+
if (wake) systemPrompt = systemPrompt ? `${wake}\n\n${systemPrompt}` : wake;
|
|
257
|
+
} catch (e: unknown) {
|
|
258
|
+
Logger.warn(`Wake file not found/readable (${wakeFile}); continuing without it`);
|
|
259
|
+
}
|
|
260
|
+
}
|
|
261
|
+
|
|
262
|
+
|
|
263
|
+
|
|
264
|
+
// Mode resolution: -p forces non-interactive, -i forces interactive
|
|
265
|
+
// Default: interactive if TTY and no prompt given
|
|
266
|
+
const printMode = flags.print === "true";
|
|
267
|
+
const interactiveMode = printMode ? false
|
|
268
|
+
: flags.interactive === "true" ? true
|
|
269
|
+
: (positional.length === 0 && process.stdin.isTTY === true);
|
|
270
|
+
|
|
271
|
+
return {
|
|
272
|
+
provider,
|
|
273
|
+
model: flags.model || defaultModel(provider),
|
|
274
|
+
baseUrl: flags.baseUrl || defaultBaseUrl(provider),
|
|
275
|
+
apiKey,
|
|
276
|
+
systemPrompt,
|
|
277
|
+
wakeNotes: flags.wakeNotes || WAKE_NOTES_DEFAULT_PATH,
|
|
278
|
+
wakeNotesEnabled: flags.noWakeNotes !== "true",
|
|
279
|
+
contextTokens: parseInt(flags.contextTokens || "8192"),
|
|
280
|
+
interactive: interactiveMode,
|
|
281
|
+
print: printMode,
|
|
282
|
+
maxToolRounds: parseInt(flags.maxToolRounds || "10"),
|
|
283
|
+
persistent: flags.persistent === "true",
|
|
284
|
+
maxIdleNudges: parseInt(flags.maxIdleNudges || "10"),
|
|
285
|
+
bash: flags.bash === "true",
|
|
286
|
+
summarizerModel: flags.summarizerModel || null,
|
|
287
|
+
outputFormat: (flags.outputFormat as GroConfig["outputFormat"]) || "text",
|
|
288
|
+
continueSession: flags.continue === "true",
|
|
289
|
+
resumeSession: flags.resume || null,
|
|
290
|
+
sessionPersistence: flags.noSessionPersistence !== "true",
|
|
291
|
+
verbose: flags.verbose === "true",
|
|
292
|
+
mcpServers,
|
|
293
|
+
};
|
|
294
|
+
}
|
|
295
|
+
|
|
296
|
+
function inferProvider(explicit?: string, model?: string): "openai" | "anthropic" | "local" {
|
|
297
|
+
if (explicit) {
|
|
298
|
+
if (explicit === "openai" || explicit === "anthropic" || explicit === "local") return explicit;
|
|
299
|
+
Logger.warn(`Unknown provider "${explicit}", defaulting to anthropic`);
|
|
300
|
+
return "anthropic";
|
|
301
|
+
}
|
|
302
|
+
if (model) {
|
|
303
|
+
if (/^(gpt-|o1-|o3-|o4-|chatgpt-)/.test(model)) return "openai";
|
|
304
|
+
if (/^(claude-|sonnet|haiku|opus)/.test(model)) return "anthropic";
|
|
305
|
+
if (/^(gemma|llama|mistral|phi|qwen|deepseek)/.test(model)) return "local";
|
|
306
|
+
}
|
|
307
|
+
return "anthropic";
|
|
308
|
+
}
|
|
309
|
+
|
|
310
|
+
function defaultModel(provider: string): string {
|
|
311
|
+
switch (provider) {
|
|
312
|
+
case "openai": return "gpt-4o";
|
|
313
|
+
case "anthropic": return "claude-sonnet-4-20250514";
|
|
314
|
+
case "local": return "llama3";
|
|
315
|
+
default: return "claude-sonnet-4-20250514";
|
|
316
|
+
}
|
|
317
|
+
}
|
|
318
|
+
|
|
319
|
+
function defaultBaseUrl(provider: string): string {
|
|
320
|
+
switch (provider) {
|
|
321
|
+
case "openai": return process.env.OPENAI_BASE_URL || "https://api.openai.com";
|
|
322
|
+
case "local": return "http://127.0.0.1:11434";
|
|
323
|
+
default: return process.env.ANTHROPIC_BASE_URL || "https://api.anthropic.com";
|
|
324
|
+
}
|
|
325
|
+
}
|
|
326
|
+
|
|
327
|
+
function resolveApiKey(provider: string): string {
|
|
328
|
+
switch (provider) {
|
|
329
|
+
case "openai": return process.env.OPENAI_API_KEY || "";
|
|
330
|
+
case "anthropic": return process.env.ANTHROPIC_API_KEY || "";
|
|
331
|
+
default: return "";
|
|
332
|
+
}
|
|
333
|
+
}
|
|
334
|
+
|
|
335
|
+
/**
 * Print the CLI help text to stdout.
 * Called for -h/--help and when singleShot() finds no prompt; the caller
 * is responsible for exiting afterwards.
 */
function usage() {
  console.log(`gro ${VERSION} — provider-agnostic LLM runtime

usage:
  gro [options] "prompt"
  echo "prompt" | gro [options]
  gro -i                            # interactive mode

options:
  -P, --provider                openai | anthropic | local (default: anthropic)
  -m, --model                   model name (auto-infers provider)
  --base-url                    API base URL
  --system-prompt               system prompt text
  --system-prompt-file          read system prompt from file
  --append-system-prompt        append to system prompt
  --append-system-prompt-file   append system prompt from file
  --wake-notes                  path to wake notes file (default: ~/.claude/WAKE.md)
  --no-wake-notes               disable auto-prepending wake notes
  --context-tokens              context window budget (default: 8192)
  --max-turns                   max agentic rounds per turn (default: 10)
  --max-tool-rounds             alias for --max-turns
  --bash                        enable built-in bash tool for shell command execution
  --persistent                  nudge model to keep using tools instead of exiting
  --max-idle-nudges             max consecutive nudges before giving up (default: 10)
  --summarizer-model            model for context summarization (default: same as --model)
  --output-format               text | json | stream-json (default: text)
  --mcp-config                  load MCP servers from JSON file or string
  --no-mcp                      disable MCP server connections
  --no-session-persistence      don't save sessions to .gro/
  -p, --print                   print response and exit (non-interactive)
  -c, --continue                continue most recent session
  -r, --resume [id]             resume a session by ID
  -i, --interactive             interactive conversation mode
  --verbose                     verbose output
  -V, --version                 show version
  -h, --help                    show this help

session state is stored in .gro/context/<session-id>/`);
}
|
|
374
|
+
|
|
375
|
+
// ---------------------------------------------------------------------------
|
|
376
|
+
// Driver factory
|
|
377
|
+
// ---------------------------------------------------------------------------
|
|
378
|
+
|
|
379
|
+
function createDriverForModel(
|
|
380
|
+
provider: "openai" | "anthropic" | "local",
|
|
381
|
+
model: string,
|
|
382
|
+
apiKey: string,
|
|
383
|
+
baseUrl: string,
|
|
384
|
+
): ChatDriver {
|
|
385
|
+
switch (provider) {
|
|
386
|
+
case "anthropic":
|
|
387
|
+
if (!apiKey && baseUrl === "https://api.anthropic.com") {
|
|
388
|
+
Logger.error("gro: ANTHROPIC_API_KEY not set (set ANTHROPIC_BASE_URL for proxy mode)");
|
|
389
|
+
process.exit(1);
|
|
390
|
+
}
|
|
391
|
+
return makeAnthropicDriver({ apiKey: apiKey || "proxy-managed", model, baseUrl });
|
|
392
|
+
|
|
393
|
+
case "openai":
|
|
394
|
+
if (!apiKey && baseUrl === "https://api.openai.com") {
|
|
395
|
+
Logger.error("gro: OPENAI_API_KEY not set (set OPENAI_BASE_URL for proxy mode)");
|
|
396
|
+
process.exit(1);
|
|
397
|
+
}
|
|
398
|
+
return makeStreamingOpenAiDriver({ baseUrl, model, apiKey: apiKey || undefined });
|
|
399
|
+
|
|
400
|
+
case "local":
|
|
401
|
+
return makeStreamingOpenAiDriver({ baseUrl, model });
|
|
402
|
+
|
|
403
|
+
default:
|
|
404
|
+
Logger.error(`gro: unknown provider "${provider}"`);
|
|
405
|
+
process.exit(1);
|
|
406
|
+
}
|
|
407
|
+
throw new Error("unreachable");
|
|
408
|
+
}
|
|
409
|
+
|
|
410
|
+
function createDriver(cfg: GroConfig): ChatDriver {
|
|
411
|
+
return createDriverForModel(cfg.provider, cfg.model, cfg.apiKey, cfg.baseUrl);
|
|
412
|
+
}
|
|
413
|
+
|
|
414
|
+
// ---------------------------------------------------------------------------
|
|
415
|
+
// Memory factory
|
|
416
|
+
// ---------------------------------------------------------------------------
|
|
417
|
+
|
|
418
|
+
function createMemory(cfg: GroConfig, driver: ChatDriver): AgentMemory {
|
|
419
|
+
if (cfg.interactive) {
|
|
420
|
+
let summarizerDriver: ChatDriver | undefined;
|
|
421
|
+
let summarizerModel: string | undefined;
|
|
422
|
+
|
|
423
|
+
if (cfg.summarizerModel) {
|
|
424
|
+
summarizerModel = cfg.summarizerModel;
|
|
425
|
+
const summarizerProvider = inferProvider(undefined, summarizerModel);
|
|
426
|
+
summarizerDriver = createDriverForModel(
|
|
427
|
+
summarizerProvider,
|
|
428
|
+
summarizerModel,
|
|
429
|
+
resolveApiKey(summarizerProvider),
|
|
430
|
+
defaultBaseUrl(summarizerProvider),
|
|
431
|
+
);
|
|
432
|
+
Logger.info(`Summarizer: ${summarizerProvider}/${summarizerModel}`);
|
|
433
|
+
}
|
|
434
|
+
|
|
435
|
+
return new AdvancedMemory({
|
|
436
|
+
driver,
|
|
437
|
+
model: cfg.model,
|
|
438
|
+
summarizerDriver,
|
|
439
|
+
summarizerModel,
|
|
440
|
+
systemPrompt: cfg.systemPrompt || undefined,
|
|
441
|
+
contextTokens: cfg.contextTokens,
|
|
442
|
+
});
|
|
443
|
+
}
|
|
444
|
+
const mem = new SimpleMemory(cfg.systemPrompt || undefined);
|
|
445
|
+
mem.setMeta(cfg.provider, cfg.model);
|
|
446
|
+
return mem;
|
|
447
|
+
}
|
|
448
|
+
|
|
449
|
+
// ---------------------------------------------------------------------------
|
|
450
|
+
// Output formatting
|
|
451
|
+
// ---------------------------------------------------------------------------
|
|
452
|
+
|
|
453
|
+
function formatOutput(text: string, format: GroConfig["outputFormat"]): string {
|
|
454
|
+
switch (format) {
|
|
455
|
+
case "json":
|
|
456
|
+
return JSON.stringify({ result: text, type: "result" });
|
|
457
|
+
case "stream-json":
|
|
458
|
+
// For stream-json, individual tokens are already streamed.
|
|
459
|
+
// This is the final message.
|
|
460
|
+
return JSON.stringify({ result: text, type: "result" });
|
|
461
|
+
case "text":
|
|
462
|
+
default:
|
|
463
|
+
return text;
|
|
464
|
+
}
|
|
465
|
+
}
|
|
466
|
+
|
|
467
|
+
// ---------------------------------------------------------------------------
|
|
468
|
+
// Tool execution loop
|
|
469
|
+
// ---------------------------------------------------------------------------
|
|
470
|
+
|
|
471
|
+
/**
 * Execute a single turn: call the model, handle tool calls, repeat until
 * the model produces a final text response or we hit maxRounds.
 *
 * @param driver - chat driver used for every model call this turn
 * @param memory - conversation state; assistant/tool messages are appended here
 * @param mcp    - MCP manager providing tool definitions and tool dispatch
 * @param cfg    - resolved runtime config (model, output format, round limits)
 * @returns the concatenated assistant text produced across all rounds
 */
async function executeTurn(
  driver: ChatDriver,
  memory: AgentMemory,
  mcp: McpManager,
  cfg: GroConfig,
): Promise<string> {
  // Tool set = MCP tools + built-in apply_patch, plus bash when enabled.
  const tools = mcp.getToolDefinitions();
  tools.push(agentpatchToolDefinition());
  if (cfg.bash) tools.push(bashToolDefinition());
  let finalText = "";

  // stream-json wraps each token in a one-line JSON event; other formats
  // stream raw text directly to stdout.
  const onToken = cfg.outputFormat === "stream-json"
    ? (t: string) => process.stdout.write(JSON.stringify({ type: "token", token: t }) + "\n")
    : (t: string) => process.stdout.write(t);

  // brokeCleanly distinguishes "model finished" from "round budget exhausted"
  // (the latter gets one extra no-tools closing call below).
  let brokeCleanly = false;
  let idleNudges = 0;
  for (let round = 0; round < cfg.maxToolRounds; round++) {
    const output: ChatOutput = await driver.chat(memory.messages(), {
      model: cfg.model,
      tools: tools.length > 0 ? tools : undefined,
      onToken,
    });

    // Accumulate text
    if (output.text) finalText += output.text;

    // Store assistant message — must include tool_calls when present
    // so OpenAI sees the required assistant→tool message sequence.
    const assistantMsg: ChatMessage = { role: "assistant", from: "Assistant", content: output.text || "" };
    if (output.toolCalls.length > 0) {
      // ChatMessage doesn't declare tool_calls; attached dynamically for
      // OpenAI-style drivers. TODO: widen the ChatMessage type instead.
      (assistantMsg as any).tool_calls = output.toolCalls;
    }
    await memory.add(assistantMsg);

    // No tool calls — either we're done, or we need to nudge the model
    if (output.toolCalls.length === 0) {
      if (!cfg.persistent || tools.length === 0) {
        brokeCleanly = true;
        break;
      }

      // Persistent mode: nudge the model to resume tool use
      idleNudges++;
      if (idleNudges > cfg.maxIdleNudges) {
        Logger.debug(`Persistent mode: ${idleNudges} consecutive idle responses — giving up`);
        brokeCleanly = true;
        break;
      }

      Logger.debug(`Persistent mode: model stopped calling tools (nudge ${idleNudges}/${cfg.maxIdleNudges})`);
      // Nudge is injected as a user-role message so every provider sees it.
      await memory.add({
        role: "user",
        from: "System",
        content: "[SYSTEM] You stopped calling tools. You are a persistent agent — you MUST continue your tool loop. Call agentchat_listen now to resume listening for messages. Do not respond with text only.",
      });
      continue;
    }

    // Model used tools — reset idle nudge counter
    idleNudges = 0;

    // Process tool calls
    for (const tc of output.toolCalls) {
      const fnName = tc.function.name;
      let fnArgs: Record<string, any>;
      try {
        fnArgs = JSON.parse(tc.function.arguments);
      } catch {
        // Malformed arguments from the model degrade to an empty arg object
        // rather than aborting the turn.
        fnArgs = {};
      }

      Logger.debug(`Tool call: ${fnName}(${JSON.stringify(fnArgs)})`);

      let result: string;
      try {
        // Built-in tools are dispatched locally; everything else goes to MCP.
        if (fnName === "apply_patch") {
          result = executeAgentpatch(fnArgs);
        } else if (fnName === "bash" && cfg.bash) {
          result = executeBash(fnArgs);
        } else {
          result = await mcp.callTool(fnName, fnArgs);
        }
      } catch (e: unknown) {
        const ge = groError("tool_error", `Tool "${fnName}" failed: ${asError(e).message}`, {
          retryable: false,
          cause: e,
        });
        Logger.error("Tool execution error:", errorLogFields(ge));
        // Tool failures are surfaced to the model as the tool result so it
        // can react, instead of crashing the whole turn.
        result = `Error: ${ge.message}`;
      }

      // Feed tool result back into memory
      await memory.add({
        role: "tool",
        from: fnName,
        content: result,
        tool_call_id: tc.id,
        name: fnName,
      });
    }
  }

  // If we exhausted maxToolRounds (loop didn't break via no-tool-calls),
  // give the model one final turn with no tools so it can produce a closing response.
  if (!brokeCleanly && tools.length > 0) {
    Logger.debug("Max tool rounds reached — final turn with no tools");
    const finalOutput: ChatOutput = await driver.chat(memory.messages(), {
      model: cfg.model,
      onToken,
    });
    if (finalOutput.text) finalText += finalOutput.text;
    await memory.add({ role: "assistant", from: "Assistant", content: finalOutput.text || "" });
  }

  return finalText;
}
|
|
592
|
+
|
|
593
|
+
// ---------------------------------------------------------------------------
|
|
594
|
+
// Main modes
|
|
595
|
+
// ---------------------------------------------------------------------------
|
|
596
|
+
|
|
597
|
+
/**
 * Non-interactive mode: resolve one prompt (argv, falling back to piped
 * stdin), run one agentic turn, persist the session, and finish stdout
 * output. Exits with status 1 when no prompt can be found.
 *
 * @param positionalArgs - non-flag argv tokens, joined with spaces as the prompt
 */
async function singleShot(
  cfg: GroConfig,
  driver: ChatDriver,
  mcp: McpManager,
  sessionId: string,
  positionalArgs?: string[],
): Promise<void> {
  let prompt = (positionalArgs || []).join(" ").trim();

  // No argv prompt and stdin is piped: read the whole of stdin as the prompt.
  if (!prompt && !process.stdin.isTTY) {
    const chunks: Uint8Array[] = [];
    for await (const chunk of process.stdin) {
      chunks.push(chunk);
    }
    prompt = Buffer.concat(chunks).toString("utf-8").trim();
  }

  if (!prompt) {
    Logger.error("gro: no prompt provided");
    usage();
    process.exit(1);
  }

  const memory = createMemory(cfg, driver);

  // Resume existing session if requested
  if (cfg.continueSession || cfg.resumeSession) {
    await memory.load(sessionId);
  }

  await memory.add({ role: "user", from: "User", content: prompt });

  // text stays undefined on failure so the tail output block is skipped,
  // but the session is still saved below.
  let text: string | undefined;
  try {
    text = await executeTurn(driver, memory, mcp, cfg);
  } catch (e: unknown) {
    const ge = isGroError(e) ? e : groError("provider_error", asError(e).message, { cause: e });
    Logger.error(C.red(`error: ${ge.message}`), errorLogFields(ge));
  }

  // Save session (even on error — preserve conversation state)
  if (cfg.sessionPersistence) {
    try {
      await memory.save(sessionId);
    } catch (e: unknown) {
      Logger.error(C.red(`session save failed: ${asError(e).message}`));
    }
  }

  if (text) {
    if (cfg.outputFormat === "json") {
      // json mode: the envelope is the only stdout payload for the result.
      process.stdout.write(formatOutput(text, "json") + "\n");
    } else if (!text.endsWith("\n")) {
      // text/stream modes already streamed tokens; just ensure a trailing newline.
      process.stdout.write("\n");
    }
  }
}
|
|
654
|
+
|
|
655
|
+
async function interactive(
|
|
656
|
+
cfg: GroConfig,
|
|
657
|
+
driver: ChatDriver,
|
|
658
|
+
mcp: McpManager,
|
|
659
|
+
sessionId: string,
|
|
660
|
+
): Promise<void> {
|
|
661
|
+
const memory = createMemory(cfg, driver);
|
|
662
|
+
const readline = await import("readline");
|
|
663
|
+
|
|
664
|
+
// Resume existing session if requested
|
|
665
|
+
if (cfg.continueSession || cfg.resumeSession) {
|
|
666
|
+
await memory.load(sessionId);
|
|
667
|
+
const sess = loadSession(sessionId);
|
|
668
|
+
if (sess) {
|
|
669
|
+
const msgCount = sess.messages.filter((m: any) => m.role !== "system").length;
|
|
670
|
+
Logger.info(C.gray(`Resumed session ${sessionId} (${msgCount} messages)`));
|
|
671
|
+
}
|
|
672
|
+
}
|
|
673
|
+
|
|
674
|
+
const rl = readline.createInterface({
|
|
675
|
+
input: process.stdin,
|
|
676
|
+
output: process.stderr,
|
|
677
|
+
prompt: C.cyan("you > "),
|
|
678
|
+
});
|
|
679
|
+
|
|
680
|
+
const toolCount = mcp.getToolDefinitions().length;
|
|
681
|
+
Logger.info(C.gray(`gro interactive — ${cfg.provider}/${cfg.model} [${sessionId}]`));
|
|
682
|
+
if (cfg.summarizerModel) Logger.info(C.gray(`summarizer: ${cfg.summarizerModel}`));
|
|
683
|
+
if (toolCount > 0) Logger.info(C.gray(`${toolCount} MCP tool(s) available`));
|
|
684
|
+
Logger.info(C.gray("type 'exit' or Ctrl+D to quit\n"));
|
|
685
|
+
rl.prompt();
|
|
686
|
+
|
|
687
|
+
rl.on("line", async (line: string) => {
|
|
688
|
+
const input = line.trim();
|
|
689
|
+
if (!input) { rl.prompt(); return; }
|
|
690
|
+
if (input === "exit" || input === "quit") { rl.close(); return; }
|
|
691
|
+
|
|
692
|
+
try {
|
|
693
|
+
await memory.add({ role: "user", from: "User", content: input });
|
|
694
|
+
await executeTurn(driver, memory, mcp, cfg);
|
|
695
|
+
} catch (e: unknown) {
|
|
696
|
+
const ge = isGroError(e) ? e : groError("provider_error", asError(e).message, { cause: e });
|
|
697
|
+
Logger.error(C.red(`error: ${ge.message}`), errorLogFields(ge));
|
|
698
|
+
}
|
|
699
|
+
|
|
700
|
+
// Auto-save after each turn
|
|
701
|
+
if (cfg.sessionPersistence) {
|
|
702
|
+
try {
|
|
703
|
+
await memory.save(sessionId);
|
|
704
|
+
} catch (e: unknown) {
|
|
705
|
+
Logger.error(C.red(`session save failed: ${asError(e).message}`));
|
|
706
|
+
}
|
|
707
|
+
}
|
|
708
|
+
|
|
709
|
+
process.stdout.write("\n");
|
|
710
|
+
rl.prompt();
|
|
711
|
+
});
|
|
712
|
+
|
|
713
|
+
rl.on("error", (e: Error) => {
|
|
714
|
+
Logger.error(C.red(`readline error: ${e.message}`));
|
|
715
|
+
});
|
|
716
|
+
|
|
717
|
+
rl.on("close", async () => {
|
|
718
|
+
if (cfg.sessionPersistence) {
|
|
719
|
+
try {
|
|
720
|
+
await memory.save(sessionId);
|
|
721
|
+
} catch (e: unknown) {
|
|
722
|
+
Logger.error(C.red(`session save failed: ${asError(e).message}`));
|
|
723
|
+
}
|
|
724
|
+
}
|
|
725
|
+
await mcp.disconnectAll();
|
|
726
|
+
Logger.info(C.gray(`\ngoodbye. session: ${sessionId}`));
|
|
727
|
+
process.exit(0);
|
|
728
|
+
});
|
|
729
|
+
}
|
|
730
|
+
|
|
731
|
+
// ---------------------------------------------------------------------------
|
|
732
|
+
// Entry point
|
|
733
|
+
// ---------------------------------------------------------------------------
|
|
734
|
+
|
|
735
|
+
async function main() {
|
|
736
|
+
const cfg = loadConfig();
|
|
737
|
+
|
|
738
|
+
if (cfg.verbose) {
|
|
739
|
+
process.env.GRO_LOG_LEVEL = "debug";
|
|
740
|
+
}
|
|
741
|
+
|
|
742
|
+
// Resolve session ID
|
|
743
|
+
let sessionId: string;
|
|
744
|
+
if (cfg.continueSession) {
|
|
745
|
+
const latest = findLatestSession();
|
|
746
|
+
if (!latest) {
|
|
747
|
+
Logger.error("gro: no session to continue");
|
|
748
|
+
process.exit(1);
|
|
749
|
+
}
|
|
750
|
+
sessionId = latest;
|
|
751
|
+
Logger.debug(`Continuing session: ${sessionId}`);
|
|
752
|
+
} else if (cfg.resumeSession) {
|
|
753
|
+
if (cfg.resumeSession === "latest") {
|
|
754
|
+
const latest = findLatestSession();
|
|
755
|
+
if (!latest) {
|
|
756
|
+
Logger.error("gro: no session to resume");
|
|
757
|
+
process.exit(1);
|
|
758
|
+
}
|
|
759
|
+
sessionId = latest;
|
|
760
|
+
} else {
|
|
761
|
+
sessionId = cfg.resumeSession;
|
|
762
|
+
}
|
|
763
|
+
Logger.debug(`Resuming session: ${sessionId}`);
|
|
764
|
+
} else {
|
|
765
|
+
sessionId = newSessionId();
|
|
766
|
+
if (cfg.sessionPersistence) {
|
|
767
|
+
ensureGroDir();
|
|
768
|
+
}
|
|
769
|
+
}
|
|
770
|
+
|
|
771
|
+
const args = process.argv.slice(2);
|
|
772
|
+
const positional: string[] = [];
|
|
773
|
+
const flagsWithValues = [
|
|
774
|
+
"--provider", "-P", "--model", "-m", "--base-url",
|
|
775
|
+
"--system-prompt", "--system-prompt-file",
|
|
776
|
+
"--append-system-prompt", "--append-system-prompt-file",
|
|
777
|
+
"--context-tokens", "--max-tool-rounds", "--max-turns",
|
|
778
|
+
"--max-thinking-tokens", "--max-budget-usd",
|
|
779
|
+
"--summarizer-model", "--output-format", "--mcp-config",
|
|
780
|
+
"--resume", "-r",
|
|
781
|
+
];
|
|
782
|
+
for (let i = 0; i < args.length; i++) {
|
|
783
|
+
if (args[i].startsWith("-")) {
|
|
784
|
+
if (flagsWithValues.includes(args[i])) i++;
|
|
785
|
+
continue;
|
|
786
|
+
}
|
|
787
|
+
positional.push(args[i]);
|
|
788
|
+
}
|
|
789
|
+
|
|
790
|
+
const driver = createDriver(cfg);
|
|
791
|
+
|
|
792
|
+
// Connect to MCP servers
|
|
793
|
+
const mcp = new McpManager();
|
|
794
|
+
if (Object.keys(cfg.mcpServers).length > 0) {
|
|
795
|
+
await mcp.connectAll(cfg.mcpServers);
|
|
796
|
+
}
|
|
797
|
+
|
|
798
|
+
try {
|
|
799
|
+
if (cfg.interactive && positional.length === 0) {
|
|
800
|
+
await interactive(cfg, driver, mcp, sessionId);
|
|
801
|
+
} else {
|
|
802
|
+
await singleShot(cfg, driver, mcp, sessionId, positional);
|
|
803
|
+
await mcp.disconnectAll();
|
|
804
|
+
}
|
|
805
|
+
} catch (e: unknown) {
|
|
806
|
+
await mcp.disconnectAll();
|
|
807
|
+
throw e;
|
|
808
|
+
}
|
|
809
|
+
}
|
|
810
|
+
|
|
811
|
+
// Graceful shutdown on signals
|
|
812
|
+
for (const sig of ["SIGTERM", "SIGHUP"] as const) {
|
|
813
|
+
process.on(sig, () => {
|
|
814
|
+
Logger.info(C.gray(`\nreceived ${sig}, shutting down...`));
|
|
815
|
+
process.exit(0);
|
|
816
|
+
});
|
|
817
|
+
}
|
|
818
|
+
|
|
819
|
+
// Catch unhandled promise rejections (e.g. background summarization)
|
|
820
|
+
process.on("unhandledRejection", (reason: unknown) => {
|
|
821
|
+
Logger.error(C.red(`unhandled rejection: ${asError(reason).message}`));
|
|
822
|
+
});
|
|
823
|
+
|
|
824
|
+
main().catch((e: unknown) => {
|
|
825
|
+
Logger.error("gro:", asError(e).message);
|
|
826
|
+
process.exit(1);
|
|
827
|
+
});
|