@tjamescouch/gro 1.3.3 → 1.3.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/package.json +1 -1
- package/src/drivers/anthropic.ts +82 -21
- package/src/drivers/index.ts +1 -1
- package/src/drivers/streaming-openai.ts +16 -3
- package/src/drivers/types.ts +6 -0
- package/src/main.ts +44 -9
- package/src/mcp/client.ts +28 -12
package/LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2026 James Couch
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
package/package.json
CHANGED
package/src/drivers/anthropic.ts
CHANGED
@@ -7,7 +7,7 @@ import { rateLimiter } from "../utils/rate-limiter.js";
 import { timedFetch } from "../utils/timed-fetch.js";
 import { MAX_RETRIES, isRetryable, retryDelay, sleep } from "../utils/retry.js";
 import { groError, asError, isGroError, errorLogFields } from "../errors.js";
-import type { ChatDriver, ChatMessage, ChatOutput, ChatToolCall } from "./types.js";
+import type { ChatDriver, ChatMessage, ChatOutput, ChatToolCall, TokenUsage } from "./types.js";
 
 export interface AnthropicDriverConfig {
   apiKey: string;
@@ -107,6 +107,40 @@ function convertMessages(messages: ChatMessage[]): { system: string | undefined;
   return { system: systemPrompt, apiMessages };
 }
 
+/** Pattern matching transient network errors that should be retried */
+const TRANSIENT_ERROR_RE = /fetch timeout|fetch failed|ECONNREFUSED|ECONNRESET|ETIMEDOUT|ENETUNREACH|EAI_AGAIN|socket hang up/i;
+
+/** Parse response content blocks into text + tool calls + token usage */
+function parseResponseContent(data: any, onToken?: (t: string) => void): ChatOutput {
+  let text = "";
+  const toolCalls: ChatToolCall[] = [];
+
+  for (const block of data.content ?? []) {
+    if (block.type === "text") {
+      text += block.text;
+      if (onToken) {
+        try { onToken(block.text); } catch {}
+      }
+    } else if (block.type === "tool_use") {
+      toolCalls.push({
+        id: block.id,
+        type: "custom",
+        function: {
+          name: block.name,
+          arguments: JSON.stringify(block.input),
+        },
+      });
+    }
+  }
+
+  const usage: TokenUsage | undefined = data.usage ? {
+    inputTokens: data.usage.input_tokens ?? 0,
+    outputTokens: data.usage.output_tokens ?? 0,
+  } : undefined;
+
+  return { text, toolCalls, usage };
+}
+
 export function makeAnthropicDriver(cfg: AnthropicDriverConfig): ChatDriver {
   const base = (cfg.baseUrl ?? "https://api.anthropic.com").replace(/\/+$/, "");
   const endpoint = `${base}/v1/messages`;
@@ -179,32 +213,59 @@
      }
 
      const data = await res.json() as any;
+      return parseResponseContent(data, onToken);
+    } catch (e: unknown) {
+      if (isGroError(e)) throw e; // already wrapped above
+
+      // Classify the error: fetch timeouts and network errors are transient
+      const errMsg = asError(e).message;
+      const isTransient = TRANSIENT_ERROR_RE.test(errMsg);
+
+      if (isTransient) {
+        // Retry transient network errors (e.g. auth proxy down during container restart)
+        for (let attempt = 0; attempt < MAX_RETRIES; attempt++) {
+          const delay = retryDelay(attempt);
+          Logger.warn(`Transient error: ${errMsg.substring(0, 120)}, retry ${attempt + 1}/${MAX_RETRIES} in ${Math.round(delay)}ms`);
+          await sleep(delay);
+
+          try {
+            const retryRes = await timedFetch(endpoint, {
+              method: "POST",
+              headers,
+              body: JSON.stringify(body),
+              where: "driver:anthropic",
+              timeoutMs,
+            });
 
-      let text = "";
-      const toolCalls: ChatToolCall[] = [];
+            if (!retryRes.ok) {
+              const text = await retryRes.text().catch(() => "");
+              if (isRetryable(retryRes.status) && attempt < MAX_RETRIES - 1) continue;
+              throw groError("provider_error", `Anthropic API failed (${retryRes.status}): ${text}`, {
+                provider: "anthropic", model: resolvedModel, retryable: false, cause: new Error(text),
+              });
+            }
 
-      for (const block of data.content ?? []) {
-        if (block.type === "text") {
-          text += block.text;
-          if (onToken) {
-            try { onToken(block.text); } catch {}
+            // Success on retry — parse and return
+            const data = await retryRes.json() as any;
+            Logger.info(`Recovered from transient error after ${attempt + 1} retries`);
+            return parseResponseContent(data, onToken);
+          } catch (retryErr: unknown) {
+            if (isGroError(retryErr)) throw retryErr;
+            if (attempt === MAX_RETRIES - 1) {
+              // Exhausted retries — throw with context
+              const ge = groError("provider_error", `Anthropic driver error (after ${MAX_RETRIES} retries): ${errMsg}`, {
+                provider: "anthropic", model: resolvedModel, request_id: requestId,
+                retryable: false, cause: e,
+              });
+              Logger.error("Anthropic driver error (retries exhausted):", errorLogFields(ge));
+              throw ge;
+            }
          }
-        } else if (block.type === "tool_use") {
-          toolCalls.push({
-            id: block.id,
-            type: "custom",
-            function: {
-              name: block.name,
-              arguments: JSON.stringify(block.input),
-            },
-          });
        }
      }
 
-      return { text, toolCalls };
-    } catch (e: unknown) {
-      if (isGroError(e)) throw e; // already wrapped above
-      const ge = groError("provider_error", `Anthropic driver error: ${asError(e).message}`, {
+      // Non-transient error — throw immediately
+      const ge = groError("provider_error", `Anthropic driver error: ${errMsg}`, {
        provider: "anthropic",
        model: resolvedModel,
        request_id: requestId,
package/src/drivers/index.ts
CHANGED
@@ -1,4 +1,4 @@
-export type { ChatDriver, ChatMessage, ChatOutput, ChatToolCall } from "./types.js";
+export type { ChatDriver, ChatMessage, ChatOutput, ChatToolCall, TokenUsage } from "./types.js";
 export { makeStreamingOpenAiDriver } from "./streaming-openai.js";
 export type { OpenAiDriverConfig } from "./streaming-openai.js";
 export { makeAnthropicDriver } from "./anthropic.js";
package/src/drivers/streaming-openai.ts
CHANGED
@@ -7,7 +7,7 @@ import { asError } from "../errors.js";
 import { rateLimiter } from "../utils/rate-limiter.js";
 import { timedFetch } from "../utils/timed-fetch.js";
 import { MAX_RETRIES, isRetryable, retryDelay, sleep } from "../utils/retry.js";
-import type { ChatDriver, ChatMessage, ChatOutput, ChatToolCall } from "./types.js";
+import type { ChatDriver, ChatMessage, ChatOutput, ChatToolCall, TokenUsage } from "./types.js";
 
 export interface OpenAiDriverConfig {
   baseUrl: string;
@@ -108,7 +108,11 @@ export function makeStreamingOpenAiDriver(cfg: OpenAiDriverConfig): ChatDriver {
     const content = typeof msg?.content === "string" ? msg.content : "";
     const toolCalls: ChatToolCall[] = Array.isArray(msg?.tool_calls) ? msg.tool_calls : [];
     if (content && onToken) onToken(content);
-    return { text: content, reasoning: msg?.reasoning || undefined, toolCalls };
+    const usage: TokenUsage | undefined = data?.usage ? {
+      inputTokens: data.usage.prompt_tokens ?? 0,
+      outputTokens: data.usage.completion_tokens ?? 0,
+    } : undefined;
+    return { text: content, reasoning: msg?.reasoning || undefined, toolCalls, usage };
   }
 
   // SSE streaming
@@ -117,6 +121,7 @@ export function makeStreamingOpenAiDriver(cfg: OpenAiDriverConfig): ChatDriver {
   let buf = "";
   let fullText = "";
   let fullReasoning = "";
+  let streamUsage: TokenUsage | undefined;
   const toolByIndex = new Map<number, ChatToolCall>();
 
   const pumpEvent = async (rawEvent: string) => {
@@ -133,6 +138,14 @@ export function makeStreamingOpenAiDriver(cfg: OpenAiDriverConfig): ChatDriver {
     let payload: any;
     try { payload = JSON.parse(joined); } catch { return; }
 
+    // Capture usage from final streaming chunk (if stream_options.include_usage was set)
+    if (payload?.usage) {
+      streamUsage = {
+        inputTokens: payload.usage.prompt_tokens ?? 0,
+        outputTokens: payload.usage.completion_tokens ?? 0,
+      };
+    }
+
     const delta = payload?.choices?.[0]?.delta;
     if (!delta) return;
 
@@ -230,7 +243,7 @@ export function makeStreamingOpenAiDriver(cfg: OpenAiDriverConfig): ChatDriver {
      .sort((a, b) => a[0] - b[0])
      .map(([, v]) => v);
 
-    return { text: fullText, reasoning: fullReasoning || undefined, toolCalls };
+    return { text: fullText, reasoning: fullReasoning || undefined, toolCalls, usage: streamUsage };
   } catch (e: unknown) {
     const wrapped = asError(e);
     if (wrapped.name === "AbortError") Logger.debug("timeout(stream)", { ms: defaultTimeout });
package/src/drivers/types.ts
CHANGED
@@ -16,10 +16,16 @@ export interface ChatToolCall {
   raw?: string;
 }
 
+export interface TokenUsage {
+  inputTokens: number;
+  outputTokens: number;
+}
+
 export interface ChatOutput {
   text: string;
   toolCalls: ChatToolCall[];
   reasoning?: string;
+  usage?: TokenUsage;
 }
 
 export interface ChatDriver {
package/src/main.ts
CHANGED
@@ -21,7 +21,7 @@ import { McpManager } from "./mcp/index.js";
 import { newSessionId, findLatestSession, loadSession, ensureGroDir } from "./session.js";
 import { groError, asError, isGroError, errorLogFields } from "./errors.js";
 import type { McpServerConfig } from "./mcp/index.js";
-import type { ChatDriver, ChatMessage, ChatOutput } from "./drivers/types.js";
+import type { ChatDriver, ChatMessage, ChatOutput, TokenUsage } from "./drivers/types.js";
 import type { AgentMemory } from "./memory/agent-memory.js";
 import { bashToolDefinition, executeBash } from "./tools/bash.js";
 import { agentpatchToolDefinition, executeAgentpatch } from "./tools/agentpatch.js";
@@ -55,6 +55,7 @@ interface GroConfig {
   wakeNotes: string;
   wakeNotesEnabled: boolean;
   contextTokens: number;
+  maxTokens: number;
   interactive: boolean;
   print: boolean;
   maxToolRounds: number;
@@ -168,6 +169,7 @@ function loadConfig(): GroConfig {
     else if (arg === "--wake-notes") { flags.wakeNotes = args[++i]; }
     else if (arg === "--no-wake-notes") { flags.noWakeNotes = "true"; }
     else if (arg === "--context-tokens") { flags.contextTokens = args[++i]; }
+    else if (arg === "--max-tokens") { flags.maxTokens = args[++i]; }
     else if (arg === "--max-tool-rounds" || arg === "--max-turns") { flags.maxToolRounds = args[++i]; }
     else if (arg === "--bash") { flags.bash = "true"; }
     else if (arg === "--persistent" || arg === "--keep-alive") { flags.persistent = "true"; }
@@ -287,6 +289,7 @@ ${systemPrompt}` : wake;
     wakeNotes: flags.wakeNotes || WAKE_NOTES_DEFAULT_PATH,
     wakeNotesEnabled: flags.noWakeNotes !== "true",
     contextTokens: parseInt(flags.contextTokens || "8192"),
+    maxTokens: parseInt(flags.maxTokens || "16384"),
     interactive: interactiveMode,
     print: printMode,
     maxToolRounds: parseInt(flags.maxToolRounds || "10"),
@@ -361,6 +364,7 @@ options:
   --wake-notes          path to wake notes file (default: ~/.claude/WAKE.md)
   --no-wake-notes       disable auto-prepending wake notes
   --context-tokens      context window budget (default: 8192)
+  --max-tokens          max response tokens per turn (default: 16384)
   --max-turns           max agentic rounds per turn (default: 10)
   --max-tool-rounds     alias for --max-turns
   --bash                enable built-in bash tool for shell command execution
@@ -391,6 +395,7 @@ function createDriverForModel(
   model: string,
   apiKey: string,
   baseUrl: string,
+  maxTokens?: number,
 ): ChatDriver {
   switch (provider) {
     case "anthropic":
@@ -398,7 +403,7 @@ function createDriverForModel(
         Logger.error("gro: ANTHROPIC_API_KEY not set (set ANTHROPIC_BASE_URL for proxy mode)");
         process.exit(1);
       }
-      return makeAnthropicDriver({ apiKey: apiKey || "proxy-managed", model, baseUrl });
+      return makeAnthropicDriver({ apiKey: apiKey || "proxy-managed", model, baseUrl, maxTokens });
 
     case "openai":
       if (!apiKey && baseUrl === "https://api.openai.com") {
@@ -418,7 +423,7 @@ function createDriverForModel(
 }
 
 function createDriver(cfg: GroConfig): ChatDriver {
-  return createDriverForModel(cfg.provider, cfg.model, cfg.apiKey, cfg.baseUrl);
+  return createDriverForModel(cfg.provider, cfg.model, cfg.apiKey, cfg.baseUrl, cfg.maxTokens);
 }
 
 // ---------------------------------------------------------------------------
@@ -493,6 +498,8 @@ async function executeTurn(
   tools.push(agentpatchToolDefinition());
   if (cfg.bash) tools.push(bashToolDefinition());
   let finalText = "";
+  let turnTokensIn = 0;
+  let turnTokensOut = 0;
 
   const onToken = cfg.outputFormat === "stream-json"
     ? (t: string) => process.stdout.write(JSON.stringify({ type: "token", token: t }) + "\n")
@@ -507,6 +514,14 @@ async function executeTurn(
       onToken,
     });
 
+    // Track token usage for niki budget enforcement
+    if (output.usage) {
+      turnTokensIn += output.usage.inputTokens;
+      turnTokensOut += output.usage.outputTokens;
+      // Log cumulative usage to stderr — niki parses these patterns for budget enforcement
+      process.stderr.write(`"input_tokens": ${turnTokensIn}, "output_tokens": ${turnTokensOut}\n`);
+    }
+
     // Accumulate text
     if (output.text) finalText += output.text;
 
@@ -551,7 +566,8 @@ async function executeTurn(
       let fnArgs: Record<string, any>;
       try {
        fnArgs = JSON.parse(tc.function.arguments);
-      } catch {
+      } catch (e: unknown) {
+        Logger.debug(`Failed to parse args for ${fnName}: ${asError(e).message}, using empty args`);
        fnArgs = {};
      }
 
@@ -567,12 +583,14 @@ async function executeTurn(
          result = await mcp.callTool(fnName, fnArgs);
        }
      } catch (e: unknown) {
-        const ge = groError("tool_error", `Tool "${fnName}" failed: ${asError(e).message}`, {
+        const raw = asError(e);
+        const ge = groError("tool_error", `Tool "${fnName}" failed: ${raw.message}`, {
          retryable: false,
          cause: e,
        });
        Logger.error("Tool execution error:", errorLogFields(ge));
-        result = `Error: ${ge.message}`;
+        if (raw.stack) Logger.error(raw.stack);
+        result = `Error: ${ge.message}${raw.stack ? '\nStack: ' + raw.stack : ''}`;
      }
 
      // Feed tool result back into memory
@@ -604,6 +622,11 @@ async function executeTurn(
        model: cfg.model,
        onToken,
      });
+      if (finalOutput.usage) {
+        turnTokensIn += finalOutput.usage.inputTokens;
+        turnTokensOut += finalOutput.usage.outputTokens;
+        process.stderr.write(`"input_tokens": ${turnTokensIn}, "output_tokens": ${turnTokensOut}\n`);
+      }
      if (finalOutput.text) finalText += finalOutput.text;
      await memory.add({ role: "assistant", from: "Assistant", content: finalOutput.text || "" });
    }
@@ -653,11 +676,13 @@ async function singleShot(
   await memory.add({ role: "user", from: "User", content: prompt });
 
   let text: string | undefined;
+  let fatalError = false;
   try {
     text = await executeTurn(driver, memory, mcp, cfg, sessionId);
   } catch (e: unknown) {
     const ge = isGroError(e) ? e : groError("provider_error", asError(e).message, { cause: e });
     Logger.error(C.red(`error: ${ge.message}`), errorLogFields(ge));
+    fatalError = true;
   }
 
   // Save session (even on error — preserve conversation state)
@@ -669,6 +694,12 @@ async function singleShot(
     }
   }
 
+  // Exit with non-zero code on fatal API errors so the supervisor
+  // can distinguish "finished cleanly" from "crashed on API call"
+  if (fatalError) {
+    process.exit(1);
+  }
+
   if (text) {
     if (cfg.outputFormat === "json") {
       process.stdout.write(formatOutput(text, "json") + "\n");
@@ -805,7 +836,7 @@ async function main() {
     "--provider", "-P", "--model", "-m", "--base-url",
     "--system-prompt", "--system-prompt-file",
     "--append-system-prompt", "--append-system-prompt-file",
-    "--context-tokens", "--max-tool-rounds", "--max-turns",
+    "--context-tokens", "--max-tokens", "--max-tool-rounds", "--max-turns",
     "--max-thinking-tokens", "--max-budget-usd",
     "--summarizer-model", "--output-format", "--mcp-config",
     "--resume", "-r",
@@ -857,10 +888,14 @@ for (const sig of ["SIGTERM", "SIGHUP"] as const) {
 
 // Catch unhandled promise rejections (e.g. background summarization)
 process.on("unhandledRejection", (reason: unknown) => {
-  Logger.error(C.red(`unhandled rejection: ${asError(reason).message}`));
+  const err = asError(reason);
+  Logger.error(C.red(`unhandled rejection: ${err.message}`));
+  if (err.stack) Logger.error(C.red(err.stack));
 });
 
 main().catch((e: unknown) => {
-  Logger.error("gro:", asError(e).message);
+  const err = asError(e);
+  Logger.error("gro:", err.message);
+  if (err.stack) Logger.error(err.stack);
   process.exit(1);
 });
package/src/mcp/client.ts
CHANGED
@@ -109,20 +109,32 @@ export class McpManager {
     for (const server of this.servers.values()) {
       const tool = server.tools.find(t => t.name === name);
       if (tool) {
-        const result = await server.client.callTool({ name, arguments: args });
-        // Extract text content from result
-        if (Array.isArray(result.content)) {
-          return result.content
-            .map((c: any) => {
-              if (c.type === "text") return c.text;
-              return JSON.stringify(c);
-            })
-            .join("\n");
+        try {
+          const result = await server.client.callTool({ name, arguments: args }, undefined, { timeout: 5 * 60 * 1000 });
+          // Extract text content from result
+          if (Array.isArray(result.content)) {
+            return result.content
+              .map((c: any) => {
+                if (c.type === "text") return c.text;
+                return JSON.stringify(c);
+              })
+              .join("\n");
+          }
+          return JSON.stringify(result);
+        } catch (e: unknown) {
+          const err = asError(e);
+          const ge = groError("mcp_error", `MCP tool "${name}" (server: ${server.name}) failed: ${err.message}`, {
+            retryable: true,
+            cause: e,
+          });
+          Logger.error(`MCP tool call failed [${server.name}/${name}]:`, errorLogFields(ge));
+          throw ge;
         }
-        return JSON.stringify(result);
       }
     }
-    throw new Error(`No MCP server provides tool "${name}"`);
+    const ge = groError("mcp_error", `No MCP server provides tool "${name}"`, { retryable: false });
+    Logger.error(ge.message, errorLogFields(ge));
+    throw ge;
   }
 
   /**
@@ -140,7 +152,11 @@ export class McpManager {
    */
   async disconnectAll(): Promise<void> {
     for (const server of this.servers.values()) {
-      try { await server.client.close(); } catch {}
+      try {
+        await server.client.close();
+      } catch (e: unknown) {
+        Logger.debug(`MCP server "${server.name}" close error: ${asError(e).message}`);
+      }
     }
     this.servers.clear();
   }