@tjamescouch/gro 1.3.3 → 1.3.5
- package/LICENSE +21 -0
- package/package.json +1 -1
- package/src/drivers/anthropic.ts +82 -21
- package/src/drivers/index.ts +1 -1
- package/src/drivers/streaming-openai.ts +16 -3
- package/src/drivers/types.ts +6 -0
- package/src/main.ts +49 -10
- package/src/mcp/client.ts +28 -12
- package/src/tools/version.ts +98 -0
package/LICENSE
ADDED

@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2026 James Couch
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
package/package.json
CHANGED
package/src/drivers/anthropic.ts
CHANGED

@@ -7,7 +7,7 @@ import { rateLimiter } from "../utils/rate-limiter.js";
 import { timedFetch } from "../utils/timed-fetch.js";
 import { MAX_RETRIES, isRetryable, retryDelay, sleep } from "../utils/retry.js";
 import { groError, asError, isGroError, errorLogFields } from "../errors.js";
-import type { ChatDriver, ChatMessage, ChatOutput, ChatToolCall } from "./types.js";
+import type { ChatDriver, ChatMessage, ChatOutput, ChatToolCall, TokenUsage } from "./types.js";
 
 export interface AnthropicDriverConfig {
   apiKey: string;

@@ -107,6 +107,40 @@ function convertMessages(messages: ChatMessage[]): { system: string | undefined;
   return { system: systemPrompt, apiMessages };
 }
 
+/** Pattern matching transient network errors that should be retried */
+const TRANSIENT_ERROR_RE = /fetch timeout|fetch failed|ECONNREFUSED|ECONNRESET|ETIMEDOUT|ENETUNREACH|EAI_AGAIN|socket hang up/i;
+
+/** Parse response content blocks into text + tool calls + token usage */
+function parseResponseContent(data: any, onToken?: (t: string) => void): ChatOutput {
+  let text = "";
+  const toolCalls: ChatToolCall[] = [];
+
+  for (const block of data.content ?? []) {
+    if (block.type === "text") {
+      text += block.text;
+      if (onToken) {
+        try { onToken(block.text); } catch {}
+      }
+    } else if (block.type === "tool_use") {
+      toolCalls.push({
+        id: block.id,
+        type: "custom",
+        function: {
+          name: block.name,
+          arguments: JSON.stringify(block.input),
+        },
+      });
+    }
+  }
+
+  const usage: TokenUsage | undefined = data.usage ? {
+    inputTokens: data.usage.input_tokens ?? 0,
+    outputTokens: data.usage.output_tokens ?? 0,
+  } : undefined;
+
+  return { text, toolCalls, usage };
+}
+
 export function makeAnthropicDriver(cfg: AnthropicDriverConfig): ChatDriver {
   const base = (cfg.baseUrl ?? "https://api.anthropic.com").replace(/\/+$/, "");
   const endpoint = `${base}/v1/messages`;

@@ -179,32 +213,59 @@ export function makeAnthropicDriver(cfg: AnthropicDriverConfig): ChatDriver {
       }
 
       const data = await res.json() as any;
+      return parseResponseContent(data, onToken);
+    } catch (e: unknown) {
+      if (isGroError(e)) throw e; // already wrapped above
+
+      // Classify the error: fetch timeouts and network errors are transient
+      const errMsg = asError(e).message;
+      const isTransient = TRANSIENT_ERROR_RE.test(errMsg);
+
+      if (isTransient) {
+        // Retry transient network errors (e.g. auth proxy down during container restart)
+        for (let attempt = 0; attempt < MAX_RETRIES; attempt++) {
+          const delay = retryDelay(attempt);
+          Logger.warn(`Transient error: ${errMsg.substring(0, 120)}, retry ${attempt + 1}/${MAX_RETRIES} in ${Math.round(delay)}ms`);
+          await sleep(delay);
+
+          try {
+            const retryRes = await timedFetch(endpoint, {
+              method: "POST",
+              headers,
+              body: JSON.stringify(body),
+              where: "driver:anthropic",
+              timeoutMs,
+            });
 
-      let text = "";
-      const toolCalls: ChatToolCall[] = [];
+            if (!retryRes.ok) {
+              const text = await retryRes.text().catch(() => "");
+              if (isRetryable(retryRes.status) && attempt < MAX_RETRIES - 1) continue;
+              throw groError("provider_error", `Anthropic API failed (${retryRes.status}): ${text}`, {
+                provider: "anthropic", model: resolvedModel, retryable: false, cause: new Error(text),
+              });
+            }
 
-      for (const block of data.content ?? []) {
-        if (block.type === "text") {
-          text += block.text;
-          if (onToken) {
-            try { onToken(block.text); } catch {}
+            // Success on retry — parse and return
+            const data = await retryRes.json() as any;
+            Logger.info(`Recovered from transient error after ${attempt + 1} retries`);
+            return parseResponseContent(data, onToken);
+          } catch (retryErr: unknown) {
+            if (isGroError(retryErr)) throw retryErr;
+            if (attempt === MAX_RETRIES - 1) {
+              // Exhausted retries — throw with context
+              const ge = groError("provider_error", `Anthropic driver error (after ${MAX_RETRIES} retries): ${errMsg}`, {
+                provider: "anthropic", model: resolvedModel, request_id: requestId,
+                retryable: false, cause: e,
+              });
+              Logger.error("Anthropic driver error (retries exhausted):", errorLogFields(ge));
+              throw ge;
+            }
           }
-        } else if (block.type === "tool_use") {
-          toolCalls.push({
-            id: block.id,
-            type: "custom",
-            function: {
-              name: block.name,
-              arguments: JSON.stringify(block.input),
-            },
-          });
         }
       }
 
-      return { text, toolCalls };
-    } catch (e: unknown) {
-      if (isGroError(e)) throw e; // already wrapped above
-      const ge = groError("provider_error", `Anthropic driver error: ${asError(e).message}`, {
+      // Non-transient error — throw immediately
+      const ge = groError("provider_error", `Anthropic driver error: ${errMsg}`, {
         provider: "anthropic",
         model: resolvedModel,
         request_id: requestId,
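The retry loop above leans on four helpers from "../utils/retry.js" that the diff imports but never shows. A minimal sketch of the shapes implied by the call sites; the bodies below are assumptions for illustration, not the package's actual implementation:

// Assumed shapes for "../utils/retry.js". Only the names and call sites
// (MAX_RETRIES, isRetryable(status), retryDelay(attempt), sleep(ms)) are
// confirmed by the diff above; the values and formulas here are guesses.
export const MAX_RETRIES = 3;

// Retry on rate limits and server-side failures.
export function isRetryable(status: number): boolean {
  return status === 429 || status >= 500;
}

// Exponential backoff with jitter: ~1s, ~2s, ~4s, ... (the Math.round()
// in the log line above suggests the delay is not a whole number).
export function retryDelay(attempt: number): number {
  const base = 1000 * 2 ** attempt;
  return base + Math.random() * base * 0.5;
}

export function sleep(ms: number): Promise<void> {
  return new Promise((resolve) => setTimeout(resolve, ms));
}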
package/src/drivers/index.ts
CHANGED

@@ -1,4 +1,4 @@
-export type { ChatDriver, ChatMessage, ChatOutput, ChatToolCall } from "./types.js";
+export type { ChatDriver, ChatMessage, ChatOutput, ChatToolCall, TokenUsage } from "./types.js";
 export { makeStreamingOpenAiDriver } from "./streaming-openai.js";
 export type { OpenAiDriverConfig } from "./streaming-openai.js";
 export { makeAnthropicDriver } from "./anthropic.js";
package/src/drivers/streaming-openai.ts
CHANGED

@@ -7,7 +7,7 @@ import { asError } from "../errors.js";
 import { rateLimiter } from "../utils/rate-limiter.js";
 import { timedFetch } from "../utils/timed-fetch.js";
 import { MAX_RETRIES, isRetryable, retryDelay, sleep } from "../utils/retry.js";
-import type { ChatDriver, ChatMessage, ChatOutput, ChatToolCall } from "./types.js";
+import type { ChatDriver, ChatMessage, ChatOutput, ChatToolCall, TokenUsage } from "./types.js";
 
 export interface OpenAiDriverConfig {
   baseUrl: string;

@@ -108,7 +108,11 @@ export function makeStreamingOpenAiDriver(cfg: OpenAiDriverConfig): ChatDriver {
     const content = typeof msg?.content === "string" ? msg.content : "";
     const toolCalls: ChatToolCall[] = Array.isArray(msg?.tool_calls) ? msg.tool_calls : [];
     if (content && onToken) onToken(content);
-    return { text: content, reasoning: msg?.reasoning || undefined, toolCalls };
+    const usage: TokenUsage | undefined = data?.usage ? {
+      inputTokens: data.usage.prompt_tokens ?? 0,
+      outputTokens: data.usage.completion_tokens ?? 0,
+    } : undefined;
+    return { text: content, reasoning: msg?.reasoning || undefined, toolCalls, usage };
   }
 
   // SSE streaming

@@ -117,6 +121,7 @@ export function makeStreamingOpenAiDriver(cfg: OpenAiDriverConfig): ChatDriver {
   let buf = "";
   let fullText = "";
   let fullReasoning = "";
+  let streamUsage: TokenUsage | undefined;
   const toolByIndex = new Map<number, ChatToolCall>();
 
   const pumpEvent = async (rawEvent: string) => {

@@ -133,6 +138,14 @@ export function makeStreamingOpenAiDriver(cfg: OpenAiDriverConfig): ChatDriver {
     let payload: any;
     try { payload = JSON.parse(joined); } catch { return; }
 
+    // Capture usage from final streaming chunk (if stream_options.include_usage was set)
+    if (payload?.usage) {
+      streamUsage = {
+        inputTokens: payload.usage.prompt_tokens ?? 0,
+        outputTokens: payload.usage.completion_tokens ?? 0,
+      };
+    }
+
     const delta = payload?.choices?.[0]?.delta;
     if (!delta) return;
 

@@ -230,7 +243,7 @@ export function makeStreamingOpenAiDriver(cfg: OpenAiDriverConfig): ChatDriver {
       .sort((a, b) => a[0] - b[0])
       .map(([, v]) => v);
 
-    return { text: fullText, reasoning: fullReasoning || undefined, toolCalls };
+    return { text: fullText, reasoning: fullReasoning || undefined, toolCalls, usage: streamUsage };
   } catch (e: unknown) {
     const wrapped = asError(e);
     if (wrapped.name === "AbortError") Logger.debug("timeout(stream)", { ms: defaultTimeout });
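The capture above only fires if the final SSE chunk actually carries a usage object, which the OpenAI streaming API only sends when the request opts in via stream_options. A sketch of the relevant request fields (whether and where gro sets this is not visible in this diff):

// Request-body fields needed for the final chunk to include usage.
// "example-model" is a placeholder, not a value taken from gro's config.
const body = {
  model: "example-model",
  messages,
  stream: true,
  stream_options: { include_usage: true }, // final chunk gains a usage object
};

// The terminal chunk then looks roughly like:
// { "choices": [], "usage": { "prompt_tokens": 123, "completion_tokens": 45, "total_tokens": 168 } }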
package/src/drivers/types.ts
CHANGED

@@ -16,10 +16,16 @@ export interface ChatToolCall {
   raw?: string;
 }
 
+export interface TokenUsage {
+  inputTokens: number;
+  outputTokens: number;
+}
+
 export interface ChatOutput {
   text: string;
   toolCalls: ChatToolCall[];
   reasoning?: string;
+  usage?: TokenUsage;
 }
 
 export interface ChatDriver {
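Because usage is optional on ChatOutput, driver-agnostic callers can total spend with a small fold. A sketch (the accumulate helper is illustrative, not part of the package):

import type { ChatOutput, TokenUsage } from "./types.js";

// Fold one output's usage into a running total; drivers that report
// no usage simply contribute zero.
function accumulate(total: TokenUsage, output: ChatOutput): TokenUsage {
  return {
    inputTokens: total.inputTokens + (output.usage?.inputTokens ?? 0),
    outputTokens: total.outputTokens + (output.usage?.outputTokens ?? 0),
  };
}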
package/src/main.ts
CHANGED

@@ -21,12 +21,13 @@ import { McpManager } from "./mcp/index.js";
 import { newSessionId, findLatestSession, loadSession, ensureGroDir } from "./session.js";
 import { groError, asError, isGroError, errorLogFields } from "./errors.js";
 import type { McpServerConfig } from "./mcp/index.js";
-import type { ChatDriver, ChatMessage, ChatOutput } from "./drivers/types.js";
+import type { ChatDriver, ChatMessage, ChatOutput, TokenUsage } from "./drivers/types.js";
 import type { AgentMemory } from "./memory/agent-memory.js";
 import { bashToolDefinition, executeBash } from "./tools/bash.js";
 import { agentpatchToolDefinition, executeAgentpatch } from "./tools/agentpatch.js";
+import { groVersionToolDefinition, executeGroVersion, getGroVersion } from "./tools/version.js";
 
-const VERSION =
+const VERSION = getGroVersion();
 
 // ---------------------------------------------------------------------------
 // Graceful shutdown state — module-level so signal handlers can save sessions.

@@ -55,6 +56,7 @@ interface GroConfig {
   wakeNotes: string;
   wakeNotesEnabled: boolean;
   contextTokens: number;
+  maxTokens: number;
   interactive: boolean;
   print: boolean;
   maxToolRounds: number;

@@ -168,6 +170,7 @@ function loadConfig(): GroConfig {
     else if (arg === "--wake-notes") { flags.wakeNotes = args[++i]; }
     else if (arg === "--no-wake-notes") { flags.noWakeNotes = "true"; }
     else if (arg === "--context-tokens") { flags.contextTokens = args[++i]; }
+    else if (arg === "--max-tokens") { flags.maxTokens = args[++i]; }
     else if (arg === "--max-tool-rounds" || arg === "--max-turns") { flags.maxToolRounds = args[++i]; }
     else if (arg === "--bash") { flags.bash = "true"; }
     else if (arg === "--persistent" || arg === "--keep-alive") { flags.persistent = "true"; }

@@ -287,6 +290,7 @@ ${systemPrompt}` : wake;
     wakeNotes: flags.wakeNotes || WAKE_NOTES_DEFAULT_PATH,
     wakeNotesEnabled: flags.noWakeNotes !== "true",
     contextTokens: parseInt(flags.contextTokens || "8192"),
+    maxTokens: parseInt(flags.maxTokens || "16384"),
     interactive: interactiveMode,
     print: printMode,
     maxToolRounds: parseInt(flags.maxToolRounds || "10"),

@@ -361,6 +365,7 @@ options:
   --wake-notes          path to wake notes file (default: ~/.claude/WAKE.md)
   --no-wake-notes       disable auto-prepending wake notes
   --context-tokens      context window budget (default: 8192)
+  --max-tokens          max response tokens per turn (default: 16384)
   --max-turns           max agentic rounds per turn (default: 10)
   --max-tool-rounds     alias for --max-turns
   --bash                enable built-in bash tool for shell command execution

@@ -391,6 +396,7 @@ function createDriverForModel(
   model: string,
   apiKey: string,
   baseUrl: string,
+  maxTokens?: number,
 ): ChatDriver {
   switch (provider) {
     case "anthropic":

@@ -398,7 +404,7 @@
         Logger.error("gro: ANTHROPIC_API_KEY not set (set ANTHROPIC_BASE_URL for proxy mode)");
         process.exit(1);
       }
-      return makeAnthropicDriver({ apiKey: apiKey || "proxy-managed", model, baseUrl });
+      return makeAnthropicDriver({ apiKey: apiKey || "proxy-managed", model, baseUrl, maxTokens });
 
     case "openai":
       if (!apiKey && baseUrl === "https://api.openai.com") {

@@ -418,7 +424,7 @@
 }
 
 function createDriver(cfg: GroConfig): ChatDriver {
-  return createDriverForModel(cfg.provider, cfg.model, cfg.apiKey, cfg.baseUrl);
+  return createDriverForModel(cfg.provider, cfg.model, cfg.apiKey, cfg.baseUrl, cfg.maxTokens);
 }
 
 // ---------------------------------------------------------------------------

@@ -492,7 +498,10 @@ async function executeTurn(
   const tools = mcp.getToolDefinitions();
   tools.push(agentpatchToolDefinition());
   if (cfg.bash) tools.push(bashToolDefinition());
+  tools.push(groVersionToolDefinition());
   let finalText = "";
+  let turnTokensIn = 0;
+  let turnTokensOut = 0;
 
   const onToken = cfg.outputFormat === "stream-json"
     ? (t: string) => process.stdout.write(JSON.stringify({ type: "token", token: t }) + "\n")

@@ -507,6 +516,14 @@
       onToken,
     });
 
+    // Track token usage for niki budget enforcement
+    if (output.usage) {
+      turnTokensIn += output.usage.inputTokens;
+      turnTokensOut += output.usage.outputTokens;
+      // Log cumulative usage to stderr — niki parses these patterns for budget enforcement
+      process.stderr.write(`"input_tokens": ${turnTokensIn}, "output_tokens": ${turnTokensOut}\n`);
+    }
+
     // Accumulate text
     if (output.text) finalText += output.text;
 

@@ -551,7 +568,8 @@ async function executeTurn(
     let fnArgs: Record<string, any>;
     try {
       fnArgs = JSON.parse(tc.function.arguments);
-    } catch {
+    } catch (e: unknown) {
+      Logger.debug(`Failed to parse args for ${fnName}: ${asError(e).message}, using empty args`);
       fnArgs = {};
     }
 

@@ -563,16 +581,20 @@ async function executeTurn(
       result = executeAgentpatch(fnArgs);
     } else if (fnName === "bash" && cfg.bash) {
       result = executeBash(fnArgs);
+    } else if (fnName === "gro_version") {
+      result = executeGroVersion({ provider: cfg.provider, model: cfg.model, persistent: cfg.persistent });
     } else {
       result = await mcp.callTool(fnName, fnArgs);
     }
   } catch (e: unknown) {
-    const ge = groError("tool_error", `Tool "${fnName}" failed: ${asError(e).message}`, {
+    const raw = asError(e);
+    const ge = groError("tool_error", `Tool "${fnName}" failed: ${raw.message}`, {
       retryable: false,
       cause: e,
     });
     Logger.error("Tool execution error:", errorLogFields(ge));
-    result = `Error: ${ge.message}`;
+    if (raw.stack) Logger.error(raw.stack);
+    result = `Error: ${ge.message}${raw.stack ? '\nStack: ' + raw.stack : ''}`;
   }
 
   // Feed tool result back into memory

@@ -604,6 +626,11 @@ async function executeTurn(
       model: cfg.model,
       onToken,
     });
+    if (finalOutput.usage) {
+      turnTokensIn += finalOutput.usage.inputTokens;
+      turnTokensOut += finalOutput.usage.outputTokens;
+      process.stderr.write(`"input_tokens": ${turnTokensIn}, "output_tokens": ${turnTokensOut}\n`);
+    }
     if (finalOutput.text) finalText += finalOutput.text;
     await memory.add({ role: "assistant", from: "Assistant", content: finalOutput.text || "" });
   }

@@ -653,11 +680,13 @@ async function singleShot(
   await memory.add({ role: "user", from: "User", content: prompt });
 
   let text: string | undefined;
+  let fatalError = false;
   try {
     text = await executeTurn(driver, memory, mcp, cfg, sessionId);
   } catch (e: unknown) {
     const ge = isGroError(e) ? e : groError("provider_error", asError(e).message, { cause: e });
     Logger.error(C.red(`error: ${ge.message}`), errorLogFields(ge));
+    fatalError = true;
   }
 
   // Save session (even on error — preserve conversation state)

@@ -669,6 +698,12 @@
     }
   }
 
+  // Exit with non-zero code on fatal API errors so the supervisor
+  // can distinguish "finished cleanly" from "crashed on API call"
+  if (fatalError) {
+    process.exit(1);
+  }
+
   if (text) {
     if (cfg.outputFormat === "json") {
       process.stdout.write(formatOutput(text, "json") + "\n");

@@ -805,7 +840,7 @@
     "--provider", "-P", "--model", "-m", "--base-url",
     "--system-prompt", "--system-prompt-file",
     "--append-system-prompt", "--append-system-prompt-file",
-    "--context-tokens", "--max-tool-rounds", "--max-turns",
+    "--context-tokens", "--max-tokens", "--max-tool-rounds", "--max-turns",
     "--max-thinking-tokens", "--max-budget-usd",
     "--summarizer-model", "--output-format", "--mcp-config",
     "--resume", "-r",

@@ -857,10 +892,14 @@ for (const sig of ["SIGTERM", "SIGHUP"] as const) {
 
 // Catch unhandled promise rejections (e.g. background summarization)
 process.on("unhandledRejection", (reason: unknown) => {
-
+  const err = asError(reason);
+  Logger.error(C.red(`unhandled rejection: ${err.message}`));
+  if (err.stack) Logger.error(C.red(err.stack));
 });
 
 main().catch((e: unknown) => {
-
+  const err = asError(e);
+  Logger.error("gro:", err.message);
+  if (err.stack) Logger.error(err.stack);
   process.exit(1);
 });
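The stderr lines written by executeTurn are bare key/value fragments, not complete JSON documents, so a supervisor has to pattern-match them. A sketch of how a watcher such as niki might extract the cumulative counts (the parser is an assumption; only the emitted format is confirmed by the diff):

// Matches the exact format executeTurn writes to stderr:
//   "input_tokens": <n>, "output_tokens": <n>
const USAGE_RE = /"input_tokens": (\d+), "output_tokens": (\d+)/;

function parseUsageLine(line: string): { inputTokens: number; outputTokens: number } | null {
  const m = USAGE_RE.exec(line);
  if (!m) return null;
  // Counts are cumulative within a turn, so the latest match wins.
  return { inputTokens: Number(m[1]), outputTokens: Number(m[2]) };
}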
package/src/mcp/client.ts
CHANGED

@@ -109,20 +109,32 @@ export class McpManager {
     for (const server of this.servers.values()) {
       const tool = server.tools.find(t => t.name === name);
       if (tool) {
-        const result = await server.client.callTool({ name, arguments: args });
-        // Extract text content from result
-        if (Array.isArray(result.content)) {
-          return result.content
-            .map((c: any) => {
-              if (c.type === "text") return c.text;
-              return JSON.stringify(c);
-            })
-            .join("\n");
+        try {
+          const result = await server.client.callTool({ name, arguments: args }, undefined, { timeout: 5 * 60 * 1000 });
+          // Extract text content from result
+          if (Array.isArray(result.content)) {
+            return result.content
+              .map((c: any) => {
+                if (c.type === "text") return c.text;
+                return JSON.stringify(c);
+              })
+              .join("\n");
+          }
+          return JSON.stringify(result);
+        } catch (e: unknown) {
+          const err = asError(e);
+          const ge = groError("mcp_error", `MCP tool "${name}" (server: ${server.name}) failed: ${err.message}`, {
+            retryable: true,
+            cause: e,
+          });
+          Logger.error(`MCP tool call failed [${server.name}/${name}]:`, errorLogFields(ge));
+          throw ge;
         }
-        return JSON.stringify(result);
       }
     }
-
+    const ge = groError("mcp_error", `No MCP server provides tool "${name}"`, { retryable: false });
+    Logger.error(ge.message, errorLogFields(ge));
+    throw ge;
   }
 
   /**

@@ -140,7 +152,11 @@ export class McpManager {
    */
   async disconnectAll(): Promise<void> {
     for (const server of this.servers.values()) {
-      try {
+      try {
+        await server.client.close();
+      } catch (e: unknown) {
+        Logger.debug(`MCP server "${server.name}" close error: ${asError(e).message}`);
+      }
     }
     this.servers.clear();
   }
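Tool failures now surface as wrapped gro errors rather than raw SDK exceptions. A sketch of upstream handling, assuming the object groError returns exposes the retryable flag it was constructed with (that accessor is not shown in this diff):

try {
  const text = await mcp.callTool("example_tool", {}); // hypothetical tool name
  console.log(text);
} catch (e) {
  if (isGroError(e) && (e as any).retryable) {
    // mcp_error with retryable: true, so a later attempt may succeed
  } else {
    throw e; // e.g. "No MCP server provides tool" is marked non-retryable
  }
}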
package/src/tools/version.ts
ADDED

@@ -0,0 +1,98 @@
+/**
+ * Built-in version/identity tool for gro.
+ *
+ * Lets agents (and humans) introspect the gro runtime — version, provider,
+ * model, uptime, process info. This is the canonical way to confirm an
+ * agent is running on gro.
+ */
+import { readFileSync, existsSync } from "node:fs";
+import { join, dirname } from "node:path";
+import { fileURLToPath } from "node:url";
+
+const startTime = Date.now();
+
+interface GroRuntimeInfo {
+  runtime: "gro";
+  version: string;
+  provider: string;
+  model: string;
+  pid: number;
+  uptime_seconds: number;
+  node_version: string;
+  platform: string;
+  persistent: boolean;
+}
+
+/** Read version from package.json — single source of truth. */
+function readVersion(): string {
+  // In ESM, __dirname isn't available — derive from import.meta.url
+  let selfDir: string;
+  try {
+    selfDir = dirname(fileURLToPath(import.meta.url));
+  } catch {
+    selfDir = process.cwd();
+  }
+
+  const candidates = [
+    join(selfDir, "..", "package.json"),       // from dist/tools/ or src/tools/
+    join(selfDir, "..", "..", "package.json"), // from deeper nesting
+    join(process.cwd(), "package.json"),
+  ];
+  for (const p of candidates) {
+    if (existsSync(p)) {
+      try {
+        const pkg = JSON.parse(readFileSync(p, "utf-8"));
+        if (pkg.name === "@tjamescouch/gro" && pkg.version) {
+          return pkg.version;
+        }
+      } catch {
+        // try next candidate
+      }
+    }
+  }
+  return "unknown";
+}
+
+// Cache version at module load
+const GRO_VERSION = readVersion();
+
+export function getGroVersion(): string {
+  return GRO_VERSION;
+}
+
+export function groVersionToolDefinition(): any {
+  return {
+    type: "function",
+    function: {
+      name: "gro_version",
+      description:
+        "Report gro runtime identity and version. Returns runtime name, version, provider, model, uptime, and process info. Use this to confirm an agent is running on gro.",
+      parameters: {
+        type: "object",
+        properties: {},
+      },
+    },
+  };
+}
+
+/**
+ * Execute the version tool. Requires runtime config to report provider/model.
+ */
+export function executeGroVersion(cfg: {
+  provider: string;
+  model: string;
+  persistent: boolean;
+}): string {
+  const info: GroRuntimeInfo = {
+    runtime: "gro",
+    version: GRO_VERSION,
+    provider: cfg.provider,
+    model: cfg.model,
+    pid: process.pid,
+    uptime_seconds: Math.floor((Date.now() - startTime) / 1000),
+    node_version: process.version,
+    platform: process.platform,
+    persistent: cfg.persistent,
+  };
+  return JSON.stringify(info, null, 2);
+}