@agi-cli/server 0.1.108 → 0.1.110
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@agi-cli/server",
-  "version": "0.1.108",
+  "version": "0.1.110",
   "description": "HTTP API server for AGI CLI",
   "type": "module",
   "main": "./src/index.ts",
@@ -29,8 +29,8 @@
     "typecheck": "tsc --noEmit"
   },
   "dependencies": {
-    "@agi-cli/sdk": "0.1.108",
-    "@agi-cli/database": "0.1.108",
+    "@agi-cli/sdk": "0.1.110",
+    "@agi-cli/database": "0.1.110",
     "drizzle-orm": "^0.44.5",
     "hono": "^4.9.9",
     "zod": "^4.1.8"
@@ -43,6 +43,7 @@ export function registerModelsRoutes(app: Hono) {
         label: m.label || m.id,
         toolCall: m.toolCall,
         reasoning: m.reasoning,
+        vision: m.modalities?.input?.includes('image') ?? false,
       })),
       default: getDefault(
         embeddedConfig?.model,
@@ -94,6 +95,7 @@ export function registerModelsRoutes(app: Hono) {
         label: m.label || m.id,
         toolCall: m.toolCall,
         reasoning: m.reasoning,
+        vision: m.modalities?.input?.includes('image') ?? false,
       })),
     };
   }
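Both models endpoints now report whether a model accepts image input. A minimal sketch of the derivation, assuming a catalog entry shape like the one below (the CatalogModel type name and the sample ids are illustrative, not taken from the package):

// Illustrative entry shape; only the fields the route reads are shown.
type CatalogModel = {
  id: string;
  label?: string;
  toolCall?: boolean;
  reasoning?: boolean;
  modalities?: { input?: string[]; output?: string[] };
};

function toModelInfo(m: CatalogModel) {
  return {
    id: m.id,
    label: m.label || m.id,
    toolCall: m.toolCall,
    reasoning: m.reasoning,
    // True only when the catalog lists 'image' among the input modalities;
    // entries without modalities metadata default to false.
    vision: m.modalities?.input?.includes('image') ?? false,
  };
}

toModelInfo({ id: 'vision-model', modalities: { input: ['text', 'image'] } }); // vision: true
toModelInfo({ id: 'text-model' }); // vision: false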
@@ -111,6 +111,7 @@ export function registerSessionMessagesRoutes(app: Hono) {
     const agent = body?.agent ?? sess.agent ?? cfg.defaults.agent;
     const content = body?.content ?? '';
     const userContext = body?.userContext;
+    const images = Array.isArray(body?.images) ? body.images : undefined;
 
     // DEBUG: Log extracted userContext
     logger.info('[API] Extracted userContext', {
@@ -120,6 +121,8 @@ export function registerSessionMessagesRoutes(app: Hono) {
       typeOf: typeof userContext,
     });
 
+    const reasoning = body?.reasoning === true;
+
     // Validate model capabilities if tools are allowed for this agent
     const wantsToolCalls = true; // agent toolset may be non-empty
     try {
@@ -152,6 +155,8 @@ export function registerSessionMessagesRoutes(app: Hono) {
        content,
        oneShot: Boolean(body?.oneShot),
        userContext,
+       reasoning,
+       images,
      });
      return c.json({ messageId: assistantMessageId }, 202);
    } catch (error) {
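Taken together, the route now accepts two new optional body fields: reasoning (only the literal boolean true enables it) and images (passed through only when Array.isArray holds). A hypothetical client call; the body fields come from the diff, while the URL shape and port are assumptions:

// Hypothetical request; the session id, host, and route path are placeholders.
const sessionId = 'SESSION_ID';
const base64Png = '<base64-encoded bytes>';

const res = await fetch(`http://localhost:3000/v1/sessions/${sessionId}/messages`, {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    content: 'What is in this screenshot?',
    reasoning: true, // anything other than the literal true leaves reasoning off
    images: [{ data: base64Png, mediaType: 'image/png' }],
  }),
});
// Unchanged from before: a 202 with the assistant message id.
const { messageId } = (await res.json()) as { messageId: string };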
@@ -41,12 +41,27 @@ export async function buildHistoryMessages(
     if (m.role === 'user') {
       const uparts: UIMessage['parts'] = [];
       for (const p of parts) {
-        if (p.type …
+        if (p.type === 'text') {
+          try {
+            const obj = JSON.parse(p.content ?? '{}');
+            const t = String(obj.text ?? '');
+            if (t) uparts.push({ type: 'text', text: t });
+          } catch {}
+        } else if (p.type === 'image') {
+          try {
+            const obj = JSON.parse(p.content ?? '{}') as {
+              data?: string;
+              mediaType?: string;
+            };
+            if (obj.data && obj.mediaType) {
+              uparts.push({
+                type: 'file',
+                mediaType: obj.mediaType,
+                url: `data:${obj.mediaType};base64,${obj.data}`,
+              } as never);
+            }
+          } catch {}
+        }
       }
       if (uparts.length) {
         ui.push({ id: m.id, role: 'user', parts: uparts });
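After this change, a stored user turn with an attachment is rebuilt into a two-part UIMessage: the text is re-extracted from its JSON envelope, and the image becomes a file part with the bytes inlined as a data URL (the as never cast silences the type checker for a part shape the local UIMessage union apparently does not declare). An illustrative rebuilt message, with the id and payload made up:

// Sketch of the UIMessage produced for a user turn with one image attached.
const rebuilt = {
  id: 'msg_123',
  role: 'user' as const,
  parts: [
    { type: 'text', text: 'What is in this screenshot?' },
    {
      type: 'file',
      mediaType: 'image/png',
      // Bytes are inlined, so replaying history needs no extra fetch.
      url: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUg...',
    },
  ],
};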
@@ -22,6 +22,8 @@ type DispatchOptions = {
   content: string;
   oneShot?: boolean;
   userContext?: string;
+  reasoning?: boolean;
+  images?: Array<{ data: string; mediaType: string }>;
 };
 
 export async function dispatchAssistantMessage(
@@ -37,6 +39,8 @@ export async function dispatchAssistantMessage(
     content,
     oneShot,
     userContext,
+    reasoning,
+    images,
   } = options;
 
   // DEBUG: Log userContext in dispatch
@@ -68,6 +72,23 @@ export async function dispatchAssistantMessage(
     provider,
     model,
   });
+
+  if (images && images.length > 0) {
+    for (let i = 0; i < images.length; i++) {
+      const img = images[i];
+      await db.insert(messageParts).values({
+        id: crypto.randomUUID(),
+        messageId: userMessageId,
+        index: i + 1,
+        type: 'image',
+        content: JSON.stringify({ data: img.data, mediaType: img.mediaType }),
+        agent,
+        provider,
+        model,
+      });
+    }
+  }
+
   publish({
     type: 'message.created',
     sessionId,
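Each attached image is persisted as its own messageParts row next to the user's text, which is what lets buildHistoryMessages (above) rehydrate it on later turns. An illustrative row for images[0]; the id and the agent/provider/model values are made up, and the index: i + 1 offset presumably reserves index 0 for the text part:

// Sketch of the row written for the first image.
const row = {
  id: '7f9c…', // crypto.randomUUID()
  messageId: 'user-message-id',
  index: 1, // i + 1
  type: 'image',
  content: JSON.stringify({ data: '<base64>', mediaType: 'image/png' }),
  agent: 'general',
  provider: 'anthropic',
  model: 'claude-sonnet-4',
};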
@@ -108,6 +129,7 @@ export async function dispatchAssistantMessage(
       projectRoot: cfg.projectRoot,
       oneShot: Boolean(oneShot),
       userContext,
+      reasoning,
     },
     runSessionLoop,
   );
package/src/runtime/runner.ts
CHANGED
@@ -298,13 +298,44 @@ async function runAssistant(opts: RunOpts) {
   let accumulated = '';
   let stepIndex = 0;
 
+  // Build provider options for reasoning/extended thinking
+  const providerOptions: Record<string, unknown> = {};
+  const THINKING_BUDGET = 16000;
+  // When reasoning is enabled for Anthropic, the API requires max_tokens to fit
+  // both thinking tokens AND response tokens. AI SDK adds budgetTokens to maxOutputTokens,
+  // so we need to reduce maxOutputTokens to leave room for thinking.
+  let effectiveMaxOutputTokens = maxOutputTokens;
+
+  if (opts.reasoning) {
+    if (opts.provider === 'anthropic') {
+      providerOptions.anthropic = {
+        thinking: { type: 'enabled', budgetTokens: THINKING_BUDGET },
+      };
+      // Reduce max output to leave room for thinking budget
+      if (maxOutputTokens && maxOutputTokens > THINKING_BUDGET) {
+        effectiveMaxOutputTokens = maxOutputTokens - THINKING_BUDGET;
+      }
+    } else if (opts.provider === 'openai') {
+      providerOptions.openai = {
+        reasoningSummary: 'auto',
+      };
+    } else if (opts.provider === 'google') {
+      providerOptions.google = {
+        thinkingConfig: { thinkingBudget: THINKING_BUDGET },
+      };
+    }
+  }
+
   try {
     const result = streamText({
       model,
       tools: toolset,
       ...(cachedSystem ? { system: cachedSystem } : {}),
       messages: optimizedMessages,
-      ...(…
+      ...(effectiveMaxOutputTokens
+        ? { maxOutputTokens: effectiveMaxOutputTokens }
+        : {}),
+      ...(Object.keys(providerOptions).length > 0 ? { providerOptions } : {}),
       abortSignal: opts.abortSignal,
       stopWhen: hasToolCall('finish'),
       onStepFinish,
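Only the Anthropic branch shrinks the output budget, matching the comment in the hunk: the AI SDK adds budgetTokens on top of maxOutputTokens, so without the subtraction the request would overshoot the configured ceiling. A worked example of the arithmetic under the diff's constant:

// Assume a configured ceiling of 32000 output tokens.
const THINKING_BUDGET = 16000;
const maxOutputTokens = 32000;

// The guard from the diff:
const effectiveMaxOutputTokens =
  maxOutputTokens > THINKING_BUDGET
    ? maxOutputTokens - THINKING_BUDGET // 32000 - 16000 = 16000
    : maxOutputTokens;

// What presumably reaches the Anthropic API:
//   max_tokens = effectiveMaxOutputTokens + budgetTokens
//              = 16000 + 16000 = 32000   (back at the configured ceiling)
// Ceilings at or below 16000 are left untouched by the guard.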