@agi-cli/server 0.1.169 → 0.1.171
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +3 -3
- package/src/routes/auth.ts +83 -2
- package/src/routes/git/commit.ts +35 -12
- package/src/runtime/agent/runner-setup.ts +29 -68
- package/src/runtime/errors/handling.ts +3 -0
- package/src/runtime/message/compaction-auto.ts +15 -27
- package/src/runtime/message/service.ts +16 -64
- package/src/runtime/provider/copilot.ts +12 -0
- package/src/runtime/provider/index.ts +5 -3
- package/src/runtime/provider/oauth-adapter.ts +237 -0
- package/src/runtime/provider/openai.ts +1 -11
- package/src/runtime/stream/error-handler.ts +10 -14
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@agi-cli/server",
|
|
3
|
-
"version": "0.1.
|
|
3
|
+
"version": "0.1.171",
|
|
4
4
|
"description": "HTTP API server for AGI CLI",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"main": "./src/index.ts",
|
|
@@ -29,8 +29,8 @@
|
|
|
29
29
|
"typecheck": "tsc --noEmit"
|
|
30
30
|
},
|
|
31
31
|
"dependencies": {
|
|
32
|
-
"@agi-cli/sdk": "0.1.
|
|
33
|
-
"@agi-cli/database": "0.1.
|
|
32
|
+
"@agi-cli/sdk": "0.1.171",
|
|
33
|
+
"@agi-cli/database": "0.1.171",
|
|
34
34
|
"drizzle-orm": "^0.44.5",
|
|
35
35
|
"hono": "^4.9.9",
|
|
36
36
|
"zod": "^4.1.8"
|
package/src/routes/auth.ts
CHANGED
|
@@ -16,6 +16,8 @@ import {
|
|
|
16
16
|
exchangeWeb,
|
|
17
17
|
authorizeOpenAI,
|
|
18
18
|
exchangeOpenAI,
|
|
19
|
+
authorizeCopilot,
|
|
20
|
+
pollForCopilotTokenOnce,
|
|
19
21
|
type ProviderId,
|
|
20
22
|
} from '@agi-cli/sdk';
|
|
21
23
|
import { logger } from '@agi-cli/sdk';
|
|
@@ -26,6 +28,11 @@ const oauthVerifiers = new Map<
|
|
|
26
28
|
{ verifier: string; provider: string; createdAt: number; callbackUrl: string }
|
|
27
29
|
>();
|
|
28
30
|
|
|
31
|
+
const copilotDeviceSessions = new Map<
|
|
32
|
+
string,
|
|
33
|
+
{ deviceCode: string; interval: number; provider: string; createdAt: number }
|
|
34
|
+
>();
|
|
35
|
+
|
|
29
36
|
setInterval(() => {
|
|
30
37
|
const now = Date.now();
|
|
31
38
|
for (const [key, value] of oauthVerifiers.entries()) {
|
|
@@ -33,6 +40,11 @@ setInterval(() => {
|
|
|
33
40
|
oauthVerifiers.delete(key);
|
|
34
41
|
}
|
|
35
42
|
}
|
|
43
|
+
for (const [key, value] of copilotDeviceSessions.entries()) {
|
|
44
|
+
if (now - value.createdAt > 10 * 60 * 1000) {
|
|
45
|
+
copilotDeviceSessions.delete(key);
|
|
46
|
+
}
|
|
47
|
+
}
|
|
36
48
|
}, 60 * 1000);
|
|
37
49
|
|
|
38
50
|
export function registerAuthRoutes(app: Hono) {
|
|
@@ -67,7 +79,8 @@ export function registerAuthRoutes(app: Hono) {
|
|
|
67
79
|
configured: !!providerAuth,
|
|
68
80
|
type: providerAuth?.type,
|
|
69
81
|
label: entry.label || id,
|
|
70
|
-
supportsOAuth:
|
|
82
|
+
supportsOAuth:
|
|
83
|
+
id === 'anthropic' || id === 'openai' || id === 'copilot',
|
|
71
84
|
modelCount: models.length,
|
|
72
85
|
costRange:
|
|
73
86
|
costs.length > 0
|
|
@@ -199,7 +212,12 @@ export function registerAuthRoutes(app: Hono) {
|
|
|
199
212
|
400,
|
|
200
213
|
);
|
|
201
214
|
} else {
|
|
202
|
-
return c.json(
|
|
215
|
+
return c.json(
|
|
216
|
+
{
|
|
217
|
+
error: `OAuth not supported for provider: ${provider}. Copilot uses device flow — use /v1/auth/copilot/device/start instead.`,
|
|
218
|
+
},
|
|
219
|
+
400,
|
|
220
|
+
);
|
|
203
221
|
}
|
|
204
222
|
|
|
205
223
|
const sessionId = crypto.randomUUID();
|
|
@@ -479,6 +497,69 @@ export function registerAuthRoutes(app: Hono) {
|
|
|
479
497
|
}
|
|
480
498
|
});
|
|
481
499
|
|
|
500
|
+
app.post('/v1/auth/copilot/device/start', async (c) => {
|
|
501
|
+
try {
|
|
502
|
+
const deviceData = await authorizeCopilot();
|
|
503
|
+
const sessionId = crypto.randomUUID();
|
|
504
|
+
copilotDeviceSessions.set(sessionId, {
|
|
505
|
+
deviceCode: deviceData.deviceCode,
|
|
506
|
+
interval: deviceData.interval,
|
|
507
|
+
provider: 'copilot',
|
|
508
|
+
createdAt: Date.now(),
|
|
509
|
+
});
|
|
510
|
+
return c.json({
|
|
511
|
+
sessionId,
|
|
512
|
+
userCode: deviceData.userCode,
|
|
513
|
+
verificationUri: deviceData.verificationUri,
|
|
514
|
+
});
|
|
515
|
+
} catch (error) {
|
|
516
|
+
const message =
|
|
517
|
+
error instanceof Error
|
|
518
|
+
? error.message
|
|
519
|
+
: 'Failed to start Copilot device flow';
|
|
520
|
+
logger.error('Copilot device flow start failed', error);
|
|
521
|
+
return c.json({ error: message }, 500);
|
|
522
|
+
}
|
|
523
|
+
});
|
|
524
|
+
|
|
525
|
+
app.post('/v1/auth/copilot/device/poll', async (c) => {
|
|
526
|
+
try {
|
|
527
|
+
const { sessionId } = await c.req.json<{ sessionId: string }>();
|
|
528
|
+
if (!sessionId || !copilotDeviceSessions.has(sessionId)) {
|
|
529
|
+
return c.json({ error: 'Session expired or invalid' }, 400);
|
|
530
|
+
}
|
|
531
|
+
const session = copilotDeviceSessions.get(sessionId)!;
|
|
532
|
+
const result = await pollForCopilotTokenOnce(session.deviceCode);
|
|
533
|
+
if (result.status === 'complete') {
|
|
534
|
+
copilotDeviceSessions.delete(sessionId);
|
|
535
|
+
await setAuth(
|
|
536
|
+
'copilot',
|
|
537
|
+
{
|
|
538
|
+
type: 'oauth',
|
|
539
|
+
refresh: result.accessToken,
|
|
540
|
+
access: result.accessToken,
|
|
541
|
+
expires: 0,
|
|
542
|
+
},
|
|
543
|
+
undefined,
|
|
544
|
+
'global',
|
|
545
|
+
);
|
|
546
|
+
return c.json({ status: 'complete' });
|
|
547
|
+
}
|
|
548
|
+
if (result.status === 'pending') {
|
|
549
|
+
return c.json({ status: 'pending' });
|
|
550
|
+
}
|
|
551
|
+
if (result.status === 'error') {
|
|
552
|
+
copilotDeviceSessions.delete(sessionId);
|
|
553
|
+
return c.json({ status: 'error', error: result.error });
|
|
554
|
+
}
|
|
555
|
+
return c.json({ status: 'pending' });
|
|
556
|
+
} catch (error) {
|
|
557
|
+
const message = error instanceof Error ? error.message : 'Poll failed';
|
|
558
|
+
logger.error('Copilot device poll failed', error);
|
|
559
|
+
return c.json({ error: message }, 500);
|
|
560
|
+
}
|
|
561
|
+
});
|
|
562
|
+
|
|
482
563
|
app.post('/v1/auth/onboarding/complete', async (c) => {
|
|
483
564
|
try {
|
|
484
565
|
await setOnboardingComplete();
|
package/src/routes/git/commit.ts
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
import type { Hono } from 'hono';
|
|
2
2
|
import { execFile } from 'node:child_process';
|
|
3
3
|
import { promisify } from 'node:util';
|
|
4
|
-
import { generateText } from 'ai';
|
|
4
|
+
import { generateText, streamText } from 'ai';
|
|
5
5
|
import { eq } from 'drizzle-orm';
|
|
6
6
|
import type { ProviderId } from '@agi-cli/sdk';
|
|
7
7
|
import { loadConfig, getAuth, getFastModelForAuth } from '@agi-cli/sdk';
|
|
@@ -10,7 +10,11 @@ import { sessions } from '@agi-cli/database/schema';
|
|
|
10
10
|
import { gitCommitSchema, gitGenerateCommitMessageSchema } from './schemas.ts';
|
|
11
11
|
import { validateAndGetGitRoot, parseGitStatus } from './utils.ts';
|
|
12
12
|
import { resolveModel } from '../../runtime/provider/index.ts';
|
|
13
|
-
import {
|
|
13
|
+
import { debugLog } from '../../runtime/debug/index.ts';
|
|
14
|
+
import {
|
|
15
|
+
detectOAuth,
|
|
16
|
+
adaptSimpleCall,
|
|
17
|
+
} from '../../runtime/provider/oauth-adapter.ts';
|
|
14
18
|
|
|
15
19
|
const execFileAsync = promisify(execFile);
|
|
16
20
|
|
|
@@ -112,10 +116,7 @@ export function registerCommitRoutes(app: Hono) {
|
|
|
112
116
|
}
|
|
113
117
|
|
|
114
118
|
const auth = await getAuth(provider, config.projectRoot);
|
|
115
|
-
const
|
|
116
|
-
const spoofPrompt = needsSpoof
|
|
117
|
-
? getProviderSpoofPrompt(provider)
|
|
118
|
-
: undefined;
|
|
119
|
+
const oauth = detectOAuth(provider, auth);
|
|
119
120
|
|
|
120
121
|
const modelId =
|
|
121
122
|
getFastModelForAuth(provider, auth?.type) ??
|
|
@@ -151,15 +152,37 @@ refactor(auth): return success status from login functions
|
|
|
151
152
|
|
|
152
153
|
Commit message:`;
|
|
153
154
|
|
|
154
|
-
const
|
|
155
|
-
|
|
156
|
-
|
|
155
|
+
const commitInstructions =
|
|
156
|
+
'You are a helpful assistant that generates accurate git commit messages based on the actual diff content.';
|
|
157
|
+
|
|
158
|
+
const adapted = adaptSimpleCall(oauth, {
|
|
159
|
+
instructions: commitInstructions,
|
|
160
|
+
userContent: userPrompt,
|
|
161
|
+
maxOutputTokens: 500,
|
|
162
|
+
});
|
|
163
|
+
|
|
164
|
+
if (adapted.forceStream) {
|
|
165
|
+
debugLog('[COMMIT] Using streamText for OpenAI OAuth');
|
|
166
|
+
const result = streamText({
|
|
167
|
+
model,
|
|
168
|
+
system: adapted.system,
|
|
169
|
+
messages: adapted.messages,
|
|
170
|
+
providerOptions: adapted.providerOptions,
|
|
171
|
+
});
|
|
172
|
+
let text = '';
|
|
173
|
+
for await (const chunk of result.textStream) {
|
|
174
|
+
text += chunk;
|
|
175
|
+
}
|
|
176
|
+
const message = text.trim();
|
|
177
|
+
debugLog(`[COMMIT] OAuth result: "${message.slice(0, 80)}..."`);
|
|
178
|
+
return c.json({ status: 'ok', data: { message } });
|
|
179
|
+
}
|
|
157
180
|
|
|
158
181
|
const { text } = await generateText({
|
|
159
182
|
model,
|
|
160
|
-
system:
|
|
161
|
-
|
|
162
|
-
maxOutputTokens:
|
|
183
|
+
system: adapted.system,
|
|
184
|
+
messages: adapted.messages,
|
|
185
|
+
maxOutputTokens: adapted.maxOutputTokens,
|
|
163
186
|
});
|
|
164
187
|
|
|
165
188
|
const message = text.trim();
|
|
@@ -4,10 +4,7 @@ import { sessions } from '@agi-cli/database/schema';
|
|
|
4
4
|
import { eq } from 'drizzle-orm';
|
|
5
5
|
import { resolveModel } from '../provider/index.ts';
|
|
6
6
|
import { resolveAgentConfig } from './registry.ts';
|
|
7
|
-
import {
|
|
8
|
-
composeSystemPrompt,
|
|
9
|
-
getProviderSpoofPrompt,
|
|
10
|
-
} from '../prompt/builder.ts';
|
|
7
|
+
import { composeSystemPrompt } from '../prompt/builder.ts';
|
|
11
8
|
import { discoverProjectTools } from '@agi-cli/sdk';
|
|
12
9
|
import { adaptTools } from '../../tools/adapter.ts';
|
|
13
10
|
import { buildDatabaseTools } from '../../tools/database/index.ts';
|
|
@@ -16,6 +13,7 @@ import { buildHistoryMessages } from '../message/history-builder.ts';
|
|
|
16
13
|
import { getMaxOutputTokens } from '../utils/token.ts';
|
|
17
14
|
import { setupToolContext } from '../tools/setup.ts';
|
|
18
15
|
import { getCompactionSystemPrompt } from '../message/compaction.ts';
|
|
16
|
+
import { detectOAuth, adaptRunnerCall } from '../provider/oauth-adapter.ts';
|
|
19
17
|
import type { RunOpts } from '../session/queue.ts';
|
|
20
18
|
import type { ToolAdapterContext } from '../../tools/adapter.ts';
|
|
21
19
|
|
|
@@ -87,67 +85,35 @@ export async function setupRunner(opts: RunOpts): Promise<SetupResult> {
|
|
|
87
85
|
const systemTimer = time('runner:composeSystemPrompt');
|
|
88
86
|
const { getAuth } = await import('@agi-cli/sdk');
|
|
89
87
|
const auth = await getAuth(opts.provider, cfg.projectRoot);
|
|
90
|
-
const
|
|
91
|
-
const spoofPrompt = needsSpoof
|
|
92
|
-
? getProviderSpoofPrompt(opts.provider)
|
|
93
|
-
: undefined;
|
|
88
|
+
const oauth = detectOAuth(opts.provider, auth);
|
|
94
89
|
|
|
95
|
-
debugLog(`[RUNNER] needsSpoof (OAuth): ${needsSpoof}`);
|
|
96
90
|
debugLog(
|
|
97
|
-
`[RUNNER]
|
|
91
|
+
`[RUNNER] needsSpoof (OAuth): ${oauth.needsSpoof}, isOpenAIOAuth: ${oauth.isOpenAIOAuth}`,
|
|
92
|
+
);
|
|
93
|
+
debugLog(
|
|
94
|
+
`[RUNNER] spoofPrompt: ${oauth.spoofPrompt ? `present (${opts.provider})` : 'none'}`,
|
|
98
95
|
);
|
|
99
96
|
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
const fullPrompt = await composeSystemPrompt({
|
|
112
|
-
provider: opts.provider,
|
|
113
|
-
model: opts.model,
|
|
114
|
-
projectRoot: cfg.projectRoot,
|
|
115
|
-
agentPrompt,
|
|
116
|
-
oneShot: opts.oneShot,
|
|
117
|
-
spoofPrompt: undefined,
|
|
118
|
-
includeProjectTree: isFirstMessage,
|
|
119
|
-
userContext: opts.userContext,
|
|
120
|
-
contextSummary,
|
|
121
|
-
});
|
|
122
|
-
oauthFullPromptComponents = fullPrompt.components;
|
|
97
|
+
const composed = await composeSystemPrompt({
|
|
98
|
+
provider: opts.provider,
|
|
99
|
+
model: opts.model,
|
|
100
|
+
projectRoot: cfg.projectRoot,
|
|
101
|
+
agentPrompt,
|
|
102
|
+
oneShot: opts.oneShot,
|
|
103
|
+
spoofPrompt: undefined,
|
|
104
|
+
includeProjectTree: isFirstMessage,
|
|
105
|
+
userContext: opts.userContext,
|
|
106
|
+
contextSummary,
|
|
107
|
+
});
|
|
123
108
|
|
|
124
|
-
|
|
109
|
+
const rawMaxOutputTokens = getMaxOutputTokens(opts.provider, opts.model);
|
|
110
|
+
const adapted = adaptRunnerCall(oauth, composed, {
|
|
111
|
+
provider: opts.provider,
|
|
112
|
+
rawMaxOutputTokens,
|
|
113
|
+
});
|
|
125
114
|
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
!!opts.userContext && fullPrompt.prompt.includes(opts.userContext);
|
|
129
|
-
debugLog(
|
|
130
|
-
`[system] oauth-full summary: ${JSON.stringify({
|
|
131
|
-
components: oauthFullPromptComponents ?? [],
|
|
132
|
-
length: fullPrompt.prompt.length,
|
|
133
|
-
includesUserContext,
|
|
134
|
-
})}`,
|
|
135
|
-
);
|
|
136
|
-
} else {
|
|
137
|
-
const composed = await composeSystemPrompt({
|
|
138
|
-
provider: opts.provider,
|
|
139
|
-
model: opts.model,
|
|
140
|
-
projectRoot: cfg.projectRoot,
|
|
141
|
-
agentPrompt,
|
|
142
|
-
oneShot: opts.oneShot,
|
|
143
|
-
spoofPrompt: undefined,
|
|
144
|
-
includeProjectTree: isFirstMessage,
|
|
145
|
-
userContext: opts.userContext,
|
|
146
|
-
contextSummary,
|
|
147
|
-
});
|
|
148
|
-
system = composed.prompt;
|
|
149
|
-
systemComponents = composed.components;
|
|
150
|
-
}
|
|
115
|
+
const { system } = adapted;
|
|
116
|
+
const { systemComponents, additionalSystemMessages } = adapted;
|
|
151
117
|
systemTimer.end();
|
|
152
118
|
debugLog(
|
|
153
119
|
`[system] summary: ${JSON.stringify({
|
|
@@ -173,7 +139,6 @@ export async function setupRunner(opts: RunOpts): Promise<SetupResult> {
|
|
|
173
139
|
const allTools = await discoverProjectTools(cfg.projectRoot);
|
|
174
140
|
|
|
175
141
|
if (opts.agent === 'research') {
|
|
176
|
-
// Get parent session ID for research sessions
|
|
177
142
|
const currentSession = sessionRows[0];
|
|
178
143
|
const parentSessionId = currentSession?.parentSessionId ?? null;
|
|
179
144
|
|
|
@@ -194,12 +159,7 @@ export async function setupRunner(opts: RunOpts): Promise<SetupResult> {
|
|
|
194
159
|
debugLog(`[RUNNER] About to create model with provider: ${opts.provider}`);
|
|
195
160
|
debugLog(`[RUNNER] About to create model ID: ${opts.model}`);
|
|
196
161
|
|
|
197
|
-
const oauthSystemPrompt =
|
|
198
|
-
needsSpoof && opts.provider === 'openai' && additionalSystemMessages[0]
|
|
199
|
-
? additionalSystemMessages[0].content
|
|
200
|
-
: undefined;
|
|
201
162
|
const model = await resolveModel(opts.provider, opts.model, cfg, {
|
|
202
|
-
systemPrompt: oauthSystemPrompt,
|
|
203
163
|
sessionId: opts.sessionId,
|
|
204
164
|
messageId: opts.assistantMessageId,
|
|
205
165
|
});
|
|
@@ -207,7 +167,7 @@ export async function setupRunner(opts: RunOpts): Promise<SetupResult> {
|
|
|
207
167
|
`[RUNNER] Model created: ${JSON.stringify({ id: model.modelId, provider: model.provider })}`,
|
|
208
168
|
);
|
|
209
169
|
|
|
210
|
-
const maxOutputTokens =
|
|
170
|
+
const maxOutputTokens = adapted.maxOutputTokens;
|
|
211
171
|
debugLog(`[RUNNER] maxOutputTokens for ${opts.model}: ${maxOutputTokens}`);
|
|
212
172
|
|
|
213
173
|
const { sharedCtx, firstToolTimer, firstToolSeen } = await setupToolContext(
|
|
@@ -219,7 +179,7 @@ export async function setupRunner(opts: RunOpts): Promise<SetupResult> {
|
|
|
219
179
|
const authType = providerAuth?.type;
|
|
220
180
|
const toolset = adaptTools(gated, sharedCtx, opts.provider, authType);
|
|
221
181
|
|
|
222
|
-
const providerOptions
|
|
182
|
+
const providerOptions = { ...adapted.providerOptions };
|
|
223
183
|
let effectiveMaxOutputTokens = maxOutputTokens;
|
|
224
184
|
|
|
225
185
|
if (opts.reasoningText) {
|
|
@@ -237,6 +197,7 @@ export async function setupRunner(opts: RunOpts): Promise<SetupResult> {
|
|
|
237
197
|
}
|
|
238
198
|
} else if (underlyingProvider === 'openai') {
|
|
239
199
|
providerOptions.openai = {
|
|
200
|
+
...((providerOptions.openai as Record<string, unknown>) || {}),
|
|
240
201
|
reasoningEffort: 'high',
|
|
241
202
|
reasoningSummary: 'auto',
|
|
242
203
|
};
|
|
@@ -267,7 +228,7 @@ export async function setupRunner(opts: RunOpts): Promise<SetupResult> {
|
|
|
267
228
|
firstToolTimer,
|
|
268
229
|
firstToolSeen,
|
|
269
230
|
providerOptions,
|
|
270
|
-
needsSpoof,
|
|
231
|
+
needsSpoof: oauth.needsSpoof,
|
|
271
232
|
};
|
|
272
233
|
}
|
|
273
234
|
|
|
@@ -4,13 +4,13 @@ import { eq } from 'drizzle-orm';
|
|
|
4
4
|
import { streamText } from 'ai';
|
|
5
5
|
import { resolveModel } from '../provider/index.ts';
|
|
6
6
|
import { getAuth } from '@agi-cli/sdk';
|
|
7
|
-
import { getProviderSpoofPrompt } from '../prompt/builder.ts';
|
|
8
7
|
import { loadConfig } from '@agi-cli/sdk';
|
|
9
8
|
import { debugLog } from '../debug/index.ts';
|
|
10
9
|
import { getModelLimits } from './compaction-limits.ts';
|
|
11
10
|
import { buildCompactionContext } from './compaction-context.ts';
|
|
12
11
|
import { getCompactionSystemPrompt } from './compaction-detect.ts';
|
|
13
12
|
import { markSessionCompacted } from './compaction-mark.ts';
|
|
13
|
+
import { detectOAuth, adaptSimpleCall } from '../provider/oauth-adapter.ts';
|
|
14
14
|
|
|
15
15
|
export async function performAutoCompaction(
|
|
16
16
|
db: Awaited<ReturnType<typeof getDb>>,
|
|
@@ -56,24 +56,13 @@ export async function performAutoCompaction(
|
|
|
56
56
|
);
|
|
57
57
|
|
|
58
58
|
const auth = await getAuth(
|
|
59
|
-
provider as
|
|
60
|
-
| 'anthropic'
|
|
61
|
-
| 'openai'
|
|
62
|
-
| 'google'
|
|
63
|
-
| 'openrouter'
|
|
64
|
-
| 'opencode'
|
|
65
|
-
| 'setu'
|
|
66
|
-
| 'zai'
|
|
67
|
-
| 'zai-coding',
|
|
59
|
+
provider as Parameters<typeof getAuth>[0],
|
|
68
60
|
cfg.projectRoot,
|
|
69
61
|
);
|
|
70
|
-
const
|
|
71
|
-
const spoofPrompt = needsSpoof
|
|
72
|
-
? getProviderSpoofPrompt(provider as 'anthropic' | 'openai')
|
|
73
|
-
: undefined;
|
|
62
|
+
const oauth = detectOAuth(provider, auth);
|
|
74
63
|
|
|
75
64
|
debugLog(
|
|
76
|
-
`[compaction] OAuth
|
|
65
|
+
`[compaction] OAuth: needsSpoof=${oauth.needsSpoof}, isOpenAIOAuth=${oauth.isOpenAIOAuth}`,
|
|
77
66
|
);
|
|
78
67
|
|
|
79
68
|
const model = await resolveModel(
|
|
@@ -83,10 +72,13 @@ export async function performAutoCompaction(
|
|
|
83
72
|
);
|
|
84
73
|
|
|
85
74
|
const compactionPrompt = getCompactionSystemPrompt();
|
|
86
|
-
const
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
:
|
|
75
|
+
const userContent = `IMPORTANT: Generate a comprehensive summary. This will replace the detailed conversation history.\n\nPlease summarize this conversation:\n\n<conversation-to-summarize>\n${context}\n</conversation-to-summarize>`;
|
|
76
|
+
|
|
77
|
+
const adapted = adaptSimpleCall(oauth, {
|
|
78
|
+
instructions: compactionPrompt,
|
|
79
|
+
userContent,
|
|
80
|
+
maxOutputTokens: 2000,
|
|
81
|
+
});
|
|
90
82
|
|
|
91
83
|
const compactPartId = crypto.randomUUID();
|
|
92
84
|
const now = Date.now();
|
|
@@ -106,14 +98,10 @@ export async function performAutoCompaction(
|
|
|
106
98
|
|
|
107
99
|
const result = streamText({
|
|
108
100
|
model,
|
|
109
|
-
system:
|
|
110
|
-
messages:
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
content: `${userInstructions}\n\nPlease summarize this conversation:\n\n<conversation-to-summarize>\n${context}\n</conversation-to-summarize>`,
|
|
114
|
-
},
|
|
115
|
-
],
|
|
116
|
-
maxOutputTokens: 2000,
|
|
101
|
+
system: adapted.system,
|
|
102
|
+
messages: adapted.messages,
|
|
103
|
+
maxOutputTokens: adapted.maxOutputTokens,
|
|
104
|
+
providerOptions: adapted.providerOptions,
|
|
117
105
|
});
|
|
118
106
|
|
|
119
107
|
let summary = '';
|
|
@@ -10,6 +10,7 @@ import { resolveModel } from '../provider/index.ts';
|
|
|
10
10
|
import { getFastModelForAuth, type ProviderId } from '@agi-cli/sdk';
|
|
11
11
|
import { debugLog } from '../debug/index.ts';
|
|
12
12
|
import { isCompactCommand, buildCompactionContext } from './compaction.ts';
|
|
13
|
+
import { detectOAuth, adaptSimpleCall } from '../provider/oauth-adapter.ts';
|
|
13
14
|
|
|
14
15
|
type SessionRow = typeof sessions.$inferSelect;
|
|
15
16
|
|
|
@@ -52,7 +53,6 @@ export async function dispatchAssistantMessage(
|
|
|
52
53
|
files,
|
|
53
54
|
} = options;
|
|
54
55
|
|
|
55
|
-
// DEBUG: Log userContext in dispatch
|
|
56
56
|
debugLog(
|
|
57
57
|
`[MESSAGE_SERVICE] dispatchAssistantMessage called with userContext: ${userContext ? `${userContext.substring(0, 50)}...` : 'NONE'}`,
|
|
58
58
|
);
|
|
@@ -144,12 +144,10 @@ export async function dispatchAssistantMessage(
|
|
|
144
144
|
payload: { id: assistantMessageId, role: 'assistant' },
|
|
145
145
|
});
|
|
146
146
|
|
|
147
|
-
// DEBUG: Log before enqueue
|
|
148
147
|
debugLog(
|
|
149
148
|
`[MESSAGE_SERVICE] Enqueuing assistant run with userContext: ${userContext ? `${userContext.substring(0, 50)}...` : 'NONE'}`,
|
|
150
149
|
);
|
|
151
150
|
|
|
152
|
-
// Detect /compact command and build context with model-aware limits
|
|
153
151
|
const isCompact = isCompactCommand(content);
|
|
154
152
|
let compactionContext: string | undefined;
|
|
155
153
|
|
|
@@ -157,7 +155,6 @@ export async function dispatchAssistantMessage(
|
|
|
157
155
|
debugLog('[MESSAGE_SERVICE] Detected /compact command, building context');
|
|
158
156
|
const { getModelLimits } = await import('./compaction.ts');
|
|
159
157
|
const limits = getModelLimits(provider, model);
|
|
160
|
-
// Use 50% of context window for compaction, minimum 15k tokens
|
|
161
158
|
const contextTokenLimit = limits
|
|
162
159
|
? Math.max(Math.floor(limits.context * 0.5), 15000)
|
|
163
160
|
: 15000;
|
|
@@ -171,7 +168,6 @@ export async function dispatchAssistantMessage(
|
|
|
171
168
|
);
|
|
172
169
|
}
|
|
173
170
|
|
|
174
|
-
// Read tool approval mode from config
|
|
175
171
|
const toolApprovalMode = cfg.defaults.toolApproval ?? 'auto';
|
|
176
172
|
|
|
177
173
|
enqueueAssistantRun(
|
|
@@ -289,19 +285,15 @@ async function generateSessionTitle(args: {
|
|
|
289
285
|
debugLog(`[TITLE_GEN] Provider: ${provider}, Model: ${modelName}`);
|
|
290
286
|
|
|
291
287
|
const { getAuth } = await import('@agi-cli/sdk');
|
|
292
|
-
const { getProviderSpoofPrompt } = await import('../prompt/builder.ts');
|
|
293
288
|
const auth = await getAuth(provider, cfg.projectRoot);
|
|
294
|
-
const
|
|
295
|
-
const spoofPrompt = needsSpoof
|
|
296
|
-
? getProviderSpoofPrompt(provider)
|
|
297
|
-
: undefined;
|
|
289
|
+
const oauth = detectOAuth(provider, auth);
|
|
298
290
|
|
|
299
291
|
const titleModel = getFastModelForAuth(provider, auth?.type) ?? modelName;
|
|
300
292
|
debugLog(`[TITLE_GEN] Using title model: ${titleModel}`);
|
|
301
293
|
const model = await resolveModel(provider, titleModel, cfg);
|
|
302
294
|
|
|
303
295
|
debugLog(
|
|
304
|
-
`[TITLE_GEN]
|
|
296
|
+
`[TITLE_GEN] oauth: needsSpoof=${oauth.needsSpoof}, isOpenAIOAuth=${oauth.isOpenAIOAuth}`,
|
|
305
297
|
);
|
|
306
298
|
|
|
307
299
|
const promptText = String(content ?? '').slice(0, 2000);
|
|
@@ -313,63 +305,23 @@ Examples: "Fix TypeScript build errors", "Add dark mode toggle", "Refactor auth
|
|
|
313
305
|
|
|
314
306
|
Output ONLY the title, nothing else.`;
|
|
315
307
|
|
|
316
|
-
const
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
<user-message>
|
|
321
|
-
${promptText}
|
|
322
|
-
</user-message>`;
|
|
323
|
-
|
|
324
|
-
// Build system prompt and messages
|
|
325
|
-
// For OAuth: Keep spoof pure, add instructions to user message
|
|
326
|
-
// For API key: Use instructions as system
|
|
327
|
-
let system: string;
|
|
328
|
-
let messagesArray: Array<{ role: 'user'; content: string }>;
|
|
329
|
-
|
|
330
|
-
if (spoofPrompt) {
|
|
331
|
-
// OAuth mode: spoof stays pure, instructions go in user message
|
|
332
|
-
system = spoofPrompt;
|
|
333
|
-
messagesArray = [
|
|
334
|
-
{
|
|
335
|
-
role: 'user',
|
|
336
|
-
content: userMessageWithTags,
|
|
337
|
-
},
|
|
338
|
-
];
|
|
339
|
-
|
|
340
|
-
debugLog(
|
|
341
|
-
`[TITLE_GEN] Using OAuth mode (prompts: spoof:${provider}, title-generator, user-request)`,
|
|
342
|
-
);
|
|
343
|
-
debugLog(
|
|
344
|
-
`[TITLE_GEN] User content preview: ${promptText.substring(0, 100)}...`,
|
|
345
|
-
);
|
|
346
|
-
} else {
|
|
347
|
-
// API key mode: normal flow
|
|
348
|
-
system = titleInstructions;
|
|
349
|
-
messagesArray = [
|
|
350
|
-
{
|
|
351
|
-
role: 'user',
|
|
352
|
-
content: `<user-message>\n${promptText}\n</user-message>`,
|
|
353
|
-
},
|
|
354
|
-
];
|
|
308
|
+
const adapted = adaptSimpleCall(oauth, {
|
|
309
|
+
instructions: titleInstructions,
|
|
310
|
+
userContent: promptText,
|
|
311
|
+
});
|
|
355
312
|
|
|
356
|
-
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
debugLog(
|
|
360
|
-
`[TITLE_GEN] User content preview: ${promptText.substring(0, 100)}...`,
|
|
361
|
-
);
|
|
362
|
-
}
|
|
313
|
+
debugLog(
|
|
314
|
+
`[TITLE_GEN] mode=${adapted.forceStream ? 'openai-oauth' : oauth.needsSpoof ? 'spoof' : 'api-key'}`,
|
|
315
|
+
);
|
|
363
316
|
|
|
364
317
|
let modelTitle = '';
|
|
365
318
|
try {
|
|
366
|
-
|
|
367
|
-
if (needsSpoof) {
|
|
368
|
-
debugLog('[TITLE_GEN] Using streamText for OAuth...');
|
|
319
|
+
if (adapted.forceStream || oauth.needsSpoof) {
|
|
369
320
|
const result = streamText({
|
|
370
321
|
model,
|
|
371
|
-
system,
|
|
372
|
-
messages:
|
|
322
|
+
system: adapted.system,
|
|
323
|
+
messages: adapted.messages,
|
|
324
|
+
providerOptions: adapted.providerOptions,
|
|
373
325
|
});
|
|
374
326
|
for await (const chunk of result.textStream) {
|
|
375
327
|
modelTitle += chunk;
|
|
@@ -379,8 +331,8 @@ ${promptText}
|
|
|
379
331
|
debugLog('[TITLE_GEN] Using generateText...');
|
|
380
332
|
const out = await generateText({
|
|
381
333
|
model,
|
|
382
|
-
system,
|
|
383
|
-
messages:
|
|
334
|
+
system: adapted.system,
|
|
335
|
+
messages: adapted.messages,
|
|
384
336
|
});
|
|
385
337
|
modelTitle = (out?.text || '').trim();
|
|
386
338
|
}
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
import { getAuth, createCopilotModel } from '@agi-cli/sdk';
|
|
2
|
+
import type { AGIConfig } from '@agi-cli/sdk';
|
|
3
|
+
|
|
4
|
+
export async function resolveCopilotModel(model: string, cfg: AGIConfig) {
|
|
5
|
+
const auth = await getAuth('copilot', cfg.projectRoot);
|
|
6
|
+
if (auth?.type === 'oauth') {
|
|
7
|
+
return createCopilotModel(model, { oauth: auth });
|
|
8
|
+
}
|
|
9
|
+
throw new Error(
|
|
10
|
+
'Copilot provider requires OAuth. Run `agi auth login copilot`.',
|
|
11
|
+
);
|
|
12
|
+
}
|
|
@@ -7,6 +7,7 @@ import { resolveSetuModel, type ResolveSetuModelOptions } from './setu.ts';
|
|
|
7
7
|
import { getZaiInstance, getZaiCodingInstance } from './zai.ts';
|
|
8
8
|
import { resolveOpencodeModel } from './opencode.ts';
|
|
9
9
|
import { getMoonshotInstance } from './moonshot.ts';
|
|
10
|
+
import { resolveCopilotModel } from './copilot.ts';
|
|
10
11
|
|
|
11
12
|
export type ProviderName = ProviderId;
|
|
12
13
|
|
|
@@ -22,9 +23,7 @@ export async function resolveModel(
|
|
|
22
23
|
},
|
|
23
24
|
) {
|
|
24
25
|
if (provider === 'openai') {
|
|
25
|
-
return resolveOpenAIModel(model, cfg
|
|
26
|
-
systemPrompt: options?.systemPrompt,
|
|
27
|
-
});
|
|
26
|
+
return resolveOpenAIModel(model, cfg);
|
|
28
27
|
}
|
|
29
28
|
if (provider === 'anthropic') {
|
|
30
29
|
const instance = await getAnthropicInstance(cfg);
|
|
@@ -39,6 +38,9 @@ export async function resolveModel(
|
|
|
39
38
|
if (provider === 'opencode') {
|
|
40
39
|
return resolveOpencodeModel(model, cfg);
|
|
41
40
|
}
|
|
41
|
+
if (provider === 'copilot') {
|
|
42
|
+
return resolveCopilotModel(model, cfg);
|
|
43
|
+
}
|
|
42
44
|
if (provider === 'setu') {
|
|
43
45
|
return await resolveSetuModel(model, options?.sessionId, {
|
|
44
46
|
messageId: options?.messageId,
|
|
@@ -0,0 +1,237 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* OAuth Provider Adapter
|
|
3
|
+
*
|
|
4
|
+
* Consolidates all OAuth-specific LLM call adaptations into one place.
|
|
5
|
+
* Each OAuth provider has quirks (no system prompt, must stream, needs
|
|
6
|
+
* spoof prompt, special providerOptions, etc.). Instead of duplicating
|
|
7
|
+
* that branching logic across every callsite (title gen, compaction,
|
|
8
|
+
* commit, runner), this module exposes two layers:
|
|
9
|
+
*
|
|
10
|
+
* ## Layer 1 — Detection (`detectOAuth`)
|
|
11
|
+
* Examines provider + auth and returns an `OAuthContext` describing
|
|
12
|
+
* what adaptations are needed. Used by ALL callsites (simple + complex).
|
|
13
|
+
*
|
|
14
|
+
* ## Layer 2 — Simple call adaptation (`adaptSimpleCall`)
|
|
15
|
+
* For single-shot LLM calls (title gen, compaction, commit) that follow
|
|
16
|
+
* the pattern: system + user message → text result.
|
|
17
|
+
* Returns a ready-to-spread `AdaptedLLMCall` object.
|
|
18
|
+
*
|
|
19
|
+
* ## Adding a new OAuth provider
|
|
20
|
+
* 1. Add detection branch in `detectOAuth()`
|
|
21
|
+
* 2. Add adaptation branch in `adaptSimpleCall()`
|
|
22
|
+
* 3. If the provider needs a custom fetch wrapper, add it under
|
|
23
|
+
* `packages/sdk/src/providers/src/<provider>-oauth-client.ts`
|
|
24
|
+
* 4. Zero changes needed at any callsite.
|
|
25
|
+
*
|
|
26
|
+
* ## Architecture
|
|
27
|
+
*
|
|
28
|
+
* ```
|
|
29
|
+
* callsite (commit.ts, service.ts, compaction-auto.ts, runner-setup.ts)
|
|
30
|
+
* │
|
|
31
|
+
* ├─ detectOAuth(provider, auth) → OAuthContext
|
|
32
|
+
* │
|
|
33
|
+
* ├─ adaptSimpleCall(ctx, input) → AdaptedLLMCall (title, commit, compaction)
|
|
34
|
+
* │
|
|
35
|
+
* └─ adaptRunnerCall(ctx, composed, opts) → AdaptedRunnerSetup (main chat)
|
|
36
|
+
* │
|
|
37
|
+
* ├─ OpenAI OAuth (Codex): no system, inline instructions,
|
|
38
|
+
* │ providerOptions.openai.store=false, forceStream=true
|
|
39
|
+
* │
|
|
40
|
+
* ├─ Anthropic OAuth: spoofPrompt as system, instructions
|
|
41
|
+
* │ folded into user message, normal maxOutputTokens
|
|
42
|
+
* │
|
|
43
|
+
* └─ API key (default): system=instructions, plain user msg
|
|
44
|
+
* ```
|
|
45
|
+
*/
|
|
46
|
+
import { getProviderSpoofPrompt } from '../prompt/builder.ts';
|
|
47
|
+
import type { SharedV3ProviderOptions } from '@ai-sdk/provider';
|
|
48
|
+
|
|
49
|
+
export type OAuthContext = {
|
|
50
|
+
isOAuth: boolean;
|
|
51
|
+
needsSpoof: boolean;
|
|
52
|
+
isOpenAIOAuth: boolean;
|
|
53
|
+
spoofPrompt: string | undefined;
|
|
54
|
+
};
|
|
55
|
+
|
|
56
|
+
/**
|
|
57
|
+
* Detect OAuth mode for a provider and return flags describing
|
|
58
|
+
* what adaptations are needed. This replaces the 4-line pattern
|
|
59
|
+
* that was previously copy-pasted at every callsite:
|
|
60
|
+
*
|
|
61
|
+
* const isOAuth = auth?.type === 'oauth';
|
|
62
|
+
* const needsSpoof = isOAuth && provider === 'anthropic';
|
|
63
|
+
* const isOpenAIOAuth = isOAuth && provider === 'openai';
|
|
64
|
+
* const spoofPrompt = needsSpoof ? getProviderSpoofPrompt(...) : undefined;
|
|
65
|
+
*/
|
|
66
|
+
export function detectOAuth(
|
|
67
|
+
provider: string,
|
|
68
|
+
auth: { type: string } | null | undefined,
|
|
69
|
+
): OAuthContext {
|
|
70
|
+
const isOAuth = auth?.type === 'oauth';
|
|
71
|
+
const needsSpoof = !!isOAuth && provider === 'anthropic';
|
|
72
|
+
const isCopilot = provider === 'copilot';
|
|
73
|
+
return {
|
|
74
|
+
isOAuth: !!isOAuth || isCopilot,
|
|
75
|
+
needsSpoof,
|
|
76
|
+
isOpenAIOAuth: (!!isOAuth && provider === 'openai') || isCopilot,
|
|
77
|
+
spoofPrompt: needsSpoof ? getProviderSpoofPrompt(provider) : undefined,
|
|
78
|
+
};
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
/**
|
|
82
|
+
* Build OpenAI Codex-specific providerOptions.
|
|
83
|
+
* Codex requires `store: false` and passes the system prompt via
|
|
84
|
+
* `instructions` instead of the normal `system` field.
|
|
85
|
+
*
|
|
86
|
+
* Used directly by runner-setup.ts (complex flow) and indirectly
|
|
87
|
+
* by adaptSimpleCall (simple flows).
|
|
88
|
+
*/
|
|
89
|
+
export function buildCodexProviderOptions(instructions: string) {
|
|
90
|
+
return {
|
|
91
|
+
openai: { store: false as const, instructions },
|
|
92
|
+
};
|
|
93
|
+
}
|
|
94
|
+
|
|
95
|
+
/**
 * Result of `adaptSimpleCall`: the pieces to spread into a
 * streamText/generateText invocation.
 */
export type AdaptedLLMCall = {
	// System prompt; omitted on the Codex path, where instructions travel in
	// the user message and providerOptions instead.
	system?: string;
	// Single user message carrying the request (and, in OAuth modes, the
	// folded-in instructions).
	messages: Array<{ role: 'user'; content: string }>;
	// Omitted on the Codex path (Codex doesn't support it).
	maxOutputTokens?: number;
	// Extra provider options (Codex store=false + instructions); only set on
	// the Codex path.
	providerOptions?: SharedV3ProviderOptions;
	// When true the caller must use streaming (Codex path).
	forceStream: boolean;
};
|
|
102
|
+
|
|
103
|
+
/**
|
|
104
|
+
* Adapt a simple (single-shot) LLM call for the current OAuth context.
|
|
105
|
+
*
|
|
106
|
+
* Takes raw `instructions` (what would normally be the system prompt) and
|
|
107
|
+
* `userContent`, then returns the correct shape for the provider:
|
|
108
|
+
*
|
|
109
|
+
* - **OpenAI OAuth (Codex)**: no system prompt, instructions baked into
|
|
110
|
+
* user message AND providerOptions.openai.instructions, forceStream=true,
|
|
111
|
+
* no maxOutputTokens (Codex doesn't support it).
|
|
112
|
+
*
|
|
113
|
+
* - **Anthropic OAuth**: spoof prompt as system, real instructions folded
|
|
114
|
+
* into user message, normal maxOutputTokens.
|
|
115
|
+
*
|
|
116
|
+
* - **API key (default)**: instructions as system, plain user message,
|
|
117
|
+
* normal maxOutputTokens.
|
|
118
|
+
*
|
|
119
|
+
* Callsites just spread the result into streamText/generateText:
|
|
120
|
+
* ```ts
|
|
121
|
+
* const adapted = adaptSimpleCall(oauth, { instructions, userContent });
|
|
122
|
+
* const result = streamText({ model, ...adapted }); // almost — see forceStream
|
|
123
|
+
* ```
|
|
124
|
+
*/
|
|
125
|
+
export function adaptSimpleCall(
|
|
126
|
+
ctx: OAuthContext,
|
|
127
|
+
input: {
|
|
128
|
+
instructions: string;
|
|
129
|
+
userContent: string;
|
|
130
|
+
maxOutputTokens?: number;
|
|
131
|
+
},
|
|
132
|
+
): AdaptedLLMCall {
|
|
133
|
+
if (ctx.isOpenAIOAuth) {
|
|
134
|
+
return {
|
|
135
|
+
messages: [
|
|
136
|
+
{
|
|
137
|
+
role: 'user',
|
|
138
|
+
content: `${input.instructions}\n\n${input.userContent}`,
|
|
139
|
+
},
|
|
140
|
+
],
|
|
141
|
+
providerOptions: buildCodexProviderOptions(input.instructions),
|
|
142
|
+
forceStream: true,
|
|
143
|
+
};
|
|
144
|
+
}
|
|
145
|
+
|
|
146
|
+
if (ctx.needsSpoof && ctx.spoofPrompt) {
|
|
147
|
+
return {
|
|
148
|
+
system: ctx.spoofPrompt,
|
|
149
|
+
messages: [
|
|
150
|
+
{
|
|
151
|
+
role: 'user',
|
|
152
|
+
content: `${input.instructions}\n\n${input.userContent}`,
|
|
153
|
+
},
|
|
154
|
+
],
|
|
155
|
+
maxOutputTokens: input.maxOutputTokens,
|
|
156
|
+
forceStream: false,
|
|
157
|
+
};
|
|
158
|
+
}
|
|
159
|
+
|
|
160
|
+
return {
|
|
161
|
+
system: input.instructions,
|
|
162
|
+
messages: [{ role: 'user', content: input.userContent }],
|
|
163
|
+
maxOutputTokens: input.maxOutputTokens,
|
|
164
|
+
forceStream: false,
|
|
165
|
+
};
|
|
166
|
+
}
|
|
167
|
+
|
|
168
|
+
/**
 * Result of `adaptRunnerCall`: system-prompt placement, token limit, and
 * provider options for the main chat runner.
 */
export type AdaptedRunnerSetup = {
	// Text for the system slot ('' on the Codex path, spoof prompt for
	// Anthropic OAuth, the composed prompt otherwise).
	system: string;
	// Labels describing what went into the system prompt — presumably for
	// logging/telemetry; TODO confirm against the consumer in runner-setup.ts.
	systemComponents: string[];
	// Messages injected when the composed prompt cannot occupy the system
	// slot (system-role for Anthropic OAuth, user-role for Codex).
	additionalSystemMessages: Array<{
		role: 'system' | 'user';
		content: string;
	}>;
	// undefined on the Codex path, which strips the limit.
	maxOutputTokens: number | undefined;
	// Codex store/instructions options, or {} on the other paths.
	providerOptions: SharedV3ProviderOptions;
};
|
|
178
|
+
|
|
179
|
+
/**
|
|
180
|
+
* Adapt the main chat runner's system prompt placement, maxOutputTokens,
|
|
181
|
+
* and providerOptions based on the OAuth context.
|
|
182
|
+
*
|
|
183
|
+
* Unlike `adaptSimpleCall` (which builds the full message), this only
|
|
184
|
+
* decides WHERE the already-composed system prompt goes:
|
|
185
|
+
*
|
|
186
|
+
* - **OpenAI OAuth (Codex)**: system='', composed prompt sent as a user
|
|
187
|
+
* message in additionalSystemMessages, providerOptions with store=false
|
|
188
|
+
* + instructions, maxOutputTokens stripped.
|
|
189
|
+
*
|
|
190
|
+
* - **Anthropic OAuth**: spoof prompt as system, composed prompt sent as
|
|
191
|
+
* an additional system message. Normal maxOutputTokens.
|
|
192
|
+
*
|
|
193
|
+
* - **API key (default)**: composed prompt IS the system prompt directly.
|
|
194
|
+
* No additional messages needed.
|
|
195
|
+
*
|
|
196
|
+
* ```ts
|
|
197
|
+
* const composed = await composeSystemPrompt({ ... });
|
|
198
|
+
* const adapted = adaptRunnerCall(oauth, composed, { provider, rawMaxOutputTokens });
|
|
199
|
+
* // adapted.system, adapted.additionalSystemMessages, adapted.providerOptions ready to use
|
|
200
|
+
* ```
|
|
201
|
+
*/
|
|
202
|
+
export function adaptRunnerCall(
|
|
203
|
+
ctx: OAuthContext,
|
|
204
|
+
composed: { prompt: string; components: string[] },
|
|
205
|
+
opts: {
|
|
206
|
+
provider: string;
|
|
207
|
+
rawMaxOutputTokens: number | undefined;
|
|
208
|
+
},
|
|
209
|
+
): AdaptedRunnerSetup {
|
|
210
|
+
if (ctx.spoofPrompt) {
|
|
211
|
+
return {
|
|
212
|
+
system: ctx.spoofPrompt,
|
|
213
|
+
systemComponents: [`spoof:${opts.provider || 'unknown'}`],
|
|
214
|
+
additionalSystemMessages: [{ role: 'system', content: composed.prompt }],
|
|
215
|
+
maxOutputTokens: opts.rawMaxOutputTokens,
|
|
216
|
+
providerOptions: {},
|
|
217
|
+
};
|
|
218
|
+
}
|
|
219
|
+
|
|
220
|
+
if (ctx.isOpenAIOAuth) {
|
|
221
|
+
return {
|
|
222
|
+
system: '',
|
|
223
|
+
systemComponents: composed.components,
|
|
224
|
+
additionalSystemMessages: [{ role: 'user', content: composed.prompt }],
|
|
225
|
+
maxOutputTokens: undefined,
|
|
226
|
+
providerOptions: buildCodexProviderOptions(composed.prompt),
|
|
227
|
+
};
|
|
228
|
+
}
|
|
229
|
+
|
|
230
|
+
return {
|
|
231
|
+
system: composed.prompt,
|
|
232
|
+
systemComponents: composed.components,
|
|
233
|
+
additionalSystemMessages: [],
|
|
234
|
+
maxOutputTokens: opts.rawMaxOutputTokens,
|
|
235
|
+
providerOptions: {},
|
|
236
|
+
};
|
|
237
|
+
}
|
|
@@ -2,22 +2,12 @@ import type { AGIConfig } from '@agi-cli/sdk';
|
|
|
2
2
|
import { getAuth, createOpenAIOAuthModel } from '@agi-cli/sdk';
|
|
3
3
|
import { openai, createOpenAI } from '@ai-sdk/openai';
|
|
4
4
|
|
|
5
|
-
export async function resolveOpenAIModel(
|
|
6
|
-
model: string,
|
|
7
|
-
cfg: AGIConfig,
|
|
8
|
-
options?: {
|
|
9
|
-
systemPrompt?: string;
|
|
10
|
-
},
|
|
11
|
-
) {
|
|
5
|
+
export async function resolveOpenAIModel(model: string, cfg: AGIConfig) {
|
|
12
6
|
const auth = await getAuth('openai', cfg.projectRoot);
|
|
13
7
|
if (auth?.type === 'oauth') {
|
|
14
|
-
const isCodexModel = model.toLowerCase().includes('codex');
|
|
15
8
|
return createOpenAIOAuthModel(model, {
|
|
16
9
|
oauth: auth,
|
|
17
10
|
projectRoot: cfg.projectRoot,
|
|
18
|
-
reasoningEffort: isCodexModel ? 'high' : 'medium',
|
|
19
|
-
reasoningSummary: 'auto',
|
|
20
|
-
instructions: options?.systemPrompt,
|
|
21
11
|
});
|
|
22
12
|
}
|
|
23
13
|
if (auth?.type === 'api' && auth.key) {
|
|
@@ -73,17 +73,7 @@ export function createErrorHandler(
|
|
|
73
73
|
(causeError?.message as string) ??
|
|
74
74
|
'';
|
|
75
75
|
|
|
76
|
-
|
|
77
|
-
const fullErrorStr = JSON.stringify(err);
|
|
78
|
-
const hasSetuFiatCode =
|
|
79
|
-
fullErrorStr.includes('"code":"SETU_FIAT_SELECTED"') ||
|
|
80
|
-
fullErrorStr.includes("'code':'SETU_FIAT_SELECTED'");
|
|
81
|
-
|
|
82
|
-
// Only match if the error code is SETU_FIAT_SELECTED OR the exact error message
|
|
83
|
-
const isFiatSelected =
|
|
84
|
-
errorCode === 'SETU_FIAT_SELECTED' ||
|
|
85
|
-
errorMessage === 'Setu: fiat payment selected' ||
|
|
86
|
-
hasSetuFiatCode;
|
|
76
|
+
const isFiatSelected = errorCode === 'SETU_FIAT_SELECTED';
|
|
87
77
|
|
|
88
78
|
// Handle fiat payment selected - this is not an error, just a signal to pause
|
|
89
79
|
if (isFiatSelected) {
|
|
@@ -174,6 +164,8 @@ export function createErrorHandler(
|
|
|
174
164
|
fullErrorStrLower.includes('context_length_exceeded') ||
|
|
175
165
|
fullErrorStrLower.includes('request too large') ||
|
|
176
166
|
fullErrorStrLower.includes('exceeds the model') ||
|
|
167
|
+
fullErrorStrLower.includes('exceeds the limit') ||
|
|
168
|
+
fullErrorStrLower.includes('prompt token count') ||
|
|
177
169
|
fullErrorStrLower.includes('context window') ||
|
|
178
170
|
fullErrorStrLower.includes('input is too long') ||
|
|
179
171
|
errorCode === 'context_length_exceeded' ||
|
|
@@ -324,6 +316,9 @@ export function createErrorHandler(
|
|
|
324
316
|
isPromptTooLong && !opts.isCompactCommand
|
|
325
317
|
? `${errorPayload.message}. Context auto-compacted - please retry your message.`
|
|
326
318
|
: errorPayload.message;
|
|
319
|
+
const errorPartType = isPromptTooLong
|
|
320
|
+
? 'context_length_exceeded'
|
|
321
|
+
: errorPayload.type;
|
|
327
322
|
await db.insert(messageParts).values({
|
|
328
323
|
id: errorPartId,
|
|
329
324
|
messageId: opts.assistantMessageId,
|
|
@@ -332,7 +327,8 @@ export function createErrorHandler(
|
|
|
332
327
|
type: 'error',
|
|
333
328
|
content: JSON.stringify({
|
|
334
329
|
message: displayMessage,
|
|
335
|
-
type:
|
|
330
|
+
type: errorPartType,
|
|
331
|
+
errorType: isPromptTooLong ? 'context_length_exceeded' : undefined,
|
|
336
332
|
details: errorPayload.details,
|
|
337
333
|
isAborted: false,
|
|
338
334
|
}),
|
|
@@ -348,7 +344,7 @@ export function createErrorHandler(
|
|
|
348
344
|
.set({
|
|
349
345
|
status: 'error',
|
|
350
346
|
error: displayMessage,
|
|
351
|
-
errorType:
|
|
347
|
+
errorType: errorPartType,
|
|
352
348
|
errorDetails: JSON.stringify({
|
|
353
349
|
...errorPayload.details,
|
|
354
350
|
isApiError,
|
|
@@ -365,7 +361,7 @@ export function createErrorHandler(
|
|
|
365
361
|
messageId: opts.assistantMessageId,
|
|
366
362
|
partId: errorPartId,
|
|
367
363
|
error: displayMessage,
|
|
368
|
-
errorType:
|
|
364
|
+
errorType: errorPartType,
|
|
369
365
|
details: errorPayload.details,
|
|
370
366
|
isAborted: false,
|
|
371
367
|
autoCompacted: isPromptTooLong && !opts.isCompactCommand,
|