copilot-api-plus 1.0.34 → 1.0.36
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +26 -5
- package/dist/main.js +207 -37
- package/dist/main.js.map +1 -1
- package/package.json +1 -1
package/README.md
CHANGED
@@ -120,7 +120,7 @@ npx copilot-api-plus@latest start --account-type enterprise
 
 ### 2. OpenCode Zen Mode
 
-Use [OpenCode Zen](https://opencode.ai/zen)'s multi-model API
+Use [OpenCode Zen](https://opencode.ai/zen)'s multi-model API service, with support for top coding models such as GPT-5, Claude, and Gemini.
 
 #### Prerequisites
 
 1. Visit https://opencode.ai/zen
@@ -143,16 +143,37 @@ npx copilot-api-plus@latest start --zen --zen-api-key YOUR_API_KEY
 
 | Model | ID | Description |
 |------|-----|------|
-
-
+| GPT-5.2 | `gpt-5.2` | Latest OpenAI model |
+| GPT-5.1 Codex Max | `gpt-5.1-codex-max` | Code-optimized variant |
+| GPT-5.1 Codex | `gpt-5.1-codex` | Code-focused |
 | GPT-5 Codex | `gpt-5-codex` | OpenAI Responses API |
+| Claude Opus 4.5 | `claude-opus-4-5` | Anthropic Claude (200K) |
+| Claude Sonnet 4.5 | `claude-sonnet-4-5` | Anthropic Claude (200K) |
+| Claude Sonnet 4 | `claude-sonnet-4` | Anthropic Claude |
 | Gemini 3 Pro | `gemini-3-pro` | Google Gemini |
-| Qwen3 Coder
+| Qwen3 Coder | `qwen3-coder` | Alibaba Qwen |
 | Kimi K2 | `kimi-k2` | Moonshot |
-| Grok Code Fast 1 | `grok-code` | xAI |
+| Grok Code Fast 1 | `grok-code-fast-1` | xAI |
 
 For more models, visit [opencode.ai/zen](https://opencode.ai/zen)
 
+#### API Endpoints
+
+Zen mode supports the following API endpoints:
+
+| Endpoint | Description |
+|------|------|
+| `/v1/chat/completions` | OpenAI-compatible Chat API |
+| `/v1/messages` | Anthropic-compatible Messages API |
+| `/v1/responses` | OpenAI Responses API (GPT-5 series) |
+| `/v1/models` | List available models |
+
+Dedicated endpoints (also accessible without the `--zen` flag):
+- `/zen/v1/chat/completions`
+- `/zen/v1/messages`
+- `/zen/v1/responses`
+- `/zen/v1/models`
+
 #### Managing the API Key
 
 ```bash
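
The endpoint tables above are served by the local proxy once it is started with `--zen`. As a rough illustration (a sketch, not taken from the package), the OpenAI-compatible chat route can be called like this; the host and port are assumptions, so adjust them to whatever `start` reports for your instance:

```js
// Hypothetical request against the proxy's OpenAI-compatible Zen route.
// Assumes the proxy was started with --zen and listens on localhost:4141.
// Run as an ES module (e.g. node example.mjs) so top-level await works.
const res = await fetch("http://localhost:4141/zen/v1/chat/completions", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    model: "gpt-5.2",
    messages: [{ role: "user", content: "Hello from the Zen proxy" }],
  }),
});
console.log((await res.json()).choices?.[0]?.message);
```

The Zen API key never appears in the client call; as the bundled code below shows, the proxy attaches it when forwarding the request upstream.
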
package/dist/main.js
CHANGED
@@ -2162,14 +2162,14 @@ function createErrorResponse(type, message, status) {
 * Create Anthropic-compatible message response using Antigravity
 * Note: Both Gemini and Claude models use the same endpoint and Gemini-style format
 */
-const MAX_RETRIES = 5;
+const MAX_RETRIES$3 = 5;
 async function createAntigravityMessages(request) {
 const endpoint = request.stream ? ANTIGRAVITY_STREAM_URL : ANTIGRAVITY_NO_STREAM_URL;
 const body = buildGeminiRequest(request);
-for (let attempt = 0; attempt <= MAX_RETRIES; attempt++) {
+for (let attempt = 0; attempt <= MAX_RETRIES$3; attempt++) {
 const accessToken = await getValidAccessToken();
 if (!accessToken) return createErrorResponse("authentication_error", "No valid Antigravity access token available. Please run login first.", 401);
-consola.debug(`Antigravity request to ${endpoint} (attempt ${attempt + 1}/${MAX_RETRIES + 1})`);
+consola.debug(`Antigravity request to ${endpoint} (attempt ${attempt + 1}/${MAX_RETRIES$3 + 1})`);
 try {
 const response = await fetch(endpoint, {
 method: "POST",
@@ -2184,7 +2184,7 @@ async function createAntigravityMessages(request) {
 });
 if (response.ok) return request.stream ? transformStreamResponse(response, request.model) : await transformNonStreamResponse(response, request.model);
 const errorResult = await handleApiError(response);
-if (errorResult.shouldRetry && attempt < MAX_RETRIES) {
+if (errorResult.shouldRetry && attempt < MAX_RETRIES$3) {
 consola.info(`Rate limited, retrying in ${errorResult.retryDelayMs}ms...`);
 await sleep(errorResult.retryDelayMs);
 continue;
@@ -2192,7 +2192,7 @@ async function createAntigravityMessages(request) {
 return errorResult.response;
 } catch (error) {
 consola.error("Antigravity messages request error:", error);
-if (attempt < MAX_RETRIES) {
+if (attempt < MAX_RETRIES$3) {
 await sleep(500);
 continue;
 }
@@ -2204,7 +2204,7 @@ async function createAntigravityMessages(request) {
 /**
 * Parse retry delay from error response
 */
-function parseRetryDelay(errorText) {
+function parseRetryDelay$3(errorText) {
 try {
 const details = JSON.parse(errorText).error?.details ?? [];
 for (const detail of details) {
@@ -2234,7 +2234,7 @@ async function handleApiError(response) {
 await rotateAccount();
 return {
 shouldRetry: true,
-retryDelayMs: parseRetryDelay(errorText),
+retryDelayMs: parseRetryDelay$3(errorText),
 response: createErrorResponse("api_error", `Antigravity API error: ${response.status}`, response.status)
 };
 }
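
The `$3` suffixes here are a bundler artifact rather than a behavior change: this release adds three more Zen modules that each declare their own `MAX_RETRIES`, `DEFAULT_RETRY_DELAY`, and `parseRetryDelay`, so when the sources are hoisted into the single `main.js` scope, the duplicate top-level names get numeric suffixes to stay unique. A minimal sketch of that effect, with hypothetical module names:

```js
// retry-a.js and retry-b.js are hypothetical source modules that both
// declare a top-level MAX_RETRIES. After scope hoisting, a bundler keeps
// one name as-is and renames the collision with a numeric suffix:
const MAX_RETRIES = 5;   // from retry-a.js
const MAX_RETRIES$1 = 3; // from retry-b.js, renamed to avoid a clash
console.log(MAX_RETRIES, MAX_RETRIES$1);
```
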
@@ -2315,6 +2315,18 @@ function transformStreamResponse(response, model) {
 } });
 }
 /**
+* Process candidate parts and handle finish
+*/
+function processCandidate(candidate, state$1, emit) {
+const parts = candidate?.content?.parts ?? [];
+for (const part of parts) processPart(part, state$1, emit);
+if (candidate?.finishReason === "STOP") {
+handleFinish(state$1, emit);
+return true;
+}
+return false;
+}
+/**
 * Process the stream and emit events
 */
 async function processStream(reader, decoder, state$1, controller) {
@@ -2323,8 +2335,7 @@ async function processStream(reader, decoder, state$1, controller) {
 while (!finished) {
 const { done, value } = await reader.read();
 if (done) break;
-const chunk = decoder.decode(value, { stream: true });
-const lines = processChunk(chunk, state$1);
+const lines = processChunk(decoder.decode(value, { stream: true }), state$1);
 for (const line of lines) {
 if (finished) break;
 const data = parseSSELine(line);
@@ -2334,11 +2345,7 @@ async function processStream(reader, decoder, state$1, controller) {
 state$1.inputTokens = usage.promptTokenCount ?? state$1.inputTokens;
 state$1.outputTokens = usage.candidatesTokenCount ?? state$1.outputTokens;
 }
-
-const parts = candidate?.content?.parts ?? [];
-for (const part of parts) processPart(part, state$1, emit);
-if (candidate?.finishReason === "STOP") {
-handleFinish(state$1, emit);
+if (candidates[0] && processCandidate(candidates[0], state$1, emit)) {
 finished = true;
 break;
 }
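
Inlining `decoder.decode(value, { stream: true })` keeps the streaming decode semantics intact: with `stream: true`, a multi-byte UTF-8 character split across two network chunks is buffered instead of being replaced with U+FFFD. A standalone sketch (not from the package) showing the difference:

```js
// Two CJK characters encode to six UTF-8 bytes; slicing at byte 4 splits
// the second character across "chunks", as can happen on a network stream.
const bytes = new TextEncoder().encode("数据");
const chunkA = bytes.slice(0, 4);
const chunkB = bytes.slice(4);

// One-shot decoding mangles the split character into replacement chars:
console.log(new TextDecoder().decode(chunkA) + new TextDecoder().decode(chunkB));

// A single decoder with { stream: true } buffers the partial sequence:
const decoder = new TextDecoder();
console.log(decoder.decode(chunkA, { stream: true }) + decoder.decode(chunkB, { stream: true }));
```
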
@@ -3263,6 +3270,23 @@ usageRoute.get("/", async (c) => {
 
 //#endregion
 //#region src/services/zen/create-chat-completions.ts
+const MAX_RETRIES$2 = 5;
+const DEFAULT_RETRY_DELAY$2 = 500;
+/**
+* Parse retry delay from error response
+*/
+function parseRetryDelay$2(response, errorText) {
+const retryAfter = response.headers.get("Retry-After");
+if (retryAfter) {
+const seconds = Number.parseInt(retryAfter, 10);
+if (!Number.isNaN(seconds)) return seconds * 1e3;
+}
+try {
+const errorData = JSON.parse(errorText);
+if (errorData.error?.retry_after) return errorData.error.retry_after * 1e3;
+} catch {}
+return DEFAULT_RETRY_DELAY$2;
+}
 /**
 * Create chat completions via OpenCode Zen
 */
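
`parseRetryDelay$2` resolves the wait time in a fixed order: a numeric `Retry-After` header (in seconds) wins, then a `retry_after` field in the JSON error body, and otherwise the 500 ms default applies. A self-contained re-implementation for illustration only, since the bundled helper is not exported:

```js
// Illustrative copy of the retry-delay precedence used above.
function resolveRetryDelay(response, errorText, fallbackMs = 500) {
  const retryAfter = response.headers.get("Retry-After");
  if (retryAfter) {
    const seconds = Number.parseInt(retryAfter, 10);
    if (!Number.isNaN(seconds)) return seconds * 1000; // header wins
  }
  try {
    const body = JSON.parse(errorText);
    if (body.error?.retry_after) return body.error.retry_after * 1000; // then the error body
  } catch {}
  return fallbackMs; // then the default
}

console.log(resolveRetryDelay(new Response(null, { status: 429, headers: { "Retry-After": "3" } }), "")); // 3000
console.log(resolveRetryDelay(new Response(null, { status: 429 }), '{"error":{"retry_after":2}}'));       // 2000
console.log(resolveRetryDelay(new Response(null, { status: 503 }), "upstream unavailable"));              // 500
```
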
@@ -3270,21 +3294,36 @@ async function createZenChatCompletions(request, signal) {
 const apiKey = state.zenApiKey;
 if (!apiKey) throw new Error("Zen API key not configured");
 consola.debug(`Zen chat completions request for model: ${request.model}`);
-
-
-
-
-
-
-
-
-
-
+for (let attempt = 0; attempt <= MAX_RETRIES$2; attempt++) try {
+const response = await fetch("https://opencode.ai/zen/v1/chat/completions", {
+method: "POST",
+headers: {
+"Content-Type": "application/json",
+Authorization: `Bearer ${apiKey}`
+},
+body: JSON.stringify(request),
+signal
+});
+if (response.ok) return response;
 const errorText = await response.text();
+if ((response.status === 429 || response.status >= 500) && attempt < MAX_RETRIES$2) {
+const retryDelay = parseRetryDelay$2(response, errorText);
+consola.info(`Zen rate limited (${response.status}), retrying in ${retryDelay}ms...`);
+await sleep(retryDelay);
+continue;
+}
 consola.error(`Zen API error: ${response.status} ${errorText}`);
 throw new Error(`Zen API error: ${response.status} ${errorText}`);
+} catch (error) {
+if (error instanceof Error && error.name === "AbortError") throw error;
+if (attempt < MAX_RETRIES$2) {
+consola.warn(`Zen request failed, retrying... (${attempt + 1})`);
+await sleep(DEFAULT_RETRY_DELAY$2);
+continue;
+}
+throw error;
 }
-
+throw new Error("Max retries exceeded");
 }
 
 //#endregion
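
`createZenChatCompletions` now retries transient failures (HTTP 429 and 5xx, plus non-abort network errors) and returns the upstream `Response` untouched on success, so a streaming body can be piped straight back to the caller. A hypothetical end-to-end call through the proxy with streaming enabled; the host and port are assumptions, and it presumes the chat route forwards the SSE body unchanged, as the responses route added later in this diff does:

```js
// Hypothetical streaming call through the proxy (ES module, Node 18+).
const res = await fetch("http://localhost:4141/zen/v1/chat/completions", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    model: "claude-sonnet-4-5",
    stream: true,
    messages: [{ role: "user", content: "Reply with a short greeting." }],
  }),
});

// Print the SSE lines as they arrive.
const decoder = new TextDecoder();
for await (const chunk of res.body) {
  process.stdout.write(decoder.decode(chunk, { stream: true }));
}
```
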
@@ -3319,6 +3358,23 @@ zenCompletionRoutes.post("/", async (c) => {
 
 //#endregion
 //#region src/services/zen/create-messages.ts
+const MAX_RETRIES$1 = 5;
+const DEFAULT_RETRY_DELAY$1 = 500;
+/**
+* Parse retry delay from error response headers or body
+*/
+function parseRetryDelay$1(response, errorText) {
+const retryAfter = response.headers.get("Retry-After");
+if (retryAfter) {
+const seconds = Number.parseInt(retryAfter, 10);
+if (!Number.isNaN(seconds)) return seconds * 1e3;
+}
+try {
+const errorData = JSON.parse(errorText);
+if (errorData.error?.retry_after) return errorData.error.retry_after * 1e3;
+} catch {}
+return DEFAULT_RETRY_DELAY$1;
+}
 /**
 * Create messages via OpenCode Zen (Anthropic format)
 */
@@ -3326,22 +3382,37 @@ async function createZenMessages(request, signal) {
 const apiKey = state.zenApiKey;
 if (!apiKey) throw new Error("Zen API key not configured");
 consola.debug(`Zen messages request for model: ${request.model}`);
-
-
-
-
-
-
-
-
-
-
-
+for (let attempt = 0; attempt <= MAX_RETRIES$1; attempt++) try {
+const response = await fetch("https://opencode.ai/zen/v1/messages", {
+method: "POST",
+headers: {
+"Content-Type": "application/json",
+"x-api-key": apiKey,
+"anthropic-version": "2023-06-01"
+},
+body: JSON.stringify(request),
+signal
+});
+if (response.ok) return response;
 const errorText = await response.text();
+if ((response.status === 429 || response.status >= 500) && attempt < MAX_RETRIES$1) {
+const retryDelay = parseRetryDelay$1(response, errorText);
+consola.info(`Zen rate limited (${response.status}), retrying in ${retryDelay}ms...`);
+await sleep(retryDelay);
+continue;
+}
 consola.error(`Zen Messages API error: ${response.status} ${errorText}`);
 throw new Error(`Zen Messages API error: ${response.status} ${errorText}`);
+} catch (error) {
+if (error instanceof Error && error.name === "AbortError") throw error;
+if (attempt < MAX_RETRIES$1) {
+consola.warn(`Zen request failed, retrying... (${attempt + 1})`);
+await sleep(DEFAULT_RETRY_DELAY$1);
+continue;
+}
+throw error;
 }
-
+throw new Error("Max retries exceeded");
 }
 
 //#endregion
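
`createZenMessages` authenticates upstream with the Anthropic-style `x-api-key` header and pins `anthropic-version: 2023-06-01`, while a client only posts an Anthropic-format body to the proxy. A hypothetical call against the dedicated messages route (host and port are assumptions):

```js
// Hypothetical Anthropic-format request through the proxy (ES module, Node 18+).
const res = await fetch("http://localhost:4141/zen/v1/messages", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    model: "claude-opus-4-5",
    max_tokens: 256,
    messages: [{ role: "user", content: "Summarize this diff in one sentence." }],
  }),
});
console.log(await res.json());
```
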
@@ -3397,6 +3468,94 @@ zenModelRoutes.get("/", async (c) => {
 }
 });
 
+//#endregion
+//#region src/services/zen/create-responses.ts
+const MAX_RETRIES = 5;
+const DEFAULT_RETRY_DELAY = 500;
+/**
+* Parse retry delay from error response
+*/
+function parseRetryDelay(response, errorText) {
+const retryAfter = response.headers.get("Retry-After");
+if (retryAfter) {
+const seconds = Number.parseInt(retryAfter, 10);
+if (!Number.isNaN(seconds)) return seconds * 1e3;
+}
+try {
+const errorData = JSON.parse(errorText);
+if (errorData.error?.retry_after) return errorData.error.retry_after * 1e3;
+} catch {}
+return DEFAULT_RETRY_DELAY;
+}
+/**
+* Create responses via OpenCode Zen (OpenAI Responses API format)
+*/
+async function createZenResponses(request, signal) {
+const apiKey = state.zenApiKey;
+if (!apiKey) throw new Error("Zen API key not configured");
+consola.debug(`Zen responses request for model: ${request.model}`);
+for (let attempt = 0; attempt <= MAX_RETRIES; attempt++) try {
+const response = await fetch("https://opencode.ai/zen/v1/responses", {
+method: "POST",
+headers: {
+"Content-Type": "application/json",
+Authorization: `Bearer ${apiKey}`
+},
+body: JSON.stringify(request),
+signal
+});
+if (response.ok) return response;
+const errorText = await response.text();
+if ((response.status === 429 || response.status >= 500) && attempt < MAX_RETRIES) {
+const retryDelay = parseRetryDelay(response, errorText);
+consola.info(`Zen rate limited (${response.status}), retrying in ${retryDelay}ms...`);
+await sleep(retryDelay);
+continue;
+}
+consola.error(`Zen Responses API error: ${response.status} ${errorText}`);
+throw new Error(`Zen Responses API error: ${response.status} ${errorText}`);
+} catch (error) {
+if (error instanceof Error && error.name === "AbortError") throw error;
+if (attempt < MAX_RETRIES) {
+consola.warn(`Zen request failed, retrying... (${attempt + 1})`);
+await sleep(DEFAULT_RETRY_DELAY);
+continue;
+}
+throw error;
+}
+throw new Error("Max retries exceeded");
+}
+
+//#endregion
+//#region src/routes/zen/responses/route.ts
+const zenResponsesRoutes = new Hono();
+zenResponsesRoutes.post("/", async (c) => {
+if (!state.zenMode || !state.zenApiKey) return c.json({ error: "Zen mode is not enabled. Start with --zen flag." }, 400);
+try {
+const body = await c.req.json();
+consola.debug("Zen responses request:", body.model);
+const response = await createZenResponses(body);
+if (body.stream) {
+const headers = new Headers();
+headers.set("Content-Type", "text/event-stream");
+headers.set("Cache-Control", "no-cache");
+headers.set("Connection", "keep-alive");
+return new Response(response.body, {
+status: response.status,
+headers
+});
+}
+const data = await response.json();
+return c.json(data);
+} catch (error) {
+consola.error("Zen responses error:", error);
+return c.json({ error: {
+message: error instanceof Error ? error.message : "Unknown error",
+type: "zen_error"
+} }, 500);
+}
+});
+
 //#endregion
 //#region src/server.ts
 const server = new Hono();
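
The new `create-responses.ts` region follows the same retry pattern as the chat and messages clients but targets the OpenAI Responses API, and `zenResponsesRoutes` either streams the upstream body back as SSE or returns the JSON payload. A hypothetical request against the dedicated route, which is mounted as `/zen/v1/responses` in the routing hunk below (host and port are assumptions):

```js
// Hypothetical Responses API call through the proxy (ES module, Node 18+).
const res = await fetch("http://localhost:4141/zen/v1/responses", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    model: "gpt-5-codex",
    input: "Write a one-line commit message for adding a /v1/responses route.",
  }),
});
console.log(await res.json());
```
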
@@ -3473,9 +3632,20 @@ server.all("/v1/messages", async (c) => {
 if (state.antigravityMode) return antigravityMessagesRoute.fetch(req, c.env);
 return messageRoutes.fetch(req, c.env);
 });
+server.all("/v1/responses/*", async (c) => {
+const req = createSubRequest(c, "/v1/responses");
+if (state.zenMode) return zenResponsesRoutes.fetch(req, c.env);
+return c.json({ error: "Responses API requires Zen mode" }, 400);
+});
+server.all("/v1/responses", async (c) => {
+const req = createSubRequest(c, "/v1/responses");
+if (state.zenMode) return zenResponsesRoutes.fetch(req, c.env);
+return c.json({ error: "Responses API requires Zen mode" }, 400);
+});
 server.route("/zen/v1/chat/completions", zenCompletionRoutes);
 server.route("/zen/v1/models", zenModelRoutes);
 server.route("/zen/v1/messages", zenMessageRoutes);
+server.route("/zen/v1/responses", zenResponsesRoutes);
 server.route("/antigravity/v1/chat/completions", antigravityChatCompletionsRoute);
 server.route("/antigravity/v1/models", antigravityModelsRoute);
 server.route("/antigravity/v1/messages", antigravityMessagesRoute);
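
With these routes in place, `/v1/responses` is only forwarded to the Zen handler when the server was started with `--zen`; otherwise it answers with a 400 and an explanatory error. A hypothetical smoke test of that behavior (host and port are assumptions):

```js
// Hypothetical check of the new /v1/responses routing (ES module, Node 18+).
const res = await fetch("http://localhost:4141/v1/responses", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ model: "gpt-5.2", input: "ping" }),
});
console.log(res.status);       // 400 if the server was started without --zen
console.log(await res.json()); // { error: "Responses API requires Zen mode" }
```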