opencode-aicodewith-auth 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +114 -0
- package/dist/index.d.ts +5 -0
- package/dist/index.js +1290 -0
- package/dist/lib/constants.d.ts +25 -0
- package/dist/lib/logger.d.ts +5 -0
- package/dist/lib/prompts/codex-opencode-bridge.d.ts +18 -0
- package/dist/lib/prompts/codex.d.ts +3 -0
- package/dist/lib/prompts/opencode-codex.d.ts +20 -0
- package/dist/lib/request/fetch-helpers.d.ts +11 -0
- package/dist/lib/request/helpers/input-utils.d.ts +5 -0
- package/dist/lib/request/helpers/model-map.d.ts +27 -0
- package/dist/lib/request/request-transformer.d.ts +7 -0
- package/dist/lib/request/response-handler.d.ts +13 -0
- package/dist/lib/types.d.ts +56 -0
- package/dist/provider.d.ts +26 -0
- package/dist/provider.js +63 -0
- package/package.json +63 -0
package/dist/index.js
ADDED
|
@@ -0,0 +1,1290 @@
|
|
|
1
|
+
// @bun
|
|
2
|
+
// provider.ts
|
|
3
|
+
import { createAnthropic } from "@ai-sdk/anthropic";
|
|
4
|
+
import { createGoogleGenerativeAI } from "@ai-sdk/google";
|
|
5
|
+
import { createOpenAI } from "@ai-sdk/openai";
|
|
6
|
+
var isClaude = (modelId) => modelId.startsWith("claude-");
|
|
7
|
+
var isGemini = (modelId) => modelId.startsWith("gemini-");
|
|
8
|
+
var isResponses = (modelId) => modelId.startsWith("gpt-") || modelId.startsWith("codex");
|
|
9
|
+
var normalizeModelId = (modelId) => String(modelId).trim();
|
|
10
|
+
// Factory for the "aicodewith" provider: routes a model id to the matching
// upstream SDK (Anthropic for "claude-*", Google for "gemini-*", OpenAI for
// everything else). `options` may carry shared apiKey/baseURL/headers/fetch
// plus per-vendor overrides under `options.anthropic` / `options.google`.
function createAicodewith(options = {}) {
  const openai = createOpenAI({
    apiKey: options.apiKey,
    baseURL: options.baseURL,
    headers: options.headers,
    fetch: options.fetch
  });
  // Older @ai-sdk/openai versions expose only `.chat`; newer ones expose
  // `.languageModel` — probe for whichever is available.
  const openaiLanguageModel = typeof openai.languageModel === "function" ? openai.languageModel : openai.chat;
  const openaiChatModel = typeof openai.chat === "function" ? openai.chat : openaiLanguageModel;
  const anthropic = createAnthropic({
    apiKey: options.anthropic?.apiKey ?? options.apiKey,
    baseURL: options.anthropic?.baseURL,
    headers: options.anthropic?.headers ?? options.headers,
    fetch: options.fetch
  });
  const google = createGoogleGenerativeAI({
    apiKey: options.google?.apiKey ?? options.apiKey,
    baseURL: options.google?.baseURL,
    headers: options.google?.headers ?? options.headers,
    fetch: options.fetch
  });
  // Default routing: vendor-prefixed ids go to their vendor SDK; "gpt-*" and
  // "codex*" prefer the OpenAI Responses API when the SDK supports it.
  const createModel = (modelId) => {
    const id = normalizeModelId(modelId);
    if (isClaude(id))
      return anthropic.languageModel(id);
    if (isGemini(id))
      return google.languageModel(id);
    if (isResponses(id) && typeof openai.responses === "function")
      return openai.responses(id);
    return openaiLanguageModel(id);
  };
  const provider = (modelId) => createModel(modelId);
  provider.languageModel = createModel;
  provider.chat = (modelId) => {
    const id = normalizeModelId(modelId);
    if (isClaude(id))
      return anthropic.languageModel(id);
    if (isGemini(id))
      return google.languageModel(id);
    return openaiChatModel(id);
  };
  provider.responses = (modelId) => {
    const id = normalizeModelId(modelId);
    if (isClaude(id) || isGemini(id))
      return provider.chat(id);
    // Fix: guard like createModel does — older SDK versions have no
    // `.responses`, so fall back to the generic language model instead of
    // throwing "openai.responses is not a function".
    if (typeof openai.responses === "function")
      return openai.responses(id);
    return openaiLanguageModel(id);
  };
  return provider;
}
|
|
59
|
+
var aicodewith = createAicodewith();
|
|
60
|
+
|
|
61
|
+
// index.ts
|
|
62
|
+
import { mkdir as mkdir2, readFile as readFile2, writeFile as writeFile2 } from "fs/promises";
|
|
63
|
+
import path from "path";
|
|
64
|
+
import os from "os";
|
|
65
|
+
|
|
66
|
+
// lib/constants.ts
|
|
67
|
+
var PLUGIN_NAME = "opencode-aicodewith-auth";
|
|
68
|
+
var PROVIDER_ID = "aicodewith";
|
|
69
|
+
var AUTH_METHOD_LABEL = "AICodewith API Key";
|
|
70
|
+
var CODEX_BASE_URL = "https://api.aicodewith.com/chatgpt/v1";
|
|
71
|
+
var AICODEWITH_ANTHROPIC_BASE_URL = "https://api.aicodewith.com";
|
|
72
|
+
var AICODEWITH_GEMINI_BASE_URL = "https://api.aicodewith.com/gemini_cli";
|
|
73
|
+
var GEMINI_USER_AGENT = "GeminiCLI/v25.2.1 (darwin; arm64)";
|
|
74
|
+
var GEMINI_API_CLIENT = "google-genai-sdk/1.30.0 gl-node/v25.2.1";
|
|
75
|
+
var GEMINI_PRIVILEGED_USER_ID_ENV = "AICODEWITH_GEMINI_USER_ID";
|
|
76
|
+
var USER_AGENT = "codex_cli_rs/0.77.0 (Mac OS 26.2.0; arm64) iTerm.app/3.6.6";
|
|
77
|
+
var ORIGINATOR = "codex_cli_rs";
|
|
78
|
+
var HEADER_NAMES = {
|
|
79
|
+
AUTHORIZATION: "authorization",
|
|
80
|
+
ORIGINATOR: "originator",
|
|
81
|
+
SESSION_ID: "session_id",
|
|
82
|
+
CONVERSATION_ID: "conversation_id",
|
|
83
|
+
USER_AGENT: "user-agent",
|
|
84
|
+
ACCEPT: "accept",
|
|
85
|
+
CONTENT_TYPE: "content-type",
|
|
86
|
+
OPENAI_BETA: "openai-beta",
|
|
87
|
+
CHATGPT_ACCOUNT_ID: "chatgpt-account-id",
|
|
88
|
+
X_GOOG_API_KEY: "x-goog-api-key",
|
|
89
|
+
X_GOOG_API_CLIENT: "x-goog-api-client",
|
|
90
|
+
X_GEMINI_PRIVILEGED_USER_ID: "x-gemini-api-privileged-user-id"
|
|
91
|
+
};
|
|
92
|
+
|
|
93
|
+
// lib/logger.ts
|
|
94
|
+
import { writeFileSync, mkdirSync, existsSync } from "fs";
|
|
95
|
+
import { join } from "path";
|
|
96
|
+
import { homedir } from "os";
|
|
97
|
+
var LOGGING_ENABLED = process.env.ENABLE_PLUGIN_REQUEST_LOGGING === "1";
|
|
98
|
+
var DEBUG_ENABLED = process.env.DEBUG_CODEX_PLUGIN === "1" || LOGGING_ENABLED;
|
|
99
|
+
var LOG_DIR = join(homedir(), ".opencode", "logs", PLUGIN_NAME);
|
|
100
|
+
if (LOGGING_ENABLED) {
|
|
101
|
+
console.log(`[${PLUGIN_NAME}] Request logging ENABLED - logs will be saved to:`, LOG_DIR);
|
|
102
|
+
}
|
|
103
|
+
if (DEBUG_ENABLED && !LOGGING_ENABLED) {
|
|
104
|
+
console.log(`[${PLUGIN_NAME}] Debug logging ENABLED`);
|
|
105
|
+
}
|
|
106
|
+
var requestCounter = 0;
|
|
107
|
+
// Dump one request/response payload to a per-request JSON file under LOG_DIR.
// No-op unless request logging was enabled (ENABLE_PLUGIN_REQUEST_LOGGING=1
// at module load). `stage` labels the pipeline phase and is embedded in the
// filename; `data` is spread into the record next to timestamp/requestId/stage.
function logRequest(stage, data) {
  if (!LOGGING_ENABLED)
    return;
  if (!existsSync(LOG_DIR)) {
    mkdirSync(LOG_DIR, { recursive: true });
  }
  const timestamp = new Date().toISOString();
  const requestId = ++requestCounter;
  const filename = join(LOG_DIR, `request-${requestId}-${stage}.json`);
  try {
    writeFileSync(filename, JSON.stringify({
      timestamp,
      requestId,
      stage,
      ...data
    }, null, 2), "utf8");
    // Fix: the message previously printed the literal text "$(unknown)"
    // (shell-style substitution inside a JS template literal); interpolate
    // the actual log file path instead.
    console.log(`[${PLUGIN_NAME}] Logged ${stage} to ${filename}`);
  } catch (e) {
    const error = e;
    console.error(`[${PLUGIN_NAME}] Failed to write log:`, error.message);
  }
}
|
|
129
|
+
// Emit a debug line with the plugin prefix; silent unless debug or request
// logging was switched on via environment variables at module load.
function logDebug(message, data) {
  if (!DEBUG_ENABLED) {
    return;
  }
  const line = `[${PLUGIN_NAME}] ${message}`;
  if (data === undefined) {
    console.log(line);
  } else {
    console.log(line, data);
  }
}
|
|
138
|
+
// Emit a warning with the plugin prefix; suppressed when neither debug nor
// request logging is enabled.
function logWarn(message, data) {
  if (!DEBUG_ENABLED && !LOGGING_ENABLED) {
    return;
  }
  const line = `[${PLUGIN_NAME}] ${message}`;
  if (data === undefined) {
    console.warn(line);
  } else {
    console.warn(line, data);
  }
}
|
|
147
|
+
|
|
148
|
+
// lib/prompts/codex.ts
|
|
149
|
+
import { existsSync as existsSync2, mkdirSync as mkdirSync2, readFileSync, writeFileSync as writeFileSync2 } from "fs";
|
|
150
|
+
import { homedir as homedir2 } from "os";
|
|
151
|
+
import { dirname, join as join2 } from "path";
|
|
152
|
+
import { fileURLToPath } from "url";
|
|
153
|
+
var GITHUB_API_RELEASES = "https://api.github.com/repos/openai/codex/releases/latest";
|
|
154
|
+
var GITHUB_HTML_RELEASES = "https://github.com/openai/codex/releases/latest";
|
|
155
|
+
var CACHE_DIR = join2(homedir2(), ".opencode", "cache");
|
|
156
|
+
var __filename2 = fileURLToPath(import.meta.url);
|
|
157
|
+
var __dirname2 = dirname(__filename2);
|
|
158
|
+
var PROMPT_FILES = {
|
|
159
|
+
"gpt-5.2-codex": "gpt-5.2-codex_prompt.md",
|
|
160
|
+
"codex-max": "gpt-5.1-codex-max_prompt.md",
|
|
161
|
+
codex: "gpt_5_codex_prompt.md",
|
|
162
|
+
"gpt-5.2": "gpt_5_2_prompt.md",
|
|
163
|
+
"gpt-5.1": "gpt_5_1_prompt.md"
|
|
164
|
+
};
|
|
165
|
+
var CACHE_FILES = {
|
|
166
|
+
"gpt-5.2-codex": "gpt-5.2-codex-instructions.md",
|
|
167
|
+
"codex-max": "codex-max-instructions.md",
|
|
168
|
+
codex: "codex-instructions.md",
|
|
169
|
+
"gpt-5.2": "gpt-5.2-instructions.md",
|
|
170
|
+
"gpt-5.1": "gpt-5.1-instructions.md"
|
|
171
|
+
};
|
|
172
|
+
var FALLBACK_FILE = join2(__dirname2, "fallback-instructions.txt");
|
|
173
|
+
// Map a normalized model id onto the prompt-file family keying
// PROMPT_FILES/CACHE_FILES. Check order matters: most specific substring
// first. Unrecognized ids fall back to the "gpt-5.1" general prompt.
function getModelFamily(normalizedModel) {
  if (normalizedModel.includes("gpt-5.2-codex") || normalizedModel.includes("gpt 5.2 codex")) {
    return "gpt-5.2-codex";
  }
  if (normalizedModel.includes("codex-max")) {
    return "codex-max";
  }
  // Fix: dropped the redundant `startsWith("codex-")` clause — any string
  // starting with "codex-" already satisfies `includes("codex")`.
  if (normalizedModel.includes("codex")) {
    return "codex";
  }
  if (normalizedModel.includes("gpt-5.2")) {
    return "gpt-5.2";
  }
  return "gpt-5.1";
}
|
|
188
|
+
// Determine the latest openai/codex release tag. Tries the GitHub REST API
// first; on failure, follows the HTML "latest" redirect and reads the tag
// from the resolved URL, finally scraping the page markup as a last resort.
async function getLatestReleaseTag() {
  try {
    const apiResponse = await fetch(GITHUB_API_RELEASES);
    if (apiResponse.ok) {
      const payload = await apiResponse.json();
      if (payload.tag_name) {
        return payload.tag_name;
      }
    }
  } catch {}
  const pageResponse = await fetch(GITHUB_HTML_RELEASES);
  if (!pageResponse.ok) {
    throw new Error(`Failed to fetch latest release: ${pageResponse.status}`);
  }
  // The "latest" page redirects to .../releases/tag/<tag>.
  const resolvedUrl = pageResponse.url;
  if (resolvedUrl) {
    const segments = resolvedUrl.split("/tag/");
    const candidate = segments[segments.length - 1];
    if (candidate && !candidate.includes("/")) {
      return candidate;
    }
  }
  // Last resort: scrape the tag out of the release page HTML.
  const pageBody = await pageResponse.text();
  const tagMatch = pageBody.match(/\/openai\/codex\/releases\/tag\/([^"]+)/);
  if (tagMatch && tagMatch[1]) {
    return tagMatch[1];
  }
  throw new Error("Failed to determine latest release tag from GitHub");
}
|
|
217
|
+
// Fetch the upstream Codex system prompt for `normalizedModel` from the
// openai/codex GitHub repo, with a 15-minute-TTL disk cache plus ETag
// revalidation keyed on the latest release tag. Falls back to the bundled
// prompt file when both the network and the cache are unavailable.
async function getCodexInstructions(normalizedModel = "gpt-5.1-codex") {
  const modelFamily = getModelFamily(normalizedModel);
  const promptFile = PROMPT_FILES[modelFamily];
  const cacheFile = join2(CACHE_DIR, CACHE_FILES[modelFamily]);
  const cacheMetaFile = join2(CACHE_DIR, `${CACHE_FILES[modelFamily].replace(".md", "-meta.json")}`);
  try {
    let cachedETag = null;
    let cachedTag = null;
    let cachedTimestamp = null;
    if (existsSync2(cacheMetaFile)) {
      const metadata = JSON.parse(readFileSync(cacheMetaFile, "utf8"));
      cachedETag = metadata.etag;
      cachedTag = metadata.tag;
      cachedTimestamp = metadata.lastChecked;
    }
    // Within the TTL window, trust the cached copy with no network traffic.
    const CACHE_TTL_MS = 15 * 60 * 1000;
    if (cachedTimestamp && Date.now() - cachedTimestamp < CACHE_TTL_MS && existsSync2(cacheFile)) {
      return readFileSync(cacheFile, "utf8");
    }
    const latestTag = await getLatestReleaseTag();
    const url = `https://raw.githubusercontent.com/openai/codex/${latestTag}/codex-rs/core/${promptFile}`;
    // Fix: strict inequality (was loose `!=`). A new release tag invalidates
    // the stored ETag, since the prompt URL now points at a different ref.
    if (cachedTag !== latestTag) {
      cachedETag = null;
    }
    const headers = {};
    if (cachedETag) {
      headers["If-None-Match"] = cachedETag;
    }
    const response = await fetch(url, { headers });
    if (response.status === 304) {
      // Not modified upstream: serve the cached copy if it survived on disk.
      if (existsSync2(cacheFile)) {
        return readFileSync(cacheFile, "utf8");
      }
    }
    if (response.ok) {
      const instructions = await response.text();
      const newETag = response.headers.get("etag");
      if (!existsSync2(CACHE_DIR)) {
        mkdirSync2(CACHE_DIR, { recursive: true });
      }
      writeFileSync2(cacheFile, instructions, "utf8");
      writeFileSync2(cacheMetaFile, JSON.stringify({
        etag: newETag,
        tag: latestTag,
        lastChecked: Date.now(),
        url
      }), "utf8");
      return instructions;
    }
    throw new Error(`HTTP ${response.status}`);
  } catch (error) {
    const err = error;
    console.error(`[${PLUGIN_NAME}] Failed to fetch Codex instructions from GitHub:`, err.message);
    if (existsSync2(FALLBACK_FILE)) {
      console.error(`[${PLUGIN_NAME}] Using fallback instructions`);
      return readFileSync(FALLBACK_FILE, "utf8");
    }
    throw new Error("No fallback instructions available");
  }
}
|
|
277
|
+
|
|
278
|
+
// lib/prompts/codex-opencode-bridge.ts
|
|
279
|
+
var CODEX_OPENCODE_BRIDGE = `# Codex Running in OpenCode
|
|
280
|
+
|
|
281
|
+
You are running Codex through OpenCode, an open-source terminal coding assistant. OpenCode provides different tools but follows Codex operating principles.
|
|
282
|
+
|
|
283
|
+
## CRITICAL: Tool Replacements
|
|
284
|
+
|
|
285
|
+
<critical_rule priority="0">
|
|
286
|
+
\u274C APPLY_PATCH DOES NOT EXIST \u2192 \u2705 USE "edit" INSTEAD
|
|
287
|
+
- NEVER use: apply_patch, applyPatch
|
|
288
|
+
- ALWAYS use: edit tool for ALL file modifications
|
|
289
|
+
- Before modifying files: Verify you're using "edit", NOT "apply_patch"
|
|
290
|
+
</critical_rule>
|
|
291
|
+
|
|
292
|
+
<critical_rule priority="0">
|
|
293
|
+
\u274C UPDATE_PLAN DOES NOT EXIST \u2192 \u2705 USE "todowrite" INSTEAD
|
|
294
|
+
- NEVER use: update_plan, updatePlan, read_plan, readPlan
|
|
295
|
+
- ALWAYS use: todowrite for task/plan updates, todoread to read plans
|
|
296
|
+
- Before plan operations: Verify you're using "todowrite", NOT "update_plan"
|
|
297
|
+
</critical_rule>
|
|
298
|
+
|
|
299
|
+
## Available OpenCode Tools
|
|
300
|
+
|
|
301
|
+
**File Operations:**
|
|
302
|
+
- \`write\` - Create new files
|
|
303
|
+
- Overwriting existing files requires a prior Read in this session; default to ASCII unless the file already uses Unicode.
|
|
304
|
+
- \`edit\` - Modify existing files (REPLACES apply_patch)
|
|
305
|
+
- Requires a prior Read in this session; preserve exact indentation; ensure \`oldString\` uniquely matches or use \`replaceAll\`; edit fails if ambiguous or missing.
|
|
306
|
+
- \`read\` - Read file contents
|
|
307
|
+
|
|
308
|
+
**Search/Discovery:**
|
|
309
|
+
- \`grep\` - Search file contents (tool, not bash grep); use \`include\` to filter patterns; set \`path\` only when not searching workspace root; for cross-file match counts use bash with \`rg\`.
|
|
310
|
+
- \`glob\` - Find files by pattern; defaults to workspace cwd unless \`path\` is set.
|
|
311
|
+
- \`list\` - List directories (requires absolute paths)
|
|
312
|
+
|
|
313
|
+
**Execution:**
|
|
314
|
+
- \`bash\` - Run shell commands
|
|
315
|
+
- No workdir parameter; do not include it in tool calls.
|
|
316
|
+
- Always include a short description for the command.
|
|
317
|
+
- Do not use cd; use absolute paths in commands.
|
|
318
|
+
- Quote paths containing spaces with double quotes.
|
|
319
|
+
- Chain multiple commands with ';' or '&&'; avoid newlines.
|
|
320
|
+
- Use Grep/Glob tools for searches; only use bash with \`rg\` when you need counts or advanced features.
|
|
321
|
+
- Do not use \`ls\`/\`cat\` in bash; use \`list\`/\`read\` tools instead.
|
|
322
|
+
- For deletions (rm), verify by listing parent dir with \`list\`.
|
|
323
|
+
|
|
324
|
+
**Network:**
|
|
325
|
+
- \`webfetch\` - Fetch web content
|
|
326
|
+
- Use fully-formed URLs (http/https; http auto-upgrades to https).
|
|
327
|
+
- Always set \`format\` to one of: text | markdown | html; prefer markdown unless otherwise required.
|
|
328
|
+
- Read-only; short cache window.
|
|
329
|
+
|
|
330
|
+
**Task Management:**
|
|
331
|
+
- \`todowrite\` - Manage tasks/plans (REPLACES update_plan)
|
|
332
|
+
- \`todoread\` - Read current plan
|
|
333
|
+
|
|
334
|
+
## Substitution Rules
|
|
335
|
+
|
|
336
|
+
Base instruction says: You MUST use instead:
|
|
337
|
+
apply_patch \u2192 edit
|
|
338
|
+
update_plan \u2192 todowrite
|
|
339
|
+
read_plan \u2192 todoread
|
|
340
|
+
|
|
341
|
+
**Path Usage:** Use per-tool conventions to avoid conflicts:
|
|
342
|
+
- Tool calls: \`read\`, \`edit\`, \`write\`, \`list\` require absolute paths.
|
|
343
|
+
- Searches: \`grep\`/\`glob\` default to the workspace cwd; prefer relative include patterns; set \`path\` only when a different root is needed.
|
|
344
|
+
- Presentation: In assistant messages, show workspace-relative paths; use absolute paths only inside tool calls.
|
|
345
|
+
- Tool schema overrides general path preferences\u2014do not convert required absolute paths to relative.
|
|
346
|
+
|
|
347
|
+
## Verification Checklist
|
|
348
|
+
|
|
349
|
+
Before file/plan modifications:
|
|
350
|
+
1. Am I using "edit" NOT "apply_patch"?
|
|
351
|
+
2. Am I using "todowrite" NOT "update_plan"?
|
|
352
|
+
3. Is this tool in the approved list above?
|
|
353
|
+
4. Am I following each tool's path requirements?
|
|
354
|
+
|
|
355
|
+
If ANY answer is NO \u2192 STOP and correct before proceeding.
|
|
356
|
+
|
|
357
|
+
## OpenCode Working Style
|
|
358
|
+
|
|
359
|
+
**Communication:**
|
|
360
|
+
- Send brief preambles (8-12 words) before tool calls, building on prior context
|
|
361
|
+
- Provide progress updates during longer tasks
|
|
362
|
+
|
|
363
|
+
**Execution:**
|
|
364
|
+
- Keep working autonomously until query is fully resolved before yielding
|
|
365
|
+
- Don't return to user with partial solutions
|
|
366
|
+
|
|
367
|
+
**Code Approach:**
|
|
368
|
+
- New projects: Be ambitious and creative
|
|
369
|
+
- Existing codebases: Surgical precision - modify only what's requested unless explicitly instructed to do otherwise
|
|
370
|
+
|
|
371
|
+
**Testing:**
|
|
372
|
+
- If tests exist: Start specific to your changes, then broader validation
|
|
373
|
+
|
|
374
|
+
## Advanced Tools
|
|
375
|
+
|
|
376
|
+
**Task Tool (Sub-Agents):**
|
|
377
|
+
- Use the Task tool (functions.task) to launch sub-agents
|
|
378
|
+
- Check the Task tool description for current agent types and their capabilities
|
|
379
|
+
- Useful for complex analysis, specialized workflows, or tasks requiring isolated context
|
|
380
|
+
- The agent list is dynamically generated - refer to tool schema for available agents
|
|
381
|
+
|
|
382
|
+
**Parallelization:**
|
|
383
|
+
- When multiple independent tool calls are needed, use multi_tool_use.parallel to run them concurrently.
|
|
384
|
+
- Reserve sequential calls for ordered or data-dependent steps.
|
|
385
|
+
|
|
386
|
+
**MCP Tools:**
|
|
387
|
+
- Model Context Protocol servers provide additional capabilities
|
|
388
|
+
- MCP tools are prefixed: \`mcp__<server-name>__<tool-name>\`
|
|
389
|
+
- Check your available tools for MCP integrations
|
|
390
|
+
- Use when the tool's functionality matches your task needs
|
|
391
|
+
|
|
392
|
+
## What Remains from Codex
|
|
393
|
+
|
|
394
|
+
Sandbox policies, approval mechanisms, final answer formatting, git commit protocols, and file reference formats all follow Codex instructions. In approval policy "never", never request escalations.
|
|
395
|
+
|
|
396
|
+
## Approvals & Safety
|
|
397
|
+
- Assume workspace-write filesystem, network enabled, approval on-failure unless explicitly stated otherwise.
|
|
398
|
+
- When a command fails due to sandboxing or permissions, retry with escalated permissions if allowed by policy, including a one-line justification.
|
|
399
|
+
- Treat destructive commands (e.g., \`rm\`, \`git reset --hard\`) as requiring explicit user request or approval.
|
|
400
|
+
- When uncertain, prefer non-destructive verification first (e.g., confirm file existence with \`list\`, then delete with \`bash\`).`;
|
|
401
|
+
|
|
402
|
+
// lib/prompts/opencode-codex.ts
|
|
403
|
+
import { join as join3 } from "path";
|
|
404
|
+
import { homedir as homedir3 } from "os";
|
|
405
|
+
import { mkdir, readFile, writeFile } from "fs/promises";
|
|
406
|
+
var OPENCODE_CODEX_URL = "https://raw.githubusercontent.com/anomalyco/opencode/dev/packages/opencode/src/session/prompt/codex.txt";
|
|
407
|
+
var CACHE_DIR2 = join3(homedir3(), ".opencode", "cache");
|
|
408
|
+
var CACHE_FILE = join3(CACHE_DIR2, "opencode-codex.txt");
|
|
409
|
+
var CACHE_META_FILE = join3(CACHE_DIR2, "opencode-codex-meta.json");
|
|
410
|
+
// Retrieve OpenCode's codex.txt system prompt, serving the disk cache while
// it is fresh (15-minute TTL) and revalidating with ETag otherwise. Any
// network failure falls back to the cached copy when one exists.
async function getOpenCodeCodexPrompt() {
  await mkdir(CACHE_DIR2, { recursive: true });
  let priorContent = null;
  let priorMeta = null;
  try {
    // Content is read first so it survives a corrupt/missing metadata file.
    priorContent = await readFile(CACHE_FILE, "utf-8");
    priorMeta = JSON.parse(await readFile(CACHE_META_FILE, "utf-8"));
  } catch {}
  const CACHE_TTL_MS = 15 * 60 * 1000;
  const isFresh = priorMeta?.lastChecked && Date.now() - priorMeta.lastChecked < CACHE_TTL_MS;
  if (isFresh && priorContent) {
    return priorContent;
  }
  const headers = {};
  if (priorMeta?.etag) {
    headers["If-None-Match"] = priorMeta.etag;
  }
  try {
    const response = await fetch(OPENCODE_CODEX_URL, { headers });
    if (response.status === 304 && priorContent) {
      return priorContent;
    }
    if (response.ok) {
      const content = await response.text();
      const etag = response.headers.get("etag") || "";
      await writeFile(CACHE_FILE, content, "utf-8");
      const meta = {
        etag,
        lastFetch: new Date().toISOString(),
        lastChecked: Date.now()
      };
      await writeFile(CACHE_META_FILE, JSON.stringify(meta, null, 2), "utf-8");
      return content;
    }
    if (priorContent) {
      return priorContent;
    }
    throw new Error(`Failed to fetch OpenCode codex.txt: ${response.status}`);
  } catch (error) {
    if (priorContent) {
      return priorContent;
    }
    throw new Error(`Failed to fetch OpenCode codex.txt and no cache available: ${error}`);
  }
}
|
|
454
|
+
|
|
455
|
+
// lib/request/helpers/model-map.ts
|
|
456
|
+
var MODEL_MAP = {
|
|
457
|
+
"gpt-5.1-codex": "gpt-5.1-codex",
|
|
458
|
+
"gpt-5.1-codex-low": "gpt-5.1-codex",
|
|
459
|
+
"gpt-5.1-codex-medium": "gpt-5.1-codex",
|
|
460
|
+
"gpt-5.1-codex-high": "gpt-5.1-codex",
|
|
461
|
+
"gpt-5.1-codex-max": "gpt-5.1-codex-max",
|
|
462
|
+
"gpt-5.1-codex-max-low": "gpt-5.1-codex-max",
|
|
463
|
+
"gpt-5.1-codex-max-medium": "gpt-5.1-codex-max",
|
|
464
|
+
"gpt-5.1-codex-max-high": "gpt-5.1-codex-max",
|
|
465
|
+
"gpt-5.1-codex-max-xhigh": "gpt-5.1-codex-max",
|
|
466
|
+
"gpt-5.2": "gpt-5.2",
|
|
467
|
+
"gpt-5.2-none": "gpt-5.2",
|
|
468
|
+
"gpt-5.2-low": "gpt-5.2",
|
|
469
|
+
"gpt-5.2-medium": "gpt-5.2",
|
|
470
|
+
"gpt-5.2-high": "gpt-5.2",
|
|
471
|
+
"gpt-5.2-xhigh": "gpt-5.2",
|
|
472
|
+
"gpt-5.2-codex": "gpt-5.2-codex",
|
|
473
|
+
"gpt-5.2-codex-low": "gpt-5.2-codex",
|
|
474
|
+
"gpt-5.2-codex-medium": "gpt-5.2-codex",
|
|
475
|
+
"gpt-5.2-codex-high": "gpt-5.2-codex",
|
|
476
|
+
"gpt-5.2-codex-xhigh": "gpt-5.2-codex",
|
|
477
|
+
"gpt-5.1-codex-mini": "gpt-5.1-codex-mini",
|
|
478
|
+
"gpt-5.1-codex-mini-medium": "gpt-5.1-codex-mini",
|
|
479
|
+
"gpt-5.1-codex-mini-high": "gpt-5.1-codex-mini",
|
|
480
|
+
"gpt-5.1": "gpt-5.1",
|
|
481
|
+
"gpt-5.1-none": "gpt-5.1",
|
|
482
|
+
"gpt-5.1-low": "gpt-5.1",
|
|
483
|
+
"gpt-5.1-medium": "gpt-5.1",
|
|
484
|
+
"gpt-5.1-high": "gpt-5.1",
|
|
485
|
+
"gpt-5.1-chat-latest": "gpt-5.1",
|
|
486
|
+
"gpt-5-codex": "gpt-5.1-codex",
|
|
487
|
+
"codex-mini-latest": "gpt-5.1-codex-mini",
|
|
488
|
+
"gpt-5-codex-mini": "gpt-5.1-codex-mini",
|
|
489
|
+
"gpt-5-codex-mini-medium": "gpt-5.1-codex-mini",
|
|
490
|
+
"gpt-5-codex-mini-high": "gpt-5.1-codex-mini",
|
|
491
|
+
"gpt-5": "gpt-5.1",
|
|
492
|
+
"gpt-5-mini": "gpt-5.1",
|
|
493
|
+
"gpt-5-nano": "gpt-5.1"
|
|
494
|
+
};
|
|
495
|
+
// Resolve `modelId` through MODEL_MAP, first by exact key and then by a
// case-insensitive key comparison. Returns undefined for unknown ids or
// non-string input (the catch absorbs e.g. `toLowerCase` on non-strings).
function getNormalizedModel(modelId) {
  try {
    const direct = MODEL_MAP[modelId];
    if (direct) {
      return direct;
    }
    const wanted = modelId.toLowerCase();
    for (const key of Object.keys(MODEL_MAP)) {
      if (key.toLowerCase() === wanted) {
        return MODEL_MAP[key];
      }
    }
    return undefined;
  } catch {
    return;
  }
}
|
|
507
|
+
|
|
508
|
+
// lib/request/helpers/input-utils.ts
|
|
509
|
+
var OPENCODE_PROMPT_SIGNATURES = [
|
|
510
|
+
"you are a coding agent running in the opencode",
|
|
511
|
+
"you are opencode, an agent",
|
|
512
|
+
"you are opencode, an interactive cli agent",
|
|
513
|
+
"you are opencode, an interactive cli tool",
|
|
514
|
+
"you are opencode, the best coding agent on the planet"
|
|
515
|
+
].map((signature) => signature.toLowerCase());
|
|
516
|
+
var OPENCODE_CONTEXT_MARKERS = [
|
|
517
|
+
"here is some useful information about the environment you are running in:",
|
|
518
|
+
"<env>",
|
|
519
|
+
"instructions from:",
|
|
520
|
+
"<instructions>"
|
|
521
|
+
].map((marker) => marker.toLowerCase());
|
|
522
|
+
// Extract the plain text of a Responses-API input item: strings pass
// through, arrays keep only non-empty `input_text` parts joined with
// newlines, and anything else yields "".
var getContentText = (item) => {
  const { content } = item;
  if (typeof content === "string") {
    return content;
  }
  if (!Array.isArray(content)) {
    return "";
  }
  const pieces = [];
  for (const part of content) {
    if (part.type === "input_text" && part.text) {
      pieces.push(part.text);
    }
  }
  return pieces.join("\n");
};
|
|
532
|
+
// Return a copy of `item` whose content is replaced by `contentText`,
// mirroring the original shape: array content becomes a single input_text
// part, everything else becomes the raw string.
var replaceContentText = (item, contentText) => {
  const newContent = Array.isArray(item.content)
    ? [{ type: "input_text", text: contentText }]
    : contentText;
  return { ...item, content: newContent };
};
|
|
544
|
+
// Locate the earliest OpenCode context marker (env block, instructions, …)
// inside `contentText` and return everything from that point onward, or null
// when no marker is present. Matching is case-insensitive.
var extractOpenCodeContext = (contentText) => {
  const haystack = contentText.toLowerCase();
  const hits = OPENCODE_CONTEXT_MARKERS
    .map((marker) => haystack.indexOf(marker))
    .filter((index) => index >= 0);
  if (hits.length === 0) {
    return null;
  }
  return contentText.slice(Math.min(...hits)).trimStart();
};
|
|
557
|
+
// Decide whether `item` is an OpenCode-injected system/developer prompt.
// Matches either the cached upstream prompt (exact, prefix, or first-200-
// character comparison) or one of the known hard-coded prompt signatures.
function isOpenCodeSystemPrompt(item, cachedPrompt) {
  if (item.role !== "developer" && item.role !== "system") {
    return false;
  }
  const contentText = getContentText(item);
  if (!contentText) {
    return false;
  }
  if (cachedPrompt) {
    const contentTrimmed = contentText.trim();
    const cachedTrimmed = cachedPrompt.trim();
    const samePrefix = contentTrimmed.substring(0, 200) === cachedTrimmed.substring(0, 200);
    if (contentTrimmed === cachedTrimmed || contentTrimmed.startsWith(cachedTrimmed) || samePrefix) {
      return true;
    }
  }
  const normalized = contentText.trimStart().toLowerCase();
  return OPENCODE_PROMPT_SIGNATURES.some((signature) => normalized.startsWith(signature));
}
|
|
582
|
+
// Strip OpenCode-injected system prompts from a Responses-API input array
// (the plugin substitutes Codex's own instructions upstream), while keeping
// any embedded environment/instructions context. Non-array input passes
// through untouched; user messages are never filtered.
function filterOpenCodeSystemPromptsWithCachedPrompt(input, cachedPrompt) {
  if (!Array.isArray(input)) {
    return input;
  }
  const kept = [];
  for (const item of input) {
    if (item.role === "user" || !isOpenCodeSystemPrompt(item, cachedPrompt)) {
      kept.push(item);
      continue;
    }
    const preservedContext = extractOpenCodeContext(getContentText(item));
    if (preservedContext) {
      kept.push(replaceContentText(item, preservedContext));
    }
  }
  return kept;
}
|
|
599
|
+
// Pull a usable call id off a tool item: it must be a string whose trimmed
// form is non-empty; anything else yields null.
var getCallId = (item) => {
  const { call_id: rawCallId } = item;
  if (typeof rawCallId !== "string") {
    return null;
  }
  const trimmed = rawCallId.trim();
  return trimmed === "" ? null : trimmed;
};
|
|
606
|
+
// Convert a tool output whose originating call is missing from the input
// into a plain assistant message, so the Responses API does not reject the
// request over an orphaned call_id. Outputs longer than 16k chars are
// truncated with a marker.
var convertOrphanedOutputToMessage = (item, callId) => {
  const toolName = typeof item.name === "string" ? item.name : "tool";
  const labelCallId = callId ?? "unknown";
  let text;
  try {
    const out = item.output;
    // Fix: JSON.stringify returns `undefined` (not a string) for undefined
    // and function values, which previously crashed on `text.length` below;
    // fall back to an empty string in that case.
    text = typeof out === "string" ? out : JSON.stringify(out) ?? "";
  } catch {
    text = String(item.output ?? "");
  }
  if (text.length > 16000) {
    text = text.slice(0, 16000) + `
...[truncated]`;
  }
  return {
    type: "message",
    role: "assistant",
    content: `[Previous ${toolName} result; call_id=${labelCallId}]: ${text}`
  };
};
|
|
626
|
+
// Index every tool-call id present in `input`, bucketed by call type, so
// that outputs can later be matched back to their originating calls.
var collectCallIds = (input) => {
  const functionCallIds = new Set();
  const localShellCallIds = new Set();
  const customToolCallIds = new Set();
  const buckets = new Map([
    ["function_call", functionCallIds],
    ["local_shell_call", localShellCallIds],
    ["custom_tool_call", customToolCallIds]
  ]);
  for (const item of input) {
    const callId = getCallId(item);
    if (!callId) {
      continue;
    }
    buckets.get(item.type)?.add(callId);
  }
  return { functionCallIds, localShellCallIds, customToolCallIds };
};
|
|
650
|
+
// Rewrite tool outputs whose call ids have no matching call in `input` as
// plain assistant messages (the Responses API rejects orphaned outputs).
// Outputs with a live matching call — and all non-output items — pass
// through unchanged.
var normalizeOrphanedToolOutputs = (input) => {
  const { functionCallIds, localShellCallIds, customToolCallIds } = collectCallIds(input);
  const hasMatchingCall = (item, callId) => {
    if (!callId) {
      return false;
    }
    if (item.type === "function_call_output") {
      // Function outputs may answer either a function call or a shell call.
      return functionCallIds.has(callId) || localShellCallIds.has(callId);
    }
    if (item.type === "custom_tool_call_output") {
      return customToolCallIds.has(callId);
    }
    return localShellCallIds.has(callId);
  };
  const outputTypes = ["function_call_output", "custom_tool_call_output", "local_shell_call_output"];
  return input.map((item) => {
    if (!outputTypes.includes(item.type)) {
      return item;
    }
    const callId = getCallId(item);
    return hasMatchingCall(item, callId) ? item : convertOrphanedOutputToMessage(item, callId);
  });
};
|
|
677
|
+
|
|
678
|
+
// lib/request/request-transformer.ts
|
|
679
|
+
// Maps an arbitrary caller-supplied model id (optionally "provider/model"
// prefixed) onto a canonical Codex model name. An explicit mapping via
// getNormalizedModel wins; otherwise ordered case-insensitive substring rules
// apply, falling back to "gpt-5.1".
function normalizeModel(model) {
  if (!model) return "gpt-5.1";
  // Drop any "provider/" prefix, keeping only the final path segment.
  const bareId = model.includes("/") ? model.split("/").pop() : model;
  const mapped = getNormalizedModel(bareId);
  if (mapped) return mapped;
  const lowered = bareId.toLowerCase();
  // Ordered: most specific names first so e.g. "gpt-5.2-codex" is not
  // swallowed by the plain "gpt-5.2" rule.
  const rules = [
    [["gpt-5.2-codex", "gpt 5.2 codex"], "gpt-5.2-codex"],
    [["gpt-5.2", "gpt 5.2"], "gpt-5.2"],
    [["gpt-5.1-codex-max", "gpt 5.1 codex max"], "gpt-5.1-codex-max"],
    [["gpt-5.1-codex-mini", "gpt 5.1 codex mini"], "gpt-5.1-codex-mini"],
    [["codex-mini-latest", "gpt-5-codex-mini", "gpt 5 codex mini"], "codex-mini-latest"],
    [["gpt-5.1-codex", "gpt 5.1 codex"], "gpt-5.1-codex"],
    [["gpt-5.1", "gpt 5.1"], "gpt-5.1"],
    [["codex"], "gpt-5.1-codex"],
    [["gpt-5", "gpt 5"], "gpt-5.1"]
  ];
  for (const [needles, target] of rules) {
    if (needles.some((needle) => lowered.includes(needle))) {
      return target;
    }
  }
  return "gpt-5.1";
}
|
|
717
|
+
// Merges the request's own reasoning settings (body.reasoning takes
// precedence over providerOptions.openai) and delegates to getReasoningConfig
// for per-model defaulting and clamping.
function resolveReasoningConfig(modelName, body) {
  const openaiOpts = body.providerOptions?.openai;
  const effort = body.reasoning?.effort ?? openaiOpts?.reasoningEffort;
  const summary = body.reasoning?.summary ?? openaiOpts?.reasoningSummary;
  const merged = {};
  if (effort) merged.reasoningEffort = effort;
  if (summary) merged.reasoningSummary = summary;
  return getReasoningConfig(modelName, merged);
}
|
|
727
|
+
// Picks the text verbosity setting: body.text.verbosity wins; otherwise the
// providerOptions.openai.textVerbosity value (may be undefined).
function resolveTextVerbosity(body) {
  const fromText = body.text?.verbosity;
  if (fromText !== undefined && fromText !== null) {
    return fromText;
  }
  return body.providerOptions?.openai?.textVerbosity;
}
|
|
731
|
+
// Builds the deduplicated `include` list for the request, always guaranteeing
// "reasoning.encrypted_content" is present (appended last when missing).
function resolveInclude(body) {
  const requested = body.include ?? body.providerOptions?.openai?.include ?? ["reasoning.encrypted_content"];
  const deduped = new Set(requested.filter(Boolean));
  deduped.add("reasoning.encrypted_content");
  return Array.from(deduped);
}
|
|
740
|
+
// Prepares an input array for a non-stored request: drops item_reference
// entries entirely and strips server-assigned `id` fields from the rest.
// Non-array values pass through untouched.
function filterInput(input) {
  if (!Array.isArray(input)) return input;
  const kept = [];
  for (const item of input) {
    if (item.type === "item_reference") continue;
    if (item.id) {
      const { id: _dropped, ...rest } = item;
      kept.push(rest);
    } else {
      kept.push(item);
    }
  }
  return kept;
}
|
|
756
|
+
// Fetches the cached OpenCode/Codex prompt (best-effort: any failure yields
// null) and delegates filtering of OpenCode system prompts to the helper.
async function filterOpenCodeSystemPrompts(input) {
  if (!Array.isArray(input)) {
    return input;
  }
  const cachedPrompt = await (async () => {
    try {
      return await getOpenCodeCodexPrompt();
    } catch {
      // Prompt lookup is best-effort; fall through with null.
      return null;
    }
  })();
  return filterOpenCodeSystemPromptsWithCachedPrompt(input, cachedPrompt);
}
|
|
765
|
+
// When the request carries tools, prepends a developer message containing the
// Codex/OpenCode bridge instructions. Otherwise (or for non-array input) the
// input is returned unchanged.
function addCodexBridgeMessage(input, hasTools) {
  if (!hasTools) return input;
  if (!Array.isArray(input)) return input;
  const bridge = {
    type: "message",
    role: "developer",
    content: [{ type: "input_text", text: CODEX_OPENCODE_BRIDGE }]
  };
  return [bridge, ...input];
}
|
|
780
|
+
// Resolves the effective reasoning { effort, summary } for a model name.
// Per-family capabilities: "xhigh" only on gpt-5.2 / gpt-5.2-codex /
// codex-max; "none" only on general gpt-5.2 / gpt-5.1; codex-mini is clamped
// to "medium" or "high"; plain codex rejects "minimal".
function getReasoningConfig(modelName, userConfig = {}) {
  const name = modelName?.toLowerCase() ?? "";
  const has = (...needles) => needles.some((needle) => name.includes(needle));
  const isGpt52Codex = has("gpt-5.2-codex", "gpt 5.2 codex");
  const isGpt52General = !isGpt52Codex && has("gpt-5.2", "gpt 5.2");
  const isCodexMax = has("codex-max", "codex max");
  const isCodexMini = has("codex-mini", "codex mini", "codex_mini", "codex-mini-latest");
  const isCodex = !isCodexMini && has("codex");
  const isLightweight = !isCodexMini && has("nano", "mini");
  const isGpt51General = !isCodex && !isCodexMax && !isCodexMini && has("gpt-5.1", "gpt 5.1");
  const supportsXhigh = isGpt52General || isGpt52Codex || isCodexMax;
  const supportsNone = isGpt52General || isGpt51General;
  const fallbackEffort = isCodexMini
    ? "medium"
    : supportsXhigh
      ? "high"
      : isLightweight
        ? "minimal"
        : "medium";
  let effort = userConfig.reasoningEffort || fallbackEffort;
  if (isCodexMini) {
    // codex-mini accepts only "high" (also standing in for "xhigh") or
    // "medium"; everything else collapses to "medium".
    effort = effort === "high" || effort === "xhigh" ? "high" : "medium";
  } else {
    if (effort === "xhigh" && !supportsXhigh) effort = "high";
    if (effort === "none" && !supportsNone) effort = "low";
    if (effort === "minimal" && isCodex) effort = "low";
  }
  return {
    effort,
    summary: userConfig.reasoningSummary || "auto"
  };
}
|
|
818
|
+
// Mutates a Responses-API request body in place for the Codex backend and
// returns the same object: normalizes the model id, forces non-stored
// streaming mode, installs the Codex instructions, rewrites the input array
// (id stripping, OpenCode prompt filtering, bridge message, orphaned-output
// normalization — in that order), then merges reasoning / verbosity /
// include settings and clears token caps.
async function transformRequestBody(body, codexInstructions) {
  const originalModel = body.model;
  const normalizedModel = normalizeModel(body.model);
  logDebug(`Model lookup: "${originalModel}" -> "${normalizedModel}"`, {
    hasTools: !!body.tools
  });
  body.model = normalizedModel;
  // Codex endpoint expectations: responses are not stored and always streamed.
  body.store = false;
  body.stream = true;
  body.instructions = codexInstructions;
  if (body.input && Array.isArray(body.input)) {
    // Record which item ids filterInput is about to strip (debug only).
    const originalIds = body.input.filter((item) => item.id).map((item) => item.id);
    if (originalIds.length > 0) {
      logDebug("Filtering message IDs", originalIds);
    }
    body.input = filterInput(body.input);
    // Sanity check: filterInput should have removed every id.
    const remainingIds = (body.input || []).filter((item) => item.id).map((item) => item.id);
    if (remainingIds.length > 0) {
      logWarn("IDs still present after filtering", remainingIds);
    }
    body.input = await filterOpenCodeSystemPrompts(body.input);
    // Bridge message is only added when the request declares tools.
    body.input = addCodexBridgeMessage(body.input, !!body.tools);
    if (body.input) {
      body.input = normalizeOrphanedToolOutputs(body.input);
    }
  }
  // Per-model reasoning config overrides whatever the caller set.
  const reasoningConfig = resolveReasoningConfig(normalizedModel, body);
  body.reasoning = {
    ...body.reasoning,
    ...reasoningConfig
  };
  const verbosity = resolveTextVerbosity(body);
  if (verbosity) {
    body.text = {
      ...body.text,
      verbosity
    };
  }
  body.include = resolveInclude(body);
  // Explicitly clear both token-cap variants rather than deleting the keys.
  body.max_output_tokens = undefined;
  body.max_completion_tokens = undefined;
  return body;
}
|
|
861
|
+
|
|
862
|
+
// lib/request/response-handler.ts
|
|
863
|
+
// Walks an SSE transcript line by line and returns the `response` payload of
// the first "response.done" / "response.completed" event, or null when no
// such event is present. Non-JSON data lines (e.g. "[DONE]") are skipped.
function parseSseStream(sseText) {
  for (const rawLine of sseText.split("\n")) {
    if (!rawLine.startsWith("data: ")) continue;
    try {
      const event = JSON.parse(rawLine.slice(6));
      if (event.type === "response.done" || event.type === "response.completed") {
        return event.response;
      }
    } catch {
      // Ignore unparsable data lines and keep scanning.
    }
  }
  return null;
}
|
|
878
|
+
// Drains an SSE response body, extracts the final "response.done" /
// "response.completed" payload via parseSseStream, and re-wraps it as a plain
// JSON Response. Falls back to returning the raw SSE text (original status
// and headers) when no final event is found.
//
// Fix: flush the TextDecoder after the read loop. With { stream: true } the
// decoder buffers an incomplete multi-byte UTF-8 sequence at a chunk
// boundary; without a final decode() call that trailing character was
// silently dropped.
//
// @param {Response} response - upstream response; must have a body.
// @param {HeadersInit} headers - headers to propagate onto the new Response.
// @returns {Promise<Response>} JSON response, or raw-text fallback.
// @throws {Error} when the upstream response has no body, or on read errors.
async function convertSseToJson(response, headers) {
  if (!response.body) {
    throw new Error(`[${PLUGIN_NAME}] Response has no body`);
  }
  const reader = response.body.getReader();
  const decoder = new TextDecoder;
  let fullText = "";
  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done)
        break;
      fullText += decoder.decode(value, { stream: true });
    }
    // Flush any buffered partial code point left over from the last chunk.
    fullText += decoder.decode();
    if (LOGGING_ENABLED) {
      logRequest("stream-full", { fullContent: fullText });
    }
    const finalResponse = parseSseStream(fullText);
    if (!finalResponse) {
      // No terminal event: surface the raw transcript so callers can inspect it.
      console.error(`[${PLUGIN_NAME}] Could not find final response in SSE stream`);
      logRequest("stream-error", { error: "No response.done event found" });
      return new Response(fullText, {
        status: response.status,
        statusText: response.statusText,
        headers
      });
    }
    const jsonHeaders = new Headers(headers);
    jsonHeaders.set("content-type", "application/json; charset=utf-8");
    return new Response(JSON.stringify(finalResponse), {
      status: response.status,
      statusText: response.statusText,
      headers: jsonHeaders
    });
  } catch (error) {
    console.error(`[${PLUGIN_NAME}] Error converting stream:`, error);
    logRequest("stream-error", { error: String(error) });
    throw error;
  }
}
|
|
918
|
+
// Returns a copy of the given headers, defaulting content-type to SSE when
// the upstream response did not set one. The input headers are not mutated.
function ensureContentType(headers) {
  const copy = new Headers(headers);
  if (copy.has("content-type")) {
    return copy;
  }
  copy.set("content-type", "text/event-stream; charset=utf-8");
  return copy;
}
|
|
925
|
+
|
|
926
|
+
// lib/request/fetch-helpers.ts
|
|
927
|
+
// Normalizes a fetch() `input` (string | URL | Request-like) to a URL string.
function extractRequestUrl(input) {
  if (typeof input === "string") {
    return input;
  }
  return input instanceof URL ? input.toString() : input.url;
}
|
|
934
|
+
// Parses a string request body, runs the Codex transformation pipeline over
// it, and returns both the transformed body object and an updated RequestInit
// with the re-serialized body. Returns undefined (leaving the request
// untouched) for non-string bodies or on any transformation failure.
async function transformRequestForCodex(init) {
  const rawBody = init?.body;
  if (!rawBody || typeof rawBody !== "string") {
    return;
  }
  try {
    const parsed = JSON.parse(rawBody);
    const instructions = await getCodexInstructions(normalizeModel(parsed.model));
    const body = await transformRequestBody(parsed, instructions);
    logRequest("after-transform", {
      model: body.model,
      hasTools: !!body.tools,
      hasInput: !!body.input
    });
    return {
      body,
      updatedInit: { ...init, body: JSON.stringify(body) }
    };
  } catch {
    // Best-effort: on any failure the caller falls back to the original init.
    return;
  }
}
|
|
955
|
+
// Builds the outbound header set for Codex requests: strips client-supplied
// auth/beta/account headers, installs the bearer key plus originator /
// user-agent / accept headers, defaults content-type to JSON, and mirrors the
// prompt cache key into the conversation/session headers (removing them when
// no cache key is supplied).
function createAicodewithHeaders(init, apiKey, opts) {
  const headers = new Headers(init?.headers ?? {});
  const dropped = [HEADER_NAMES.OPENAI_BETA, HEADER_NAMES.CHATGPT_ACCOUNT_ID, "x-api-key"];
  for (const name of dropped) {
    headers.delete(name);
  }
  headers.set(HEADER_NAMES.AUTHORIZATION, `Bearer ${apiKey}`);
  headers.set(HEADER_NAMES.ORIGINATOR, ORIGINATOR);
  headers.set(HEADER_NAMES.USER_AGENT, USER_AGENT);
  headers.set(HEADER_NAMES.ACCEPT, "text/event-stream");
  if (!headers.has(HEADER_NAMES.CONTENT_TYPE)) {
    headers.set(HEADER_NAMES.CONTENT_TYPE, "application/json");
  }
  const cacheKey = opts?.promptCacheKey;
  if (cacheKey) {
    headers.set(HEADER_NAMES.CONVERSATION_ID, cacheKey);
    headers.set(HEADER_NAMES.SESSION_ID, cacheKey);
  } else {
    headers.delete(HEADER_NAMES.CONVERSATION_ID);
    headers.delete(HEADER_NAMES.SESSION_ID);
  }
  return headers;
}
|
|
977
|
+
// Logs a non-OK upstream response and passes it through unchanged.
async function handleErrorResponse(response) {
  const { status, statusText } = response;
  logRequest("error-response", { status, statusText });
  return response;
}
|
|
984
|
+
// For streaming callers: re-wrap the upstream SSE body with normalized
// headers. For non-streaming callers: collapse the SSE stream into a single
// JSON response via convertSseToJson.
async function handleSuccessResponse(response, isStreaming) {
  const responseHeaders = ensureContentType(response.headers);
  if (isStreaming) {
    return new Response(response.body, {
      status: response.status,
      statusText: response.statusText,
      headers: responseHeaders
    });
  }
  return await convertSseToJson(response, responseHeaders);
}
|
|
995
|
+
|
|
996
|
+
// index.ts
|
|
997
|
+
// Model-id prefixes routed through the Codex Responses pipeline.
var CODEX_MODEL_PREFIXES = ["gpt-", "codex"];
var PACKAGE_NAME = "opencode-aicodewith-auth";
var PROVIDER_NAME = "AICodewith";
// This bundle's own module URL; used to register the plugin entry in config.
var PLUGIN_ENTRY = import.meta.url;
// URL of the sibling provider module shipped next to this bundle.
var PROVIDER_NPM = new URL("./provider.ts", import.meta.url).href;
var DEFAULT_API = "https://api.openai.com/v1";
var DEFAULT_ENV = ["AICODEWITH_API_KEY"];
// Fallback output-token cap used by getOutputTokenLimit.
var DEFAULT_OUTPUT_TOKEN_MAX = 32000;
// The only model ids the provider config is allowed to expose.
var ALLOWED_MODEL_IDS = [
  "gpt-5.2-codex",
  "gpt-5.2",
  "claude-sonnet-4-5-20250929",
  "claude-opus-4-5-20251101",
  "gemini-3-pro-high"
];
var ALLOWED_MODEL_SET = new Set(ALLOWED_MODEL_IDS);
// Config file location: OPENCODE_TEST_HOME (tests) or the real home dir,
// honoring XDG_CONFIG_HOME for the config root.
var homeDir = process.env.OPENCODE_TEST_HOME || os.homedir();
var configRoot = process.env.XDG_CONFIG_HOME || path.join(homeDir, ".config");
var configDir = path.join(configRoot, "opencode");
var configPath = path.join(configDir, "opencode.json");
// Memoizes ensureConfigFile so the config is rewritten at most once per process.
var ensureConfigPromise;
|
|
1018
|
+
// Builds a model-id -> settings map for the given ids, preserving any
// settings already present in `existing` (own properties only).
var toModelMap = (ids, existing = {}) => {
  const map = {};
  for (const id of ids) {
    map[id] = Object.prototype.hasOwnProperty.call(existing, id) ? existing[id] : {};
  }
  return map;
};
|
|
1022
|
+
// Reads and parses a JSON file; resolves to undefined when the file is
// missing or unparsable (callers treat that as "no config").
var readJson = async (filePath) => {
  try {
    return JSON.parse(await readFile2(filePath, "utf-8"));
  } catch {
    return;
  }
};
|
|
1030
|
+
// True when a plugin list entry refers to this package, with or without a
// pinned version ("pkg" or "pkg@<version>").
var isPackageEntry = (value) => {
  if (value === PACKAGE_NAME) return true;
  return value.startsWith(`${PACKAGE_NAME}@`);
};
|
|
1031
|
+
// Guarantees this plugin is present in the config's plugin list. Returns the
// original array untouched when already registered (by URL or package name);
// otherwise returns a new array with this entry appended.
var ensurePluginEntry = (list) => {
  if (!Array.isArray(list)) {
    return [PLUGIN_ENTRY];
  }
  const alreadyRegistered = list.some(
    (entry) => typeof entry === "string" && (entry === PLUGIN_ENTRY || isPackageEntry(entry))
  );
  return alreadyRegistered ? list : [...list, PLUGIN_ENTRY];
};
|
|
1037
|
+
// Normalizes the opencode config in place for this provider: fills in
// missing name/env/npm/api fields, constrains the model map to
// ALLOWED_MODEL_IDS (preserving per-model settings), and registers the
// plugin entry. Returns true when anything was changed, so callers can skip
// rewriting an already-correct config file.
var applyProviderConfig = (config) => {
  let changed = false;
  const providerMap = config.provider && typeof config.provider === "object" ? config.provider : {};
  const existing = providerMap[PROVIDER_ID] && typeof providerMap[PROVIDER_ID] === "object" ? providerMap[PROVIDER_ID] : {};
  const existingModels = existing.models && typeof existing.models === "object" ? existing.models : {};
  // Work on a shallow copy so `existing` is never mutated directly.
  const next = { ...existing };
  if (!next.name) {
    next.name = PROVIDER_NAME;
    changed = true;
  }
  if (!Array.isArray(next.env)) {
    next.env = DEFAULT_ENV;
    changed = true;
  }
  // Replace a missing npm ref, or one that points at this package by name,
  // with the bundled provider module URL.
  if (!next.npm || typeof next.npm === "string" && isPackageEntry(next.npm)) {
    next.npm = PROVIDER_NPM;
    changed = true;
  }
  if (!next.api) {
    next.api = DEFAULT_API;
    changed = true;
  }
  // Rebuild the model map when it is absent, lists a model outside the
  // allow-list, or is missing an allowed model.
  const hasExtraModels = Object.keys(existingModels).some((id) => !ALLOWED_MODEL_SET.has(id));
  const hasMissingModels = ALLOWED_MODEL_IDS.some((id) => !Object.prototype.hasOwnProperty.call(existingModels, id));
  if (!next.models || hasExtraModels || hasMissingModels) {
    next.models = toModelMap(ALLOWED_MODEL_IDS, existingModels);
    changed = true;
  }
  providerMap[PROVIDER_ID] = next;
  // Identity check: providerMap is only a new object when config.provider
  // was missing or not an object.
  if (config.provider !== providerMap) {
    config.provider = providerMap;
    changed = true;
  }
  // ensurePluginEntry returns the same array when the plugin is already
  // registered, so this identity comparison detects an actual addition.
  const nextPlugins = ensurePluginEntry(config.plugin);
  if (nextPlugins !== config.plugin) {
    config.plugin = nextPlugins;
    changed = true;
  }
  return changed;
};
|
|
1077
|
+
// Ensures the opencode config file contains this provider/plugin, writing it
// back (pretty-printed, trailing newline) only when applyProviderConfig
// reports a change. Memoized via the module-level ensureConfigPromise so the
// file is inspected/rewritten at most once per process.
var ensureConfigFile = async () => {
  if (ensureConfigPromise)
    return ensureConfigPromise;
  ensureConfigPromise = (async () => {
    // Missing/unparsable file -> start from an empty config object.
    const config = await readJson(configPath) ?? {};
    if (!config || typeof config !== "object")
      return;
    const changed = applyProviderConfig(config);
    if (!changed)
      return;
    await mkdir2(configDir, { recursive: true });
    await writeFile2(configPath, `${JSON.stringify(config, null, 2)}
`, "utf-8");
  })();
  return ensureConfigPromise;
};
|
|
1093
|
+
// Heuristic: an object carrying client/project/directory keys is the opencode
// plugin context rather than provider options.
var isPluginInput = (input) => {
  if (!input || typeof input !== "object") {
    return false;
  }
  return ["client", "project", "directory"].every((key) => key in input);
};
|
|
1098
|
+
// Dual-purpose export: when invoked with the opencode plugin context it
// returns an empty hook object; otherwise it forwards to the provider
// factory with the given options.
function createAicodewith2(input) {
  return isPluginInput(input) ? {} : createAicodewith(input);
}
|
|
1103
|
+
// Best-effort parse of a fetch RequestInit body. Non-string or unparsable
// bodies yield an "empty" result; otherwise returns the parsed body, its
// model id (strings only), and whether streaming was requested.
var parseRequestBody = (init) => {
  const empty = { body: undefined, model: undefined, isStreaming: false };
  const raw = init?.body;
  if (!raw || typeof raw !== "string") {
    return empty;
  }
  try {
    const body = JSON.parse(raw);
    return {
      body,
      model: typeof body?.model === "string" ? body.model : undefined,
      isStreaming: body?.stream === true
    };
  } catch {
    return empty;
  }
};
|
|
1115
|
+
// Request-routing predicates used by the auth loader's fetch wrapper.
var isGeminiUrl = (url) => {
  if (url.includes(":generateContent")) return true;
  if (url.includes(":streamGenerateContent")) return true;
  return url.includes("/models/") && url.includes("/v1");
};
var isClaudeUrl = (url) => url.includes("/v1/messages");
var isModel = (model, prefix) => Boolean(model && model.startsWith(prefix));
var isCodexModel = (model) => Boolean(model && CODEX_MODEL_PREFIXES.some((prefix) => model.startsWith(prefix)));
|
|
1119
|
+
// Repoints a request URL at a different base: when the URL already lives
// under the base's origin+path it is returned as-is; otherwise its origin is
// swapped for the base's and the base path is prefixed onto its pathname
// (query/hash preserved).
var rewriteUrl = (originalUrl, baseUrl) => {
  const base = new URL(baseUrl);
  const original = new URL(originalUrl);
  const basePath = base.pathname.replace(/\/$/, "");
  const baseRoot = `${base.origin}${basePath}`;
  if (`${original.origin}${original.pathname}`.startsWith(baseRoot)) {
    return original.toString();
  }
  const target = new URL(original.toString());
  target.protocol = base.protocol;
  target.host = base.host;
  target.pathname = `${basePath}${original.pathname}`;
  return target.toString();
};
|
|
1134
|
+
// Guarantees the Gemini streaming query parameter `alt=sse` is present,
// returning the input string untouched when it already is.
var ensureGeminiSseParam = (url) => {
  const parsed = new URL(url);
  const { searchParams } = parsed;
  if (searchParams.get("alt") === "sse") {
    return url;
  }
  searchParams.set("alt", "sse");
  return parsed.toString();
};
|
|
1142
|
+
// Rehosts a Gemini request onto the AICodewith Gemini proxy: prefixes
// /v1beta when the path lacks an API version, grafts the path and query onto
// the proxy base, and (for streaming) ensures the alt=sse parameter.
var buildGeminiUrl = (originalUrl, streaming) => {
  const original = new URL(originalUrl);
  let apiPath = original.pathname;
  const hasApiVersion = apiPath.includes("/v1beta/") || apiPath.includes("/v1/");
  if (!hasApiVersion) {
    apiPath = `/v1beta${apiPath.startsWith("/") ? "" : "/"}${apiPath}`;
  }
  const base = new URL(AICODEWITH_GEMINI_BASE_URL);
  const target = new URL(base.origin);
  target.pathname = `${base.pathname.replace(/\/$/, "")}${apiPath}`;
  target.search = original.search;
  const rebuilt = target.toString();
  return streaming ? ensureGeminiSseParam(rebuilt) : rebuilt;
};
|
|
1156
|
+
// Builds headers for proxied Gemini requests: removes bearer/x-api-key auth,
// sets the Gemini user agent, API client and API key headers, defaults
// accept/content-type, and forwards the privileged-user-id env value when
// present and not already set by the caller.
var createGeminiHeaders = (init, apiKey) => {
  const headers = new Headers(init?.headers ?? {});
  headers.delete(HEADER_NAMES.AUTHORIZATION);
  headers.delete("x-api-key");
  headers.set(HEADER_NAMES.USER_AGENT, GEMINI_USER_AGENT);
  headers.set(HEADER_NAMES.X_GOOG_API_CLIENT, GEMINI_API_CLIENT);
  headers.set(HEADER_NAMES.X_GOOG_API_KEY, apiKey);
  const defaults = [
    [HEADER_NAMES.ACCEPT, "*/*"],
    [HEADER_NAMES.CONTENT_TYPE, "application/json"]
  ];
  for (const [name, value] of defaults) {
    if (!headers.has(name)) {
      headers.set(name, value);
    }
  }
  const privilegedUserId = process.env[GEMINI_PRIVILEGED_USER_ID_ENV];
  if (privilegedUserId && !headers.has(HEADER_NAMES.X_GEMINI_PRIVILEGED_USER_ID)) {
    headers.set(HEADER_NAMES.X_GEMINI_PRIVILEGED_USER_ID, privilegedUserId);
  }
  return headers;
};
|
|
1175
|
+
// Resolves the output-token budget: the model's declared limit wins, then a
// positive options.maxTokens, then the package default.
var getOutputTokenLimit = (input, output) => {
  const isPositive = (value) => typeof value === "number" && value > 0;
  const modelLimit = input.model.limit.output;
  if (isPositive(modelLimit)) {
    return modelLimit;
  }
  const optionLimit = output.options?.maxTokens;
  if (isPositive(optionLimit)) {
    return optionLimit;
  }
  return DEFAULT_OUTPUT_TOKEN_MAX;
};
|
|
1186
|
+
// opencode plugin entry point. Registers:
//  - an auth hook whose loader wraps fetch() to route requests by model/URL
//    (Codex vs Gemini vs Claude vs pass-through) and to inject the API key;
//  - an "api" auth method that prompts for the AICodewith key;
//  - a config hook applying the provider config;
//  - a "chat.params" hook that drops Claude `thinking` options whose
//    budgetTokens meet or exceed the output-token limit.
var AicodewithCodexAuthPlugin = async (_ctx) => {
  // Best-effort config bootstrap; failures are logged, never fatal.
  await ensureConfigFile().catch((error) => {
    console.warn(`[${PACKAGE_NAME}] Failed to update opencode config: ${error instanceof Error ? error.message : error}`);
  });
  const authHook = {
    provider: PROVIDER_ID,
    loader: async (getAuth, _provider) => {
      const auth = await getAuth();
      // Only API-key auth with a non-empty key enables the custom fetch.
      if (auth.type !== "api" || !auth.key) {
        return {};
      }
      const apiKey = auth.key.trim();
      if (!apiKey)
        return {};
      return {
        apiKey,
        fetch: async (input, init) => {
          const originalUrl = extractRequestUrl(input);
          const { model, isStreaming } = parseRequestBody(init);
          // Classification: Claude and Gemini checks run first; Codex only
          // applies when neither matched and the model has a Codex prefix.
          const isClaudeRequest = isModel(model, "claude-") || isClaudeUrl(originalUrl);
          const isGeminiRequest = isModel(model, "gemini-") || isGeminiUrl(originalUrl);
          const isGeminiStreaming = isGeminiRequest && (isStreaming || originalUrl.includes("streamGenerateContent"));
          const isCodexRequest = !isClaudeRequest && !isGeminiRequest && isCodexModel(model);
          if (isCodexRequest) {
            // Transform the body for Codex; on failure fall back to the raw init.
            const transformation = await transformRequestForCodex(init);
            const requestInit = transformation?.updatedInit ?? init;
            const headers = createAicodewithHeaders(requestInit, apiKey, {
              promptCacheKey: transformation?.body.prompt_cache_key
            });
            const targetUrl = rewriteUrl(originalUrl, CODEX_BASE_URL);
            const response = await fetch(targetUrl, {
              ...requestInit,
              headers
            });
            if (!response.ok) {
              return await handleErrorResponse(response);
            }
            // isStreaming reflects the caller's request, not the forced
            // upstream stream; non-streaming callers get collapsed JSON.
            return await handleSuccessResponse(response, isStreaming);
          }
          if (isGeminiRequest) {
            const geminiUrl = buildGeminiUrl(originalUrl, isGeminiStreaming);
            const headers = createGeminiHeaders(init, apiKey);
            const requestInit = { ...init, headers };
            return await fetch(geminiUrl, requestInit);
          }
          if (isClaudeRequest) {
            // Claude requests are re-based onto the Anthropic proxy unchanged.
            const targetUrl = rewriteUrl(originalUrl, AICODEWITH_ANTHROPIC_BASE_URL);
            return await fetch(targetUrl, init);
          }
          // Anything unrecognized passes through untouched.
          return await fetch(originalUrl, init);
        }
      };
    },
    methods: [
      {
        type: "api",
        label: AUTH_METHOD_LABEL,
        prompts: [
          {
            type: "text",
            key: "apiKey",
            message: "AICodewith API key",
            placeholder: "sk-..."
          }
        ],
        authorize: async (inputs) => {
          const key = inputs?.apiKey?.trim();
          if (!key)
            return { type: "failed" };
          return { type: "success", key };
        }
      }
    ]
  };
  return {
    auth: authHook,
    config: async (config) => {
      applyProviderConfig(config);
    },
    "chat.params": async (input, output) => {
      // Only this provider's Claude models are affected.
      if (input.model.providerID !== PROVIDER_ID)
        return;
      if (!input.model.id?.startsWith("claude-"))
        return;
      const thinking = output.options?.thinking;
      if (!thinking || typeof thinking !== "object")
        return;
      const budgetTokens = thinking.budgetTokens;
      if (typeof budgetTokens !== "number")
        return;
      const maxTokens = getOutputTokenLimit(input, output);
      if (budgetTokens < maxTokens)
        return;
      // Thinking budget >= output cap: drop the option entirely (on a copy,
      // so the original options object is not mutated).
      const next = { ...output.options };
      delete next.thinking;
      output.options = next;
    }
  };
};
|
|
1285
|
+
// Bundler-generated default-export alias for the plugin entry point.
var opencode_aicodewith_auth_default = AicodewithCodexAuthPlugin;
export {
  opencode_aicodewith_auth_default as default,
  createAicodewith2 as createAicodewith,
  AicodewithCodexAuthPlugin
};
|