codex-rotating-proxy 0.1.2 → 0.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/login.js +6 -6
- package/dist/server.js +29 -8
- package/dist/translate.js +34 -6
- package/package.json +1 -1
package/dist/login.js
CHANGED
@@ -51,8 +51,9 @@ export async function loginFlow(accountName) {
         client_id: CLIENT_ID,
         code_verifier: verifier,
     });
-    //
-
+    // Use access_token directly (JWT bearer) — NOT an sk-proj-* API key.
+    // Codex models like gpt-5.3-codex require the JWT access token,
+    // not an exchanged API key.
     // Parse JWT for display info + account ID
     const claims = parseJwt(tokens.id_token);
     const email = claims.email ?? "unknown";
@@ -61,7 +62,7 @@ export async function loginFlow(accountName) {
     const name = accountName || email.split("@")[0];
     addAccount({
         name,
-        token:
+        token: tokens.access_token,
         refreshToken: tokens.refresh_token,
         accountId,
         addedAt: new Date().toISOString(),
@@ -115,14 +116,13 @@ export async function refreshAccount(account) {
             refresh_token: account.refreshToken,
             scope: REFRESH_SCOPES,
         });
-        const apiKey = await exchangeForApiKey(tokens.id_token);
         addAccount({
             ...account,
-            token:
+            token: tokens.access_token,
             refreshToken: tokens.refresh_token,
             lastRefresh: new Date().toISOString(),
         });
-        return
+        return tokens.access_token;
     }
     catch {
         return null;
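The new token handling leans on parseJwt(tokens.id_token) to pull the display email and account ID out of the OAuth ID token; that helper is not part of this diff. A minimal sketch of what such a helper typically does, assuming the usual unverified decode of the JWT payload (claim names other than email are not visible here):

// Sketch only: parseJwt is not shown in this diff. JWTs are three
// base64url segments (header.payload.signature); the claims live in the payload.
function parseJwt(token) {
    const payload = token.split(".")[1];
    const json = Buffer.from(payload, "base64url").toString("utf-8");
    return JSON.parse(json); // e.g. { email: "...", ... }; other claim names are assumptions
}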
package/dist/server.js
CHANGED
@@ -83,6 +83,7 @@ export function startProxy() {
         // ── Detect chat completions → responses translation ─────
         const isChatCompletions = url.pathname === "/v1/chat/completions" && req.method === "POST";
         let targetPath = url.pathname;
+        let targetBase = upstream;
         let parsedBody = null;
         let isStreaming = false;
         if (isChatCompletions && body) {
@@ -90,9 +91,12 @@ export function startProxy() {
                 parsedBody = JSON.parse(body.toString("utf-8"));
                 isStreaming = !!parsedBody.stream;
                 const translated = chatToResponsesRequest(parsedBody);
+                log("cyan", `↔ translating chat/completions → responses (stream=${isStreaming})`);
+                log("cyan", ` request: ${JSON.stringify(translated).slice(0, 200)}`);
                 body = Buffer.from(JSON.stringify(translated));
-
-
+                // Codex models (gpt-5.x-codex) use ChatGPT backend, not api.openai.com
+                targetBase = "https://chatgpt.com/backend-api";
+                targetPath = "/codex/responses";
             }
             catch (err) {
                 log("red", `✗ failed to parse/translate body: ${err}`);
@@ -111,7 +115,7 @@ export function startProxy() {
             const entry = pool.getNext();
             if (!entry)
                 break;
-            const target = `${
+            const target = `${targetBase}${targetPath}${url.search}`;
             log("cyan", `→ ${req.method} ${targetPath} via ${entry.name}`);
             // Inner loop: try once, and if 401 + refreshable, refresh and retry
             let currentToken = entry.account.token;
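With targetBase switched to https://chatgpt.com/backend-api and targetPath to /codex/responses, the per-account forward presumably ends up as a fetch against the rebuilt target URL with the account's JWT access token as the bearer credential. The sketch below is an assumption about that call, not the package's code: only the target expression and currentToken appear in the diff; the other headers and fetch options are guesses.

// Hedged sketch of the upstream call implied by the routing change above.
const target = `${targetBase}${targetPath}${url.search}`; // e.g. https://chatgpt.com/backend-api/codex/responses
const fetchRes = await fetch(target, {
    method: req.method,
    headers: {
        authorization: `Bearer ${currentToken}`, // JWT access token, not an sk-proj-* API key
        "content-type": "application/json",
    },
    body, // already rewritten into Responses format by chatToResponsesRequest()
});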
@@ -176,12 +180,16 @@ export function startProxy() {
         const reader = fetchRes.body.getReader();
         const decoder = new TextDecoder();
         let buffer = "";
+        let emittedCount = 0;
         try {
             while (true) {
                 const { done, value } = await reader.read();
                 if (done)
                     break;
-
+                const raw = decoder.decode(value, { stream: true });
+                if (emittedCount === 0)
+                    log("cyan", ` upstream first chunk: ${raw.slice(0, 300).replace(/\n/g, "\\n")}`);
+                buffer += raw;
                 const lines = buffer.split("\n");
                 buffer = lines.pop() ?? "";
                 for (const line of lines) {
@@ -189,29 +197,42 @@ export function startProxy() {
                     if (!trimmed)
                         continue;
                     const translated = translator.feed(trimmed);
-                    for (const out of translated)
+                    for (const out of translated) {
+                        if (emittedCount < 3)
+                            log("cyan", ` emit[${emittedCount}]: ${out.slice(0, 200).replace(/\n/g, "\\n")}`);
                         res.write(out);
+                        emittedCount++;
+                    }
                 }
             }
             // Process remaining buffer
             if (buffer.trim()) {
                 const translated = translator.feed(buffer.trim());
-                for (const out of translated)
+                for (const out of translated) {
                     res.write(out);
+                    emittedCount++;
+                }
             }
             const flushed = translator.flush();
-            for (const out of flushed)
+            for (const out of flushed) {
                 res.write(out);
+                emittedCount++;
+            }
+        }
+        catch (err) {
+            log("red", ` stream error: ${err}`);
         }
-
+        log("cyan", ` stream done, emitted ${emittedCount} chunks`);
         res.end();
     }
     else {
         // Non-streaming: buffer full response and translate
         const text = await fetchRes.text();
+        log("cyan", ` upstream response: ${text.slice(0, 300)}`);
         try {
             const respBody = JSON.parse(text);
             const translated = responsesToChatResponse(respBody, parsedBody.model);
+            log("cyan", ` translated: ${JSON.stringify(translated).slice(0, 300)}`);
             json(res, 200, translated);
         }
         catch {
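From the client's side the proxy still speaks the Chat Completions API; the translation to the Responses format and the routing to the ChatGPT backend happen inside the proxy. A usage sketch, with the listen port assumed (it is not visible in this diff):

// Hypothetical client call; 8787 stands in for whatever port startProxy() binds.
const res = await fetch("http://localhost:8787/v1/chat/completions", {
    method: "POST",
    headers: { "content-type": "application/json" },
    body: JSON.stringify({
        model: "gpt-5.3-codex", // model name taken from the comments in this diff
        stream: true,
        messages: [{ role: "user", content: "Hello" }],
    }),
});
// The streamed body arrives as Chat Completions chunks ("data: {...}\n\n")
// and ends with "data: [DONE]\n\n", produced by translate.js below.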
package/dist/translate.js
CHANGED
@@ -54,10 +54,7 @@ export function chatToResponsesRequest(body) {
         out.temperature = body.temperature;
     if (body.top_p !== undefined)
         out.top_p = body.top_p;
-
-        out.max_output_tokens = body.max_completion_tokens;
-    else if (body.max_tokens !== undefined)
-        out.max_output_tokens = body.max_tokens;
+    // Note: ChatGPT backend doesn't support max_output_tokens, skip it
     if (body.stop !== undefined)
         out.stop = body.stop;
     if (body.frequency_penalty !== undefined)
@@ -68,8 +65,7 @@ export function chatToResponsesRequest(body) {
         out.user = body.user;
     if (body.parallel_tool_calls !== undefined)
         out.parallel_tool_calls = body.parallel_tool_calls;
-
-        out.store = body.store;
+    out.store = false; // ChatGPT backend requires store=false
     if (body.metadata !== undefined)
         out.metadata = body.metadata;
     // reasoning_effort
@@ -155,6 +151,7 @@ export function responsesToChatResponse(resp, model) {
 export function createStreamTranslator(model) {
     const id = `chatcmpl-${Date.now()}`;
     let sentRole = false;
+    let sentDone = false;
     let toolCallIndex = -1;
     const toolCallIds = new Map(); // item_id → index
     function chunk(delta, finishReason = null) {
@@ -239,9 +236,40 @@ export function createStreamTranslator(model) {
                 results.push(usageChunk(resp.usage));
                 results.push("data: [DONE]\n\n");
             }
+            else if (type === "error") {
+                // Forward API errors as Chat Completions error format
+                const err = event.error ?? {};
+                results.push(`data: ${JSON.stringify({
+                    error: {
+                        message: err.message ?? "Unknown error",
+                        type: err.type ?? "api_error",
+                        code: err.code ?? null,
+                    },
+                })}\n\n`);
+                results.push("data: [DONE]\n\n");
+            }
+            else if (type === "response.failed") {
+                const resp = event.response;
+                const err = resp?.error ?? {};
+                if (!sentDone) {
+                    results.push(`data: ${JSON.stringify({
+                        error: {
+                            message: err.message ?? "Response failed",
+                            type: "api_error",
+                            code: err.code ?? null,
+                        },
+                    })}\n\n`);
+                    results.push("data: [DONE]\n\n");
+                }
+            }
+            if (results.some(r => r.includes("[DONE]")))
+                sentDone = true;
             return results;
         },
         flush() {
+            // If stream ended without a proper termination, send [DONE]
+            if (!sentDone)
+                return ["data: [DONE]\n\n"];
             return [];
         },
     };
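To make the new error handling concrete, here is a rough illustration of how a failed Responses stream would surface to a Chat Completions client, following the branches added above. The event payload is invented; the server loop passes trimmed SSE lines straight into feed(), which is why the input here is a raw data: line.

// Illustration with a made-up response.failed event.
const translator = createStreamTranslator("gpt-5.3-codex");
const out = translator.feed('data: {"type":"response.failed","response":{"error":{"message":"quota exceeded","code":"rate_limit"}}}');
// out (roughly):
//   'data: {"error":{"message":"quota exceeded","type":"api_error","code":"rate_limit"}}\n\n'
//   'data: [DONE]\n\n'
translator.flush(); // [] afterwards, since sentDone is already true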