codex-rotating-proxy 0.1.1 → 0.1.3

package/README.md CHANGED
@@ -115,28 +115,30 @@ codex-proxy config --cooldown 30 # cooldown minutes
 
 ## Using with opencode
 
-Add the proxy as your OpenAI provider base URL in `~/.config/opencode/opencode.json`:
+The built-in `openai` provider ignores `baseURL` overrides for Codex models. Instead, register the proxy as a custom provider using `@ai-sdk/openai-compatible` in `~/.config/opencode/opencode.json`:
 
 ```json
 {
   "$schema": "https://opencode.ai/config.json",
+  "model": "rotating-openai/gpt-5.3-codex",
   "provider": {
-    "openai": {
+    "rotating-openai": {
+      "npm": "@ai-sdk/openai-compatible",
+      "name": "Rotating OpenAI",
       "options": {
         "baseURL": "http://localhost:4000/v1"
+      },
+      "models": {
+        "gpt-5.3-codex": {
+          "name": "GPT-5.3 Codex"
+        }
       }
     }
   }
 }
 ```
 
-Then set your model as usual — the proxy forwards whatever model the client requests:
-
-```json
-{
-  "model": "openai/gpt-4o"
-}
-```
+You can add any OpenAI model to the `models` map — the proxy forwards whatever model the client requests.
 
 Start both:
 
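The `models` map above is open-ended. For instance, a hypothetical second entry could sit alongside the Codex model (the extra id is illustrative only; `gpt-4o` is the example the old README used):

```json
"models": {
  "gpt-5.3-codex": { "name": "GPT-5.3 Codex" },
  "gpt-4o": { "name": "GPT-4o" }
}
```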
@@ -162,7 +164,8 @@ Set the base URL to `http://localhost:4000/v1`. Set the API key to any non-empty
 - **Sticky routing** — stays on one account until it hits a rate limit, then rotates to the next
 - **Auto-rotation** — detects HTTP 429, 402, and quota-related 403 responses
 - **Token refresh** — OAuth tokens are automatically refreshed on 401; no manual re-login needed
-- **Streaming** — full SSE streaming support for chat completions
+- **Chat Completions compatibility** — automatically translates `/v1/chat/completions` requests to the Responses API, so tools that only speak Chat Completions work with Codex models
+- **Streaming** — full SSE streaming support for both the Chat Completions and Responses APIs
 - **Hot reload** — logging in while the proxy is running adds the new account immediately
 - **Zero dependencies** — just Node.js
 
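To sanity-check the translation path end to end, a minimal Node smoke test could look like this (a sketch, assuming the default port 4000 used throughout this README; as noted above, any non-empty API key is accepted):

```js
// Hypothetical smoke test: one non-streaming chat completion through the proxy.
// Requires Node 18+ (global fetch) and at least one logged-in account.
const res = await fetch("http://localhost:4000/v1/chat/completions", {
    method: "POST",
    headers: {
        "content-type": "application/json",
        authorization: "Bearer anything", // any non-empty key
    },
    body: JSON.stringify({
        model: "gpt-5.3-codex",
        messages: [{ role: "user", content: "Say hello." }],
    }),
});
console.log((await res.json()).choices[0].message.content);
```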
package/dist/login.js CHANGED
@@ -51,8 +51,9 @@ export async function loginFlow(accountName) {
         client_id: CLIENT_ID,
         code_verifier: verifier,
     });
-    // Step 2: exchange id_token for an OpenAI API key
-    const apiKey = await exchangeForApiKey(tokens.id_token);
+    // Use access_token directly (JWT bearer) — NOT an sk-proj-* API key.
+    // Codex models like gpt-5.3-codex require the JWT access token,
+    // not an exchanged API key.
     // Parse JWT for display info + account ID
     const claims = parseJwt(tokens.id_token);
     const email = claims.email ?? "unknown";
@@ -61,7 +62,7 @@ export async function loginFlow(accountName) {
     const name = accountName || email.split("@")[0];
     addAccount({
         name,
-        token: apiKey,
+        token: tokens.access_token,
         refreshToken: tokens.refresh_token,
         accountId,
         addedAt: new Date().toISOString(),
@@ -115,14 +116,13 @@ export async function refreshAccount(account) {
             refresh_token: account.refreshToken,
             scope: REFRESH_SCOPES,
         });
-        const apiKey = await exchangeForApiKey(tokens.id_token);
         addAccount({
             ...account,
-            token: apiKey,
+            token: tokens.access_token,
            refreshToken: tokens.refresh_token,
            lastRefresh: new Date().toISOString(),
        });
-        return apiKey;
+        return tokens.access_token;
    }
    catch {
        return null;
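The `parseJwt` helper used above is not part of this diff; a minimal version would just base64url-decode the claims segment of the token without verifying the signature, roughly:

```js
// Sketch only: the package's actual parseJwt may differ.
// A JWT is header.payload.signature, each segment base64url-encoded.
function parseJwt(token) {
    const payload = token.split(".")[1];
    return JSON.parse(Buffer.from(payload, "base64url").toString("utf-8"));
}
```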
package/dist/server.js CHANGED
@@ -4,6 +4,7 @@ import { type as osType, release, arch } from "node:os";
 import { getAccounts, getSettings, writePid, removePid } from "./config.js";
 import { refreshAccount } from "./login.js";
 import { AccountPool, log } from "./pool.js";
+import { chatToResponsesRequest, responsesToChatResponse, createStreamTranslator } from "./translate.js";
 const ROTATE_ON = new Set([429, 402]);
 const STRIP_REQ = new Set([
     "host", "authorization", "connection", "content-length",
@@ -79,6 +80,29 @@ export function startProxy() {
         for await (const chunk of req)
             chunks.push(chunk);
         let body = chunks.length > 0 ? Buffer.concat(chunks) : null;
+        // ── Detect chat completions → responses translation ─────
+        const isChatCompletions = url.pathname === "/v1/chat/completions" && req.method === "POST";
+        let targetPath = url.pathname;
+        let targetBase = upstream;
+        let parsedBody = null;
+        let isStreaming = false;
+        if (isChatCompletions && body) {
+            try {
+                parsedBody = JSON.parse(body.toString("utf-8"));
+                isStreaming = !!parsedBody.stream;
+                const translated = chatToResponsesRequest(parsedBody);
+                log("cyan", `↔ translating chat/completions → responses (stream=${isStreaming})`);
+                log("cyan", ` request: ${JSON.stringify(translated).slice(0, 200)}`);
+                body = Buffer.from(JSON.stringify(translated));
+                // Codex models (gpt-5.x-codex) use ChatGPT backend, not api.openai.com
+                targetBase = "https://chatgpt.com/backend-api";
+                targetPath = "/codex/responses";
+            }
+            catch (err) {
+                log("red", `✗ failed to parse/translate body: ${err}`);
+                // fall through with original body
+            }
+        }
         // ── Forward headers ───────────────────────────────────────
         const fwdHeaders = {};
         for (const [k, v] of Object.entries(req.headers)) {
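Concretely, the translation step above hands the parsed body to `chatToResponsesRequest` (from the new translate.js, shown further down) and re-targets the request at the ChatGPT backend. For a minimal body the mapping comes out roughly like this (illustrative values):

```js
import { chatToResponsesRequest } from "./translate.js";

const chat = {
    model: "gpt-5.3-codex",
    messages: [
        { role: "system", content: "Be terse." },
        { role: "user", content: "hi" },
    ],
    stream: true,
};
console.log(chatToResponsesRequest(chat));
// → { model: "gpt-5.3-codex", instructions: "Be terse.",
//    input: [{ role: "user", content: "hi" }], stream: true, store: false }
```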
@@ -91,8 +115,8 @@ export function startProxy() {
             const entry = pool.getNext();
             if (!entry)
                 break;
-            const target = `${upstream}${url.pathname}${url.search}`;
-            log("cyan", `→ ${req.method} ${url.pathname} via ${entry.name}`);
+            const target = `${targetBase}${targetPath}${url.search}`;
+            log("cyan", `→ ${req.method} ${targetPath} via ${entry.name}`);
             // Inner loop: try once, and if 401 + refreshable, refresh and retry
             let currentToken = entry.account.token;
             for (let retry = 0; retry < 2; retry++) {
@@ -103,6 +127,7 @@ export function startProxy() {
                         ...fwdHeaders,
                         ...codexHeaders(entry.account),
                         authorization: `Bearer ${currentToken}`,
+                        "accept-encoding": "identity",
                         ...(body ? { "content-length": String(body.byteLength) } : {}),
                     },
                     body,
@@ -141,8 +166,84 @@ export function startProxy() {
                     forward(res, 403, fetchRes.headers, text);
                     return;
                 }
-                // ── Stream response back ──────────────────────────
                 log("green", `✓ ${fetchRes.status}`);
+                // ── Translate response if chat completions ─────────
+                if (isChatCompletions && parsedBody) {
+                    if (isStreaming) {
+                        // Streaming: translate Responses SSE → Chat Completions SSE
+                        res.writeHead(200, {
+                            "content-type": "text/event-stream",
+                            "cache-control": "no-cache",
+                            "connection": "keep-alive",
+                        });
+                        const translator = createStreamTranslator(parsedBody.model);
+                        const reader = fetchRes.body.getReader();
+                        const decoder = new TextDecoder();
+                        let buffer = "";
+                        let emittedCount = 0;
+                        try {
+                            while (true) {
+                                const { done, value } = await reader.read();
+                                if (done)
+                                    break;
+                                const raw = decoder.decode(value, { stream: true });
+                                if (emittedCount === 0)
+                                    log("cyan", ` upstream first chunk: ${raw.slice(0, 300).replace(/\n/g, "\\n")}`);
+                                buffer += raw;
+                                const lines = buffer.split("\n");
+                                buffer = lines.pop() ?? "";
+                                for (const line of lines) {
+                                    const trimmed = line.trim();
+                                    if (!trimmed)
+                                        continue;
+                                    const translated = translator.feed(trimmed);
+                                    for (const out of translated) {
+                                        if (emittedCount < 3)
+                                            log("cyan", ` emit[${emittedCount}]: ${out.slice(0, 200).replace(/\n/g, "\\n")}`);
+                                        res.write(out);
+                                        emittedCount++;
+                                    }
+                                }
+                            }
+                            // Process remaining buffer
+                            if (buffer.trim()) {
+                                const translated = translator.feed(buffer.trim());
+                                for (const out of translated) {
+                                    res.write(out);
+                                    emittedCount++;
+                                }
+                            }
+                            const flushed = translator.flush();
+                            for (const out of flushed) {
+                                res.write(out);
+                                emittedCount++;
+                            }
+                        }
+                        catch (err) {
+                            log("red", ` stream error: ${err}`);
+                        }
+                        log("cyan", ` stream done, emitted ${emittedCount} chunks`);
+                        res.end();
+                    }
+                    else {
+                        // Non-streaming: buffer full response and translate
+                        const text = await fetchRes.text();
+                        log("cyan", ` upstream response: ${text.slice(0, 300)}`);
+                        try {
+                            const respBody = JSON.parse(text);
+                            const translated = responsesToChatResponse(respBody, parsedBody.model);
+                            log("cyan", ` translated: ${JSON.stringify(translated).slice(0, 300)}`);
+                            json(res, 200, translated);
+                        }
+                        catch {
+                            // Can't parse — forward raw
+                            res.writeHead(fetchRes.status, { "content-type": "application/json" });
+                            res.end(text);
+                        }
+                    }
+                    return;
+                }
+                // ── Pass-through (non-translated) ─────────────────
                 const resHeaders = {};
                 fetchRes.headers.forEach((v, k) => {
                     if (!STRIP_RES.has(k.toLowerCase()))
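The streaming branch above delegates all event mapping to the translator from translate.js (next section). Fed a single hand-written Responses text-delta event, it emits two Chat Completions chunks, a role announcement and the text itself (sketch):

```js
import { createStreamTranslator } from "./translate.js";

const t = createStreamTranslator("gpt-5.3-codex");
const out = t.feed('data: {"type":"response.output_text.delta","delta":"Hello"}');
console.log(out.length); // 2
// out[0] → delta: { role: "assistant", content: "" }
// out[1] → delta: { content: "Hello" }
```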
package/dist/translate.js ADDED
@@ -0,0 +1,276 @@
+// ── Chat Completions ↔ Responses API translation layer ─────────────
+// ── Request: Chat Completions → Responses ──────────────────────────
+export function chatToResponsesRequest(body) {
+    const out = { model: body.model };
+    // Extract system message → instructions
+    const messages = body.messages ?? [];
+    const systemMsgs = messages.filter((m) => m.role === "system");
+    const nonSystem = messages.filter((m) => m.role !== "system");
+    if (systemMsgs.length > 0) {
+        out.instructions = systemMsgs
+            .map((m) => typeof m.content === "string" ? m.content : JSON.stringify(m.content))
+            .join("\n");
+    }
+    // Convert messages → input
+    out.input = [];
+    for (const msg of nonSystem) {
+        if (msg.role === "user") {
+            out.input.push({ role: "user", content: convertInputContent(msg.content) });
+        }
+        else if (msg.role === "assistant") {
+            // Text part as a message item
+            if (msg.content) {
+                out.input.push({
+                    type: "message",
+                    role: "assistant",
+                    status: "completed",
+                    content: [{ type: "output_text", text: msg.content, annotations: [] }],
+                });
+            }
+            // Tool calls as separate function_call items
+            if (msg.tool_calls) {
+                for (const tc of msg.tool_calls) {
+                    out.input.push({
+                        type: "function_call",
+                        call_id: tc.id,
+                        name: tc.function.name,
+                        arguments: tc.function.arguments,
+                    });
+                }
+            }
+        }
+        else if (msg.role === "tool") {
+            out.input.push({
+                type: "function_call_output",
+                call_id: msg.tool_call_id,
+                output: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content),
+            });
+        }
+    }
+    // Simple field mappings
+    if (body.stream !== undefined)
+        out.stream = body.stream;
+    if (body.temperature !== undefined)
+        out.temperature = body.temperature;
+    if (body.top_p !== undefined)
+        out.top_p = body.top_p;
+    // Note: ChatGPT backend doesn't support max_output_tokens, skip it
+    if (body.stop !== undefined)
+        out.stop = body.stop;
+    if (body.frequency_penalty !== undefined)
+        out.frequency_penalty = body.frequency_penalty;
+    if (body.presence_penalty !== undefined)
+        out.presence_penalty = body.presence_penalty;
+    if (body.user !== undefined)
+        out.user = body.user;
+    if (body.parallel_tool_calls !== undefined)
+        out.parallel_tool_calls = body.parallel_tool_calls;
+    out.store = false; // ChatGPT backend requires store=false
+    if (body.metadata !== undefined)
+        out.metadata = body.metadata;
+    // reasoning_effort
+    if (body.reasoning_effort !== undefined) {
+        out.reasoning = { effort: body.reasoning_effort };
+    }
+    // response_format → text.format
+    if (body.response_format) {
+        out.text = { format: body.response_format };
+    }
+    // tools: unwrap function wrapper
+    if (body.tools) {
+        out.tools = body.tools.map((t) => {
+            if (t.type === "function" && t.function) {
+                return { type: "function", ...t.function };
+            }
+            return t;
+        });
+    }
+    // tool_choice: translate object form
+    if (body.tool_choice !== undefined) {
+        if (typeof body.tool_choice === "object" && body.tool_choice.function) {
+            out.tool_choice = { type: "function", name: body.tool_choice.function.name };
+        }
+        else {
+            out.tool_choice = body.tool_choice;
+        }
+    }
+    return out;
+}
+function convertInputContent(content) {
+    if (typeof content === "string")
+        return content;
+    if (!Array.isArray(content))
+        return content;
+    return content.map((part) => {
+        if (part.type === "text")
+            return { type: "input_text", text: part.text };
+        if (part.type === "image_url")
+            return { type: "input_image", image_url: part.image_url.url ?? part.image_url };
+        return part;
+    });
+}
+// ── Response: Responses → Chat Completions (non-streaming) ─────────
+export function responsesToChatResponse(resp, model) {
+    const output = resp.output ?? [];
+    let textContent = "";
+    const toolCalls = [];
+    for (const item of output) {
+        if (item.type === "message" && item.content) {
+            for (const part of item.content) {
+                if (part.type === "output_text")
+                    textContent += part.text;
+            }
+        }
+        else if (item.type === "function_call") {
+            toolCalls.push({
+                id: item.call_id,
+                type: "function",
+                function: { name: item.name, arguments: item.arguments },
+            });
+        }
+    }
+    const finishReason = toolCalls.length > 0 ? "tool_calls" :
+        resp.status === "completed" ? "stop" :
+            resp.status === "incomplete" ? "length" : "stop";
+    const message = { role: "assistant", content: textContent || null };
+    if (toolCalls.length > 0)
+        message.tool_calls = toolCalls;
+    return {
+        id: resp.id?.replace(/^resp_/, "chatcmpl-") ?? "chatcmpl-proxy",
+        object: "chat.completion",
+        created: Math.floor(resp.created_at ?? Date.now() / 1000),
+        model: resp.model ?? model,
+        choices: [{ index: 0, message, finish_reason: finishReason, logprobs: null }],
+        usage: resp.usage ? {
+            prompt_tokens: resp.usage.input_tokens ?? 0,
+            completion_tokens: resp.usage.output_tokens ?? 0,
+            total_tokens: resp.usage.total_tokens ?? 0,
+        } : undefined,
+    };
+}
+export function createStreamTranslator(model) {
+    const id = `chatcmpl-${Date.now()}`;
+    let sentRole = false;
+    let sentDone = false;
+    let toolCallIndex = -1;
+    const toolCallIds = new Map(); // item_id → index
+    function chunk(delta, finishReason = null) {
+        return `data: ${JSON.stringify({
+            id,
+            object: "chat.completion.chunk",
+            created: Math.floor(Date.now() / 1000),
+            model,
+            choices: [{ index: 0, delta, finish_reason: finishReason }],
+        })}\n\n`;
+    }
+    function usageChunk(usage) {
+        return `data: ${JSON.stringify({
+            id,
+            object: "chat.completion.chunk",
+            created: Math.floor(Date.now() / 1000),
+            model,
+            choices: [],
+            usage: {
+                prompt_tokens: usage.input_tokens ?? 0,
+                completion_tokens: usage.output_tokens ?? 0,
+                total_tokens: usage.total_tokens ?? 0,
+            },
+        })}\n\n`;
+    }
+    return {
+        feed(line) {
+            if (!line.startsWith("data: "))
+                return [];
+            const jsonStr = line.slice(6).trim();
+            if (!jsonStr || jsonStr === "[DONE]")
+                return [];
+            let event;
+            try {
+                event = JSON.parse(jsonStr);
+            }
+            catch {
+                return [];
+            }
+            const results = [];
+            const type = event.type;
+            if (type === "response.output_item.added") {
+                // Role announcement on first message
+                if (event.item?.type === "message" && !sentRole) {
+                    sentRole = true;
+                    results.push(chunk({ role: "assistant", content: "" }));
+                }
+                // Function call start
+                if (event.item?.type === "function_call") {
+                    toolCallIndex++;
+                    toolCallIds.set(event.item.id, toolCallIndex);
+                    results.push(chunk({
+                        tool_calls: [{
+                                index: toolCallIndex,
+                                id: event.item.call_id,
+                                type: "function",
+                                function: { name: event.item.name, arguments: "" },
+                            }],
+                    }));
+                }
+            }
+            else if (type === "response.output_text.delta") {
+                if (!sentRole) {
+                    sentRole = true;
+                    results.push(chunk({ role: "assistant", content: "" }));
+                }
+                results.push(chunk({ content: event.delta }));
+            }
+            else if (type === "response.function_call_arguments.delta") {
+                const idx = toolCallIds.get(event.item_id) ?? 0;
+                results.push(chunk({
+                    tool_calls: [{ index: idx, function: { arguments: event.delta } }],
+                }));
+            }
+            else if (type === "response.completed") {
+                const resp = event.response;
+                const hasFnCalls = (resp?.output ?? []).some((o) => o.type === "function_call");
+                const finishReason = hasFnCalls ? "tool_calls" :
+                    resp?.status === "incomplete" ? "length" : "stop";
+                results.push(chunk({}, finishReason));
+                if (resp?.usage)
+                    results.push(usageChunk(resp.usage));
+                results.push("data: [DONE]\n\n");
+            }
+            else if (type === "error") {
+                // Forward API errors as Chat Completions error format
+                const err = event.error ?? {};
+                results.push(`data: ${JSON.stringify({
+                    error: {
+                        message: err.message ?? "Unknown error",
+                        type: err.type ?? "api_error",
+                        code: err.code ?? null,
+                    },
+                })}\n\n`);
+                results.push("data: [DONE]\n\n");
+            }
+            else if (type === "response.failed") {
+                const resp = event.response;
+                const err = resp?.error ?? {};
+                if (!sentDone) {
+                    results.push(`data: ${JSON.stringify({
+                        error: {
+                            message: err.message ?? "Response failed",
+                            type: "api_error",
+                            code: err.code ?? null,
+                        },
+                    })}\n\n`);
+                    results.push("data: [DONE]\n\n");
+                }
+            }
+            if (results.some(r => r.includes("[DONE]")))
+                sentDone = true;
+            return results;
+        },
+        flush() {
+            // If stream ended without a proper termination, send [DONE]
+            if (!sentDone)
+                return ["data: [DONE]\n\n"];
+            return [];
+        },
+    };
+}
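As a worked example of the non-streaming mapping above (input values invented for illustration):

```js
import { responsesToChatResponse } from "./translate.js";

const resp = {
    id: "resp_abc123",
    status: "completed",
    model: "gpt-5.3-codex",
    output: [{ type: "message", content: [{ type: "output_text", text: "Hi!" }] }],
    usage: { input_tokens: 5, output_tokens: 2, total_tokens: 7 },
};
console.log(responsesToChatResponse(resp, "gpt-5.3-codex"));
// → { id: "chatcmpl-abc123", object: "chat.completion", created: <now, seconds>,
//    model: "gpt-5.3-codex",
//    choices: [{ index: 0, message: { role: "assistant", content: "Hi!" },
//                finish_reason: "stop", logprobs: null }],
//    usage: { prompt_tokens: 5, completion_tokens: 2, total_tokens: 7 } }
```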
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "codex-rotating-proxy",
-  "version": "0.1.1",
+  "version": "0.1.3",
   "description": "OpenAI API proxy that rotates between multiple accounts when rate limits hit",
   "type": "module",
   "bin": {