copilot-cursor-proxy 1.0.4 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -50,8 +50,8 @@ Copy the HTTPS URL (e.g., `https://xxxxx.trycloudflare.com`).
50
50
  Cursor → (HTTPS tunnel) → proxy-router (:4142) → copilot-api (:4141) → GitHub Copilot
51
51
  ```
52
52
 
53
- * **Port 4141 (`copilot-api`):** Authenticates with GitHub and provides the OpenAI-compatible API.
54
- * *Powered by [copilot-api](https://www.npmjs.com/package/copilot-api) (installed via `npx`).*
53
+ * **Port 4141 (`copilot-api`):** Authenticates with GitHub, provides the OpenAI-compatible API, and natively handles the Responses API for GPT-5.x models.
54
+ * *Powered by [@jeffreycao/copilot-api](https://github.com/caozhiyuan/copilot-api) (installed via `npx`).*
55
55
  * **Port 4142 (`proxy-router`):** Converts Anthropic-format messages to OpenAI format, bridges Responses API for GPT-5.x models, handles the `cus-` prefix, and serves the dashboard.
56
56
  * **HTTPS tunnel:** Cursor requires HTTPS — a tunnel exposes the local proxy.
57
57
 
@@ -81,32 +81,32 @@ Cursor → (HTTPS tunnel) → proxy-router (:4142) → copilot-api (:4141) → G
81
81
 
82
82
  > **💡 Tip:** Visit the [Dashboard](http://localhost:4142) to see all available models and copy their IDs.
83
83
 
84
- ### Tested Models (15/21 passing)
84
+ ### Tested Models (19/20 passing)
85
85
 
86
86
  | Cursor Model Name | Actual Model | Status |
87
87
  |---|---|---|
88
88
  | `cus-gpt-4o` | GPT-4o | ✅ |
89
89
  | `cus-gpt-4.1` | GPT-4.1 | ✅ |
90
+ | `cus-gpt-41-copilot` | GPT-4.1 Copilot | ❌ Not supported by GitHub |
90
91
  | `cus-gpt-5-mini` | GPT-5 Mini | ✅ |
91
- | `cus-gpt-5.1` | GPT-5.1 | ✅ |
92
- | `cus-gpt-5.2` | GPT-5.2 | ⚠️ See note |
93
- | `cus-gpt-5.2-codex` | GPT-5.2 Codex | ⚠️ See note |
94
- | `cus-gpt-5.3-codex` | GPT-5.3 Codex | ⚠️ See note |
95
- | `cus-gpt-5.4` | GPT-5.4 | ⚠️ See note |
96
- | `cus-gpt-5.4-mini` | GPT-5.4 Mini | ⚠️ See note |
97
- | `cus-goldeneye` | Goldeneye | ⚠️ See note |
92
+ | `cus-gpt-5.1` | GPT-5.1 | ✅ (deprecating 2026-04-15) |
93
+ | `cus-gpt-5.2` | GPT-5.2 | ✅ |
94
+ | `cus-gpt-5.2-codex` | GPT-5.2 Codex | ✅ |
95
+ | `cus-gpt-5.3-codex` | GPT-5.3 Codex | ✅ |
96
+ | `cus-gpt-5.4` | GPT-5.4 | ✅ |
97
+ | `cus-gpt-5.4-mini` | GPT-5.4 Mini | ✅ |
98
98
  | `cus-claude-haiku-4.5` | Claude Haiku 4.5 | ✅ |
99
99
  | `cus-claude-sonnet-4` | Claude Sonnet 4 | ✅ |
100
100
  | `cus-claude-sonnet-4.5` | Claude Sonnet 4.5 | ✅ |
101
101
  | `cus-claude-sonnet-4.6` | Claude Sonnet 4.6 | ✅ |
102
102
  | `cus-claude-opus-4.5` | Claude Opus 4.5 | ✅ |
103
103
  | `cus-claude-opus-4.6` | Claude Opus 4.6 | ✅ |
104
- | `cus-claude-opus-4.6-1m` | Claude Opus 4.6 (1M) | ✅ |
105
104
  | `cus-gemini-2.5-pro` | Gemini 2.5 Pro | ✅ |
106
105
  | `cus-gemini-3-flash-preview` | Gemini 3 Flash | ✅ |
107
106
  | `cus-gemini-3.1-pro-preview` | Gemini 3.1 Pro | ✅ |
107
+ | `cus-text-embedding-3-small` | Text Embedding 3 Small | N/A (embedding model) |
108
108
 
109
- > **⚠️ GPT-5.2+, GPT-5.x-codex, and goldeneye** are currently broken. These models require the `/v1/responses` API or `max_completion_tokens` instead of `max_tokens`, but `copilot-api` injects `max_tokens` into all requests. The proxy has a Responses API bridge built in, but `copilot-api` no longer exposes the `/v1/responses` endpoint. This will be resolved when `copilot-api` is updated. **All Claude, Gemini, GPT-4.x, GPT-5-mini, and GPT-5.1 models work fine.**
109
+ > All GPT-5.x models now work thanks to the switch to [@jeffreycao/copilot-api](https://github.com/caozhiyuan/copilot-api), which natively supports the Responses API. The proxy also includes its own Responses API bridge as a fallback.
110
110
 
111
111
  ![Cursor Settings Configuration](./cursor-settings.png)
112
112
 
@@ -186,7 +186,7 @@ Three tabs:
186
186
  | Streaming | ✅ Works |
187
187
  | Plan mode | ✅ Works |
188
188
  | Agent mode | ✅ Works |
189
- | GPT-5.x models | ⚠️ Blocked by copilot-api `max_tokens` bug |
189
+ | All GPT-5.x models | ✅ Works |
190
190
  | Extended thinking (chain-of-thought) | ❌ Stripped |
191
191
  | Prompt caching (`cache_control`) | ❌ Stripped |
192
192
  | Claude Vision | ❌ Not supported via Copilot |
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "copilot-cursor-proxy",
3
- "version": "1.0.4",
3
+ "version": "1.1.0",
4
4
  "description": "Proxy that bridges GitHub Copilot API to Cursor IDE — translates Anthropic format, bridges Responses API for GPT 5.x, and more",
5
5
  "bin": {
6
6
  "copilot-cursor-proxy": "bin/cli.js"
package/proxy-router.ts CHANGED
@@ -205,38 +205,34 @@ Bun.serve({
205
205
 
206
206
  logTransformedRequest(json);
207
207
 
208
- const body = JSON.stringify(json);
209
208
  const headers = new Headers(req.headers);
210
209
  headers.set("host", targetUrl.host);
211
- headers.set("content-length", String(new TextEncoder().encode(body).length));
212
210
 
213
211
  const needsResponsesAPI = targetModel.match(/^gpt-5\.[2-9]|^gpt-5\.\d+-codex|^o[1-9]|^goldeneye/i);
214
212
 
215
- // For models that need max_completion_tokens instead of max_tokens
216
- const needsMaxCompletionTokens = targetModel.match(/^gpt-5\.[2-9]|^gpt-5\.\d+-codex|^goldeneye/i);
217
- if (needsMaxCompletionTokens && json.max_tokens) {
213
+ if (needsResponsesAPI && json.max_tokens) {
218
214
  json.max_completion_tokens = json.max_tokens;
219
215
  delete json.max_tokens;
220
216
  console.log(`🔧 Converted max_tokens → max_completion_tokens`);
221
217
  }
222
218
 
223
- // Try Responses API first for models that may need it; fall back to chat completions
224
219
  if (needsResponsesAPI) {
225
- console.log(`🔀 Model ${targetModel} — trying Responses API bridge`);
220
+ console.log(`🔀 Model ${targetModel} — using Responses API bridge`);
226
221
  const chatId = `chatcmpl-proxy-${++responseCounter}`;
227
222
  try {
228
223
  const result = await handleResponsesAPIBridge(json, req, chatId, TARGET_URL);
229
- if (result.status !== 404) {
230
- addRequestLog({
231
- id: getNextRequestId(), timestamp: startTime, model: targetModel,
232
- promptTokens: 0, completionTokens: 0, totalTokens: 0,
233
- status: result.status, duration: Date.now() - startTime, stream: !!json.stream,
234
- });
235
- return result;
236
- }
237
- console.log(`⚠️ Responses API returned 404 — falling back to chat/completions`);
238
- } catch (e) {
239
- console.log(`⚠️ Responses API failed falling back to chat/completions`);
224
+ addRequestLog({
225
+ id: getNextRequestId(), timestamp: startTime, model: targetModel,
226
+ promptTokens: 0, completionTokens: 0, totalTokens: 0,
227
+ status: result.status, duration: Date.now() - startTime, stream: !!json.stream,
228
+ });
229
+ return result;
230
+ } catch (e: any) {
231
+ console.error(`❌ Responses API bridge failed for ${targetModel}:`, e?.message || e);
232
+ return new Response(
233
+ JSON.stringify({ error: { message: `Responses API bridge failed: ${e?.message || 'Unknown error'}`, type: "proxy_error" } }),
234
+ { status: 502, headers: { "Content-Type": "application/json", "Access-Control-Allow-Origin": "*" } }
235
+ );
240
236
  }
241
237
  }
242
238
 
@@ -248,6 +244,9 @@ Bun.serve({
248
244
  headers.set("Copilot-Vision-Request", "true");
249
245
  }
250
246
 
247
+ const body = JSON.stringify(json);
248
+ headers.set("content-length", String(new TextEncoder().encode(body).length));
249
+
251
250
  const response = await fetch(targetUrl.toString(), {
252
251
  method: "POST",
253
252
  headers: headers,
package/start.ts CHANGED
@@ -54,7 +54,7 @@ async function main() {
54
54
  const isWindows = process.platform === 'win32';
55
55
  const npxCmd = isWindows ? 'npx.cmd' : 'npx';
56
56
 
57
- copilotProc = spawn([npxCmd, 'copilot-api', 'start'], {
57
+ copilotProc = spawn([npxCmd, '@jeffreycao/copilot-api@latest', 'start'], {
58
58
  stdout: 'pipe',
59
59
  stderr: 'pipe',
60
60
  });