gemini-thought-signature-proxy 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +49 -0
- package/cli.js +3 -0
- package/package.json +31 -0
- package/proxy.js +86 -0
package/README.md
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
```
|
|
2
|
+
400 INVALID_ARGUMENT: Function call is missing a thought_signature in functionCall parts.
|
|
3
|
+
```
|
|
4
|
+
|
|
5
|
+
## Who is affected
|
|
6
|
+
Anyone using VS Code or VS Code Insiders with GitHub Copilot's "Bring Your Own Key" (BYOK) feature connected to **any** OpenAI-compatible Gemini 3.1 endpoint in Agent mode (tool/function calling).
|
|
7
|
+
|
|
8
|
+
## What is `thought_signature`?
|
|
9
|
+
Gemini 3.1 uses "thinking" — an internal reasoning step before generating tool calls. Google cryptographically signs this reasoning and attaches the signature to any tool call the model returns. This signature is required to be echoed back on subsequent turns. However, VS Code's Copilot extension is a vanilla OpenAI client, so it strips this non-standard `extra_content` field entirely.
|
|
10
|
+
|
|
11
|
+
## The Failure Flow
|
|
12
|
+
1. VS Code → Google (round 1, signature returned)
|
|
13
|
+
2. VS Code strips signature
|
|
14
|
+
3. VS Code → Google (round 2, 400 error)
|
|
15
|
+
|
|
16
|
+
## Quick Start
|
|
17
|
+
Run the proxy locally:
|
|
18
|
+
```bash
|
|
19
|
+
npx gemini-thought-signature-proxy
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
Then, update your VS Code settings. Paste the following into your `chatLanguageModels.json`:
|
|
23
|
+
```json
|
|
24
|
+
"url": "http://localhost:3000/v1beta/openai/"
|
|
25
|
+
```
|
|
26
|
+
|
|
27
|
+
**How to find `chatLanguageModels.json`:**
|
|
28
|
+
- Press `Cmd+Shift+P` (or `Ctrl+Shift+P` on Windows/Linux)
|
|
29
|
+
- Search for `Open User Settings (JSON)`
|
|
30
|
+
- Or find it directly at `~/Library/Application Support/Code - Insiders/User/chatLanguageModels.json` on macOS.
|
|
31
|
+
|
|
32
|
+
## How it works
|
|
33
|
+
This proxy sits between VS Code and Google. Before forwarding any request, it walks the `messages` array, finds every `role: "assistant"` message with `tool_calls`, and injects a stateless bypass sentinel:
|
|
34
|
+
```json
|
|
35
|
+
"extra_content": { "google": { "thought_signature": "skip_thought_signature_validator" } }
|
|
36
|
+
```
|
|
37
|
+
`skip_thought_signature_validator` is Google's own documented bypass sentinel for the signature validator, intended for exactly this third-party client scenario.
|
|
38
|
+
|
|
39
|
+
## Path routing note
|
|
40
|
+
VS Code constructs the final URL by appending `v1/chat/completions` to the base URL in `chatLanguageModels.json`. With a base of `http://localhost:3000/v1beta/openai/`, VS Code sends requests to `/v1beta/openai/v1/chat/completions`. The Google endpoint is `/v1beta/openai/chat/completions` (no extra `/v1`). The proxy intercepts VS Code's path and rewrites the upstream target accordingly.
|
|
41
|
+
|
|
42
|
+
## Note on Models
|
|
43
|
+
The bypass only activates for `models/gemini-3.1-pro-preview-customtools`. Other models pass through untouched.
|
|
44
|
+
|
|
45
|
+
## When this might stop working
|
|
46
|
+
If Google changes enforcement post-GA, the `PATCHED_MODEL_ID` and `BYPASS_SIGNATURE` constants may need to be updated.
|
|
47
|
+
|
|
48
|
+
## References
|
|
49
|
+
- [Google's official thought signatures docs](https://ai.google.dev/gemini-api/docs/thought-signatures)
|
package/cli.js
ADDED
package/package.json
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "gemini-thought-signature-proxy",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "A proxy to bypass the Gemini 3.1 Pro thought_signature 400 error in VS Code Copilot BYOK",
|
|
5
|
+
"main": "proxy.js",
|
|
6
|
+
"type": "module",
|
|
7
|
+
"bin": {
|
|
8
|
+
"gemini-thought-signature-proxy": "cli.js"
|
|
9
|
+
},
|
|
10
|
+
"scripts": {
|
|
11
|
+
"start": "node cli.js"
|
|
12
|
+
},
|
|
13
|
+
"keywords": [
|
|
14
|
+
"gemini",
|
|
15
|
+
"thought_signature",
|
|
16
|
+
"proxy",
|
|
17
|
+
"vscode",
|
|
18
|
+
"copilot",
|
|
19
|
+
"byok",
|
|
20
|
+
"tool-calling",
|
|
21
|
+
"agent-mode",
|
|
22
|
+
"400",
|
|
23
|
+
"INVALID_ARGUMENT"
|
|
24
|
+
],
|
|
25
|
+
"author": "",
|
|
26
|
+
"license": "MIT",
|
|
27
|
+
"dependencies": {
|
|
28
|
+
"express": "^4.18.2",
|
|
29
|
+
"node-fetch": "^3.3.2"
|
|
30
|
+
}
|
|
31
|
+
}
|
package/proxy.js
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
import express from "express";
|
|
2
|
+
import fetch from "node-fetch";
|
|
3
|
+
|
|
4
|
+
// Local port the proxy listens on (override with the PORT env var).
const PORT = process.env.PORT || 3000;
// Upstream Google Generative Language API host that all traffic is forwarded to.
const GOOGLE_BASE_URL = "https://generativelanguage.googleapis.com";
// Sentinel value accepted by Google's thought_signature validator to skip
// verification for a tool call (see README / Google thought-signatures docs).
const BYPASS_SIGNATURE = "skip_thought_signature_validator";
// The only model the bypass is applied to; all other models pass through untouched.
const PATCHED_MODEL_ID = "models/gemini-3.1-pro-preview-customtools";

const app = express();
// Generous limit: agent-mode conversations carry full message history plus
// tool results, which can be large.
app.use(express.json({ limit: "50mb" }));
|
|
11
|
+
|
|
12
|
+
/**
 * Walk an OpenAI-style `messages` array and guarantee that every assistant
 * tool call carries a Google `thought_signature`. Tool calls that already
 * have one are left alone; the rest receive the documented bypass sentinel.
 * Non-array input is returned unchanged; input is never mutated.
 *
 * @param {Array<object>|*} messages - Chat messages from the client request.
 * @returns {Array<object>|*} A new array with patched copies where needed.
 */
function injectThoughtSignatures(messages) {
  if (!Array.isArray(messages)) return messages;

  // Copy a tool call, stamping the bypass sentinel unless a signature exists.
  const ensureSignature = (call) => {
    const alreadySigned = Boolean(call?.extra_content?.google?.thought_signature);
    if (alreadySigned) {
      return call;
    }
    return {
      ...call,
      extra_content: { google: { thought_signature: BYPASS_SIGNATURE } },
    };
  };

  return messages.map((msg) => {
    const needsPatch = msg.role === "assistant" && Array.isArray(msg.tool_calls);
    return needsPatch
      ? { ...msg, tool_calls: msg.tool_calls.map(ensureSignature) }
      : msg;
  });
}
|
|
29
|
+
|
|
30
|
+
// Main interception route. VS Code appends "v1/chat/completions" to the
// configured base URL, so requests arrive at /v1beta/openai/v1/chat/completions.
app.post("/v1beta/openai/v1/chat/completions", async (req, res) => {
  try {
    const body = req.body ?? {};
    const { messages, model, ...rest } = body;
    // Only the preview model enforces thought_signature validation; every
    // other model is forwarded untouched.
    const requiresPatch = model === PATCHED_MODEL_ID;
    const patchedMessages = requiresPatch ? injectThoughtSignatures(messages) : messages;
    if (requiresPatch) {
      console.log(`[proxy] ✓ Injecting thought_signature bypass for model: ${model}`);
    }
    // Google's OpenAI-compatible endpoint has no extra "/v1" path segment.
    const upstreamUrl = `${GOOGLE_BASE_URL}/v1beta/openai/chat/completions`;
    const forwardHeaders = { "content-type": req.headers["content-type"] || "application/json" };
    if (req.headers["authorization"]) forwardHeaders["authorization"] = req.headers["authorization"];
    console.log(`[proxy] → POST ${upstreamUrl} | model: ${model ?? "unknown"} | messages: ${patchedMessages?.length ?? 0}`);
    const upstreamResponse = await fetch(upstreamUrl, {
      method: "POST",
      headers: forwardHeaders,
      body: JSON.stringify({ messages: patchedMessages, model, ...rest }),
    });
    res.status(upstreamResponse.status);
    const ct = upstreamResponse.headers.get("content-type");
    if (ct) res.set("content-type", ct);
    // node-fetch v3 sets body to null for bodyless responses (e.g. 204/304);
    // calling .pipe on null would throw inside the handler.
    if (upstreamResponse.body) {
      upstreamResponse.body.pipe(res);
    } else {
      res.end();
    }
  } catch (err) {
    console.error("[proxy] ✖ Error:", err);
    // Only emit the JSON error envelope if nothing has been flushed yet;
    // otherwise just terminate the response.
    if (!res.headersSent) {
      res.status(502).json({ error: "proxy_error", details: err.message });
    } else {
      res.end();
    }
  }
});
|
|
57
|
+
|
|
58
|
+
// Transparent passthrough for every other path (model listing, embeddings, …).
app.all("*", async (req, res) => {
  try {
    // Express's req.path strips the query string; re-attach it from the raw URL.
    const qs = req.url.includes("?") ? req.url.slice(req.url.indexOf("?")) : "";
    const upstreamUrl = `${GOOGLE_BASE_URL}${req.path}${qs}`;
    const forwardHeaders = {};
    for (const header of ["authorization", "content-type", "accept"]) {
      if (req.headers[header]) forwardHeaders[header] = req.headers[header];
    }
    const fetchOptions = { method: req.method, headers: forwardHeaders };
    // express.json() populates req.body with {} when there is no body or the
    // content type is not JSON; forwarding a literal "{}" would corrupt such
    // requests, so only forward a body when there is an actual payload.
    if (
      req.method !== "GET" &&
      req.method !== "HEAD" &&
      req.body &&
      Object.keys(req.body).length > 0
    ) {
      fetchOptions.body = JSON.stringify(req.body);
    }
    console.log(`[proxy] → ${req.method} ${upstreamUrl}`);
    const upstreamResponse = await fetch(upstreamUrl, fetchOptions);
    res.status(upstreamResponse.status);
    const ct = upstreamResponse.headers.get("content-type");
    if (ct) res.set("content-type", ct);
    // node-fetch v3 sets body to null for bodyless responses; guard the pipe.
    if (upstreamResponse.body) {
      upstreamResponse.body.pipe(res);
    } else {
      res.end();
    }
  } catch (err) {
    console.error("[proxy] ✖ Error in passthrough:", err);
    if (!res.headersSent) {
      res.status(502).json({ error: "proxy_error", details: err.message });
    } else {
      res.end();
    }
  }
});
|
|
81
|
+
|
|
82
|
+
// Start the proxy and print a startup banner (identical output to three
// separate console.log calls, emitted as one write).
app.listen(PORT, () => {
  const banner = [
    "",
    ` Gemini thought_signature bypass proxy`,
    ` Listening → http://localhost:${PORT}`,
    ` Forwarding → ${GOOGLE_BASE_URL}`,
    "",
  ];
  console.log(banner.join("\n"));
});
|