opencode-gemini-auth-proxy 1.3.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +254 -0
- package/index.ts +14 -0
- package/package.json +23 -0
- package/src/constants.ts +39 -0
- package/src/fetch.ts +11 -0
- package/src/gemini/oauth.ts +178 -0
- package/src/plugin/auth.test.ts +58 -0
- package/src/plugin/auth.ts +46 -0
- package/src/plugin/cache.ts +65 -0
- package/src/plugin/debug.ts +258 -0
- package/src/plugin/project.test.ts +112 -0
- package/src/plugin/project.ts +552 -0
- package/src/plugin/request-helpers.test.ts +84 -0
- package/src/plugin/request-helpers.ts +439 -0
- package/src/plugin/request.test.ts +50 -0
- package/src/plugin/request.ts +483 -0
- package/src/plugin/server.ts +246 -0
- package/src/plugin/token.test.ts +74 -0
- package/src/plugin/token.ts +188 -0
- package/src/plugin/types.ts +76 -0
- package/src/plugin.ts +700 -0
- package/src/shims.d.ts +8 -0
package/src/plugin/request.ts

@@ -0,0 +1,483 @@

```ts
import { CODE_ASSIST_HEADERS, GEMINI_CODE_ASSIST_ENDPOINT } from "../constants";
import { logGeminiDebugResponse, type GeminiDebugContext } from "./debug";
import {
  enhanceGeminiErrorResponse,
  extractUsageMetadata,
  normalizeThinkingConfig,
  parseGeminiApiBody,
  rewriteGeminiPreviewAccessError,
  type GeminiApiBody,
  type GeminiUsageMetadata,
} from "./request-helpers";

const STREAM_ACTION = "streamGenerateContent";
const MODEL_FALLBACKS: Record<string, string> = {
  "gemini-2.5-flash-image": "gemini-2.5-flash",
};

interface GeminiFunctionCallPart {
  functionCall?: {
    name: string;
    args?: Record<string, unknown>;
    [key: string]: unknown;
  };
  thoughtSignature?: string;
  [key: string]: unknown;
}

interface GeminiContentPart {
  role?: string;
  parts?: GeminiFunctionCallPart[];
  [key: string]: unknown;
}

interface OpenAIToolCall {
  id?: string;
  type?: string;
  function?: {
    name?: string;
    arguments?: string;
    [key: string]: unknown;
  };
  [key: string]: unknown;
}

interface OpenAIMessage {
  role?: string;
  content?: string | null;
  tool_calls?: OpenAIToolCall[];
  tool_call_id?: string;
  name?: string;
  [key: string]: unknown;
}

/**
 * Transforms OpenAI tool_calls to Gemini functionCall format and adds thoughtSignature.
 * This ensures compatibility when OpenCode sends OpenAI-format function calls.
 */
function transformOpenAIToolCalls(requestPayload: Record<string, unknown>): void {
  const messages = requestPayload.messages;
  if (!messages || !Array.isArray(messages)) {
    return;
  }

  for (const message of messages) {
    if (message && typeof message === "object") {
      const msgObj = message as OpenAIMessage;
      const toolCalls = msgObj.tool_calls;
      if (toolCalls && Array.isArray(toolCalls) && toolCalls.length > 0) {
        const parts: GeminiFunctionCallPart[] = [];

        if (typeof msgObj.content === "string" && msgObj.content.length > 0) {
          parts.push({ text: msgObj.content });
        }

        for (const toolCall of toolCalls) {
          if (toolCall && typeof toolCall === "object") {
            const functionObj = toolCall.function;
            if (functionObj && typeof functionObj === "object") {
              const name = functionObj.name;
              const argsStr = functionObj.arguments;
              let args: Record<string, unknown> = {};
              if (typeof argsStr === "string") {
                try {
                  args = JSON.parse(argsStr) as Record<string, unknown>;
                } catch {
                  args = {};
                }
              }

              parts.push({
                functionCall: {
                  name: name ?? "",
                  args,
                },
                thoughtSignature: "skip_thought_signature_validator",
              });
            }
          }
        }

        msgObj.parts = parts;
        delete msgObj.tool_calls;
        delete msgObj.content;
      }
    }
  }
}

/**
 * Adds thoughtSignature to function call parts in the request payload.
 * Gemini 3+ models require thoughtSignature for function calls when using thinking capabilities.
 * This must be applied to all content blocks in the conversation history.
 * Handles both flat contents arrays and nested request.contents (wrapped bodies).
 */
function addThoughtSignaturesToFunctionCalls(requestPayload: Record<string, unknown>): void {
  const processContents = (contents: unknown): void => {
    if (!contents || !Array.isArray(contents)) {
      return;
    }

    for (const content of contents) {
      if (content && typeof content === "object") {
        const contentObj = content as Record<string, unknown>;
        const parts = contentObj.parts;
        if (parts && Array.isArray(parts)) {
          for (const part of parts) {
            if (part && typeof part === "object") {
              const partObj = part as Record<string, unknown>;
              if (partObj.functionCall && !partObj.thoughtSignature) {
                partObj.thoughtSignature = "skip_thought_signature_validator";
              }
            }
          }
        }
      }
    }
  };

  processContents(requestPayload.contents);

  const nestedRequest = requestPayload.request;
  if (nestedRequest && typeof nestedRequest === "object") {
    const requestObj = nestedRequest as Record<string, unknown>;
    processContents(requestObj.contents);
  }
}

/**
 * Detects Gemini/Generative Language API requests by URL.
 * @param input Request target passed to fetch.
 * @returns True when the URL targets generativelanguage.googleapis.com.
 */
export function isGenerativeLanguageRequest(input: RequestInfo): input is string {
  return toRequestUrlString(input).includes("generativelanguage.googleapis.com");
}

/**
 * Rewrites SSE payload lines so downstream consumers see only the inner `response` objects.
 */
function transformStreamingLine(line: string): string {
  if (!line.startsWith("data:")) {
    return line;
  }
  const json = line.slice(5).trim();
  if (!json) {
    return line;
  }
  try {
    const parsed = JSON.parse(json) as { response?: unknown };
    if (parsed.response !== undefined) {
      return `data: ${JSON.stringify(parsed.response)}`;
    }
  } catch (_) {}
  return line;
}

/**
 * Streams SSE payloads, rewriting data lines on the fly.
 */
function transformStreamingPayloadStream(
  stream: ReadableStream<Uint8Array>,
): ReadableStream<Uint8Array> {
  const decoder = new TextDecoder();
  const encoder = new TextEncoder();
  let buffer = "";
  let reader: ReadableStreamDefaultReader<Uint8Array> | null = null;

  return new ReadableStream<Uint8Array>({
    start(controller) {
      reader = stream.getReader();
      const pump = (): void => {
        reader!
          .read()
          .then(({ done, value }) => {
            if (done) {
              buffer += decoder.decode();
              if (buffer.length > 0) {
                controller.enqueue(encoder.encode(transformStreamingLine(buffer)));
              }
              controller.close();
              return;
            }

            buffer += decoder.decode(value, { stream: true });

            let newlineIndex = buffer.indexOf("\n");
            while (newlineIndex !== -1) {
              const line = buffer.slice(0, newlineIndex);
              buffer = buffer.slice(newlineIndex + 1);
              const hasCarriageReturn = line.endsWith("\r");
              const rawLine = hasCarriageReturn ? line.slice(0, -1) : line;
              const transformed = transformStreamingLine(rawLine);
              const suffix = hasCarriageReturn ? "\r\n" : "\n";
              controller.enqueue(encoder.encode(`${transformed}${suffix}`));
              newlineIndex = buffer.indexOf("\n");
            }

            pump();
          })
          .catch((error) => {
            controller.error(error);
          });
      };

      pump();
    },
    cancel(reason) {
      if (reader) {
        reader.cancel(reason).catch(() => {});
      }
    },
  });
}

/**
 * Rewrites OpenAI-style requests into Gemini Code Assist shape, normalizing model, headers,
 * optional cached_content, and thinking config. Also toggles streaming mode for SSE actions.
 */
export function prepareGeminiRequest(
  input: RequestInfo,
  init: RequestInit | undefined,
  accessToken: string,
  projectId: string,
): { request: RequestInfo; init: RequestInit; streaming: boolean; requestedModel?: string } {
  const baseInit: RequestInit = { ...init };
  const headers = new Headers(init?.headers ?? {});

  if (!isGenerativeLanguageRequest(input)) {
    return {
      request: input,
      init: { ...baseInit, headers },
      streaming: false,
    };
  }

  headers.set("Authorization", `Bearer ${accessToken}`);
  headers.delete("x-api-key");

  const match = toRequestUrlString(input).match(/\/models\/([^:]+):(\w+)/);
  if (!match) {
    return {
      request: input,
      init: { ...baseInit, headers },
      streaming: false,
    };
  }

  const [, rawModel = "", rawAction = ""] = match;
  const effectiveModel = MODEL_FALLBACKS[rawModel] ?? rawModel;
  const streaming = rawAction === STREAM_ACTION;
  const transformedUrl = `${GEMINI_CODE_ASSIST_ENDPOINT}/v1internal:${rawAction}${
    streaming ? "?alt=sse" : ""
  }`;

  let body = baseInit.body;
  if (typeof baseInit.body === "string" && baseInit.body) {
    try {
      const parsedBody = JSON.parse(baseInit.body) as Record<string, unknown>;
      const isWrapped = typeof parsedBody.project === "string" && "request" in parsedBody;

      if (isWrapped) {
        const wrappedBody = {
          ...parsedBody,
          model: effectiveModel,
        } as Record<string, unknown>;
        body = JSON.stringify(wrappedBody);
      } else {
        const requestPayload: Record<string, unknown> = { ...parsedBody };

        transformOpenAIToolCalls(requestPayload);
        addThoughtSignaturesToFunctionCalls(requestPayload);

        const rawGenerationConfig = requestPayload.generationConfig as Record<string, unknown> | undefined;
        const normalizedThinking = normalizeThinkingConfig(rawGenerationConfig?.thinkingConfig);
        if (normalizedThinking) {
          if (rawGenerationConfig) {
            rawGenerationConfig.thinkingConfig = normalizedThinking;
            requestPayload.generationConfig = rawGenerationConfig;
          } else {
            requestPayload.generationConfig = { thinkingConfig: normalizedThinking };
          }
        } else if (rawGenerationConfig?.thinkingConfig) {
          delete rawGenerationConfig.thinkingConfig;
          requestPayload.generationConfig = rawGenerationConfig;
        }

        if ("system_instruction" in requestPayload) {
          requestPayload.systemInstruction = requestPayload.system_instruction;
          delete requestPayload.system_instruction;
        }

        const cachedContentFromExtra =
          typeof requestPayload.extra_body === "object" && requestPayload.extra_body
            ? (requestPayload.extra_body as Record<string, unknown>).cached_content ??
              (requestPayload.extra_body as Record<string, unknown>).cachedContent
            : undefined;
        const cachedContent =
          (requestPayload.cached_content as string | undefined) ??
          (requestPayload.cachedContent as string | undefined) ??
          (cachedContentFromExtra as string | undefined);
        if (cachedContent) {
          requestPayload.cachedContent = cachedContent;
        }

        delete requestPayload.cached_content;
        if (requestPayload.extra_body && typeof requestPayload.extra_body === "object") {
          delete (requestPayload.extra_body as Record<string, unknown>).cached_content;
          delete (requestPayload.extra_body as Record<string, unknown>).cachedContent;
          if (Object.keys(requestPayload.extra_body as Record<string, unknown>).length === 0) {
            delete requestPayload.extra_body;
          }
        }

        if ("model" in requestPayload) {
          delete requestPayload.model;
        }

        const wrappedBody = {
          project: projectId,
          model: effectiveModel,
          request: requestPayload,
        };

        body = JSON.stringify(wrappedBody);
      }
    } catch (error) {
      console.error("Failed to transform Gemini request body:", error);
    }
  }

  if (streaming) {
    headers.set("Accept", "text/event-stream");
  }

  headers.set("User-Agent", CODE_ASSIST_HEADERS["User-Agent"]);
  headers.set("X-Goog-Api-Client", CODE_ASSIST_HEADERS["X-Goog-Api-Client"]);
  headers.set("Client-Metadata", CODE_ASSIST_HEADERS["Client-Metadata"]);

  return {
    request: transformedUrl,
    init: {
      ...baseInit,
      headers,
      body,
    },
    streaming,
    requestedModel: rawModel,
  };
}

function toRequestUrlString(value: RequestInfo): string {
  if (typeof value === "string") {
    return value;
  }
  if (value instanceof URL) {
    return value.toString();
  }
  const candidate = (value as Request).url;
  if (candidate) {
    return candidate;
  }
  return value.toString();
}

/**
 * Normalizes Gemini responses: applies retry headers, extracts cache usage into headers,
 * rewrites preview errors, rewrites streaming payloads, and logs debug metadata.
 */
export async function transformGeminiResponse(
  response: Response,
  streaming: boolean,
  debugContext?: GeminiDebugContext | null,
  requestedModel?: string,
): Promise<Response> {
  const contentType = response.headers.get("content-type") ?? "";
  const isJsonResponse = contentType.includes("application/json");
  const isEventStreamResponse = contentType.includes("text/event-stream");

  if (!isJsonResponse && !isEventStreamResponse) {
    logGeminiDebugResponse(debugContext, response, {
      note: "Non-JSON response (body omitted)",
    });
    return response;
  }

  try {
    const headers = new Headers(response.headers);

    if (streaming && response.ok && isEventStreamResponse && response.body) {
      logGeminiDebugResponse(debugContext, response, {
        note: "Streaming SSE payload (body omitted)",
        headersOverride: headers,
      });

      return new Response(transformStreamingPayloadStream(response.body), {
        status: response.status,
        statusText: response.statusText,
        headers,
      });
    }

    const text = await response.text();

    const init = {
      status: response.status,
      statusText: response.statusText,
      headers,
    };

    const parsed: GeminiApiBody | null = !streaming || !isEventStreamResponse ? parseGeminiApiBody(text) : null;
    const enhanced = !response.ok && parsed ? enhanceGeminiErrorResponse(parsed, response.status) : null;
    if (enhanced?.retryAfterMs) {
      const retryAfterSec = Math.ceil(enhanced.retryAfterMs / 1000).toString();
      headers.set("Retry-After", retryAfterSec);
      headers.set("retry-after-ms", String(enhanced.retryAfterMs));
    }
    const previewPatched = parsed
      ? rewriteGeminiPreviewAccessError(enhanced?.body ?? parsed, response.status, requestedModel)
      : null;
    const effectiveBody = previewPatched ?? enhanced?.body ?? parsed ?? undefined;

    const usage = effectiveBody ? extractUsageMetadata(effectiveBody) : null;
    if (usage?.cachedContentTokenCount !== undefined) {
      headers.set("x-gemini-cached-content-token-count", String(usage.cachedContentTokenCount));
      if (usage.totalTokenCount !== undefined) {
        headers.set("x-gemini-total-token-count", String(usage.totalTokenCount));
      }
      if (usage.promptTokenCount !== undefined) {
        headers.set("x-gemini-prompt-token-count", String(usage.promptTokenCount));
      }
      if (usage.candidatesTokenCount !== undefined) {
        headers.set("x-gemini-candidates-token-count", String(usage.candidatesTokenCount));
      }
    }

    logGeminiDebugResponse(debugContext, response, {
      body: text,
      note: streaming ? "Streaming SSE payload (buffered)" : undefined,
      headersOverride: headers,
    });

    if (!parsed) {
      return new Response(text, init);
    }

    if (effectiveBody?.response !== undefined) {
      return new Response(JSON.stringify(effectiveBody.response), init);
    }

    if (previewPatched) {
      return new Response(JSON.stringify(previewPatched), init);
    }

    return new Response(text, init);
  } catch (error) {
    logGeminiDebugResponse(debugContext, response, {
      error,
      note: "Failed to transform Gemini response",
    });
    console.error("Failed to transform Gemini response:", error);
    return response;
  }
}
```
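
For orientation, the two exports above are meant to be used as a pair around `fetch`: `prepareGeminiRequest` rewrites the outgoing request and `transformGeminiResponse` normalizes what comes back. The sketch below is illustrative only and is not code from the package; the name `createGeminiFetch` is made up here, and how the plugin actually obtains `accessToken` and `projectId` (its OAuth and project helpers) is assumed rather than shown.

```ts
// Illustrative sketch, not part of the released package.
// accessToken and projectId are assumed to come from the plugin's
// auth/project helpers; here they are plain parameters.
import { prepareGeminiRequest, transformGeminiResponse } from "./request";

export function createGeminiFetch(accessToken: string, projectId: string) {
  return async (input: RequestInfo, init?: RequestInit): Promise<Response> => {
    // Rewrite the OpenAI/Generative Language request into Code Assist shape.
    const { request, init: preparedInit, streaming, requestedModel } =
      prepareGeminiRequest(input, init, accessToken, projectId);
    const response = await fetch(request, preparedInit);
    // Unwrap/normalize the response (SSE rewriting, retry headers, usage headers).
    return transformGeminiResponse(response, streaming, null, requestedModel);
  };
}
```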