open-sse 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +180 -0
- package/config/constants.js +206 -0
- package/config/defaultThinkingSignature.js +7 -0
- package/config/ollamaModels.js +19 -0
- package/config/providerModels.js +161 -0
- package/handlers/chatCore.js +277 -0
- package/handlers/responsesHandler.js +69 -0
- package/index.js +69 -0
- package/package.json +44 -0
- package/services/accountFallback.js +148 -0
- package/services/combo.js +69 -0
- package/services/compact.js +64 -0
- package/services/model.js +109 -0
- package/services/provider.js +237 -0
- package/services/tokenRefresh.js +542 -0
- package/services/usage.js +398 -0
- package/translator/formats.js +12 -0
- package/translator/from-openai/claude.js +341 -0
- package/translator/from-openai/gemini.js +469 -0
- package/translator/from-openai/openai-responses.js +361 -0
- package/translator/helpers/claudeHelper.js +179 -0
- package/translator/helpers/geminiHelper.js +131 -0
- package/translator/helpers/openaiHelper.js +80 -0
- package/translator/helpers/responsesApiHelper.js +103 -0
- package/translator/helpers/toolCallHelper.js +111 -0
- package/translator/index.js +167 -0
- package/translator/to-openai/claude.js +238 -0
- package/translator/to-openai/gemini.js +151 -0
- package/translator/to-openai/openai-responses.js +140 -0
- package/translator/to-openai/openai.js +371 -0
- package/utils/bypassHandler.js +258 -0
- package/utils/error.js +133 -0
- package/utils/ollamaTransform.js +82 -0
- package/utils/requestLogger.js +217 -0
- package/utils/stream.js +274 -0
- package/utils/streamHandler.js +131 -0
|
@@ -0,0 +1,361 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Translator: OpenAI Chat Completions → OpenAI Responses API (response)
|
|
3
|
+
* Converts streaming chunks from Chat Completions to Responses API events
|
|
4
|
+
*/
|
|
5
|
+
import { register } from "../index.js";
|
|
6
|
+
import { FORMATS } from "../formats.js";
|
|
7
|
+
|
|
8
|
+
/**
 * Translate one OpenAI Chat Completions streaming chunk into an array of
 * OpenAI Responses API SSE events.
 *
 * Stateful across a stream: `state` carries the sequence counter, response id,
 * reasoning buffer, and per-index bookkeeping for messages and tool calls.
 *
 * @param {Object|null} chunk - Parsed Chat Completions chunk; falsy signals
 *   end-of-stream and flushes any still-open items.
 * @param {Object} state - Mutable per-stream translation state.
 * @returns {Array} Array of events with { event, data } structure
 */
function translateResponse(chunk, state) {
  if (!chunk) {
    // End of stream: close open message/reasoning/tool-call items and emit
    // response.completed if not already sent.
    return flushEvents(state);
  }

  // Keep-alive / usage-only chunks have no choices — nothing to translate.
  if (!chunk.choices?.length) return [];

  const events = [];
  const nextSeq = () => ++state.seq;

  // Every emitted event gets a monotonically increasing sequence_number.
  const emit = (eventType, data) => {
    data.sequence_number = nextSeq();
    events.push({ event: eventType, data });
  };

  const choice = chunk.choices[0];
  const idx = choice.index || 0;
  const delta = choice.delta || {};

  // Emit initial events (created / in_progress) exactly once per stream.
  if (!state.started) {
    state.started = true;
    // Derive the Responses API id from the first chunk id when available.
    state.responseId = chunk.id ? `resp_${chunk.id}` : state.responseId;

    emit("response.created", {
      type: "response.created",
      response: {
        id: state.responseId,
        object: "response",
        created_at: state.created,
        status: "in_progress",
        background: false,
        error: null,
        output: []
      }
    });

    emit("response.in_progress", {
      type: "response.in_progress",
      response: {
        id: state.responseId,
        object: "response",
        created_at: state.created,
        status: "in_progress"
      }
    });
  }

  // Handle reasoning_content (providers that stream reasoning as its own field)
  if (delta.reasoning_content) {
    startReasoning(state, emit, idx);
    emitReasoningDelta(state, emit, delta.reasoning_content);
  }

  // Handle text content, including inline <think>…</think> reasoning markers
  if (delta.content) {
    let content = delta.content;

    if (content.includes("<think>")) {
      state.inThinking = true;
      content = content.replace("<think>", "");
      startReasoning(state, emit, idx);
    }

    if (content.includes("</think>")) {
      const parts = content.split("</think>");
      const thinkPart = parts[0];
      // Re-join the tail in case the payload held more than one closing tag.
      const textPart = parts.slice(1).join("</think>");
      if (thinkPart) emitReasoningDelta(state, emit, thinkPart);
      closeReasoning(state, emit);
      state.inThinking = false;
      content = textPart;
    }

    // Entire remaining chunk is inside <think>: emit as reasoning, stop here.
    if (state.inThinking && content) {
      emitReasoningDelta(state, emit, content);
      return events;
    }

    if (content) {
      emitTextContent(state, emit, idx, content);
    }
  }

  // Handle tool_calls — close the text message (if open) before tool items.
  if (delta.tool_calls) {
    closeMessage(state, emit, idx);
    for (const tc of delta.tool_calls) {
      emitToolCall(state, emit, tc);
    }
  }

  // Handle finish_reason: close everything still open, then complete.
  // NOTE: for...in yields string keys; the close* helpers parseInt them.
  if (choice.finish_reason) {
    for (const i in state.msgItemAdded) closeMessage(state, emit, i);
    closeReasoning(state, emit);
    for (const i in state.funcCallIds) closeToolCall(state, emit, i);
    sendCompleted(state, emit);
  }

  return events;
}
|
|
115
|
+
|
|
116
|
+
// Helper functions
|
|
117
|
+
// Open a reasoning output item. Idempotent: only the first call emits the
// output_item.added / reasoning_summary_part.added pair.
function startReasoning(state, emit, idx) {
  if (state.reasoningId) return;

  const reasoningId = `rs_${state.responseId}_${idx}`;
  state.reasoningId = reasoningId;
  state.reasoningIndex = idx;

  emit("response.output_item.added", {
    type: "response.output_item.added",
    output_index: idx,
    item: { id: reasoningId, type: "reasoning", summary: [] }
  });

  emit("response.reasoning_summary_part.added", {
    type: "response.reasoning_summary_part.added",
    item_id: reasoningId,
    output_index: idx,
    summary_index: 0,
    part: { type: "summary_text", text: "" }
  });

  state.reasoningPartAdded = true;
}
|
|
138
|
+
|
|
139
|
+
// Append a reasoning text fragment to the running buffer and emit the
// corresponding summary_text delta event. Empty fragments are ignored.
function emitReasoningDelta(state, emit, text) {
  if (!text) return;

  state.reasoningBuf += text;

  const payload = {
    type: "response.reasoning_summary_text.delta",
    item_id: state.reasoningId,
    output_index: state.reasoningIndex,
    summary_index: 0,
    delta: text
  };
  emit("response.reasoning_summary_text.delta", payload);
}
|
|
150
|
+
|
|
151
|
+
// Close the open reasoning item: emit text.done, part.done and item.done with
// the accumulated buffer. No-op when nothing is open or already closed.
function closeReasoning(state, emit) {
  if (!state.reasoningId || state.reasoningDone) return;
  state.reasoningDone = true;

  const itemId = state.reasoningId;
  const outputIndex = state.reasoningIndex;
  const summaryText = state.reasoningBuf;

  emit("response.reasoning_summary_text.done", {
    type: "response.reasoning_summary_text.done",
    item_id: itemId,
    output_index: outputIndex,
    summary_index: 0,
    text: summaryText
  });

  emit("response.reasoning_summary_part.done", {
    type: "response.reasoning_summary_part.done",
    item_id: itemId,
    output_index: outputIndex,
    summary_index: 0,
    part: { type: "summary_text", text: summaryText }
  });

  emit("response.output_item.done", {
    type: "response.output_item.done",
    output_index: outputIndex,
    item: {
      id: itemId,
      type: "reasoning",
      summary: [{ type: "summary_text", text: summaryText }]
    }
  });
}
|
|
182
|
+
|
|
183
|
+
// Emit a text delta for output index `idx`, lazily opening the message item
// and its content part the first time text arrives for that index, and
// accumulating the full text for the eventual *.done events.
function emitTextContent(state, emit, idx, content) {
  const msgId = `msg_${state.responseId}_${idx}`;

  if (!state.msgItemAdded[idx]) {
    state.msgItemAdded[idx] = true;
    emit("response.output_item.added", {
      type: "response.output_item.added",
      output_index: idx,
      item: { id: msgId, type: "message", content: [], role: "assistant" }
    });
  }

  if (!state.msgContentAdded[idx]) {
    state.msgContentAdded[idx] = true;
    emit("response.content_part.added", {
      type: "response.content_part.added",
      item_id: msgId,
      output_index: idx,
      content_index: 0,
      part: { type: "output_text", annotations: [], logprobs: [], text: "" }
    });
  }

  emit("response.output_text.delta", {
    type: "response.output_text.delta",
    item_id: msgId,
    output_index: idx,
    content_index: 0,
    delta: content,
    logprobs: []
  });

  if (!state.msgTextBuf[idx]) state.msgTextBuf[idx] = "";
  state.msgTextBuf[idx] += content;
}
|
|
219
|
+
|
|
220
|
+
// Close an open message item: emit output_text.done, content_part.done and
// output_item.done carrying the full accumulated text. No-op when the item
// was never opened or is already closed.
//
// `idx` may arrive as a string key (callers iterate state maps with for...in),
// so it is normalised once to a number — with an explicit radix, fixing the
// original's three radix-less parseInt(idx) calls.
function closeMessage(state, emit, idx) {
  if (!state.msgItemAdded[idx] || state.msgItemDone[idx]) return;
  state.msgItemDone[idx] = true;

  const fullText = state.msgTextBuf[idx] || "";
  const msgId = `msg_${state.responseId}_${idx}`;
  const outputIndex = Number.parseInt(idx, 10);

  emit("response.output_text.done", {
    type: "response.output_text.done",
    item_id: msgId,
    output_index: outputIndex,
    content_index: 0,
    text: fullText,
    logprobs: []
  });

  emit("response.content_part.done", {
    type: "response.content_part.done",
    item_id: msgId,
    output_index: outputIndex,
    content_index: 0,
    part: { type: "output_text", annotations: [], logprobs: [], text: fullText }
  });

  emit("response.output_item.done", {
    type: "response.output_item.done",
    output_index: outputIndex,
    item: {
      id: msgId,
      type: "message",
      content: [{ type: "output_text", annotations: [], logprobs: [], text: fullText }],
      role: "assistant"
    }
  });
}
|
|
255
|
+
|
|
256
|
+
// Translate one tool_call delta. The first chunk carrying an id opens the
// function_call output item; subsequent chunks stream argument deltas, which
// are also buffered for the final *.done events.
function emitToolCall(state, emit, tc) {
  const slot = tc.index ?? 0;
  const incomingId = tc.id;
  const incomingName = tc.function?.name;

  if (incomingName) state.funcNames[slot] = incomingName;

  if (incomingId && !state.funcCallIds[slot]) {
    state.funcCallIds[slot] = incomingId;

    emit("response.output_item.added", {
      type: "response.output_item.added",
      output_index: slot,
      item: {
        id: `fc_${incomingId}`,
        type: "function_call",
        arguments: "",
        call_id: incomingId,
        name: state.funcNames[slot] || ""
      }
    });
  }

  if (!state.funcArgsBuf[slot]) state.funcArgsBuf[slot] = "";

  const argChunk = tc.function?.arguments;
  if (argChunk) {
    // Fall back to the id on this very chunk when the slot isn't registered.
    const callId = state.funcCallIds[slot] || incomingId;
    if (callId) {
      emit("response.function_call_arguments.delta", {
        type: "response.function_call_arguments.delta",
        item_id: `fc_${callId}`,
        output_index: slot,
        delta: argChunk
      });
    }
    state.funcArgsBuf[slot] += argChunk;
  }
}
|
|
294
|
+
|
|
295
|
+
// Close an open function_call item: emit arguments.done and output_item.done
// with the buffered argument string ("{}" when none arrived). No-op when no
// call id was registered for `idx` or the item is already done.
//
// `idx` may be a string key from for...in iteration; it is normalised once
// with an explicit radix, fixing the original's radix-less parseInt(idx).
function closeToolCall(state, emit, idx) {
  const callId = state.funcCallIds[idx];
  if (!callId || state.funcItemDone[idx]) return;

  const args = state.funcArgsBuf[idx] || "{}";
  const itemId = `fc_${callId}`;
  const outputIndex = Number.parseInt(idx, 10);

  emit("response.function_call_arguments.done", {
    type: "response.function_call_arguments.done",
    item_id: itemId,
    output_index: outputIndex,
    arguments: args
  });

  emit("response.output_item.done", {
    type: "response.output_item.done",
    output_index: outputIndex,
    item: {
      id: itemId,
      type: "function_call",
      arguments: args,
      call_id: callId,
      name: state.funcNames[idx] || ""
    }
  });

  state.funcItemDone[idx] = true;
  state.funcArgsDone[idx] = true;
}
|
|
323
|
+
|
|
324
|
+
// Emit the terminal response.completed event, at most once per stream.
function sendCompleted(state, emit) {
  if (state.completedSent) return;
  state.completedSent = true;

  const response = {
    id: state.responseId,
    object: "response",
    created_at: state.created,
    status: "completed",
    background: false,
    error: null
  };

  emit("response.completed", { type: "response.completed", response });
}
|
|
340
|
+
|
|
341
|
+
// End-of-stream flush: close every still-open message, reasoning and
// tool-call item, then emit response.completed. Returns an empty array when
// the stream already completed.
function flushEvents(state) {
  if (state.completedSent) return [];

  const events = [];
  const emit = (eventType, data) => {
    data.sequence_number = ++state.seq;
    events.push({ event: eventType, data });
  };

  for (const i in state.msgItemAdded) closeMessage(state, emit, i);
  closeReasoning(state, emit);
  for (const i in state.funcCallIds) closeToolCall(state, emit, i);
  sendCompleted(state, emit);

  return events;
}
|
|
358
|
+
|
|
359
|
+
// Register translator
|
|
360
|
+
// Register the streaming-response translator for OpenAI Chat Completions →
// Responses API. The request translator is null: only responses are mapped
// in this direction by this module.
register(FORMATS.OPENAI, FORMATS.OPENAI_RESPONSES, null, translateResponse);
|
|
361
|
+
|
|
@@ -0,0 +1,179 @@
|
|
|
1
|
+
// Claude helper functions for translator
|
|
2
|
+
import { DEFAULT_THINKING_CLAUDE_SIGNATURE } from "../../config/defaultThinkingSignature.js";
|
|
3
|
+
|
|
4
|
+
// Check if message has valid non-empty content
|
|
5
|
+
// A message counts as non-empty when it carries trimmed string text, or an
// array content containing trimmed text, tool_use, or tool_result blocks.
export function hasValidContent(msg) {
  const { content } = msg;

  if (typeof content === "string") {
    return Boolean(content.trim());
  }

  if (!Array.isArray(content)) {
    return false;
  }

  return content.some(
    (block) =>
      block.type === "tool_use" ||
      block.type === "tool_result" ||
      (block.type === "text" && Boolean(block.text?.trim()))
  );
}
|
|
16
|
+
|
|
17
|
+
// Fix tool_use/tool_result ordering for Claude API
|
|
18
|
+
// 1. Assistant message with tool_use: remove text AFTER tool_use (Claude doesn't allow)
|
|
19
|
+
// 2. Merge consecutive same-role messages
|
|
20
|
+
// Normalise message ordering for the Claude API:
//  1. In assistant messages that contain tool_use, drop text blocks that
//     appear AFTER the first tool_use (Claude rejects trailing text there);
//     thinking / redacted_thinking blocks are always kept.
//  2. Collapse consecutive same-role messages into one, moving tool_result
//     blocks to the front of the merged content.
// Single-element (or empty) inputs are returned as-is, untouched.
export function fixToolUseOrdering(messages) {
  if (messages.length <= 1) return messages;

  // Pass 1: strip text blocks that follow a tool_use in assistant messages.
  for (const msg of messages) {
    if (msg.role !== "assistant" || !Array.isArray(msg.content)) continue;
    if (!msg.content.some((b) => b.type === "tool_use")) continue;

    let seenToolUse = false;
    const kept = [];
    for (const block of msg.content) {
      if (block.type === "tool_use") {
        seenToolUse = true;
        kept.push(block);
      } else if (block.type === "thinking" || block.type === "redacted_thinking") {
        kept.push(block);
      } else if (!seenToolUse) {
        kept.push(block); // text before the first tool_use is allowed
      }
      // any other block after a tool_use is dropped
    }
    msg.content = kept;
  }

  // Pass 2: merge consecutive same-role messages.
  const asBlocks = (content) =>
    Array.isArray(content) ? content : [{ type: "text", text: content }];

  const merged = [];
  for (const msg of messages) {
    const prev = merged[merged.length - 1];

    if (prev && prev.role === msg.role) {
      const combined = [...asBlocks(prev.content), ...asBlocks(msg.content)];
      // tool_result blocks must lead the merged content.
      const toolResults = combined.filter((b) => b.type === "tool_result");
      const others = combined.filter((b) => b.type !== "tool_result");
      prev.content = [...toolResults, ...others];
    } else {
      merged.push({ role: msg.role, content: [...asBlocks(msg.content)] });
    }
  }

  return merged;
}
|
|
75
|
+
|
|
76
|
+
// Prepare request for Claude format endpoints
|
|
77
|
+
// - Cleanup cache_control
|
|
78
|
+
// - Filter empty messages
|
|
79
|
+
// - Add thinking block for Anthropic endpoint (provider === "claude")
|
|
80
|
+
// - Fix tool_use/tool_result ordering
|
|
81
|
+
// Prepare a request body for Claude-format endpoints. Mutates and returns
// `body`. Steps, in order:
//   1. system: strip all cache_control, re-add one on the last block (ttl 1h)
//   2. messages: strip block-level cache_control, drop empty messages (except
//      a final assistant), fix tool_use/tool_result ordering, cache-mark the
//      last assistant, and patch thinking blocks for the Anthropic endpoint
//   3. tools: strip all cache_control, re-add one on the last tool (ttl 1h)
//
// @param {Object} body - Claude-format request body (mutated in place).
// @param {string|null} provider - "claude" enables Anthropic-only thinking
//   fixups; any other value skips them.
// @returns {Object} The same `body` object, for chaining.
export function prepareClaudeRequest(body, provider = null) {
  // 1. System: remove all cache_control, add only to last block with ttl 1h
  if (body.system && Array.isArray(body.system)) {
    body.system = body.system.map((block, i) => {
      const { cache_control, ...rest } = block;
      if (i === body.system.length - 1) {
        return { ...rest, cache_control: { type: "ephemeral", ttl: "1h" } };
      }
      return rest;
    });
  }

  // 2. Messages: process in optimized passes
  if (body.messages && Array.isArray(body.messages)) {
    const len = body.messages.length;
    let filtered = [];

    // Pass 1: remove cache_control + filter empty messages
    for (let i = 0; i < len; i++) {
      const msg = body.messages[i];

      // Remove cache_control from content blocks (mutates caller's blocks)
      if (Array.isArray(msg.content)) {
        for (const block of msg.content) {
          delete block.cache_control;
        }
      }

      // Keep final assistant even if empty (it is a prefill), otherwise
      // require valid non-empty content.
      const isFinalAssistant = i === len - 1 && msg.role === "assistant";
      if (isFinalAssistant || hasValidContent(msg)) {
        filtered.push(msg);
      }
    }

    // Pass 1.5: Fix tool_use/tool_result ordering
    // Each tool_use must have tool_result in the NEXT message (not same message with other content)
    filtered = fixToolUseOrdering(filtered);

    body.messages = filtered;

    // Thinking is only injected when enabled AND the last message is a user
    // turn (i.e. the assistant is about to produce a fresh response).
    const lastMessage = filtered[filtered.length - 1];
    const lastMessageIsUser = lastMessage?.role === "user";
    const thinkingEnabled = body.thinking?.type === "enabled" && lastMessageIsUser;

    // Pass 2 (reverse): add cache_control to last assistant + handle thinking for Anthropic
    let lastAssistantProcessed = false;
    for (let i = filtered.length - 1; i >= 0; i--) {
      const msg = filtered[i];

      if (msg.role === "assistant" && Array.isArray(msg.content)) {
        // Add cache_control to last block of first (from end) assistant with content
        if (!lastAssistantProcessed && msg.content.length > 0) {
          msg.content[msg.content.length - 1].cache_control = { type: "ephemeral" };
          lastAssistantProcessed = true;
        }

        // Handle thinking blocks for Anthropic endpoint only
        if (provider === "claude") {
          let hasToolUse = false;
          let hasThinking = false;

          // Always replace signature for all thinking blocks.
          // NOTE(review): overwriting signatures with a shared default
          // presumably satisfies Anthropic's signature validation — confirm.
          for (const block of msg.content) {
            if (block.type === "thinking" || block.type === "redacted_thinking") {
              block.signature = DEFAULT_THINKING_CLAUDE_SIGNATURE;
              hasThinking = true;
            }
            if (block.type === "tool_use") hasToolUse = true;
          }

          // Add thinking block if thinking enabled + has tool_use but no thinking
          if (thinkingEnabled && !hasThinking && hasToolUse) {
            msg.content.unshift({
              type: "thinking",
              thinking: ".",
              signature: DEFAULT_THINKING_CLAUDE_SIGNATURE
            });
          }
        }
      }
    }
  }

  // 3. Tools: remove all cache_control, add only to last tool with ttl 1h
  if (body.tools && Array.isArray(body.tools)) {
    body.tools = body.tools.map((tool, i) => {
      const { cache_control, ...rest } = tool;
      if (i === body.tools.length - 1) {
        return { ...rest, cache_control: { type: "ephemeral", ttl: "1h" } };
      }
      return rest;
    });
  }

  return body;
}
|
|
179
|
+
|
|
@@ -0,0 +1,131 @@
|
|
|
1
|
+
// Gemini helper functions for translator
|
|
2
|
+
|
|
3
|
+
// Unsupported JSON Schema constraints that should be removed for Antigravity
|
|
4
|
+
// JSON Schema keywords stripped by cleanJSONSchemaForAntigravity because the
// Antigravity API rejects (or ignores) them in tool parameter schemas.
export const UNSUPPORTED_SCHEMA_CONSTRAINTS = [
  "minLength", "maxLength", "exclusiveMinimum", "exclusiveMaximum",
  "pattern", "minItems", "maxItems", "format",
  "default", "examples", "$schema", "const"
];
|
|
9
|
+
|
|
10
|
+
// Default safety settings
|
|
11
|
+
// Default Gemini safety settings: threshold "OFF" disables blocking for each
// listed harm category. NOTE(review): presumably intentional for a proxy that
// delegates filtering upstream — confirm before tightening.
export const DEFAULT_SAFETY_SETTINGS = [
  { category: "HARM_CATEGORY_HATE_SPEECH", threshold: "OFF" },
  { category: "HARM_CATEGORY_DANGEROUS_CONTENT", threshold: "OFF" },
  { category: "HARM_CATEGORY_SEXUALLY_EXPLICIT", threshold: "OFF" },
  { category: "HARM_CATEGORY_HARASSMENT", threshold: "OFF" },
  { category: "HARM_CATEGORY_CIVIC_INTEGRITY", threshold: "OFF" }
];
|
|
18
|
+
|
|
19
|
+
// Convert OpenAI content to Gemini parts
|
|
20
|
+
// Convert OpenAI message content (plain string or block array) into Gemini
// parts. Only text blocks and base64 data-URL images are carried over; other
// block types and non-data image URLs are skipped.
export function convertOpenAIContentToParts(content) {
  if (typeof content === "string") {
    return [{ text: content }];
  }
  if (!Array.isArray(content)) {
    return [];
  }

  const parts = [];
  for (const item of content) {
    if (item.type === "text") {
      parts.push({ text: item.text });
      continue;
    }
    if (item.type !== "image_url") continue;

    const url = item.image_url?.url;
    if (!url?.startsWith("data:")) continue;

    const match = url.match(/^data:([^;]+);base64,(.+)$/);
    if (match) {
      parts.push({ inlineData: { mime_type: match[1], data: match[2] } });
    }
  }

  return parts;
}
|
|
42
|
+
|
|
43
|
+
// Extract text content from OpenAI content
|
|
44
|
+
// Flatten OpenAI message content to a single string: strings pass through,
// arrays contribute only their text blocks, anything else yields "".
export function extractTextContent(content) {
  if (typeof content === "string") return content;
  if (!Array.isArray(content)) return "";

  const texts = [];
  for (const block of content) {
    if (block.type === "text") texts.push(block.text);
  }
  return texts.join("");
}
|
|
51
|
+
|
|
52
|
+
// Try parse JSON safely
|
|
53
|
+
// Safe JSON parse: strings are parsed (null on failure), non-strings are
// returned unchanged.
export function tryParseJSON(str) {
  if (typeof str === "string") {
    try {
      return JSON.parse(str);
    } catch {
      return null;
    }
  }
  return str;
}
|
|
61
|
+
|
|
62
|
+
// Generate request ID
|
|
63
|
+
export function generateRequestId() {
|
|
64
|
+
return `agent-${crypto.randomUUID()}`;
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
// Generate session ID
|
|
68
|
+
export function generateSessionId() {
|
|
69
|
+
return `-${Math.floor(Math.random() * 9000000000000000000)}`;
|
|
70
|
+
}
|
|
71
|
+
|
|
72
|
+
// Generate project ID
|
|
73
|
+
export function generateProjectId() {
|
|
74
|
+
const adjectives = ["useful", "bright", "swift", "calm", "bold"];
|
|
75
|
+
const nouns = ["fuze", "wave", "spark", "flow", "core"];
|
|
76
|
+
const adj = adjectives[Math.floor(Math.random() * adjectives.length)];
|
|
77
|
+
const noun = nouns[Math.floor(Math.random() * nouns.length)];
|
|
78
|
+
return `${adj}-${noun}-${crypto.randomUUID().slice(0, 5)}`;
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
// Clean JSON Schema for Antigravity API compatibility - removes unsupported keywords recursively
|
|
82
|
+
// Clean a JSON Schema for Antigravity API compatibility. Recursively removes
// unsupported constraint keywords, collapses type unions like
// ["string","null"] to a single type, prunes `required` entries that have no
// matching property, and injects a placeholder property into empty object
// schemas (Antigravity requirement).
//
// Fix: `isPropertyMap` is true when `schema` is the VALUE of a `properties`
// keyword — its keys are user-chosen property NAMES, not schema keywords.
// The original filtered those too, so a tool property literally named
// "format", "pattern" or "default" was silently dropped from the schema.
// The parameter defaults to false, keeping the public call signature intact.
//
// @param {*} schema - Schema node (object, array, or scalar — scalars pass through).
// @param {boolean} [isPropertyMap=false] - Internal recursion flag.
// @returns {*} Cleaned copy of the schema (input is not mutated).
export function cleanJSONSchemaForAntigravity(schema, isPropertyMap = false) {
  if (!schema || typeof schema !== "object") return schema;

  const cleaned = Array.isArray(schema) ? [] : {};

  for (const [key, value] of Object.entries(schema)) {
    // Strip unsupported keywords — but only where keys are schema keywords.
    if (!isPropertyMap && UNSUPPORTED_SCHEMA_CONSTRAINTS.includes(key)) continue;

    // Handle type array like ["string", "null"] - Gemini only supports single type
    if (!isPropertyMap && key === "type" && Array.isArray(value)) {
      cleaned[key] = value.find(t => t !== "null") || "string";
      continue;
    }

    if (value && typeof value === "object") {
      // Children of a `properties` object are keyed by property name.
      cleaned[key] = cleanJSONSchemaForAntigravity(value, !isPropertyMap && key === "properties");
    } else {
      cleaned[key] = value;
    }
  }

  // The remaining fixups are schema-level; a property map is not a schema.
  if (isPropertyMap) return cleaned;

  // Cleanup required fields - only keep fields that exist in properties
  if (cleaned.required && Array.isArray(cleaned.required) && cleaned.properties) {
    const validRequired = cleaned.required.filter(field =>
      Object.prototype.hasOwnProperty.call(cleaned.properties, field)
    );
    if (validRequired.length === 0) {
      delete cleaned.required;
    } else {
      cleaned.required = validRequired;
    }
  }

  // Add placeholder for empty object schemas (Antigravity requirement)
  if (cleaned.type === "object") {
    if (!cleaned.properties || Object.keys(cleaned.properties).length === 0) {
      cleaned.properties = {
        reason: {
          type: "string",
          description: "Brief explanation of why you are calling this tool"
        }
      };
      cleaned.required = ["reason"];
    }
  }

  return cleaned;
}
|
|
131
|
+
|