open-sse 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +180 -0
- package/config/constants.js +206 -0
- package/config/defaultThinkingSignature.js +7 -0
- package/config/ollamaModels.js +19 -0
- package/config/providerModels.js +161 -0
- package/handlers/chatCore.js +277 -0
- package/handlers/responsesHandler.js +69 -0
- package/index.js +69 -0
- package/package.json +44 -0
- package/services/accountFallback.js +148 -0
- package/services/combo.js +69 -0
- package/services/compact.js +64 -0
- package/services/model.js +109 -0
- package/services/provider.js +237 -0
- package/services/tokenRefresh.js +542 -0
- package/services/usage.js +398 -0
- package/translator/formats.js +12 -0
- package/translator/from-openai/claude.js +341 -0
- package/translator/from-openai/gemini.js +469 -0
- package/translator/from-openai/openai-responses.js +361 -0
- package/translator/helpers/claudeHelper.js +179 -0
- package/translator/helpers/geminiHelper.js +131 -0
- package/translator/helpers/openaiHelper.js +80 -0
- package/translator/helpers/responsesApiHelper.js +103 -0
- package/translator/helpers/toolCallHelper.js +111 -0
- package/translator/index.js +167 -0
- package/translator/to-openai/claude.js +238 -0
- package/translator/to-openai/gemini.js +151 -0
- package/translator/to-openai/openai-responses.js +140 -0
- package/translator/to-openai/openai.js +371 -0
- package/utils/bypassHandler.js +258 -0
- package/utils/error.js +133 -0
- package/utils/ollamaTransform.js +82 -0
- package/utils/requestLogger.js +217 -0
- package/utils/stream.js +274 -0
- package/utils/streamHandler.js +131 -0
|
@@ -0,0 +1,151 @@
|
|
|
1
|
+
import { register } from "../index.js";
|
|
2
|
+
import { FORMATS } from "../formats.js";
|
|
3
|
+
|
|
4
|
+
// Translate a Gemini generateContent-style request into an OpenAI
// chat-completions request: generationConfig → sampling params,
// systemInstruction → leading system message, contents → messages,
// functionDeclarations → OpenAI tools.
function geminiToOpenAI(model, body, stream) {
  const request = { model: model, messages: [], stream: stream };

  // Sampling parameters live under generationConfig on the Gemini side.
  const genCfg = body.generationConfig;
  if (genCfg) {
    if (genCfg.maxOutputTokens) {
      request.max_tokens = genCfg.maxOutputTokens;
    }
    if (genCfg.temperature !== undefined) {
      request.temperature = genCfg.temperature;
    }
    if (genCfg.topP !== undefined) {
      request.top_p = genCfg.topP;
    }
  }

  // systemInstruction carries only text; emit it as a system message when non-empty.
  if (body.systemInstruction) {
    const sysText = extractGeminiText(body.systemInstruction);
    if (sysText) {
      request.messages.push({ role: "system", content: sysText });
    }
  }

  // Each Gemini content entry maps to at most one OpenAI message.
  if (Array.isArray(body.contents)) {
    for (const entry of body.contents) {
      const message = convertGeminiContent(entry);
      if (message) {
        request.messages.push(message);
      }
    }
  }

  // Flatten Gemini tool groups (each may hold several functionDeclarations)
  // into the OpenAI {type: "function", function: {...}} shape.
  if (Array.isArray(body.tools)) {
    request.tools = [];
    for (const toolGroup of body.tools) {
      const declarations = toolGroup.functionDeclarations;
      if (!declarations) {
        continue;
      }
      for (const decl of declarations) {
        request.tools.push({
          type: "function",
          function: {
            name: decl.name,
            description: decl.description || "",
            parameters: decl.parameters || { type: "object", properties: {} }
          }
        });
      }
    }
  }

  return request;
}
|
|
68
|
+
|
|
69
|
+
// Convert a single Gemini content entry into an OpenAI chat message.
// Returns null when the entry carries nothing representable.
function convertGeminiContent(content) {
  const role = content.role === "user" ? "user" : "assistant";

  if (!content.parts || !Array.isArray(content.parts)) {
    return null;
  }

  const parts = [];
  const toolCalls = [];

  for (const part of content.parts) {
    // Text
    if (part.text !== undefined) {
      parts.push({ type: "text", text: part.text });
    }

    // Image
    if (part.inlineData) {
      parts.push({
        type: "image_url",
        image_url: {
          url: `data:${part.inlineData.mimeType};base64,${part.inlineData.data}`
        }
      });
    }

    // Function call — Gemini has no call id, so synthesize a unique one.
    if (part.functionCall) {
      toolCalls.push({
        id: `call_${Date.now()}_${Math.random().toString(36).slice(2, 8)}`,
        type: "function",
        function: {
          name: part.functionCall.name,
          arguments: JSON.stringify(part.functionCall.args || {})
        }
      });
    }

    // Function response - use id if available, fallback to name.
    // NOTE(review): returns immediately, discarding any other parts that
    // share this entry — confirm Gemini never mixes them.
    if (part.functionResponse) {
      return {
        role: "tool",
        tool_call_id: part.functionResponse.id || part.functionResponse.name,
        content: JSON.stringify(part.functionResponse.response?.result || part.functionResponse.response || {})
      };
    }
  }

  // Has tool calls
  if (toolCalls.length > 0) {
    const result = { role: "assistant" };
    if (parts.length > 0) {
      // Fix: only collapse to a bare string when the single part is text.
      // Previously `parts[0].text` was used unconditionally, so a lone
      // image part produced `content: undefined`.
      result.content = parts.length === 1 && parts[0].type === "text" ? parts[0].text : parts;
    }
    result.tool_calls = toolCalls;
    return result;
  }

  // Regular message
  if (parts.length > 0) {
    return {
      role,
      content: parts.length === 1 && parts[0].type === "text" ? parts[0].text : parts
    };
  }

  return null;
}
|
|
138
|
+
|
|
139
|
+
// Pull the plain-text payload out of a Gemini content value, which may be
// either a bare string or a { parts: [...] } object. Non-text parts
// contribute nothing; unknown shapes yield "".
function extractGeminiText(content) {
  if (typeof content === "string") {
    return content;
  }
  const parts = content.parts;
  if (!Array.isArray(parts)) {
    return "";
  }
  let text = "";
  for (const part of parts) {
    text += part.text || "";
  }
  return text;
}
|
|
147
|
+
|
|
148
|
+
// Register
// Request-direction translators only; the trailing null presumably means no
// response translator is registered here — confirm against translator/index.js.
register(FORMATS.GEMINI, FORMATS.OPENAI, geminiToOpenAI, null);
register(FORMATS.GEMINI_CLI, FORMATS.OPENAI, geminiToOpenAI, null);
|
|
151
|
+
|
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Translator: OpenAI Responses API → OpenAI Chat Completions
|
|
3
|
+
*
|
|
4
|
+
* Responses API uses: { input: [...], instructions: "..." }
|
|
5
|
+
* Chat API uses: { messages: [...] }
|
|
6
|
+
*/
|
|
7
|
+
import { register } from "../index.js";
|
|
8
|
+
import { FORMATS } from "../formats.js";
|
|
9
|
+
|
|
10
|
+
/**
 * Convert OpenAI Responses API request to OpenAI Chat Completions format.
 *
 * @param {string} model - target model id (unused; kept for translator signature)
 * @param {object} body - Responses API request body ({ input, instructions, ... })
 * @param {boolean} stream - whether streaming was requested (unused here)
 * @param {object} credentials - provider credentials (unused here)
 * @returns {object} Chat Completions-shaped request body
 */
function translateRequest(model, body, stream, credentials) {
  if (!body.input) return body;

  const result = { ...body };
  result.messages = [];

  // Convert instructions to system message
  if (body.instructions) {
    result.messages.push({ role: "system", content: body.instructions });
  }

  // Responses API also allows `input` to be a bare string; treat it as a
  // single user message instead of iterating it character by character.
  const inputItems = typeof body.input === "string"
    ? [{ type: "message", role: "user", content: body.input }]
    : body.input;

  // Consecutive function_call items are folded into one assistant message.
  // (The original also tracked a `pendingToolResults` buffer that nothing
  // ever populated; that dead code has been removed.)
  let currentAssistantMsg = null;

  const flushAssistant = () => {
    if (currentAssistantMsg) {
      result.messages.push(currentAssistantMsg);
      currentAssistantMsg = null;
    }
  };

  for (const item of inputItems) {
    if (item.type === "message") {
      flushAssistant();

      // Convert content: input_text → text, output_text → text
      const content = Array.isArray(item.content)
        ? item.content.map(c => {
            if (c.type === "input_text") return { type: "text", text: c.text };
            if (c.type === "output_text") return { type: "text", text: c.text };
            return c;
          })
        : item.content;
      result.messages.push({ role: item.role, content });
    } else if (item.type === "function_call") {
      // Start or append to an assistant message carrying tool_calls.
      if (!currentAssistantMsg) {
        currentAssistantMsg = {
          role: "assistant",
          content: null,
          tool_calls: []
        };
      }
      currentAssistantMsg.tool_calls.push({
        id: item.call_id,
        type: "function",
        function: {
          name: item.name,
          arguments: item.arguments
        }
      });
    } else if (item.type === "function_call_output") {
      // The tool result must follow the assistant message that issued the call.
      flushAssistant();
      result.messages.push({
        role: "tool",
        tool_call_id: item.call_id,
        content: typeof item.output === "string" ? item.output : JSON.stringify(item.output)
      });
    }
    // "reasoning" items (and any unknown types) are display-only: skip them.
  }

  // Flush a trailing assistant message.
  flushAssistant();

  // Responses API tools: { type: "function", name, description, parameters }
  // Chat API tools:      { type: "function", function: { name, description, parameters } }
  if (body.tools && Array.isArray(body.tools)) {
    result.tools = body.tools.map(tool => {
      // Already has the function wrapper — pass through.
      if (tool.function) return tool;
      // Responses API flat format: wrap it.
      return {
        type: "function",
        function: {
          name: tool.name,
          description: tool.description,
          parameters: tool.parameters,
          strict: tool.strict
        }
      };
    });
  }

  // Strip Responses-API-specific fields the Chat API would reject.
  delete result.input;
  delete result.instructions;
  delete result.include;
  delete result.prompt_cache_key;
  delete result.store;
  delete result.reasoning;

  return result;
}
|
|
137
|
+
|
|
138
|
+
// Register translator
// Request direction only; the trailing null presumably means no response
// translator is needed for this pair — confirm against translator/index.js.
register(FORMATS.OPENAI_RESPONSES, FORMATS.OPENAI, translateRequest, null);
|
|
140
|
+
|
|
@@ -0,0 +1,371 @@
|
|
|
1
|
+
import { register } from "../index.js";
|
|
2
|
+
import { FORMATS } from "../formats.js";
|
|
3
|
+
import { CLAUDE_SYSTEM_PROMPT, DEFAULT_MAX_TOKENS } from "../../config/constants.js";
|
|
4
|
+
|
|
5
|
+
// Convert OpenAI request to Claude (Anthropic Messages API) format.
// System messages are hoisted into `system`, adjacent same-role messages are
// merged, and tool_result blocks are isolated into their own user messages
// (Claude requires tool_result immediately after the tool_use turn).
function openaiToClaude(model, body, stream) {
  const result = {
    model: model,
    // NOTE(review): `||` means max_tokens: 0 also falls back to DEFAULT_MAX_TOKENS.
    max_tokens: body.max_tokens || DEFAULT_MAX_TOKENS,
    stream: stream
  };

  // Temperature
  if (body.temperature !== undefined) {
    result.temperature = body.temperature;
  }

  // Messages
  result.messages = [];
  const systemParts = [];

  if (body.messages && Array.isArray(body.messages)) {
    // Extract system messages (joined later into result.system)
    for (const msg of body.messages) {
      if (msg.role === "system") {
        systemParts.push(typeof msg.content === "string" ? msg.content : extractTextContent(msg.content));
      }
    }

    // Filter out system messages for separate processing
    const nonSystemMessages = body.messages.filter(m => m.role !== "system");

    // Process messages with merging logic: accumulate content blocks per role
    // and emit a message each time the role flips.
    // CRITICAL: tool_result must be in separate message immediately after tool_use
    let currentRole = undefined;
    let currentParts = [];

    // Emit the accumulated blocks (if any) as one message and reset the buffer.
    const flushCurrentMessage = () => {
      if (currentRole && currentParts.length > 0) {
        result.messages.push({ role: currentRole, content: currentParts });
        currentParts = [];
      }
    };

    for (const msg of nonSystemMessages) {
      // OpenAI "tool" messages become Claude user messages (tool_result blocks).
      const newRole = (msg.role === "user" || msg.role === "tool") ? "user" : "assistant";
      const blocks = getContentBlocksFromMessage(msg);
      const hasToolUse = blocks.some(b => b.type === "tool_use");
      const hasToolResult = blocks.some(b => b.type === "tool_result");

      // Separate tool_result from other content
      if (hasToolResult) {
        const toolResultBlocks = blocks.filter(b => b.type === "tool_result");
        const otherBlocks = blocks.filter(b => b.type !== "tool_result");

        // Flush current message first
        flushCurrentMessage();

        // Add tool_result as separate user message
        if (toolResultBlocks.length > 0) {
          result.messages.push({ role: "user", content: toolResultBlocks });
        }

        // Add other blocks to current parts for next message
        if (otherBlocks.length > 0) {
          currentRole = newRole;
          currentParts.push(...otherBlocks);
        }
        continue;
      }

      // Role changed: close the previous message before accumulating.
      if (currentRole !== newRole) {
        flushCurrentMessage();
        currentRole = newRole;
      }

      currentParts.push(...blocks);

      // A tool_use turn must end its message so the tool_result can follow it.
      if (hasToolUse) {
        flushCurrentMessage();
      }
    }

    flushCurrentMessage();

    // Add cache_control to last assistant message (like worker.old) —
    // scan backwards and mark only the final block of that message.
    for (let i = result.messages.length - 1; i >= 0; i--) {
      const message = result.messages[i];
      if (message.role === "assistant" && Array.isArray(message.content) && message.content.length > 0) {
        const lastBlock = message.content[message.content.length - 1];
        if (lastBlock) {
          lastBlock.cache_control = { type: "ephemeral" };
          break;
        }
      }
    }
  }

  // System with Claude Code prompt and cache_control: the fixed
  // CLAUDE_SYSTEM_PROMPT always comes first, then the caller's system text.
  const claudeCodePrompt = { type: "text", text: CLAUDE_SYSTEM_PROMPT };

  if (systemParts.length > 0) {
    const systemText = systemParts.join("\n");
    result.system = [
      claudeCodePrompt,
      { type: "text", text: systemText, cache_control: { type: "ephemeral", ttl: "1h" } }
    ];
  } else {
    result.system = [claudeCodePrompt];
  }

  // Tools - convert from OpenAI format to Claude format
  if (body.tools && Array.isArray(body.tools)) {
    result.tools = body.tools.map(tool => {
      // Handle both OpenAI format {type: "function", function: {...}} and direct format
      const toolData = tool.type === "function" && tool.function ? tool.function : tool;
      return {
        name: toolData.name,
        description: toolData.description || "",
        input_schema: toolData.parameters || toolData.input_schema || { type: "object", properties: {}, required: [] }
      };
    });

    // Add cache control to last tool (like worker.old)
    if (result.tools.length > 0) {
      result.tools[result.tools.length - 1].cache_control = { type: "ephemeral", ttl: "1h" };
    }

    // console.log("[CLAUDE TOOLS DEBUG] Converted tools:", result.tools.map(t => t.name));
  }

  // Tool choice
  if (body.tool_choice) {
    result.tool_choice = convertOpenAIToolChoice(body.tool_choice);
  }

  return result;
}
|
|
139
|
+
|
|
140
|
+
// Convert OpenAI request to Gemini format.
// max_tokens/temperature/top_p map to generationConfig; system messages
// become systemInstruction; tool messages become functionResponse parts;
// OpenAI tools become one functionDeclarations group.
function openaiToGemini(model, body, stream) {
  const result = {
    contents: [],
    generationConfig: {}
  };

  // Generation config
  if (body.max_tokens) {
    result.generationConfig.maxOutputTokens = body.max_tokens;
  }
  if (body.temperature !== undefined) {
    result.generationConfig.temperature = body.temperature;
  }
  if (body.top_p !== undefined) {
    result.generationConfig.topP = body.top_p;
  }

  // Messages
  const systemTexts = [];
  if (body.messages && Array.isArray(body.messages)) {
    for (const msg of body.messages) {
      if (msg.role === "system") {
        // Fix: collect ALL system messages. Previously each one overwrote
        // result.systemInstruction, so only the last survived — unlike
        // openaiToClaude, which joins them with "\n".
        systemTexts.push(typeof msg.content === "string" ? msg.content : extractTextContent(msg.content));
      } else if (msg.role === "tool") {
        result.contents.push({
          role: "function",
          parts: [{
            functionResponse: {
              // NOTE(review): Gemini expects the function *name* here, but only
              // the OpenAI tool_call_id is available at this point — confirm.
              name: msg.tool_call_id,
              response: tryParseJSON(msg.content)
            }
          }]
        });
      } else {
        const converted = convertOpenAIToGeminiContent(msg);
        if (converted) {
          result.contents.push(converted);
        }
      }
    }
  }

  // Joined system text (single-message requests behave exactly as before).
  if (systemTexts.length > 0) {
    result.systemInstruction = {
      parts: [{ text: systemTexts.join("\n") }]
    };
  }

  // Tools
  if (body.tools && Array.isArray(body.tools)) {
    const validTools = body.tools.filter(tool => tool && tool.function && tool.function.name);
    if (validTools.length > 0) {
      result.tools = [{
        functionDeclarations: validTools.map(tool => ({
          name: tool.function.name,
          description: tool.function.description || "",
          parameters: tool.function.parameters || { type: "object", properties: {} }
        }))
      }];
    }
  }

  return result;
}
|
|
200
|
+
|
|
201
|
+
// Flatten one OpenAI chat message into Claude-style content blocks
// (text / image / tool_use / tool_result). Unknown roles yield [].
function getContentBlocksFromMessage(msg) {
  const blocks = [];

  // A tool message maps 1:1 to a single tool_result block.
  if (msg.role === "tool") {
    return [{
      type: "tool_result",
      tool_use_id: msg.tool_call_id,
      content: msg.content
    }];
  }

  if (msg.role === "user") {
    const content = msg.content;
    if (typeof content === "string") {
      if (content) {
        blocks.push({ type: "text", text: content });
      }
      return blocks;
    }
    if (!Array.isArray(content)) {
      return blocks;
    }
    for (const part of content) {
      if (part.type === "text" && part.text) {
        blocks.push({ type: "text", text: part.text });
      } else if (part.type === "tool_result") {
        const block = {
          type: "tool_result",
          tool_use_id: part.tool_use_id,
          content: part.content
        };
        // is_error is only carried through when set.
        if (part.is_error) {
          block.is_error = part.is_error;
        }
        blocks.push(block);
      } else if (part.type === "image_url") {
        // Only base64 data: URLs can become Claude image blocks.
        const match = part.image_url.url.match(/^data:([^;]+);base64,(.+)$/);
        if (match) {
          blocks.push({
            type: "image",
            source: { type: "base64", media_type: match[1], data: match[2] }
          });
        }
      } else if (part.type === "image" && part.source) {
        // Already in Claude image shape — pass through.
        blocks.push({ type: "image", source: part.source });
      }
    }
    return blocks;
  }

  if (msg.role === "assistant") {
    // Anthropic-style array content: keep text and tool_use blocks.
    if (Array.isArray(msg.content)) {
      for (const part of msg.content) {
        if (part.type === "text" && part.text) {
          blocks.push({ type: "text", text: part.text });
        } else if (part.type === "tool_use") {
          blocks.push({ type: "tool_use", id: part.id, name: part.name, input: part.input });
        }
      }
    } else if (msg.content) {
      const text = typeof msg.content === "string" ? msg.content : extractTextContent(msg.content);
      if (text) {
        blocks.push({ type: "text", text });
      }
    }

    // OpenAI-style tool_calls array → tool_use blocks.
    const calls = Array.isArray(msg.tool_calls) ? msg.tool_calls : [];
    for (const call of calls) {
      if (call.type !== "function") {
        continue;
      }
      blocks.push({
        type: "tool_use",
        id: call.id,
        name: call.function.name,
        input: tryParseJSON(call.function.arguments)
      });
    }
  }

  return blocks;
}
|
|
275
|
+
|
|
276
|
+
// Convert single OpenAI message to Claude format (for backward compatibility).
// Fix: the original called convertOpenAIMessageContent(), which is not defined
// anywhere in this module, so any invocation threw a ReferenceError. The
// Claude content blocks this function's contract describes are exactly what
// getContentBlocksFromMessage() produces, so delegate to it.
function convertOpenAIMessage(msg) {
  const role = msg.role === "assistant" ? "assistant" : "user";
  const content = getContentBlocksFromMessage(msg);

  if (content.length === 0) return null;

  return { role, content };
}
|
|
285
|
+
|
|
286
|
+
// Convert an OpenAI chat message into a Gemini content entry
// ({ role, parts }). Returns null when nothing maps.
function convertOpenAIToGeminiContent(msg) {
  const parts = [];

  // Text / multimodal content
  const content = msg.content;
  if (typeof content === "string") {
    if (content) {
      parts.push({ text: content });
    }
  } else if (Array.isArray(content)) {
    for (const piece of content) {
      if (piece.type === "text") {
        parts.push({ text: piece.text });
      } else if (piece.type === "image_url") {
        const url = piece.image_url.url;
        // Only inline (data:) images can be carried over.
        if (!url.startsWith("data:")) {
          continue;
        }
        const match = url.match(/^data:([^;]+);base64,(.+)$/);
        if (match) {
          parts.push({
            inlineData: {
              mimeType: match[1],
              data: match[2]
            }
          });
        }
      }
    }
  }

  // Assistant tool calls → Gemini functionCall parts
  if (Array.isArray(msg.tool_calls)) {
    for (const call of msg.tool_calls) {
      parts.push({
        functionCall: {
          name: call.function.name,
          args: tryParseJSON(call.function.arguments)
        }
      });
    }
  }

  if (parts.length === 0) {
    return null;
  }

  return {
    role: msg.role === "assistant" ? "model" : "user",
    parts
  };
}
|
|
333
|
+
|
|
334
|
+
// Map an OpenAI tool_choice value onto Claude's tool_choice shape.
// Anything unrecognized falls back to { type: "auto" }.
function convertOpenAIToolChoice(choice) {
  if (!choice) {
    return { type: "auto" };
  }

  if (typeof choice === "object") {
    // Passthrough if already Claude format
    if (choice.type) {
      return choice;
    }
    // OpenAI named-function form → force that specific tool
    if (choice.function) {
      return { type: "tool", name: choice.function.name };
    }
    return { type: "auto" };
  }

  switch (choice) {
    case "required":
      return { type: "any" };
    case "auto":
    case "none":
    default:
      return { type: "auto" };
  }
}
|
|
346
|
+
|
|
347
|
+
// Concatenate the text pieces of an OpenAI message content value with
// newlines; bare strings pass through, anything else yields "".
function extractTextContent(content) {
  if (typeof content === "string") {
    return content;
  }
  if (!Array.isArray(content)) {
    return "";
  }
  const texts = [];
  for (const piece of content) {
    if (piece.type === "text") {
      texts.push(piece.text);
    }
  }
  return texts.join("\n");
}
|
|
355
|
+
|
|
356
|
+
// Best-effort JSON parse: non-strings and unparseable strings are
// returned unchanged.
function tryParseJSON(str) {
  if (typeof str !== "string") {
    return str;
  }
  let parsed;
  try {
    parsed = JSON.parse(str);
  } catch {
    // Not valid JSON — hand the raw string back.
    parsed = str;
  }
  return parsed;
}
|
|
365
|
+
|
|
366
|
+
// Register
// Request-direction translators from OpenAI chat format; the trailing null
// presumably means no response translator is registered for these pairs —
// confirm against translator/index.js.
register(FORMATS.OPENAI, FORMATS.CLAUDE, openaiToClaude, null);
register(FORMATS.OPENAI, FORMATS.GEMINI, openaiToGemini, null);
register(FORMATS.OPENAI, FORMATS.GEMINI_CLI, openaiToGemini, null);
|
|
370
|
+
|
|
371
|
+
|