open-sse 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/README.md +180 -0
  2. package/config/constants.js +206 -0
  3. package/config/defaultThinkingSignature.js +7 -0
  4. package/config/ollamaModels.js +19 -0
  5. package/config/providerModels.js +161 -0
  6. package/handlers/chatCore.js +277 -0
  7. package/handlers/responsesHandler.js +69 -0
  8. package/index.js +69 -0
  9. package/package.json +44 -0
  10. package/services/accountFallback.js +148 -0
  11. package/services/combo.js +69 -0
  12. package/services/compact.js +64 -0
  13. package/services/model.js +109 -0
  14. package/services/provider.js +237 -0
  15. package/services/tokenRefresh.js +542 -0
  16. package/services/usage.js +398 -0
  17. package/translator/formats.js +12 -0
  18. package/translator/from-openai/claude.js +341 -0
  19. package/translator/from-openai/gemini.js +469 -0
  20. package/translator/from-openai/openai-responses.js +361 -0
  21. package/translator/helpers/claudeHelper.js +179 -0
  22. package/translator/helpers/geminiHelper.js +131 -0
  23. package/translator/helpers/openaiHelper.js +80 -0
  24. package/translator/helpers/responsesApiHelper.js +103 -0
  25. package/translator/helpers/toolCallHelper.js +111 -0
  26. package/translator/index.js +167 -0
  27. package/translator/to-openai/claude.js +238 -0
  28. package/translator/to-openai/gemini.js +151 -0
  29. package/translator/to-openai/openai-responses.js +140 -0
  30. package/translator/to-openai/openai.js +371 -0
  31. package/utils/bypassHandler.js +258 -0
  32. package/utils/error.js +133 -0
  33. package/utils/ollamaTransform.js +82 -0
  34. package/utils/requestLogger.js +217 -0
  35. package/utils/stream.js +274 -0
  36. package/utils/streamHandler.js +131 -0
package/translator/helpers/openaiHelper.js
@@ -0,0 +1,80 @@
+ // OpenAI helper functions for translator
+
+ // Valid OpenAI content block types
+ export const VALID_OPENAI_CONTENT_TYPES = ["text", "image_url", "image"];
+ export const VALID_OPENAI_MESSAGE_TYPES = ["text", "image_url", "image", "tool_calls", "tool_result"];
+
+ // Filter messages to OpenAI standard format
+ // Remove: thinking, redacted_thinking, signature, and other non-OpenAI blocks
+ export function filterToOpenAIFormat(body) {
+   if (!body.messages || !Array.isArray(body.messages)) return body;
+
+   body.messages = body.messages.map(msg => {
+     // Keep tool messages as-is (OpenAI format)
+     if (msg.role === "tool") return msg;
+
+     // Keep assistant messages with tool_calls as-is
+     if (msg.role === "assistant" && msg.tool_calls) return msg;
+
+     // Handle string content
+     if (typeof msg.content === "string") return msg;
+
+     // Handle array content
+     if (Array.isArray(msg.content)) {
+       const filteredContent = [];
+
+       for (const block of msg.content) {
+         // Skip thinking blocks
+         if (block.type === "thinking" || block.type === "redacted_thinking") continue;
+
+         // Only keep valid OpenAI content types
+         if (VALID_OPENAI_CONTENT_TYPES.includes(block.type)) {
+           // Remove signature field if exists
+           const { signature, cache_control, ...cleanBlock } = block;
+           filteredContent.push(cleanBlock);
+         } else if (block.type === "tool_use") {
+           // Convert tool_use to tool_calls format (handled separately)
+           continue;
+         } else if (block.type === "tool_result") {
+           // Keep tool_result but clean it
+           const { signature, cache_control, ...cleanBlock } = block;
+           filteredContent.push(cleanBlock);
+         }
+       }
+
+       // If all content was filtered, add empty text
+       if (filteredContent.length === 0) {
+         filteredContent.push({ type: "text", text: "" });
+       }
+
+       return { ...msg, content: filteredContent };
+     }
+
+     return msg;
+   });
+
+   // Filter out messages with only empty text (but NEVER filter tool messages)
+   body.messages = body.messages.filter(msg => {
+     // Always keep tool messages
+     if (msg.role === "tool") return true;
+     // Always keep assistant messages with tool_calls
+     if (msg.role === "assistant" && msg.tool_calls) return true;
+
+     if (typeof msg.content === "string") return msg.content.trim() !== "";
+     if (Array.isArray(msg.content)) {
+       return msg.content.some(b =>
+         (b.type === "text" && b.text?.trim()) ||
+         b.type !== "text"
+       );
+     }
+     return true;
+   });
+
+   // Remove empty tools array (some providers like QWEN reject it)
+   if (body.tools && Array.isArray(body.tools) && body.tools.length === 0) {
+     delete body.tools;
+   }
+
+   return body;
+ }
+
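A rough usage sketch for filterToOpenAIFormat follows. The request body is hypothetical, and the relative import assumes the snippet sits alongside package/translator/index.js:

import { filterToOpenAIFormat } from "./helpers/openaiHelper.js";

// Hypothetical body mixing Claude-style blocks into an OpenAI-bound request
const body = {
  messages: [
    {
      role: "assistant",
      content: [
        { type: "thinking", thinking: "internal reasoning", signature: "sig" },
        { type: "text", text: "Here is the answer.", signature: "sig" }
      ]
    }
  ],
  tools: []
};

const cleaned = filterToOpenAIFormat(body);
// cleaned.messages[0].content is [{ type: "text", text: "Here is the answer." }]
//   (thinking block dropped, signature field stripped)
// cleaned.tools is removed, since an empty tools array is deleted for providers that reject it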
package/translator/helpers/responsesApiHelper.js
@@ -0,0 +1,103 @@
+ /**
+  * Convert OpenAI Responses API format to standard chat completions format
+  * Responses API uses: { input: [...], instructions: "..." }
+  * Chat API uses: { messages: [...] }
+  */
+ export function convertResponsesApiFormat(body) {
+   if (!body.input) return body;
+
+   const result = { ...body };
+   result.messages = [];
+
+   // Convert instructions to system message
+   if (body.instructions) {
+     result.messages.push({ role: "system", content: body.instructions });
+   }
+
+   // Group items by conversation turn
+   let currentAssistantMsg = null;
+   let pendingToolCalls = [];
+   let pendingToolResults = [];
+
+   for (const item of body.input) {
+     if (item.type === "message") {
+       // Flush any pending assistant message with tool calls
+       if (currentAssistantMsg) {
+         result.messages.push(currentAssistantMsg);
+         currentAssistantMsg = null;
+       }
+       // Flush pending tool results
+       if (pendingToolResults.length > 0) {
+         for (const tr of pendingToolResults) {
+           result.messages.push(tr);
+         }
+         pendingToolResults = [];
+       }
+
+       // Convert content: input_text → text, output_text → text
+       const content = Array.isArray(item.content)
+         ? item.content.map(c => {
+             if (c.type === "input_text") return { type: "text", text: c.text };
+             if (c.type === "output_text") return { type: "text", text: c.text };
+             return c;
+           })
+         : item.content;
+       result.messages.push({ role: item.role, content });
+     }
+     else if (item.type === "function_call") {
+       // Start or append to assistant message with tool_calls
+       if (!currentAssistantMsg) {
+         currentAssistantMsg = {
+           role: "assistant",
+           content: null,
+           tool_calls: []
+         };
+       }
+       currentAssistantMsg.tool_calls.push({
+         id: item.call_id,
+         type: "function",
+         function: {
+           name: item.name,
+           arguments: item.arguments
+         }
+       });
+     }
+     else if (item.type === "function_call_output") {
+       // Flush assistant message first if exists
+       if (currentAssistantMsg) {
+         result.messages.push(currentAssistantMsg);
+         currentAssistantMsg = null;
+       }
+       // Add tool result
+       pendingToolResults.push({
+         role: "tool",
+         tool_call_id: item.call_id,
+         content: typeof item.output === "string" ? item.output : JSON.stringify(item.output)
+       });
+     }
+     else if (item.type === "reasoning") {
+       // Skip reasoning items - they are for display only
+       continue;
+     }
+   }
+
+   // Flush remaining
+   if (currentAssistantMsg) {
+     result.messages.push(currentAssistantMsg);
+   }
+   if (pendingToolResults.length > 0) {
+     for (const tr of pendingToolResults) {
+       result.messages.push(tr);
+     }
+   }
+
+   // Cleanup Responses API specific fields
+   delete result.input;
+   delete result.instructions;
+   delete result.include;
+   delete result.prompt_cache_key;
+   delete result.store;
+   delete result.reasoning;
+
+   return result;
+ }
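A rough sketch of what convertResponsesApiFormat does to a Responses API payload; the payload below is hypothetical and only illustrates the mapping:

import { convertResponsesApiFormat } from "./helpers/responsesApiHelper.js";

// Hypothetical Responses API request with one tool round-trip
const body = {
  instructions: "You are a helpful assistant.",
  input: [
    { type: "message", role: "user", content: [{ type: "input_text", text: "Weather in Paris?" }] },
    { type: "function_call", call_id: "call_1", name: "get_weather", arguments: "{\"city\":\"Paris\"}" },
    { type: "function_call_output", call_id: "call_1", output: "18°C, cloudy" }
  ]
};

const chat = convertResponsesApiFormat(body);
// chat.messages becomes, in order:
//   { role: "system", content: "You are a helpful assistant." }
//   { role: "user", content: [{ type: "text", text: "Weather in Paris?" }] }
//   { role: "assistant", content: null, tool_calls: [{ id: "call_1", type: "function",
//       function: { name: "get_weather", arguments: "{\"city\":\"Paris\"}" } }] }
//   { role: "tool", tool_call_id: "call_1", content: "18°C, cloudy" }
// and input, instructions, include, prompt_cache_key, store, reasoning are deleted from the result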
package/translator/helpers/toolCallHelper.js
@@ -0,0 +1,111 @@
+ // Tool call helper functions for translator
+
+ // Generate unique tool call ID
+ export function generateToolCallId() {
+   return `call_${Date.now().toString(36)}_${Math.random().toString(36).slice(2, 9)}`;
+ }
+
+ // Ensure all tool_calls have id field and arguments is string (some providers require it)
+ export function ensureToolCallIds(body) {
+   if (!body.messages || !Array.isArray(body.messages)) return body;
+
+   for (const msg of body.messages) {
+     if (msg.role === "assistant" && msg.tool_calls && Array.isArray(msg.tool_calls)) {
+       for (const tc of msg.tool_calls) {
+         if (!tc.id) {
+           tc.id = generateToolCallId();
+         }
+         if (!tc.type) {
+           tc.type = "function";
+         }
+         // Ensure arguments is JSON string, not object
+         if (tc.function?.arguments && typeof tc.function.arguments !== "string") {
+           tc.function.arguments = JSON.stringify(tc.function.arguments);
+         }
+       }
+     }
+   }
+
+   return body;
+ }
+
+ // Get tool_call ids from assistant message (OpenAI format: tool_calls, Claude format: tool_use in content)
+ export function getToolCallIds(msg) {
+   if (msg.role !== "assistant") return [];
+
+   const ids = [];
+
+   // OpenAI format: tool_calls array
+   if (msg.tool_calls && Array.isArray(msg.tool_calls)) {
+     for (const tc of msg.tool_calls) {
+       if (tc.id) ids.push(tc.id);
+     }
+   }
+
+   // Claude format: tool_use blocks in content
+   if (Array.isArray(msg.content)) {
+     for (const block of msg.content) {
+       if (block.type === "tool_use" && block.id) {
+         ids.push(block.id);
+       }
+     }
+   }
+
+   return ids;
+ }
+
+ // Check if user message has tool_result for given ids (OpenAI format: role=tool, Claude format: tool_result in content)
+ export function hasToolResults(msg, toolCallIds) {
+   if (!msg || !toolCallIds.length) return false;
+
+   // OpenAI format: role = "tool" with tool_call_id
+   if (msg.role === "tool" && msg.tool_call_id) {
+     return toolCallIds.includes(msg.tool_call_id);
+   }
+
+   // Claude format: tool_result blocks in user message content
+   if (msg.role === "user" && Array.isArray(msg.content)) {
+     for (const block of msg.content) {
+       if (block.type === "tool_result" && toolCallIds.includes(block.tool_use_id)) {
+         return true;
+       }
+     }
+   }
+
+   return false;
+ }
+
+ // Fix missing tool responses - insert empty tool_result if assistant has tool_use but next message has no tool_result
+ export function fixMissingToolResponses(body) {
+   if (!body.messages || !Array.isArray(body.messages)) return body;
+
+   const newMessages = [];
+
+   for (let i = 0; i < body.messages.length; i++) {
+     const msg = body.messages[i];
+     const nextMsg = body.messages[i + 1];
+
+     newMessages.push(msg);
+
+     // Check if this is assistant with tool_calls/tool_use
+     const toolCallIds = getToolCallIds(msg);
+     if (toolCallIds.length === 0) continue;
+
+     // Check if next message has tool_result
+     if (nextMsg && !hasToolResults(nextMsg, toolCallIds)) {
+       // Insert tool responses for each tool_call
+       for (const id of toolCallIds) {
+         // OpenAI format: role = "tool"
+         newMessages.push({
+           role: "tool",
+           tool_call_id: id,
+           content: ""
+         });
+       }
+     }
+   }
+
+   body.messages = newMessages;
+   return body;
+ }
+
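A short sketch of the two repair passes working together; the history below is hypothetical:

import { ensureToolCallIds, fixMissingToolResponses } from "./helpers/toolCallHelper.js";

// Hypothetical history: the assistant called a tool but no tool result was ever recorded
const body = {
  messages: [
    {
      role: "assistant",
      tool_calls: [
        { type: "function", function: { name: "search", arguments: { q: "open-sse" } } }
      ]
    },
    { role: "user", content: "Did the search finish?" }
  ]
};

ensureToolCallIds(body);
// the tool call now has a generated id (call_<timestamp>_<random>) and its arguments are a JSON string

fixMissingToolResponses(body);
// an empty { role: "tool", tool_call_id: <generated id>, content: "" } is inserted
// between the assistant message and the user message, so strict providers accept the history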
package/translator/index.js
@@ -0,0 +1,167 @@
+ import { FORMATS } from "./formats.js";
+ import { ensureToolCallIds, fixMissingToolResponses } from "./helpers/toolCallHelper.js";
+ import { prepareClaudeRequest } from "./helpers/claudeHelper.js";
+ import { filterToOpenAIFormat } from "./helpers/openaiHelper.js";
+ import { normalizeThinkingConfig } from "../services/provider.js";
+
+ // Registry for translators
+ const requestRegistry = new Map();
+ const responseRegistry = new Map();
+
+ // Register translator
+ export function register(from, to, requestFn, responseFn) {
+   const key = `${from}:${to}`;
+   if (requestFn) {
+     requestRegistry.set(key, requestFn);
+   }
+   if (responseFn) {
+     responseRegistry.set(key, responseFn);
+   }
+ }
+
+ // Translate request: source -> openai -> target
+ export function translateRequest(sourceFormat, targetFormat, model, body, stream = true, credentials = null, provider = null) {
+   let result = body;
+
+   // Normalize thinking config: remove if lastMessage is not user
+   normalizeThinkingConfig(result);
+
+   // Always ensure tool_calls have id (some providers require it)
+   ensureToolCallIds(result);
+
+   // Fix missing tool responses (insert empty tool_result if needed)
+   fixMissingToolResponses(result);
+
+   // If same format, skip translation steps
+   if (sourceFormat !== targetFormat) {
+     // Step 1: source -> openai (if source is not openai)
+     if (sourceFormat !== FORMATS.OPENAI) {
+       const toOpenAI = requestRegistry.get(`${sourceFormat}:${FORMATS.OPENAI}`);
+       if (toOpenAI) {
+         result = toOpenAI(model, result, stream, credentials);
+       }
+     }
+
+     // Step 1.5: Filter to clean OpenAI format (only when target is OpenAI)
+     if (targetFormat === FORMATS.OPENAI) {
+       result = filterToOpenAIFormat(result);
+     }
+
+     // Step 2: openai -> target (if target is not openai)
+     if (targetFormat !== FORMATS.OPENAI) {
+       const fromOpenAI = requestRegistry.get(`${FORMATS.OPENAI}:${targetFormat}`);
+       if (fromOpenAI) {
+         result = fromOpenAI(model, result, stream, credentials);
+       }
+     }
+   }
+
+   // Final step: prepare request for Claude format endpoints
+   if (targetFormat === FORMATS.CLAUDE) {
+     result = prepareClaudeRequest(result, provider);
+   }
+
+   return result;
+ }
+
+ // Translate response chunk: target -> openai -> source
+ export function translateResponse(targetFormat, sourceFormat, chunk, state) {
+   // If same format, return as-is
+   if (sourceFormat === targetFormat) {
+     return [chunk];
+   }
+
+   let results = [chunk];
+
+   // Step 1: target -> openai (if target is not openai)
+   if (targetFormat !== FORMATS.OPENAI) {
+     const toOpenAI = responseRegistry.get(`${targetFormat}:${FORMATS.OPENAI}`);
+     if (toOpenAI) {
+       results = [];
+       const converted = toOpenAI(chunk, state);
+       if (converted) {
+         results = Array.isArray(converted) ? converted : [converted];
+       }
+     }
+   }
+
+   // Step 2: openai -> source (if source is not openai)
+   if (sourceFormat !== FORMATS.OPENAI) {
+     const fromOpenAI = responseRegistry.get(`${FORMATS.OPENAI}:${sourceFormat}`);
+     if (fromOpenAI) {
+       const finalResults = [];
+       for (const r of results) {
+         const converted = fromOpenAI(r, state);
+         if (converted) {
+           finalResults.push(...(Array.isArray(converted) ? converted : [converted]));
+         }
+       }
+       results = finalResults;
+     }
+   }
+
+   return results;
+ }
+
+ // Check if translation needed
+ export function needsTranslation(sourceFormat, targetFormat) {
+   return sourceFormat !== targetFormat;
+ }
+
+ // Initialize state for streaming response based on format
+ export function initState(sourceFormat) {
+   // Base state for all formats
+   const base = {
+     messageId: null,
+     model: null,
+     textBlockStarted: false,
+     thinkingBlockStarted: false,
+     inThinkingBlock: false,
+     currentBlockIndex: null,
+     toolCalls: new Map(),
+     finishReason: null,
+     finishReasonSent: false,
+     usage: null,
+     contentBlockIndex: -1
+   };
+
+   // Add openai-responses specific fields
+   if (sourceFormat === FORMATS.OPENAI_RESPONSES) {
+     return {
+       ...base,
+       seq: 0,
+       responseId: `resp_${Date.now()}`,
+       created: Math.floor(Date.now() / 1000),
+       started: false,
+       msgTextBuf: {},
+       msgItemAdded: {},
+       msgContentAdded: {},
+       msgItemDone: {},
+       reasoningId: "",
+       reasoningIndex: -1,
+       reasoningBuf: "",
+       reasoningPartAdded: false,
+       reasoningDone: false,
+       inThinking: false,
+       funcArgsBuf: {},
+       funcNames: {},
+       funcCallIds: {},
+       funcArgsDone: {},
+       funcItemDone: {},
+       completedSent: false
+     };
+   }
+
+   return base;
+ }
+
+ // Initialize all translators
+ export async function initTranslators() {
+   await import("./to-openai/claude.js");
+   await import("./to-openai/gemini.js");
+   await import("./to-openai/openai.js");
+   await import("./to-openai/openai-responses.js");
+   await import("./from-openai/claude.js");
+   await import("./from-openai/gemini.js");
+   await import("./from-openai/openai-responses.js");
+ }
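A minimal wiring sketch for the registry above. The request body and model name are hypothetical, FORMATS.GEMINI is assumed from the gemini translator files listed in this diff, and the import paths assume the caller sits at the package root:

import { initTranslators, translateRequest, translateResponse, initState } from "./translator/index.js";
import { FORMATS } from "./translator/formats.js";

await initTranslators(); // side-effect imports register the built-in converters

// Claude-format client, Gemini-format upstream:
// claude -> openai -> gemini on the way out, gemini -> openai -> claude on the way back
const claudeBody = {
  max_tokens: 1024,
  messages: [{ role: "user", content: "Hello" }]
};
const upstreamBody = translateRequest(FORMATS.CLAUDE, FORMATS.GEMINI, "gemini-pro", claudeBody, true);

const state = initState(FORMATS.CLAUDE);
// for each streamed chunk coming back from the upstream:
//   const outChunks = translateResponse(FORMATS.GEMINI, FORMATS.CLAUDE, chunk, state);
//   outChunks may hold zero, one, or several client-format events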
package/translator/to-openai/claude.js
@@ -0,0 +1,238 @@
+ import { register } from "../index.js";
+ import { FORMATS } from "../formats.js";
+
+ // Convert Claude request to OpenAI format
+ function claudeToOpenAI(model, body, stream) {
+   const result = {
+     model: model,
+     messages: [],
+     stream: stream
+   };
+
+   // Max tokens
+   if (body.max_tokens) {
+     result.max_tokens = body.max_tokens;
+   }
+
+   // Temperature
+   if (body.temperature !== undefined) {
+     result.temperature = body.temperature;
+   }
+
+   // System message
+   if (body.system) {
+     const systemContent = Array.isArray(body.system)
+       ? body.system.map(s => s.text || "").join("\n")
+       : body.system;
+
+     if (systemContent) {
+       result.messages.push({
+         role: "system",
+         content: systemContent
+       });
+     }
+   }
+
+   // Convert messages
+   if (body.messages && Array.isArray(body.messages)) {
+     for (let i = 0; i < body.messages.length; i++) {
+       const msg = body.messages[i];
+       const converted = convertClaudeMessage(msg);
+       if (converted) {
+         // Handle array of messages (multiple tool results)
+         if (Array.isArray(converted)) {
+           result.messages.push(...converted);
+         } else {
+           result.messages.push(converted);
+         }
+       }
+     }
+   }
+
+   // Fix missing tool responses - OpenAI requires every tool_call to have a response
+   fixMissingToolResponses(result.messages);
+
+   // Tools
+   if (body.tools && Array.isArray(body.tools)) {
+     result.tools = body.tools.map(tool => ({
+       type: "function",
+       function: {
+         name: tool.name,
+         description: tool.description,
+         parameters: tool.input_schema || { type: "object", properties: {} }
+       }
+     }));
+   }
+
+   // Tool choice
+   if (body.tool_choice) {
+     result.tool_choice = convertToolChoice(body.tool_choice);
+   }
+
+   return result;
+ }
+
+ // Fix missing tool responses - add empty responses for tool_calls without responses
+ function fixMissingToolResponses(messages) {
+   for (let i = 0; i < messages.length; i++) {
+     const msg = messages[i];
+     if (msg.role === "assistant" && msg.tool_calls && msg.tool_calls.length > 0) {
+       const toolCallIds = msg.tool_calls.map(tc => tc.id);
+
+       // Collect all tool response IDs that IMMEDIATELY follow this assistant message
+       // Stop at any non-tool message (user or assistant)
+       const respondedIds = new Set();
+       let insertPosition = i + 1;
+       for (let j = i + 1; j < messages.length; j++) {
+         const nextMsg = messages[j];
+         if (nextMsg.role === "tool" && nextMsg.tool_call_id) {
+           respondedIds.add(nextMsg.tool_call_id);
+           insertPosition = j + 1;
+         } else {
+           // Stop at any non-tool message (user or assistant)
+           break;
+         }
+       }
+
+       // Find missing responses and insert them
+       const missingIds = toolCallIds.filter(id => !respondedIds.has(id));
+
+       if (missingIds.length > 0) {
+         const missingResponses = missingIds.map(id => ({
+           role: "tool",
+           tool_call_id: id,
+           content: "[No response received]"
+         }));
+         // Insert missing responses at the correct position
+         messages.splice(insertPosition, 0, ...missingResponses);
+         // Adjust index to skip inserted messages
+         i = insertPosition + missingResponses.length - 1;
+       }
+     }
+   }
+ }
+
+ // Convert single Claude message - returns single message or array of messages
+ function convertClaudeMessage(msg) {
+   const role = msg.role === "user" || msg.role === "tool" ? "user" : "assistant";
+
+   // Simple string content
+   if (typeof msg.content === "string") {
+     return { role, content: msg.content };
+   }
+
+   // Array content
+   if (Array.isArray(msg.content)) {
+     const parts = [];
+     const toolCalls = [];
+     const toolResults = [];
+
+     for (const block of msg.content) {
+       switch (block.type) {
+         case "text":
+           parts.push({ type: "text", text: block.text });
+           break;
+
+         case "image":
+           if (block.source?.type === "base64") {
+             parts.push({
+               type: "image_url",
+               image_url: {
+                 url: `data:${block.source.media_type};base64,${block.source.data}`
+               }
+             });
+           }
+           break;
+
+         case "tool_use":
+           toolCalls.push({
+             id: block.id,
+             type: "function",
+             function: {
+               name: block.name,
+               arguments: JSON.stringify(block.input || {})
+             }
+           });
+           break;
+
+         case "tool_result":
+           // Extract actual content from tool_result
+           let resultContent = "";
+           if (typeof block.content === "string") {
+             resultContent = block.content;
+           } else if (Array.isArray(block.content)) {
+             // Claude tool_result content can be array of text blocks
+             resultContent = block.content
+               .filter(c => c.type === "text")
+               .map(c => c.text)
+               .join("\n") || JSON.stringify(block.content);
+           } else if (block.content) {
+             resultContent = JSON.stringify(block.content);
+           }
+
+           toolResults.push({
+             role: "tool",
+             tool_call_id: block.tool_use_id,
+             content: resultContent
+           });
+           break;
+       }
+     }
+
+     // If has tool results, return array of tool messages
+     if (toolResults.length > 0) {
+       // Also include text parts as user message if any
+       if (parts.length > 0) {
+         const textContent = parts.length === 1 && parts[0].type === "text"
+           ? parts[0].text
+           : parts;
+         return [...toolResults, { role: "user", content: textContent }];
+       }
+       return toolResults;
+     }
+
+     // If has tool calls, return assistant message with tool_calls
+     if (toolCalls.length > 0) {
+       const result = { role: "assistant" };
+       if (parts.length > 0) {
+         result.content = parts.length === 1 && parts[0].type === "text"
+           ? parts[0].text
+           : parts;
+       }
+       result.tool_calls = toolCalls;
+       return result;
+     }
+
+     // Return content
+     if (parts.length > 0) {
+       return {
+         role,
+         content: parts.length === 1 && parts[0].type === "text" ? parts[0].text : parts
+       };
+     }
+
+     // Empty content array - return empty string content to keep message in conversation
+     if (msg.content.length === 0) {
+       return { role, content: "" };
+     }
+   }
+
+   return null;
+ }
+
+ // Convert tool choice
+ function convertToolChoice(choice) {
+   if (!choice) return "auto";
+   if (typeof choice === "string") return choice;
+
+   switch (choice.type) {
+     case "auto": return "auto";
+     case "any": return "required";
+     case "tool": return { type: "function", function: { name: choice.name } };
+     default: return "auto";
+   }
+ }
+
+ // Register
+ register(FORMATS.CLAUDE, FORMATS.OPENAI, claudeToOpenAI, null);
+
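claudeToOpenAI is not exported directly; it runs through the registry when translateRequest sees a Claude source and an OpenAI target. To illustrate the mapping it performs, a hypothetical Claude body and, in comments, the approximate OpenAI shape it becomes:

// Hypothetical Claude-format request
const claudeBody = {
  system: [{ type: "text", text: "Be brief." }],
  max_tokens: 256,
  messages: [
    { role: "user", content: "What is 2 + 2?" },
    { role: "assistant", content: [{ type: "tool_use", id: "toolu_1", name: "calc", input: { expr: "2+2" } }] },
    { role: "user", content: [{ type: "tool_result", tool_use_id: "toolu_1", content: "4" }] }
  ],
  tools: [{ name: "calc", description: "Evaluate math", input_schema: { type: "object", properties: { expr: { type: "string" } } } }]
};

// Approximate converter output for model "gpt-4o" with stream = true:
// {
//   model: "gpt-4o", stream: true, max_tokens: 256,
//   messages: [
//     { role: "system", content: "Be brief." },
//     { role: "user", content: "What is 2 + 2?" },
//     { role: "assistant", tool_calls: [{ id: "toolu_1", type: "function",
//         function: { name: "calc", arguments: "{\"expr\":\"2+2\"}" } }] },
//     { role: "tool", tool_call_id: "toolu_1", content: "4" }
//   ],
//   tools: [{ type: "function", function: { name: "calc", description: "Evaluate math",
//       parameters: { type: "object", properties: { expr: { type: "string" } } } } }]
// }
// tool_choice would only be set if body.tool_choice were present.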