open-sse 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +180 -0
- package/config/constants.js +206 -0
- package/config/defaultThinkingSignature.js +7 -0
- package/config/ollamaModels.js +19 -0
- package/config/providerModels.js +161 -0
- package/handlers/chatCore.js +277 -0
- package/handlers/responsesHandler.js +69 -0
- package/index.js +69 -0
- package/package.json +44 -0
- package/services/accountFallback.js +148 -0
- package/services/combo.js +69 -0
- package/services/compact.js +64 -0
- package/services/model.js +109 -0
- package/services/provider.js +237 -0
- package/services/tokenRefresh.js +542 -0
- package/services/usage.js +398 -0
- package/translator/formats.js +12 -0
- package/translator/from-openai/claude.js +341 -0
- package/translator/from-openai/gemini.js +469 -0
- package/translator/from-openai/openai-responses.js +361 -0
- package/translator/helpers/claudeHelper.js +179 -0
- package/translator/helpers/geminiHelper.js +131 -0
- package/translator/helpers/openaiHelper.js +80 -0
- package/translator/helpers/responsesApiHelper.js +103 -0
- package/translator/helpers/toolCallHelper.js +111 -0
- package/translator/index.js +167 -0
- package/translator/to-openai/claude.js +238 -0
- package/translator/to-openai/gemini.js +151 -0
- package/translator/to-openai/openai-responses.js +140 -0
- package/translator/to-openai/openai.js +371 -0
- package/utils/bypassHandler.js +258 -0
- package/utils/error.js +133 -0
- package/utils/ollamaTransform.js +82 -0
- package/utils/requestLogger.js +217 -0
- package/utils/stream.js +274 -0
- package/utils/streamHandler.js +131 -0
|
@@ -0,0 +1,341 @@
|
|
|
1
|
+
import { register } from "../index.js";
|
|
2
|
+
import { FORMATS } from "../formats.js";
|
|
3
|
+
|
|
4
|
+
// Build one OpenAI-style streaming chunk ("chat.completion.chunk") for the
// current message. `delta` is the incremental payload for choice 0;
// `finishReason` stays null until the stream's terminal chunk.
function createChunk(state, delta, finishReason = null) {
  const choice = {
    index: 0,
    delta,
    finish_reason: finishReason
  };
  return {
    id: `chatcmpl-${state.messageId}`,
    object: "chat.completion.chunk",
    created: Math.floor(Date.now() / 1000),
    model: state.model,
    choices: [choice]
  };
}
|
|
18
|
+
|
|
19
|
+
// Convert one Claude SSE event into zero or more OpenAI chat.completion.chunk
// objects. `state` carries per-stream bookkeeping across calls: messageId,
// model, open tool calls (Map keyed by Claude block index), whether a
// finish_reason chunk was already emitted, and optional token usage.
// Returns an array of chunks, or null when the event yields no output.
function claudeToOpenAIResponse(chunk, state) {
  if (!chunk) return null;

  // Defensive init: message_stop already guards with `state.toolCalls?.size`,
  // but the content_block branches call `.set`/`.get` directly and would
  // throw a TypeError if the caller did not pre-populate the Map.
  state.toolCalls ??= new Map();

  const results = [];
  const event = chunk.type;

  switch (event) {
    case "message_start": {
      state.messageId = chunk.message?.id || `msg_${Date.now()}`;
      state.model = chunk.message?.model;
      results.push(createChunk(state, { role: "assistant" }));
      break;
    }

    case "content_block_start": {
      const block = chunk.content_block;
      if (block?.type === "text") {
        state.textBlockStarted = true;
      } else if (block?.type === "thinking") {
        // Thinking is surfaced inline as <think>…</think> markers in content.
        state.inThinkingBlock = true;
        state.currentBlockIndex = chunk.index;
        results.push(createChunk(state, { content: "<think>" }));
      } else if (block?.type === "tool_use") {
        // `?? 0` (not `||`) so a missing index keys the Map with the same
        // value (0) that the emitted tool_call advertises as its index.
        const index = chunk.index ?? 0;
        const toolCall = {
          index,
          id: block.id,
          type: "function",
          function: {
            name: block.name,
            arguments: ""
          }
        };
        state.toolCalls.set(index, toolCall);
        results.push(createChunk(state, { tool_calls: [toolCall] }));
      }
      break;
    }

    case "content_block_delta": {
      const delta = chunk.delta;
      if (delta?.type === "text_delta" && delta.text) {
        results.push(createChunk(state, { content: delta.text }));
      } else if (delta?.type === "thinking_delta" && delta.thinking) {
        // Thinking content streams as ordinary text between the markers.
        results.push(createChunk(state, { content: delta.thinking }));
      } else if (delta?.type === "input_json_delta" && delta.partial_json) {
        // Same `?? 0` key as content_block_start so the lookup matches.
        const toolCall = state.toolCalls.get(chunk.index ?? 0);
        if (toolCall) {
          // Accumulate the full arguments locally; forward only the fragment.
          toolCall.function.arguments += delta.partial_json;
          results.push(createChunk(state, {
            tool_calls: [{
              index: toolCall.index,
              function: { arguments: delta.partial_json }
            }]
          }));
        }
      }
      break;
    }

    case "content_block_stop": {
      if (state.inThinkingBlock && chunk.index === state.currentBlockIndex) {
        results.push(createChunk(state, { content: "</think>" }));
        state.inThinkingBlock = false;
      }
      state.textBlockStarted = false;
      state.thinkingBlockStarted = false;
      break;
    }

    case "message_delta": {
      if (chunk.delta?.stop_reason) {
        state.finishReason = convertStopReason(chunk.delta.stop_reason);
        // Send the final chunk with finish_reason immediately; reuse the
        // factory instead of duplicating the chunk literal inline.
        results.push(createChunk(state, {}, state.finishReason));
        state.finishReasonSent = true;
      }
      // Usage is now extracted in stream.js extractUsage()
      break;
    }

    case "message_stop": {
      // CLIProxyAPI and OpenAI standard: message_stop should send the final
      // chunk with finish_reason so the client knows the response is complete.
      // Only emit it if message_delta has not already sent one.
      if (!state.finishReasonSent) {
        const finishReason = state.finishReason || (state.toolCalls.size > 0 ? "tool_calls" : "stop");
        results.push({
          id: `chatcmpl-${state.messageId}`,
          object: "chat.completion.chunk",
          created: Math.floor(Date.now() / 1000),
          model: state.model,
          choices: [{
            index: 0,
            delta: {},
            finish_reason: finishReason
          }],
          // Attach token usage when upstream captured it on the state.
          ...(state.usage && {
            usage: {
              prompt_tokens: state.usage.input_tokens || 0,
              completion_tokens: state.usage.output_tokens || 0,
              total_tokens: (state.usage.input_tokens || 0) + (state.usage.output_tokens || 0)
            }
          })
        });
        state.finishReasonSent = true;
      }
      break;
    }
  }

  return results.length > 0 ? results : null;
}
|
|
148
|
+
|
|
149
|
+
// Close the currently open thinking block, if any, by emitting a
// content_block_stop for its index and clearing the started flag.
// No-op when no thinking block is open.
function stopThinkingBlock(state, results) {
  if (state.thinkingBlockStarted) {
    results.push({ type: "content_block_stop", index: state.thinkingBlockIndex });
    state.thinkingBlockStarted = false;
  }
}
|
|
158
|
+
|
|
159
|
+
// Close the currently open text block, if any, by emitting a
// content_block_stop for its index. The textBlockClosed latch guards
// against emitting a second stop for the same block.
function stopTextBlock(state, results) {
  if (state.textBlockStarted && !state.textBlockClosed) {
    state.textBlockClosed = true;
    results.push({ type: "content_block_stop", index: state.textBlockIndex });
    state.textBlockStarted = false;
  }
}
|
|
169
|
+
|
|
170
|
+
// Convert one OpenAI chat.completion.chunk into zero or more Claude SSE
// events. `state` tracks the stream lifecycle across calls: whether
// message_start was emitted, which content blocks (thinking/text/tool_use)
// are currently open, and the next Claude block index to allocate.
// Returns an array of Claude events, or null when nothing is produced.
function openaiToClaudeResponse(chunk, state) {
  if (!chunk || !chunk.choices?.[0]) return null;

  // Defensive init: the tool_calls branch calls `.set`/`.get` and the finish
  // branch iterates the Map — initialize it when the caller did not.
  state.toolCalls ??= new Map();

  const results = [];
  const choice = chunk.choices[0];
  const delta = choice.delta;

  // First chunk - ALWAYS send message_start first
  if (!state.messageStartSent) {
    state.messageStartSent = true;
    state.messageId = chunk.id?.replace("chatcmpl-", "") || `msg_${Date.now()}`;
    // Some upstream ids collapse to something unusable after stripping the
    // prefix; fall back to request/trace ids, then a timestamp-based id.
    if (!state.messageId || state.messageId === "chat" || state.messageId.length < 8) {
      state.messageId = chunk.extend_fields?.requestId ||
        chunk.extend_fields?.traceId ||
        `msg_${Date.now()}`;
    }
    state.model = chunk.model || "unknown";
    state.nextBlockIndex = 0;
    results.push({
      type: "message_start",
      message: {
        id: state.messageId,
        type: "message",
        role: "assistant",
        model: state.model,
        content: [],
        stop_reason: null,
        stop_sequence: null,
        usage: { input_tokens: 0, output_tokens: 0 }
      }
    });
  }

  // Handle reasoning_content (thinking) - GLM, DeepSeek, etc.
  const reasoningContent = delta?.reasoning_content || delta?.reasoning;
  if (reasoningContent) {
    // Claude blocks cannot interleave: close any open text block first.
    stopTextBlock(state, results);

    // Start thinking block if needed
    if (!state.thinkingBlockStarted) {
      state.thinkingBlockIndex = state.nextBlockIndex++;
      state.thinkingBlockStarted = true;
      results.push({
        type: "content_block_start",
        index: state.thinkingBlockIndex,
        content_block: { type: "thinking", thinking: "" }
      });
    }

    // Send thinking delta
    results.push({
      type: "content_block_delta",
      index: state.thinkingBlockIndex,
      delta: { type: "thinking_delta", thinking: reasoningContent }
    });
  }

  // Handle regular content
  if (delta?.content) {
    // Stop thinking block before text
    stopThinkingBlock(state, results);

    // Start text block if needed
    if (!state.textBlockStarted) {
      state.textBlockIndex = state.nextBlockIndex++;
      state.textBlockStarted = true;
      state.textBlockClosed = false;
      results.push({
        type: "content_block_start",
        index: state.textBlockIndex,
        content_block: { type: "text", text: "" }
      });
    }

    // Send text delta
    results.push({
      type: "content_block_delta",
      index: state.textBlockIndex,
      delta: { type: "text_delta", text: delta.content }
    });
  }

  // Tool calls
  if (delta?.tool_calls) {
    for (const tc of delta.tool_calls) {
      const idx = tc.index ?? 0;

      // A fragment carrying an id opens a new tool_use block.
      if (tc.id) {
        // Stop thinking and text blocks before tool use
        stopThinkingBlock(state, results);
        stopTextBlock(state, results);

        const toolBlockIndex = state.nextBlockIndex++;
        state.toolCalls.set(idx, { id: tc.id, name: tc.function?.name || "", blockIndex: toolBlockIndex });
        results.push({
          type: "content_block_start",
          index: toolBlockIndex,
          content_block: {
            type: "tool_use",
            id: tc.id,
            name: tc.function?.name || "",
            input: {}
          }
        });
      }

      // Argument fragments stream as input_json_delta on the tool's block.
      if (tc.function?.arguments) {
        const toolInfo = state.toolCalls.get(idx);
        if (toolInfo) {
          results.push({
            type: "content_block_delta",
            index: toolInfo.blockIndex,
            delta: { type: "input_json_delta", partial_json: tc.function.arguments }
          });
        }
      }
    }
  }

  // Finish: close every open block, then emit message_delta + message_stop.
  if (choice.finish_reason) {
    stopThinkingBlock(state, results);
    stopTextBlock(state, results);

    // Close tool call blocks
    for (const [, toolInfo] of state.toolCalls) {
      results.push({
        type: "content_block_stop",
        index: toolInfo.blockIndex
      });
    }

    // output_tokens is populated elsewhere; Claude clients expect the field.
    results.push({
      type: "message_delta",
      delta: { stop_reason: convertFinishReason(choice.finish_reason) },
      usage: { output_tokens: 0 }
    });
    results.push({ type: "message_stop" });
  }

  return results.length > 0 ? results : null;
}
|
|
316
|
+
|
|
317
|
+
// Map a Claude stop_reason onto the OpenAI finish_reason vocabulary.
// Any unrecognized reason falls back to "stop". A Map (rather than an
// object literal) keeps inherited keys like "constructor" from matching.
function convertStopReason(reason) {
  const mapping = new Map([
    ["end_turn", "stop"],
    ["max_tokens", "length"],
    ["tool_use", "tool_calls"],
    ["stop_sequence", "stop"]
  ]);
  return mapping.get(reason) ?? "stop";
}
|
|
327
|
+
|
|
328
|
+
// Map an OpenAI finish_reason onto the Claude stop_reason vocabulary.
// Any unrecognized reason falls back to "end_turn". A Map (rather than an
// object literal) keeps inherited keys like "constructor" from matching.
function convertFinishReason(reason) {
  const mapping = new Map([
    ["stop", "end_turn"],
    ["length", "max_tokens"],
    ["tool_calls", "tool_use"]
  ]);
  return mapping.get(reason) ?? "end_turn";
}
|
|
337
|
+
|
|
338
|
+
// Register both directions of the Claude <-> OpenAI streaming translators.
// NOTE(review): the third argument is null here — presumably the
// (unused) non-streaming/request translator slot; confirm against the
// register() signature in translator/index.js.
register(FORMATS.CLAUDE, FORMATS.OPENAI, null, claudeToOpenAIResponse);
register(FORMATS.OPENAI, FORMATS.CLAUDE, null, openaiToClaudeResponse);
|
|
341
|
+
|