protocol-proxy 2.3.4 → 2.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,240 +1,255 @@
1
- /**
2
- * Gemini → OpenAI 协议转换
3
- */
4
-
5
- const { encodeOpenAIEvent, encodeOpenAIDone } = require('./sse-helpers');
6
-
7
- function generateCallId() {
8
- return 'call_' + Math.random().toString(36).slice(2, 14);
9
- }
10
-
11
- // ==================== 请求转换 ====================
12
-
13
- function convertRequest(body, targetModel) {
14
- const messages = [];
15
-
16
- // system_instruction → system message
17
- const sysText = body.systemInstruction?.parts?.map(p => p.text || '').join('') || '';
18
- if (sysText) {
19
- messages.push({ role: 'system', content: sysText });
20
- }
21
-
22
- // tools: functionDeclarations → OpenAI tools
23
- let tools = undefined;
24
- if (body.tools && Array.isArray(body.tools)) {
25
- const allDeclarations = [];
26
- for (const tool of body.tools) {
27
- if (tool.functionDeclarations) {
28
- allDeclarations.push(...tool.functionDeclarations);
29
- }
30
- }
31
- if (allDeclarations.length > 0) {
32
- tools = allDeclarations.map(fd => ({
33
- type: 'function',
34
- function: {
35
- name: fd.name,
36
- description: fd.description || '',
37
- parameters: fd.parameters || { type: 'object', properties: {} },
38
- },
39
- }));
40
- }
41
- }
42
-
43
- // contents → messages, functionCall/functionResponse → tool_calls/tool results
44
- for (const msg of (body.contents || [])) {
45
- const role = msg.role === 'model' ? 'assistant' : 'user';
46
- const parts = msg.parts || [];
47
-
48
- // 检查是否有 functionCall
49
- const functionCalls = parts.filter(p => p.functionCall);
50
- if (functionCalls.length > 0) {
51
- const text = parts.filter(p => p.text).map(p => p.text).join('');
52
- const tool_calls = functionCalls.map(fc => ({
53
- id: generateCallId(),
54
- type: 'function',
55
- function: { name: fc.functionCall.name, arguments: JSON.stringify(fc.functionCall.args || {}) },
56
- }));
57
- messages.push({ role: 'assistant', content: text || null, tool_calls });
58
- continue;
59
- }
60
-
61
- // 检查是否有 functionResponse → tool messages
62
- const functionResponses = parts.filter(p => p.functionResponse);
63
- for (const fr of functionResponses) {
64
- messages.push({
65
- role: 'tool',
66
- tool_call_id: fr.functionResponse.name || 'unknown',
67
- content: typeof fr.functionResponse.response === 'string'
68
- ? fr.functionResponse.response
69
- : JSON.stringify(fr.functionResponse.response || {}),
70
- });
71
- }
72
-
73
- // 纯文本 part(跳过已处理 functionCall/functionResponse 的消息)
74
- const textParts = parts.filter(p => p.text).map(p => p.text).join('');
75
- if (textParts && functionCalls.length === 0 && functionResponses.length === 0) {
76
- messages.push({ role, content: textParts });
77
- }
78
- }
79
-
80
- const result = {
81
- model: targetModel,
82
- messages,
83
- stream: false,
84
- };
85
-
86
- if (tools) result.tools = tools;
87
-
88
- // generationConfig → OpenAI params
89
- const gc = body.generationConfig || {};
90
- if (gc.maxOutputTokens !== undefined) result.max_tokens = gc.maxOutputTokens;
91
- if (gc.temperature !== undefined) result.temperature = gc.temperature;
92
- if (gc.topP !== undefined) result.top_p = gc.topP;
93
- if (gc.stopSequences) result.stop = gc.stopSequences;
94
-
95
- return result;
96
- }
97
-
98
- // ==================== 响应转换 ====================
99
-
100
- function convertResponse(geminiBody) {
101
- const candidate = geminiBody.candidates?.[0];
102
- if (!candidate) {
103
- return { id: '', object: 'chat.completion', choices: [], usage: convertUsage(geminiBody.usageMetadata) };
104
- }
105
-
106
- const parts = candidate.content?.parts || [];
107
- const textParts = [];
108
- const toolCalls = [];
109
-
110
- for (const part of parts) {
111
- if (part.text) textParts.push(part.text);
112
- if (part.functionCall) {
113
- toolCalls.push({
114
- id: generateCallId(),
115
- type: 'function',
116
- function: { name: part.functionCall.name, arguments: JSON.stringify(part.functionCall.args || {}) },
117
- });
118
- }
119
- }
120
-
121
- const message = { role: 'assistant', content: textParts.join('') || null };
122
- if (toolCalls.length > 0) message.tool_calls = toolCalls;
123
-
124
- return {
125
- id: '',
126
- object: 'chat.completion',
127
- choices: [{
128
- index: 0,
129
- message,
130
- finish_reason: toolCalls.length > 0 ? 'tool_calls' : mapFinishReason(candidate.finishReason),
131
- }],
132
- usage: convertUsage(geminiBody.usageMetadata),
133
- };
134
- }
135
-
136
- function convertUsage(meta) {
137
- return {
138
- prompt_tokens: meta?.promptTokenCount || 0,
139
- completion_tokens: meta?.candidatesTokenCount || 0,
140
- total_tokens: meta?.totalTokenCount || 0,
141
- };
142
- }
143
-
144
- function mapFinishReason(reason) {
145
- if (!reason) return null;
146
- if (reason === 'STOP') return 'stop';
147
- if (reason === 'MAX_TOKENS') return 'length';
148
- if (reason === 'SAFETY') return 'content_filter';
149
- return 'stop';
150
- }
151
-
152
- // ==================== SSE 流式转换 ====================
153
-
154
- function createSSEConverter() {
155
- const state = { started: false, sentFunctionCall: new Map(), buffer: '' };
156
-
157
- return {
158
- convertChunk(chunkText) {
159
- let output = '';
160
- state.buffer += chunkText;
161
- const lines = state.buffer.split('\n');
162
- state.buffer = lines.pop() || '';
163
-
164
- for (const line of lines) {
165
- const trimmed = line.trim();
166
- if (!trimmed.startsWith('data: ')) continue;
167
- const dataStr = trimmed.slice(6);
168
- if (!dataStr) continue;
169
-
170
- let chunk;
171
- try { chunk = JSON.parse(dataStr); } catch { continue; }
172
-
173
- const candidate = chunk.candidates?.[0];
174
- if (!candidate) continue;
175
-
176
- const parts = candidate.content?.parts || [];
177
-
178
- // 首个 chunk 发送 role
179
- if (!state.started && (parts.length > 0)) {
180
- state.started = true;
181
- output += encodeOpenAIEvent({
182
- id: '',
183
- object: 'chat.completion.chunk',
184
- choices: [{ index: 0, delta: { role: 'assistant', content: null }, finish_reason: null }],
185
- });
186
- }
187
-
188
- // 文本增量
189
- const text = parts.filter(p => p.text).map(p => p.text).join('') || '';
190
- if (text) {
191
- output += encodeOpenAIEvent({
192
- id: '',
193
- object: 'chat.completion.chunk',
194
- choices: [{ index: 0, delta: { content: text }, finish_reason: null }],
195
- });
196
- }
197
-
198
- // functionCall 增量(去重,首次生成 ID 后缓存)
199
- for (const part of parts) {
200
- if (!part.functionCall) continue;
201
- const key = part.functionCall.name + (typeof part.functionCall.args === 'string' ? part.functionCall.args : JSON.stringify(part.functionCall.args || {}));
202
- if (state.sentFunctionCall.has(key)) continue;
203
- const callId = generateCallId();
204
- state.sentFunctionCall.set(key, callId);
205
- output += encodeOpenAIEvent({
206
- id: '',
207
- object: 'chat.completion.chunk',
208
- choices: [{
209
- index: 0,
210
- delta: {
211
- tool_calls: [{
212
- index: 0,
213
- id: callId,
214
- type: 'function',
215
- function: { name: part.functionCall.name, arguments: JSON.stringify(part.functionCall.args || {}) },
216
- }],
217
- },
218
- finish_reason: null,
219
- }],
220
- });
221
- }
222
-
223
- if (candidate.finishReason) {
224
- const reason = mapFinishReason(candidate.finishReason);
225
- output += encodeOpenAIEvent({
226
- id: '',
227
- object: 'chat.completion.chunk',
228
- choices: [{ index: 0, delta: {}, finish_reason: reason }],
229
- });
230
- output += encodeOpenAIDone();
231
- }
232
- }
233
-
234
- return output || null;
235
- },
236
- flush() { return ''; },
237
- };
238
- }
239
-
240
- module.exports = { convertRequest, convertResponse, createSSEConverter };
1
+ /**
2
+ * Gemini → OpenAI 协议转换
3
+ */
4
+
5
+ const { encodeOpenAIEvent, encodeOpenAIDone } = require('./sse-helpers');
6
+
7
+ function generateCallId() {
8
+ return 'call_' + Math.random().toString(36).slice(2, 14);
9
+ }
10
+
11
+ // ==================== 请求转换 ====================
12
+
13
+ function convertRequest(body, targetModel) {
14
+ const messages = [];
15
+ // 追踪 Gemini 函数名 → 生成的 tool_call id,用于后续 tool_result 转换
16
+ const nameToId = new Map();
17
+ const nameCount = new Map();
18
+
19
+ // system_instruction → system message
20
+ const sysText = body.systemInstruction?.parts?.map(p => p.text || '').join('') || '';
21
+ if (sysText) {
22
+ messages.push({ role: 'system', content: sysText });
23
+ }
24
+
25
+ // tools: functionDeclarations → OpenAI tools
26
+ let tools = undefined;
27
+ if (body.tools && Array.isArray(body.tools)) {
28
+ const allDeclarations = [];
29
+ for (const tool of body.tools) {
30
+ if (tool.functionDeclarations) {
31
+ allDeclarations.push(...tool.functionDeclarations);
32
+ }
33
+ }
34
+ if (allDeclarations.length > 0) {
35
+ tools = allDeclarations.map(fd => ({
36
+ type: 'function',
37
+ function: {
38
+ name: fd.name,
39
+ description: fd.description || '',
40
+ parameters: fd.parameters || { type: 'object', properties: {} },
41
+ },
42
+ }));
43
+ }
44
+ }
45
+
46
+ // contents → messages, functionCall/functionResponse → tool_calls/tool results
47
+ for (const msg of (body.contents || [])) {
48
+ const role = msg.role === 'model' ? 'assistant' : 'user';
49
+ const parts = msg.parts || [];
50
+
51
+ // 检查是否有 functionCall
52
+ const functionCalls = parts.filter(p => p.functionCall);
53
+ if (functionCalls.length > 0) {
54
+ const text = parts.filter(p => p.text).map(p => p.text).join('');
55
+ const tool_calls = functionCalls.map(fc => {
56
+ const fnName = fc.functionCall.name || 'unknown';
57
+ const count = nameCount.get(fnName) || 0;
58
+ nameCount.set(fnName, count + 1);
59
+ const callId = generateCallId();
60
+ nameToId.set(fnName + '#' + count, callId);
61
+ return {
62
+ id: callId,
63
+ type: 'function',
64
+ function: { name: fnName, arguments: JSON.stringify(fc.functionCall.args || {}) },
65
+ };
66
+ });
67
+ messages.push({ role: 'assistant', content: text || null, tool_calls });
68
+ continue;
69
+ }
70
+
71
+ // 检查是否有 functionResponse → tool messages
72
+ const functionResponses = parts.filter(p => p.functionResponse);
73
+ const respCount = new Map();
74
+ for (const fr of functionResponses) {
75
+ const fnName = fr.functionResponse.name || 'unknown';
76
+ const count = respCount.get(fnName) || 0;
77
+ respCount.set(fnName, count + 1);
78
+ const toolCallId = nameToId.get(fnName + '#' + count) || fnName;
79
+ messages.push({
80
+ role: 'tool',
81
+ tool_call_id: toolCallId,
82
+ content: typeof fr.functionResponse.response === 'string'
83
+ ? fr.functionResponse.response
84
+ : JSON.stringify(fr.functionResponse.response || {}),
85
+ });
86
+ }
87
+
88
+ // 纯文本 part(跳过已处理 functionCall/functionResponse 的消息)
89
+ const textParts = parts.filter(p => p.text).map(p => p.text).join('');
90
+ if (textParts && functionCalls.length === 0 && functionResponses.length === 0) {
91
+ messages.push({ role, content: textParts });
92
+ }
93
+ }
94
+
95
+ const result = {
96
+ model: targetModel,
97
+ messages,
98
+ stream: false,
99
+ };
100
+
101
+ if (tools) result.tools = tools;
102
+
103
+ // generationConfig → OpenAI params
104
+ const gc = body.generationConfig || {};
105
+ if (gc.maxOutputTokens !== undefined) result.max_tokens = gc.maxOutputTokens;
106
+ if (gc.temperature !== undefined) result.temperature = gc.temperature;
107
+ if (gc.topP !== undefined) result.top_p = gc.topP;
108
+ if (gc.stopSequences) result.stop = gc.stopSequences;
109
+
110
+ return { ...result, nameToId };
111
+ }
112
+
113
+ // ==================== 响应转换 ====================
114
+
115
+ function convertResponse(geminiBody) {
116
+ const candidate = geminiBody.candidates?.[0];
117
+ if (!candidate) {
118
+ return { id: '', object: 'chat.completion', choices: [], usage: convertUsage(geminiBody.usageMetadata) };
119
+ }
120
+
121
+ const parts = candidate.content?.parts || [];
122
+ const textParts = [];
123
+ const toolCalls = [];
124
+
125
+ for (const part of parts) {
126
+ if (part.text) textParts.push(part.text);
127
+ if (part.functionCall) {
128
+ toolCalls.push({
129
+ id: generateCallId(),
130
+ type: 'function',
131
+ function: { name: part.functionCall.name, arguments: JSON.stringify(part.functionCall.args || {}) },
132
+ });
133
+ }
134
+ }
135
+
136
+ const message = { role: 'assistant', content: textParts.join('') || null };
137
+ if (toolCalls.length > 0) message.tool_calls = toolCalls;
138
+
139
+ return {
140
+ id: '',
141
+ object: 'chat.completion',
142
+ choices: [{
143
+ index: 0,
144
+ message,
145
+ finish_reason: toolCalls.length > 0 ? 'tool_calls' : mapFinishReason(candidate.finishReason),
146
+ }],
147
+ usage: convertUsage(geminiBody.usageMetadata),
148
+ };
149
+ }
150
+
151
+ function convertUsage(meta) {
152
+ return {
153
+ prompt_tokens: meta?.promptTokenCount || 0,
154
+ completion_tokens: meta?.candidatesTokenCount || 0,
155
+ total_tokens: meta?.totalTokenCount || 0,
156
+ };
157
+ }
158
+
159
+ function mapFinishReason(reason) {
160
+ if (!reason) return null;
161
+ if (reason === 'STOP') return 'stop';
162
+ if (reason === 'MAX_TOKENS') return 'length';
163
+ if (reason === 'SAFETY') return 'content_filter';
164
+ return 'stop';
165
+ }
166
+
167
+ // ==================== SSE 流式转换 ====================
168
+
169
+ function createSSEConverter() {
170
+ const state = { started: false, sentFunctionCall: new Map(), buffer: '' };
171
+
172
+ return {
173
+ convertChunk(chunkText) {
174
+ let output = '';
175
+ state.buffer += chunkText;
176
+ const lines = state.buffer.split('\n');
177
+ state.buffer = lines.pop() || '';
178
+
179
+ for (const line of lines) {
180
+ const trimmed = line.trim();
181
+ if (!trimmed.startsWith('data: ')) continue;
182
+ const dataStr = trimmed.slice(6);
183
+ if (!dataStr) continue;
184
+
185
+ let chunk;
186
+ try { chunk = JSON.parse(dataStr); } catch { continue; }
187
+
188
+ const candidate = chunk.candidates?.[0];
189
+ if (!candidate) continue;
190
+
191
+ const parts = candidate.content?.parts || [];
192
+
193
+ // 首个 chunk 发送 role
194
+ if (!state.started && (parts.length > 0)) {
195
+ state.started = true;
196
+ output += encodeOpenAIEvent({
197
+ id: '',
198
+ object: 'chat.completion.chunk',
199
+ choices: [{ index: 0, delta: { role: 'assistant', content: null }, finish_reason: null }],
200
+ });
201
+ }
202
+
203
+ // 文本增量
204
+ const text = parts.filter(p => p.text).map(p => p.text).join('') || '';
205
+ if (text) {
206
+ output += encodeOpenAIEvent({
207
+ id: '',
208
+ object: 'chat.completion.chunk',
209
+ choices: [{ index: 0, delta: { content: text }, finish_reason: null }],
210
+ });
211
+ }
212
+
213
+ // functionCall 增量(去重,首次生成 ID 后缓存)
214
+ for (const part of parts) {
215
+ if (!part.functionCall) continue;
216
+ const key = part.functionCall.name + (typeof part.functionCall.args === 'string' ? part.functionCall.args : JSON.stringify(part.functionCall.args || {}));
217
+ if (state.sentFunctionCall.has(key)) continue;
218
+ const callId = generateCallId();
219
+ state.sentFunctionCall.set(key, callId);
220
+ output += encodeOpenAIEvent({
221
+ id: '',
222
+ object: 'chat.completion.chunk',
223
+ choices: [{
224
+ index: 0,
225
+ delta: {
226
+ tool_calls: [{
227
+ index: 0,
228
+ id: callId,
229
+ type: 'function',
230
+ function: { name: part.functionCall.name, arguments: JSON.stringify(part.functionCall.args || {}) },
231
+ }],
232
+ },
233
+ finish_reason: null,
234
+ }],
235
+ });
236
+ }
237
+
238
+ if (candidate.finishReason) {
239
+ const reason = mapFinishReason(candidate.finishReason);
240
+ output += encodeOpenAIEvent({
241
+ id: '',
242
+ object: 'chat.completion.chunk',
243
+ choices: [{ index: 0, delta: {}, finish_reason: reason }],
244
+ });
245
+ output += encodeOpenAIDone();
246
+ }
247
+ }
248
+
249
+ return output || null;
250
+ },
251
+ flush() { return ''; },
252
+ };
253
+ }
254
+
255
+ module.exports = { convertRequest, convertResponse, createSSEConverter };