@mmmbuto/zai-codex-bridge 0.1.4 → 0.1.6

Files changed (2)
  1. package/package.json +1 -1
  2. package/src/server.js +69 -26
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@mmmbuto/zai-codex-bridge",
-  "version": "0.1.4",
+  "version": "0.1.6",
   "description": "Local proxy that translates OpenAI Responses API format to Z.AI Chat Completions format for Codex",
   "main": "src/server.js",
   "bin": {
package/src/server.js CHANGED
@@ -71,31 +71,40 @@ function translateResponsesToChat(request) {
   });
   }
 
-  // Add messages from input array
-  if (request.input && Array.isArray(request.input)) {
-    for (const item of request.input) {
-      const msg = {
-        role: item.role,
-        content: flattenContent(item.content)
-      };
-
-      // Handle tool calls if present
-      if (item.tool_calls && Array.isArray(item.tool_calls)) {
-        msg.tool_calls = item.tool_calls;
-      }
+  // Handle input: can be string (simple user message) or array (message history)
+  if (request.input) {
+    if (typeof request.input === 'string') {
+      // Simple string input -> user message
+      messages.push({
+        role: 'user',
+        content: request.input
+      });
+    } else if (Array.isArray(request.input)) {
+      // Array of message objects
+      for (const item of request.input) {
+        const msg = {
+          role: item.role,
+          content: flattenContent(item.content)
+        };
+
+        // Handle tool calls if present
+        if (item.tool_calls && Array.isArray(item.tool_calls)) {
+          msg.tool_calls = item.tool_calls;
+        }
 
-      // Handle tool call ID for tool responses
-      if (item.tool_call_id) {
-        msg.tool_call_id = item.tool_call_id;
-      }
+        // Handle tool call ID for tool responses
+        if (item.tool_call_id) {
+          msg.tool_call_id = item.tool_call_id;
+        }
 
-      messages.push(msg);
+        messages.push(msg);
+      }
     }
   }
 
   // Build chat request
   const chatRequest = {
-    model: request.model,
+    model: request.model || 'glm-4.7', // Default to glm-4.7 (lowercase)
     messages: messages,
     stream: request.stream !== false // default true
   };
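
The rewritten block above makes `input` polymorphic: a bare string becomes a single user message, while an array is walked item by item with `tool_calls` and `tool_call_id` carried over. A minimal usage sketch, assuming `translateResponsesToChat` is exported from the module (the require path and export are assumptions, not part of this diff):

// Hypothetical import; adjust to however the module exposes the function.
const { translateResponsesToChat } = require('./src/server');

// String input -> single user message; model falls back to 'glm-4.7'
translateResponsesToChat({ input: 'hello' });
// roughly: { model: 'glm-4.7', messages: [{ role: 'user', content: 'hello' }], stream: true }

// Array input -> message history, tool metadata preserved
translateResponsesToChat({
  model: 'glm-4.7',
  input: [
    { role: 'system', content: 'You are terse.' },
    { role: 'tool', content: 'ok', tool_call_id: 'call_1' }
  ]
});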
@@ -197,7 +206,9 @@ async function makeUpstreamRequest(path, body, headers) {
     path: path,
     cleanPath: cleanPath,
     base: ZAI_BASE_URL,
-    hasAuth: !!upstreamHeaders.Authorization
+    hasAuth: !!upstreamHeaders.Authorization,
+    bodyKeys: Object.keys(body),
+    bodyPreview: JSON.stringify(body).substring(0, 200)
   });
 
   const response = await fetch(url, {
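
For orientation, the two new fields expose what is actually sent upstream: `bodyKeys` lists the body's top-level keys and `bodyPreview` truncates the serialized body to 200 characters. A hypothetical debug entry (all values here are illustrative, not taken from real logs):

{
  path: '/v1/chat/completions',
  cleanPath: '/chat/completions',
  base: ZAI_BASE_URL,
  hasAuth: true,
  bodyKeys: [ 'model', 'messages', 'stream' ],
  bodyPreview: '{"model":"glm-4.7","messages":[{"role":"user","content":"hel'
}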
@@ -212,22 +223,34 @@ async function makeUpstreamRequest(path, body, headers) {
 /**
  * Handle streaming response from Z.AI
  */
-async function* streamChatToResponses(stream, res) {
+async function streamChatToResponses(stream, res) {
   const decoder = new TextDecoder();
   let buffer = '';
+  let chunkCount = 0;
+
+  log('debug', 'Starting to process stream');
 
   for await (const chunk of stream) {
     buffer += decoder.decode(chunk, { stream: true });
     const lines = buffer.split('\n');
     buffer = lines.pop() || '';
 
+    chunkCount++;
+
     for (const line of lines) {
-      if (!line.trim() || !line.startsWith('data: ')) continue;
+      if (!line.trim() || !line.startsWith('data: ')) {
+        if (line.trim() && !line.startsWith(':')) {
+          log('debug', 'Non-data line:', line.substring(0, 50));
+        }
+        continue;
+      }
 
       const data = line.slice(6).trim();
+      log('debug', 'SSE data:', data.substring(0, 100));
 
       // Check for stream end
       if (data === '[DONE]') {
+        log('debug', 'Stream end received');
         res.write(`event: completed\n`);
         res.write(`data: ${JSON.stringify({ status: 'completed' })}\n\n`);
         return;
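
When the upstream sends `data: [DONE]`, the bridge terminates the client stream with the two writes shown above, which arrive on the wire as:

event: completed
data: {"status":"completed"}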
@@ -235,17 +258,30 @@ async function* streamChatToResponses(stream, res) {
 
       try {
         const parsed = JSON.parse(data);
+        log('debug', 'Parsed SSE:', JSON.stringify(parsed).substring(0, 150));
+
         const delta = parsed.choices?.[0]?.delta;
 
-        if (delta?.content) {
+        // Z.AI uses reasoning_content instead of content
+        const content = delta?.content || delta?.reasoning_content || '';
+
+        if (content) {
+          log('debug', 'Writing delta:', content.substring(0, 30));
           res.write(`event: output.text.delta\n`);
-          res.write(`data: ${JSON.stringify({ value: delta.content })}\n\n`);
+          res.write(`data: ${JSON.stringify({ value: content })}\n\n`);
         }
       } catch (e) {
-        log('warn', 'Failed to parse SSE chunk:', e.message);
+        log('warn', 'Failed to parse SSE chunk:', e.message, 'data:', data.substring(0, 100));
       }
     }
+
+    if (chunkCount > 100) {
+      log('warn', 'Too many chunks, possible loop');
+      return;
+    }
   }
+
+  log('debug', 'Stream ended naturally');
 }
 
 /**
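
The substantive fix in this hunk is the `reasoning_content` fallback; the logging is incidental. A self-contained sketch of the extraction, using a made-up Z.AI-style delta chunk (the chunk shape is inferred from the code comment above, not from Z.AI documentation):

// Hypothetical upstream SSE payload carrying reasoning_content instead of content
const data = '{"choices":[{"delta":{"reasoning_content":"Th"}}]}';
const parsed = JSON.parse(data);
const delta = parsed.choices?.[0]?.delta;
const content = delta?.content || delta?.reasoning_content || '';
// content === 'Th', so the bridge writes:
//   event: output.text.delta
//   data: {"value":"Th"}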
@@ -278,7 +314,8 @@ async function handlePostRequest(req, res) {
   log('info', 'Incoming request:', {
     path,
     format: detectFormat(request),
-    model: request.model
+    model: request.model,
+    authHeader: req.headers['authorization'] || req.headers['Authorization'] || 'none'
   });
 
   let upstreamBody;
@@ -321,13 +358,19 @@ async function handlePostRequest(req, res) {
 
   // Handle streaming response
   if (upstreamBody.stream) {
+    log('info', 'Starting streaming response');
     res.writeHead(200, {
       'Content-Type': 'text/event-stream; charset=utf-8',
       'Cache-Control': 'no-cache',
       'Connection': 'keep-alive'
     });
 
-    await streamChatToResponses(upstreamResponse.body, res);
+    try {
+      await streamChatToResponses(upstreamResponse.body, res);
+      log('info', 'Streaming completed');
+    } catch (e) {
+      log('error', 'Streaming error:', e);
+    }
     res.end();
   } else {
     // Non-streaming response
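
With the try/catch in place, a streaming failure is logged and the response is still closed with `res.end()`. A hypothetical smoke test against a locally running bridge (the port and path here are assumptions, not taken from this diff):

// Node 18+ (built-in fetch); the URL is a placeholder for your local setup.
async function smokeTest() {
  const res = await fetch('http://localhost:3000/v1/responses', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ input: 'Say hi', stream: true })
  });
  const decoder = new TextDecoder();
  for await (const chunk of res.body) {
    process.stdout.write(decoder.decode(chunk, { stream: true }));
  }
}
smokeTest();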