@mmmbuto/zai-codex-bridge 0.1.5 → 0.1.6

Files changed (2)
  1. package/package.json +1 -1
  2. package/src/server.js +43 -9
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@mmmbuto/zai-codex-bridge",
-  "version": "0.1.5",
+  "version": "0.1.6",
   "description": "Local proxy that translates OpenAI Responses API format to Z.AI Chat Completions format for Codex",
   "main": "src/server.js",
   "bin": {
package/src/server.js CHANGED
@@ -104,7 +104,7 @@ function translateResponsesToChat(request) {
 
   // Build chat request
   const chatRequest = {
-    model: request.model,
+    model: request.model || 'glm-4.7', // Default to glm-4.7 (lowercase)
    messages: messages,
    stream: request.stream !== false // default true
  };
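
The one behavioral change here: requests that omit model no longer go upstream without one. Assuming the rest of translateResponsesToChat is unchanged, the effect is:

    // Hypothetical calls, request shape abbreviated:
    translateResponsesToChat({ input: 'hi' }).model                    // 'glm-4.7' (new fallback)
    translateResponsesToChat({ model: 'glm-4.6', input: 'hi' }).model  // 'glm-4.6' (unchanged)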
@@ -206,7 +206,9 @@ async function makeUpstreamRequest(path, body, headers) {
    path: path,
    cleanPath: cleanPath,
    base: ZAI_BASE_URL,
-    hasAuth: !!upstreamHeaders.Authorization
+    hasAuth: !!upstreamHeaders.Authorization,
+    bodyKeys: Object.keys(body),
+    bodyPreview: JSON.stringify(body).substring(0, 200)
  });
 
  const response = await fetch(url, {
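
The two added fields make the upstream debug entry show what is actually being sent: for the chatRequest built above, bodyKeys comes out as ['model', 'messages', 'stream'], and bodyPreview is the first 200 characters of the serialized body. Note that this can put prompt text into the logs.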
@@ -221,22 +223,34 @@ async function makeUpstreamRequest(path, body, headers) {
 /**
  * Handle streaming response from Z.AI
  */
-async function* streamChatToResponses(stream, res) {
+async function streamChatToResponses(stream, res) {
   const decoder = new TextDecoder();
   let buffer = '';
+  let chunkCount = 0;
+
+  log('debug', 'Starting to process stream');
 
   for await (const chunk of stream) {
     buffer += decoder.decode(chunk, { stream: true });
     const lines = buffer.split('\n');
     buffer = lines.pop() || '';
 
+    chunkCount++;
+
     for (const line of lines) {
-      if (!line.trim() || !line.startsWith('data: ')) continue;
+      if (!line.trim() || !line.startsWith('data: ')) {
+        if (line.trim() && !line.startsWith(':')) {
+          log('debug', 'Non-data line:', line.substring(0, 50));
+        }
+        continue;
+      }
 
       const data = line.slice(6).trim();
+      log('debug', 'SSE data:', data.substring(0, 100));
 
       // Check for stream end
       if (data === '[DONE]') {
+        log('debug', 'Stream end received');
         res.write(`event: completed\n`);
         res.write(`data: ${JSON.stringify({ status: 'completed' })}\n\n`);
         return;
@@ -244,17 +258,30 @@ async function* streamChatToResponses(stream, res) {
 
       try {
         const parsed = JSON.parse(data);
+        log('debug', 'Parsed SSE:', JSON.stringify(parsed).substring(0, 150));
+
         const delta = parsed.choices?.[0]?.delta;
 
-        if (delta?.content) {
+        // Z.AI uses reasoning_content instead of content
+        const content = delta?.content || delta?.reasoning_content || '';
+
+        if (content) {
+          log('debug', 'Writing delta:', content.substring(0, 30));
           res.write(`event: output.text.delta\n`);
-          res.write(`data: ${JSON.stringify({ value: delta.content })}\n\n`);
+          res.write(`data: ${JSON.stringify({ value: content })}\n\n`);
         }
       } catch (e) {
-        log('warn', 'Failed to parse SSE chunk:', e.message);
+        log('warn', 'Failed to parse SSE chunk:', e.message, 'data:', data.substring(0, 100));
       }
     }
+
+    if (chunkCount > 100) {
+      log('warn', 'Too many chunks, possible loop');
+      return;
+    }
   }
+
+  log('debug', 'Stream ended naturally');
 }
 
 /**
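
Net effect of the two hunks above: in 0.1.5, streamChatToResponses was an async generator, and the caller merely awaited the call; calling a generator function does not run its body, so streaming emitted nothing. Making it a plain async function is the actual fix. Beyond that, Z.AI's reasoning_content deltas are no longer dropped, and each stage logs at debug level. One caveat visible in the new code: the chunkCount guard returns after about 100 network chunks without emitting the completed event, so very long streams will be cut off. On the wire, the translation now looks roughly like this (upstream frame shapes inferred from the parsing code, not from captured traffic):

    Upstream Chat Completions SSE from Z.AI:

      data: {"choices":[{"delta":{"reasoning_content":"Hel"}}]}
      data: {"choices":[{"delta":{"content":"lo"}}]}
      data: [DONE]

    Bridge output written to the Codex client:

      event: output.text.delta
      data: {"value":"Hel"}

      event: output.text.delta
      data: {"value":"lo"}

      event: completed
      data: {"status":"completed"}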
@@ -287,7 +314,8 @@ async function handlePostRequest(req, res) {
   log('info', 'Incoming request:', {
     path,
     format: detectFormat(request),
-    model: request.model
+    model: request.model,
+    authHeader: req.headers['authorization'] || req.headers['Authorization'] || 'none'
   });
 
   let upstreamBody;
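
Worth knowing when reading these logs: authHeader records the raw Authorization value (the bearer token itself) at info level, which helps debug auth passthrough but means the logs now contain credentials. Also, Node's http module lower-cases incoming header names, so the second lookup ('Authorization') is always undefined; harmless, just redundant.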
@@ -330,13 +358,19 @@ async function handlePostRequest(req, res) {
 
   // Handle streaming response
   if (upstreamBody.stream) {
+    log('info', 'Starting streaming response');
     res.writeHead(200, {
       'Content-Type': 'text/event-stream; charset=utf-8',
       'Cache-Control': 'no-cache',
       'Connection': 'keep-alive'
     });
 
-    await streamChatToResponses(upstreamResponse.body, res);
+    try {
+      await streamChatToResponses(upstreamResponse.body, res);
+      log('info', 'Streaming completed');
+    } catch (e) {
+      log('error', 'Streaming error:', e);
+    }
     res.end();
   } else {
     // Non-streaming response
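
A quick end-to-end smoke test of the new streaming path, assuming Node 18+ (built-in fetch) and run as an ES module. The port and path below are placeholders; neither appears in this diff, so adjust them to whatever the bridge actually binds:

    // smoke-test.mjs (hypothetical port and path)
    const res = await fetch('http://localhost:1455/v1/responses', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer <your-zai-key>'  // forwarded upstream; now also logged
      },
      // `model` omitted on purpose: the bridge should fall back to glm-4.7
      body: JSON.stringify({ input: 'Say hello', stream: true })
    });

    // res.body is a web ReadableStream, async-iterable in Node 18+.
    const decoder = new TextDecoder();
    for await (const chunk of res.body) {
      process.stdout.write(decoder.decode(chunk, { stream: true }));
    }

Expected output is the event/data pairs shown earlier: a run of output.text.delta frames followed by one completed event.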