@mmmbuto/zai-codex-bridge 0.1.5 → 0.1.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/package.json +1 -1
  2. package/src/server.js +45 -9
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@mmmbuto/zai-codex-bridge",
-  "version": "0.1.5",
+  "version": "0.1.7",
   "description": "Local proxy that translates OpenAI Responses API format to Z.AI Chat Completions format for Codex",
   "main": "src/server.js",
   "bin": {
package/src/server.js CHANGED
@@ -103,8 +103,10 @@ function translateResponsesToChat(request) {
   }
 
   // Build chat request
+  // Normalize model name to lowercase (Z.AI requirement)
+  const model = (request.model || 'glm-4.7').toLowerCase();
   const chatRequest = {
-    model: request.model,
+    model: model,
     messages: messages,
     stream: request.stream !== false // default true
   };
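
Note: besides lowercasing, the added line also supplies a default when the client omits model. A minimal sketch of the combined behavior; the sample inputs are illustrative, only the 'glm-4.7' default comes from the diff:

  // Mirrors the normalization added above: a missing model falls back
  // to the default, and any casing is lowered for Z.AI.
  const normalizeModel = (m) => (m || 'glm-4.7').toLowerCase();
  normalizeModel('GLM-4.7');  // => 'glm-4.7'
  normalizeModel(undefined);  // => 'glm-4.7'
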
@@ -206,7 +208,9 @@ async function makeUpstreamRequest(path, body, headers) {
     path: path,
     cleanPath: cleanPath,
     base: ZAI_BASE_URL,
-    hasAuth: !!upstreamHeaders.Authorization
+    hasAuth: !!upstreamHeaders.Authorization,
+    bodyKeys: Object.keys(body),
+    bodyPreview: JSON.stringify(body).substring(0, 200)
   });
 
   const response = await fetch(url, {
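
Note: bodyPreview logs the first 200 characters of the raw request body, which will usually include prompt text. If that is a concern, a JSON.stringify replacer can keep the key structure while dropping string values; previewBody below is a hypothetical alternative, not part of the package:

  // Hypothetical: preview the body's shape without logging string values.
  function previewBody(body, limit = 200) {
    const redacted = JSON.stringify(body, (key, value) =>
      typeof value === 'string' ? `<${value.length} chars>` : value
    );
    return redacted.substring(0, limit);
  }
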
@@ -221,22 +225,34 @@ async function makeUpstreamRequest(path, body, headers) {
 /**
  * Handle streaming response from Z.AI
  */
-async function* streamChatToResponses(stream, res) {
+async function streamChatToResponses(stream, res) {
   const decoder = new TextDecoder();
   let buffer = '';
+  let chunkCount = 0;
+
+  log('debug', 'Starting to process stream');
 
   for await (const chunk of stream) {
     buffer += decoder.decode(chunk, { stream: true });
     const lines = buffer.split('\n');
     buffer = lines.pop() || '';
 
+    chunkCount++;
+
     for (const line of lines) {
-      if (!line.trim() || !line.startsWith('data: ')) continue;
+      if (!line.trim() || !line.startsWith('data: ')) {
+        if (line.trim() && !line.startsWith(':')) {
+          log('debug', 'Non-data line:', line.substring(0, 50));
+        }
+        continue;
+      }
 
       const data = line.slice(6).trim();
+      log('debug', 'SSE data:', data.substring(0, 100));
 
       // Check for stream end
       if (data === '[DONE]') {
+        log('debug', 'Stream end received');
         res.write(`event: completed\n`);
         res.write(`data: ${JSON.stringify({ status: 'completed' })}\n\n`);
         return;
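
Note: the buffer/split('\n')/pop() pattern in this hunk is what lets the parser survive SSE frames that arrive split across network chunks; the partial last line is carried into the next iteration. A standalone reproduction (the sample chunks are made up):

  // A 'data:' line split across two chunks is only emitted once complete.
  let buf = '';
  for (const chunk of ['data: {"a"', ':1}\ndata: [DONE]\n']) {
    buf += chunk;
    const lines = buf.split('\n');
    buf = lines.pop() || '';
    for (const line of lines) console.log(line);
  }
  // Prints "data: {"a":1}" and then "data: [DONE]"
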
@@ -244,17 +260,30 @@ async function* streamChatToResponses(stream, res) {
 
       try {
         const parsed = JSON.parse(data);
+        log('debug', 'Parsed SSE:', JSON.stringify(parsed).substring(0, 150));
+
         const delta = parsed.choices?.[0]?.delta;
 
-        if (delta?.content) {
+        // Z.AI uses reasoning_content instead of content
+        const content = delta?.content || delta?.reasoning_content || '';
+
+        if (content) {
+          log('debug', 'Writing delta:', content.substring(0, 30));
           res.write(`event: output.text.delta\n`);
-          res.write(`data: ${JSON.stringify({ value: delta.content })}\n\n`);
+          res.write(`data: ${JSON.stringify({ value: content })}\n\n`);
         }
       } catch (e) {
-        log('warn', 'Failed to parse SSE chunk:', e.message);
+        log('warn', 'Failed to parse SSE chunk:', e.message, 'data:', data.substring(0, 100));
      }
     }
+
+    if (chunkCount > 100) {
+      log('warn', 'Too many chunks, possible loop');
+      return;
+    }
   }
+
+  log('debug', 'Stream ended naturally');
 }
 
 /**
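
Note: two behavioral changes land in this function. First, streamChatToResponses is now a plain async function that writes to res directly rather than an async generator, matching how the call site awaits it. Second, deltas fall back to reasoning_content, which Z.AI emits instead of content per the new comment. The extraction can be isolated as below (extractDelta is a hypothetical helper, not in the package). Also worth noting: the new chunkCount > 100 guard returns after 100 network chunks, which would truncate a long but legitimate stream.

  // Hypothetical helper mirroring the delta handling above: prefer
  // `content`, fall back to Z.AI's `reasoning_content`, else empty.
  function extractDelta(sseData) {
    const delta = JSON.parse(sseData).choices?.[0]?.delta;
    return delta?.content || delta?.reasoning_content || '';
  }
  extractDelta('{"choices":[{"delta":{"reasoning_content":"hi"}}]}');  // => 'hi'
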
@@ -287,7 +316,8 @@ async function handlePostRequest(req, res) {
   log('info', 'Incoming request:', {
     path,
     format: detectFormat(request),
-    model: request.model
+    model: request.model,
+    authHeader: req.headers['authorization'] || req.headers['Authorization'] || 'none'
   });
 
   let upstreamBody;
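
Note: Node's http module lowercases incoming header names, so the req.headers['Authorization'] fallback never matches; more importantly, this line writes the caller's bearer token verbatim into the info log. A masked variant might look like this (maskAuth is a hypothetical helper, not part of the package):

  // Hypothetical: log only the scheme and the last 4 characters.
  function maskAuth(header) {
    if (!header) return 'none';
    const [scheme, token = ''] = header.split(' ');
    return `${scheme} ...${token.slice(-4)}`;
  }
  maskAuth('Bearer sk-abc123xyz9');  // => 'Bearer ...xyz9'
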
@@ -330,13 +360,19 @@ async function handlePostRequest(req, res) {
 
   // Handle streaming response
   if (upstreamBody.stream) {
+    log('info', 'Starting streaming response');
     res.writeHead(200, {
       'Content-Type': 'text/event-stream; charset=utf-8',
       'Cache-Control': 'no-cache',
       'Connection': 'keep-alive'
     });
 
-    await streamChatToResponses(upstreamResponse.body, res);
+    try {
+      await streamChatToResponses(upstreamResponse.body, res);
+      log('info', 'Streaming completed');
+    } catch (e) {
+      log('error', 'Streaming error:', e);
+    }
     res.end();
   } else {
     // Non-streaming response
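
Note: because res.writeHead(200, ...) has already been flushed when streaming starts, the new catch block cannot change the status code; it can only log before res.end() closes the connection. If the client should see the failure, one option is a final SSE error event before closing; a sketch under the assumption that Codex tolerates an extra event (the event name is not defined by this diff):

  // Sketch: surface a mid-stream failure to the SSE client before closing.
  try {
    await streamChatToResponses(upstreamResponse.body, res);
  } catch (e) {
    res.write(`event: error\n`);
    res.write(`data: ${JSON.stringify({ message: e.message })}\n\n`);
  } finally {
    res.end();
  }
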