@mmmbuto/zai-codex-bridge 0.1.6 → 0.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2):
  1. package/package.json (+1 −1)
  2. package/src/server.js (+42 −7)
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@mmmbuto/zai-codex-bridge",
-  "version": "0.1.6",
+  "version": "0.1.8",
   "description": "Local proxy that translates OpenAI Responses API format to Z.AI Chat Completions format for Codex",
   "main": "src/server.js",
   "bin": {
package/src/server.js CHANGED
@@ -80,10 +80,24 @@ function translateResponsesToChat(request) {
       content: request.input
     });
   } else if (Array.isArray(request.input)) {
-    // Array of message objects
+    // Array of ResponseItem objects - filter only Message items with role
     for (const item of request.input) {
+      // Only process items with a 'role' field (Message items)
+      // Skip Reasoning, FunctionCall, LocalShellCall, etc.
+      if (!item.role) continue;
+
+      // Map non-standard roles to Z.AI-compatible roles
+      // Z.AI accepts: system, user, assistant
+      let role = item.role;
+      if (role === 'developer') {
+        role = 'user'; // Map developer to user
+      } else if (role !== 'system' && role !== 'user' && role !== 'assistant') {
+        // Skip any other non-standard roles
+        continue;
+      }
+
       const msg = {
-        role: item.role,
+        role: role,
         content: flattenContent(item.content)
       };
 
@@ -103,8 +117,10 @@ function translateResponsesToChat(request) {
   }
 
   // Build chat request
+  // Normalize model name to lowercase (Z.AI requirement)
+  const model = (request.model || 'glm-4.7').toLowerCase();
   const chatRequest = {
-    model: request.model || 'glm-4.7', // Default to glm-4.7 (lowercase)
+    model: model,
     messages: messages,
     stream: request.stream !== false // default true
   };
@@ -125,7 +141,21 @@ function translateResponsesToChat(request) {
   }
 
   if (request.tools && Array.isArray(request.tools)) {
-    chatRequest.tools = request.tools;
+    // Filter out tools with null or empty function
+    chatRequest.tools = request.tools.filter(tool => {
+      if (tool.type === 'function') {
+        // Check if function has required fields
+        return tool.function && typeof tool.function === 'object' &&
+          tool.function.name && tool.function.name.length > 0 &&
+          tool.function.parameters !== undefined && tool.function.parameters !== null;
+      }
+      // Keep non-function tools (if any)
+      return true;
+    });
+    // Only add tools array if there are valid tools
+    if (chatRequest.tools.length === 0) {
+      delete chatRequest.tools;
+    }
   }
 
   if (request.tool_choice) {
@@ -208,7 +238,9 @@ async function makeUpstreamRequest(path, body, headers) {
     base: ZAI_BASE_URL,
     hasAuth: !!upstreamHeaders.Authorization,
     bodyKeys: Object.keys(body),
-    bodyPreview: JSON.stringify(body).substring(0, 200)
+    bodyPreview: JSON.stringify(body).substring(0, 800),
+    messagesCount: body.messages?.length || 0,
+    allRoles: body.messages?.map(m => m.role) || []
   });
 
   const response = await fetch(url, {
@@ -227,6 +259,7 @@ async function streamChatToResponses(stream, res) {
   const decoder = new TextDecoder();
   let buffer = '';
   let chunkCount = 0;
+  let deltaCount = 0;
 
   log('debug', 'Starting to process stream');
 
@@ -250,9 +283,10 @@ async function streamChatToResponses(stream, res) {
 
       // Check for stream end
       if (data === '[DONE]') {
-        log('debug', 'Stream end received');
+        log('info', `Stream end received - wrote ${deltaCount} deltas total`);
         res.write(`event: completed\n`);
         res.write(`data: ${JSON.stringify({ status: 'completed' })}\n\n`);
+        log('info', 'Sent completed event');
         return;
       }
 
@@ -266,6 +300,7 @@ async function streamChatToResponses(stream, res) {
       const content = delta?.content || delta?.reasoning_content || '';
 
       if (content) {
+        deltaCount++;
         log('debug', 'Writing delta:', content.substring(0, 30));
         res.write(`event: output.text.delta\n`);
         res.write(`data: ${JSON.stringify({ value: content })}\n\n`);
@@ -281,7 +316,7 @@ async function streamChatToResponses(stream, res) {
     }
   }
 
-  log('debug', 'Stream ended naturally');
+  log('info', `Stream ended naturally - wrote ${deltaCount} deltas`);
 }
 
 /**