@mmmbuto/zai-codex-bridge 0.1.0

package/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2026 Davide A. Guglielmi
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,294 @@
+ # 🌉 Z.AI Codex Bridge
+
+ > **Local proxy that translates OpenAI Responses API format to Z.AI Chat Completions format**
+
+ [![npm](https://img.shields.io/npm/v/@mmmbuto/zai-codex-bridge?style=flat-square&logo=npm)](https://www.npmjs.com/package/@mmmbuto/zai-codex-bridge)
+ [![node](https://img.shields.io/node/v/@mmmbuto/zai-codex-bridge?style=flat-square&logo=node.js)](https://github.com/mmmbuto/zai-codex-bridge)
+ [![license](https://img.shields.io/npm/l/@mmmbuto/zai-codex-bridge?style=flat-square)](LICENSE)
+
+ ---
+
+ ## What It Solves
+
+ Codex uses the newer OpenAI **Responses API** format (with `instructions` and `input` fields), but Z.AI only supports the legacy **Chat Completions** format (with a `messages` array).
+
+ This proxy:
+ 1. Accepts Codex requests in **Responses format**
+ 2. Translates them to **Chat format**
+ 3. Forwards them to Z.AI
+ 4. Translates the response back to **Responses format**
+ 5. Returns it to Codex
+
+ **Without this proxy**, Codex fails with this error from Z.AI:
+ ```json
+ {"error":{"code":"1214","message":"Incorrect role information"}}
+ ```
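+
+ For illustration, here is roughly how the bridge rewrites a request (see `translateResponsesToChat` in `src/server.js`): `instructions` becomes the `system` message, and each `input` item becomes a chat message. This Responses-format request:
+
+ ```json
+ {
+   "model": "GLM-4.7",
+   "instructions": "Be brief",
+   "input": [{"role": "user", "content": "What is 2+2?"}]
+ }
+ ```
+
+ is forwarded to Z.AI as:
+
+ ```json
+ {
+   "model": "GLM-4.7",
+   "messages": [
+     {"role": "system", "content": "Be brief"},
+     {"role": "user", "content": "What is 2+2?"}
+   ],
+   "stream": true
+ }
+ ```
+
+ (`stream` defaults to `true` unless the request explicitly sets `"stream": false`.)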
+
+ ---
+
+ ## Features
+
+ ✅ **Transparent translation** between Responses and Chat formats
+ ✅ **Streaming support** with SSE (Server-Sent Events)
+ ✅ **Zero dependencies** - uses Node.js built-ins only
+ ✅ **Auto-starting ZSH functions** - the proxy runs only while Codex runs
+ ✅ **Health check** at the `/health` endpoint
+ ✅ **Configurable** via CLI flags and environment variables
+
+ ---
+
+ ## Requirements
+
+ - **Node.js**: 18.0.0 or higher (for native `fetch`)
+ - **Platform**: Linux, macOS, Windows (WSL), Termux (ARM64)
+ - **Port**: 31415 (default, configurable)
+
+ ---
+
+ ## Installation
+
+ ### Global Install (Recommended)
+
+ ```bash
+ npm install -g @mmmbuto/zai-codex-bridge
+ ```
+
+ ### Local Install
+
+ ```bash
+ npm install @mmmbuto/zai-codex-bridge
+ npx zai-codex-bridge
+ ```
+
+ ### From Source
+
+ ```bash
+ git clone https://github.com/DioNanos/zai-codex-bridge.git
+ cd zai-codex-bridge
+ npm install
+ npm link
+ ```
+
+ ---
+
+ ## Quick Start
+
+ ### 1. Configure Codex
+
+ Add to `~/.codex/config.toml`:
+
+ ```toml
+ [model_providers.zai_glm_proxy]
+ name = "ZAI GLM via local proxy"
+ base_url = "http://127.0.0.1:31415/v1"
+ env_key = "OPENAI_API_KEY"
+ wire_api = "responses"
+ stream_idle_timeout_ms = 3000000
+ ```
+
+ ### 2. Add ZSH Functions
+
+ Add to `~/.zshrc`:
+
+ ```bash
+ _codex_glm_with_proxy () {
+   local KEY="$1"; shift
+   local HOST="127.0.0.1"
+   local PORT="31415"
+   local HEALTH="http://${HOST}:${PORT}/health"
+   local LOGFILE="${TMPDIR:-/tmp}/zai-codex-bridge.log"
+   local PROXY_PID=""
+
+   # Start the proxy only if it is not responding
+   if ! curl -fsS "$HEALTH" >/dev/null 2>&1; then
+     zai-codex-bridge --host "$HOST" --port "$PORT" --log-level info >"$LOGFILE" 2>&1 &
+     PROXY_PID=$!
+     trap '[[ -n "$PROXY_PID" ]] && kill "$PROXY_PID" 2>/dev/null' EXIT INT TERM
+     for i in {1..40}; do
+       curl -fsS "$HEALTH" >/dev/null 2>&1 && break
+       sleep 0.05
+     done
+   fi
+
+   OPENAI_API_KEY="$KEY" codex -m "GLM-4.7" -c model_provider="zai_glm_proxy" "$@"
+ }
+
+ codex-glm-a () { _codex_glm_with_proxy "$ZAI_API_KEY_A" "$@"; }
+ codex-glm-p () { _codex_glm_with_proxy "$ZAI_API_KEY_P" "$@"; }
+ ```
+
+ ### 3. Set API Keys
+
+ ```bash
+ export ZAI_API_KEY_A="sk-your-key-here"
+ export ZAI_API_KEY_P="sk-your-key-here"
+ ```
+
+ ### 4. Use It
+
+ ```bash
+ source ~/.zshrc
+ codex-glm-a
+ ```
+
+ ---
+
+ ## CLI Usage
+
+ ```bash
+ # Start with defaults (http://127.0.0.1:31415)
+ zai-codex-bridge
+
+ # Custom port
+ zai-codex-bridge --port 8080
+
+ # Enable debug logging
+ zai-codex-bridge --log-level debug
+
+ # Custom Z.AI endpoint
+ zai-codex-bridge --zai-base-url https://custom.z.ai/v1
+
+ # Show help
+ zai-codex-bridge --help
+ ```
+
+ ### Environment Variables
+
+ ```bash
+ export PORT=31415
+ export HOST=127.0.0.1
+ export ZAI_BASE_URL=https://api.z.ai/api/coding/paas/v4
+ export LOG_LEVEL=info
+ ```
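+
+ The CLI wrapper in `bin/zai-codex-bridge` simply maps flags onto these variables and then requires `src/server.js`, which starts listening as soon as it is loaded. So the bridge can also be started from your own Node script; a minimal sketch, assuming the package is installed locally:
+
+ ```js
+ // start-bridge.js — loading the package's main module (src/server.js)
+ // starts the HTTP server as a side effect of require().
+ process.env.PORT = process.env.PORT || '31415';
+ process.env.HOST = process.env.HOST || '127.0.0.1';
+ require('@mmmbuto/zai-codex-bridge');
+ ```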
+
+ ---
+
+ ## API Endpoints
+
+ ### `POST /responses`
+ Accepts the OpenAI Responses API format, translates it to Chat, forwards it to Z.AI, and returns the reply in Responses format. Requests that already use the Chat format are passed through unchanged.
+
+ ### `POST /v1/responses`
+ Same as `/responses` (for compatibility with Codex's path structure).
+
+ ### `GET /health`
+ Health check endpoint; returns `{"ok":true}`.
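+
+ For reference, a non-streaming reply is returned in the bridge's Responses shape (built by `translateChatToResponses` in `src/server.js`); the token counts below are illustrative:
+
+ ```json
+ {
+   "output": [{"value": "4", "content_type": "text"}],
+   "status": "completed",
+   "usage": {"input_tokens": 25, "output_tokens": 2, "total_tokens": 27}
+ }
+ ```
+
+ With `"stream": true`, the bridge re-emits Z.AI's SSE stream as `output.text.delta` events followed by a final `completed` event:
+
+ ```
+ event: output.text.delta
+ data: {"value":"4"}
+
+ event: completed
+ data: {"status":"completed"}
+ ```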
+
+ ---
+
+ ## Testing
+
+ ```bash
+ # Set your Z.AI API key
+ export ZAI_API_KEY="sk-your-key"
+
+ # Run the test suite
+ npm run test:curl
+ ```
+
+ ### Manual Test
+
+ ```bash
+ # Start the proxy
+ zai-codex-bridge &
+
+ # Test health
+ curl http://127.0.0.1:31415/health
+
+ # Test the Responses API
+ curl -X POST http://127.0.0.1:31415/v1/responses \
+   -H "Content-Type: application/json" \
+   -H "Authorization: Bearer $ZAI_API_KEY" \
+   -d '{
+     "model": "GLM-4.7",
+     "instructions": "Be brief",
+     "input": [{"role": "user", "content": "What is 2+2?"}],
+     "stream": false
+   }'
+ ```
+
+ ---
+
+ ## Architecture
+
+ ```
+ Codex (Responses API)
+         ↓
+ Proxy (localhost:31415)
+         ↓
+ Translation: Responses → Chat
+         ↓
+ Z.AI (Chat Completions)
+         ↓
+ Translation: Chat → Responses
+         ↓
+ Codex
+ ```
+
+ ---
+
+ ## Documentation
+
+ 📚 **Complete Guide**: [docs/guide.md](docs/guide.md)
+
+ The guide includes:
+ - Detailed setup instructions
+ - Usage examples
+ - Troubleshooting
+ - Function reference
+ - Advanced configuration
+
+ ---
+
+ ## Troubleshooting
+
+ ### Port Already in Use
+
+ ```bash
+ lsof -i :31415
+ kill -9 <PID>
+ ```
+
+ ### Connection Refused
+
+ ```bash
+ curl http://127.0.0.1:31415/health
+ zai-codex-bridge --log-level debug
+ ```
+
+ ### Z.AI Errors
+
+ Verify your API key and test Z.AI directly:
+ ```bash
+ curl -X POST https://api.z.ai/api/coding/paas/v4/chat/completions \
+   -H "Authorization: Bearer $ZAI_API_KEY" \
+   -H "Content-Type: application/json" \
+   -d '{"model":"GLM-4.7","messages":[{"role":"user","content":"test"}]}'
+ ```
+
+ ---
+
+ ## Development
+
+ ```bash
+ # Clone the repo
+ git clone https://github.com/DioNanos/zai-codex-bridge.git
+ cd zai-codex-bridge
+
+ # Run in development
+ npm start
+
+ # Run tests
+ npm run test:curl
+
+ # Link globally
+ npm link
+ ```
+
+ ---
+
+ ## License
+
+ MIT License - Copyright (c) 2026 Davide A. Guglielmi
+
+ See [LICENSE](LICENSE) for details.
package/bin/zai-codex-bridge ADDED
@@ -0,0 +1,129 @@
+ #!/usr/bin/env node
+
+ /**
+  * zai-codex-bridge CLI
+  *
+  * Usage: zai-codex-bridge [options]
+  *
+  * Options:
+  *   --port <number>        Port to listen on (default: 31415)
+  *   --host <string>        Host to bind to (default: 127.0.0.1)
+  *   --zai-base-url <url>   Z.AI base URL (default: https://api.z.ai/api/coding/paas/v4)
+  *   --log-level <level>    Log level: debug|info (default: info)
+  *   -h, --help             Show this help
+  */
+
+ const path = require('path');
+ const fs = require('fs');
+
+ // Parse CLI arguments
+ function parseArgs(argv) {
+   const args = {
+     port: null,
+     host: null,
+     zaiBaseUrl: null,
+     logLevel: null,
+     help: false
+   };
+
+   for (let i = 2; i < argv.length; i++) {
+     const arg = argv[i];
+
+     if (arg === '-h' || arg === '--help') {
+       args.help = true;
+       continue;
+     }
+
+     if (arg === '--port' && i + 1 < argv.length) {
+       args.port = parseInt(argv[++i], 10);
+       continue;
+     }
+
+     if (arg === '--host' && i + 1 < argv.length) {
+       args.host = argv[++i];
+       continue;
+     }
+
+     if (arg === '--zai-base-url' && i + 1 < argv.length) {
+       args.zaiBaseUrl = argv[++i];
+       continue;
+     }
+
+     if (arg === '--log-level' && i + 1 < argv.length) {
+       args.logLevel = argv[++i];
+       continue;
+     }
+   }
+
+   return args;
+ }
+
+ // Show help
+ function showHelp() {
+   console.log(`
+ zai-codex-bridge - Proxy for Codex to use Z.AI GLM models
+
+ USAGE:
+   zai-codex-bridge [options]
+
+ OPTIONS:
+   --port <number>        Port to listen on (default: 31415)
+   --host <string>        Host to bind to (default: 127.0.0.1)
+   --zai-base-url <url>   Z.AI base URL
+                          (default: https://api.z.ai/api/coding/paas/v4)
+   --log-level <level>    Log level: debug|info (default: info)
+   -h, --help             Show this help
+
+ ENVIRONMENT VARIABLES:
+   PORT           Override default port
+   HOST           Override default host
+   ZAI_BASE_URL   Override Z.AI base URL
+   LOG_LEVEL      Override log level
+
+ EXAMPLES:
+   # Start with defaults
+   zai-codex-bridge
+
+   # Start on custom port
+   zai-codex-bridge --port 8080
+
+   # Enable debug logging
+   zai-codex-bridge --log-level debug
+
+   # Point to different Z.AI endpoint
+   zai-codex-bridge --zai-base-url https://custom.z.ai/v1
+
+ HEALTH CHECK:
+   curl http://127.0.0.1:31415/health
+
+ For more information, see: https://github.com/mmmbuto/zai-codex-bridge
+ `);
+ }
+
+ // Main
+ function main() {
+   const args = parseArgs(process.argv);
+
+   if (args.help) {
+     showHelp();
+     process.exit(0);
+   }
+
+   // Set environment variables from CLI args
+   if (args.port) process.env.PORT = String(args.port);
+   if (args.host) process.env.HOST = args.host;
+   if (args.zaiBaseUrl) process.env.ZAI_BASE_URL = args.zaiBaseUrl;
+   if (args.logLevel) process.env.LOG_LEVEL = args.logLevel;
+
+   // Start the server
+   const serverPath = path.join(__dirname, '..', 'src', 'server.js');
+
+   if (!fs.existsSync(serverPath)) {
+     console.error('Error: server.js not found at', serverPath);
+     process.exit(1);
+   }
+
+   require(serverPath);
+ }
+
+ main();
package/docs/guide.md ADDED
@@ -0,0 +1,405 @@
+ # Z.AI GLM Proxy - Complete Guide
+
+ **Purpose**: Practical guide for using GLM-4.7 via Z.AI with Codex CLI through the `zai-codex-bridge` proxy.
+
+ ---
+
+ ## Quick Start
+
+ ### First Time Setup
+
+ ```bash
+ # 1. Install the proxy globally
+ cd ~/Dev/zai-codex-bridge
+ npm install -g .
+
+ # 2. Reload .zshrc for the new functions
+ source ~/.zshrc
+
+ # 3. Start Codex with GLM-4.7
+ codex-glm-a
+ ```
+
+ ### Daily Usage
+
+ ```bash
+ # Start with account A
+ codex-glm-a
+
+ # Start with account P
+ codex-glm-p
+
+ # Exit Codex
+ Ctrl+D
+ # The proxy is automatically terminated
+ ```
+
+ ---
+
+ ## Prerequisites
+
+ ### 1. Environment Variables
+
+ Ensure your API keys are in your `.zshrc` or `.zshenv`:
+
+ ```bash
+ # Z.AI API Keys
+ export ZAI_API_KEY_A="sk-your-key-account-a"
+ export ZAI_API_KEY_P="sk-your-key-account-p"
+ ```
+
+ ### 2. Proxy Installed
+
+ ```bash
+ # Global installation (recommended)
+ cd ~/Dev/zai-codex-bridge
+ npm install -g .
+
+ # Verify installation
+ which zai-codex-bridge
+ ```
+
+ ### 3. Codex Configuration
+
+ The file `~/.codex/config.toml` must contain:
+
+ ```toml
+ [model_providers.zai_glm_proxy]
+ name = "ZAI GLM via local proxy"
+ base_url = "http://127.0.0.1:31415/v1"
+ env_key = "OPENAI_API_KEY"
+ wire_api = "responses"
+ stream_idle_timeout_ms = 3000000
+ ```
+
+ ---
+
+ ## How It Works
+
+ ### Architecture
+
+ ```
+ .zshrc → _codex_glm_with_proxy() → Codex → Proxy (port 31415) → Z.AI API
+
+ Translates:
+ Responses → Chat
+ Chat → Responses
+ ```
+
+ ### Execution Flow
+
+ 1. User runs `codex-glm-a`
+ 2. The function checks whether the proxy is active on port 31415
+ 3. If not:
+    - Starts the proxy in the background
+    - Saves its PID
+    - Sets a trap for cleanup
+    - Waits up to 2 seconds for the proxy to be ready
+ 4. Codex starts with provider `zai_glm_proxy`
+ 5. Request: Codex → Proxy → Translated to Chat → Z.AI
+ 6. Response: Z.AI → Chat → Proxy → Translated to Responses → Codex
+ 7. User exits Codex (Ctrl+D)
+ 8. The trap kills the proxy (only if this function started it)
+
+ ---
+
+ ## Usage Examples
+
+ ### Example 1: Simple Conversation
+
+ ```bash
+ # Start
+ codex-glm-a
+
+ # In Codex prompt:
+ > What is 23 * 47?
+
+ # GLM-4.7 responds:
+ > The result of 23 * 47 is 1081.
+
+ # Exit
+ Ctrl+D
+ # Proxy automatically terminated
+ ```
+
+ ### Example 2: Code Analysis
+
+ ```bash
+ # Start with additional arguments
+ codex-glm-a -s workspace-write
+
+ # In Codex prompt:
+ > Read main.py and tell me what it does
+
+ # Codex will read the file and GLM-4.7 will analyze the code
+ ```
+
+ ### Example 3: Multi-Session
+
+ ```bash
+ # Terminal 1
+ codex-glm-a
+ # Proxy starts (PID 12345)
+
+ # Terminal 2 (while terminal 1 is still open)
+ codex-glm-a
+ # Proxy is ALREADY ACTIVE and gets reused
+ # No new process spawned
+
+ # Terminal 1: Ctrl+D
+ # Codex exits but the proxy STAYS active (for terminal 2)
+
+ # Terminal 2: Ctrl+D
+ # Codex exits but the proxy STAYS active (this shell didn't start it)
+ ```
+
+ ### Example 4: Debug Proxy
+
+ ```bash
+ # Start the proxy manually with debug logging
+ zai-codex-bridge --log-level debug --port 31415
+
+ # In another terminal, test
+ curl http://127.0.0.1:31415/health
+ # {"ok":true}
+
+ # Now start codex manually
+ OPENAI_API_KEY="$ZAI_API_KEY_A" \
+   codex -m "GLM-4.7" -c model_provider="zai_glm_proxy"
+ ```
+
+ ---
+
+ ## Troubleshooting
+
+ ### Proxy Won't Start
+
+ **Symptom**: `codex-glm-a` hangs or errors
+
+ **Solution**:
+ ```bash
+ # Verify installation
+ which zai-codex-bridge
+
+ # If not found, reinstall
+ cd ~/Dev/zai-codex-bridge
+ npm install -g .
+ ```
+
+ ### Port Already in Use
+
+ **Symptom**: `Error: listen EADDRINUSE: address already in use :::31415`
+
+ **Solution**:
+ ```bash
+ # Find the process
+ lsof -i :31415
+ # or
+ netstat -tulpn | grep 31415
+
+ # If it's an old proxy, kill it
+ kill -9 <PID>
+
+ # Or, if it's a healthy bridge instance, simply reuse it:
+ codex-glm-a  # reuses the existing proxy
+ ```
+
+ ### Z.AI Error 1214
+
+ **Symptom**: `{"error":{"code":"1214","message":"Incorrect role information"}}`
+
+ **Cause**: Using the wrong provider
+
+ **Solution**:
+ ```bash
+ # Verify in ~/.codex/config.toml:
+ # You must use: model_provider="zai_glm_proxy"
+ # NOT: model_provider="zai"
+ ```
+
+ ### Proxy Won't Stop
+
+ **Symptom**: Proxy remains active after closing Codex
+
+ **Solution**:
+ ```bash
+ # Find and kill it manually
+ ps aux | grep zai-codex-bridge
+ kill <PID>
+
+ # Or use a dedicated command
+ pkill -f zai-codex-bridge
+ ```
+
+ ### Debug Logging
+
+ ```bash
+ # Proxy logs are in:
+ cat /tmp/zai-codex-bridge.log
+
+ # For real-time logs:
+ tail -f /tmp/zai-codex-bridge.log
+
+ # Restart with debug (set --log-level debug in the function; see Advanced Configuration):
+ codex-glm-a  # the proxy will write detailed logs
+ ```
+
+ ---
+
+ ## Health Checks
+
+ ### Health Check
+
+ ```bash
+ # Verify the proxy is active
+ curl http://127.0.0.1:31415/health
+ # Expected: {"ok":true}
+ ```
+
+ ### Verify Configuration
+
+ ```bash
+ # Check the provider in config
+ grep -A 5 "zai_glm_proxy" ~/.codex/config.toml
+
+ # Check the functions in .zshrc
+ grep -A 30 "_codex_glm_with_proxy" ~/.zshrc
+ ```
+
+ ### Full Test
+
+ ```bash
+ # 1. Test the proxy
+ zai-codex-bridge --help
+
+ # 2. Test health
+ curl http://127.0.0.1:31415/health
+
+ # 3. Test Codex
+ codex-glm-a --help
+
+ # 4. Interactive test
+ codex-glm-a
+ # > Tell me 2+2
+ # Ctrl+D
+ ```
+
+ ---
+
+ ## Function Reference
+
+ ### `_codex_glm_with_proxy()`
+
+ **Parameters**:
+ - `$1` - API key to use (`$ZAI_API_KEY_A` or `$ZAI_API_KEY_P`)
+ - `$@` - Additional arguments passed to Codex
+
+ **Behavior**:
+ 1. Checks health at `http://127.0.0.1:31415/health`
+ 2. If that fails:
+    - Starts `zai-codex-bridge` in the background
+    - Redirects the log to `/tmp/zai-codex-bridge.log`
+    - Saves the PID in `$PROXY_PID`
+    - Sets a trap to kill the proxy on EXIT
+    - Waits up to 2 seconds for the proxy to be ready
+ 3. Executes `codex` with provider `zai_glm_proxy`
+
+ ### `codex-glm-a()`
+
+ **Usage**: Account A
+ ```bash
+ codex-glm-a [codex options]
+ ```
+
+ **Example**:
+ ```bash
+ codex-glm-a -s workspace-write
+ codex-glm-a --help
+ ```
+
+ ### `codex-glm-p()`
+
+ **Usage**: Account P
+ ```bash
+ codex-glm-p [codex options]
+ ```
+
+ ---
+
+ ## Advanced Configuration
+
+ ### Change Port
+
+ Modify the function in `~/.zshrc`:
+
+ ```bash
+ _codex_glm_with_proxy () {
+   local KEY="$1"; shift
+   local HOST="127.0.0.1"
+   local PORT="8080"  # ← Change here
+   # ...
+ }
+ ```
+
+ And update `~/.codex/config.toml`:
+
+ ```toml
+ [model_providers.zai_glm_proxy]
+ base_url = "http://127.0.0.1:8080/v1"
+ ```
+
+ ### Verbose Logging
+
+ Modify the log level in the proxy command:
+
+ ```bash
+ zai-codex-bridge --host "$HOST" --port "$PORT" --log-level debug >"$LOGFILE" 2>&1 &
+ ```
+
+ ---
+
+ ## Performance
+
+ ### Startup Times
+
+ | Operation | Time |
+ |-----------|------|
+ | Health check (proxy running) | ~50ms |
+ | Health check (proxy not running) | ~2s |
+ | Proxy startup | ~500ms |
+ | Codex startup | ~1s |
+ | **Total (first time)** | **~3.5s** |
+ | **Total (proxy already up)** | **~1.5s** |
+
+ ### Memory
+
+ | Process | RAM |
+ |----------|-----|
+ | zai-codex-bridge | ~30-50MB |
+ | codex CLI | ~100-200MB |
+ | **Total** | **~130-250MB** |
+
+ ---
+
+ ## Setup Checklist
+
+ - [ ] Proxy installed: `npm install -g @mmmbuto/zai-codex-bridge`
+ - [ ] API keys set: `export ZAI_API_KEY_A=...`
+ - [ ] Provider configured: `[model_providers.zai_glm_proxy]` in config.toml
+ - [ ] Functions updated: `source ~/.zshrc`
+ - [ ] Test health: `curl http://127.0.0.1:31415/health`
+ - [ ] Full test: `codex-glm-a`
+
+ ---
+
+ ## Related Files
+
+ - **Proxy Code**: `~/Dev/zai-codex-bridge/`
+ - **Codex Config**: `~/.codex/config.toml`
+ - **ZSH Functions**: `~/.zshrc`
+
+ ---
+
+ ## License
+
+ MIT
package/package.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "name": "@mmmbuto/zai-codex-bridge",
+   "version": "0.1.0",
+   "description": "Local proxy that translates OpenAI Responses API format to Z.AI Chat Completions format for Codex",
+   "main": "src/server.js",
+   "bin": {
+     "zai-codex-bridge": "./bin/zai-codex-bridge"
+   },
+   "scripts": {
+     "start": "node src/server.js",
+     "test:curl": "node scripts/test-curl.js"
+   },
+   "keywords": [
+     "codex",
+     "zai",
+     "glm",
+     "proxy",
+     "openai",
+     "responses-api",
+     "chat-completions"
+   ],
+   "author": "Davide A. Guglielmi",
+   "license": "MIT",
+   "engines": {
+     "node": ">=18.0.0"
+   },
+   "repository": {
+     "type": "git",
+     "url": "https://github.com/DioNanos/zai-codex-bridge.git"
+   }
+ }
package/scripts/test-curl.js ADDED
@@ -0,0 +1,161 @@
+ #!/usr/bin/env node
+
+ /**
+  * Manual test script for zai-codex-bridge
+  *
+  * Tests the proxy with a minimal Responses API payload.
+  */
+
+ const http = require('http');
+
+ const PROXY_HOST = process.env.PROXY_HOST || '127.0.0.1';
+ const PROXY_PORT = process.env.PROXY_PORT || '31415';
+ const ZAI_API_KEY = process.env.ZAI_API_KEY || process.env.OPENAI_API_KEY || '';
+
+ async function testHealth() {
+   console.log('\n=== Testing Health Endpoint ===\n');
+
+   return new Promise((resolve, reject) => {
+     const options = {
+       hostname: PROXY_HOST,
+       port: PROXY_PORT,
+       path: '/health',
+       method: 'GET'
+     };
+
+     const req = http.request(options, (res) => {
+       let body = '';
+       res.on('data', (chunk) => body += chunk);
+       res.on('end', () => {
+         console.log('Status:', res.statusCode);
+         console.log('Response:', body);
+         resolve();
+       });
+     });
+
+     req.on('error', reject);
+     req.end();
+   });
+ }
+
+ async function testResponsesFormat() {
+   console.log('\n=== Testing POST /v1/responses (Non-Streaming) ===\n');
+
+   const payload = {
+     model: 'GLM-4.7',
+     instructions: 'You are a helpful assistant. Be brief.',
+     input: [
+       {
+         role: 'user',
+         content: 'What is 2+2? Answer with just the number.'
+       }
+     ],
+     stream: false
+   };
+
+   return new Promise((resolve, reject) => {
+     const options = {
+       hostname: PROXY_HOST,
+       port: PROXY_PORT,
+       path: '/v1/responses',
+       method: 'POST',
+       headers: {
+         'Content-Type': 'application/json',
+         'Authorization': `Bearer ${ZAI_API_KEY}`
+       }
+     };
+
+     const req = http.request(options, (res) => {
+       let body = '';
+       res.on('data', (chunk) => body += chunk);
+       res.on('end', () => {
+         console.log('Status:', res.statusCode);
+         console.log('Response:', body);
+         try {
+           const parsed = JSON.parse(body);
+           console.log('\nParsed output:', parsed.output?.[0]?.value);
+         } catch (e) {
+           console.log('\nFailed to parse response');
+         }
+         resolve();
+       });
+     });
+
+     req.on('error', reject);
+     req.write(JSON.stringify(payload, null, 2));
+     req.end();
+   });
+ }
+
+ async function testStreamingFormat() {
+   console.log('\n=== Testing POST /v1/responses (Streaming) ===\n');
+
+   const payload = {
+     model: 'GLM-4.7',
+     instructions: 'You are a helpful assistant.',
+     input: [
+       {
+         role: 'user',
+         content: 'Count from 1 to 3.'
+       }
+     ],
+     stream: true
+   };
+
+   return new Promise((resolve, reject) => {
+     const options = {
+       hostname: PROXY_HOST,
+       port: PROXY_PORT,
+       path: '/v1/responses',
+       method: 'POST',
+       headers: {
+         'Content-Type': 'application/json',
+         'Authorization': `Bearer ${ZAI_API_KEY}`
+       }
+     };
+
+     const req = http.request(options, (res) => {
+       console.log('Status:', res.statusCode);
+       console.log('Headers:', JSON.stringify(res.headers, null, 2));
+       console.log('\nStreaming response:');
+
+       res.on('data', (chunk) => {
+         process.stdout.write(chunk);
+       });
+
+       res.on('end', () => {
+         console.log('\n\n=== Stream Complete ===');
+         resolve();
+       });
+     });
+
+     req.on('error', reject);
+     req.write(JSON.stringify(payload, null, 2));
+     req.end();
+   });
+ }
+
+ async function main() {
+   console.log('zai-codex-bridge Manual Test');
+   console.log('================================');
+   console.log('Proxy:', `http://${PROXY_HOST}:${PROXY_PORT}`);
+   console.log('API Key:', ZAI_API_KEY ? 'Set' : 'NOT SET - set ZAI_API_KEY or OPENAI_API_KEY');
+
+   if (!ZAI_API_KEY) {
+     console.error('\nError: No API key found. Set ZAI_API_KEY or OPENAI_API_KEY environment variable.');
+     process.exit(1);
+   }
+
+   try {
+     await testHealth();
+     await testResponsesFormat();
+     await testStreamingFormat();
+
+     console.log('\n=== All Tests Complete ===\n');
+   } catch (error) {
+     console.error('\nError:', error.message);
+     process.exit(1);
+   }
+ }
+
+ main();
package/src/server.js ADDED
@@ -0,0 +1,371 @@
+ #!/usr/bin/env node
+
+ /**
+  * zai-codex-bridge
+  *
+  * Local proxy that translates OpenAI Responses API format to Z.AI Chat Completions format.
+  * Allows Codex to use Z.AI GLM models through the /responses endpoint.
+  *
+  * Author: Davide A. Guglielmi
+  * License: MIT
+  */
+
+ const http = require('http');
+
+ // Configuration from environment
+ const PORT = parseInt(process.env.PORT || '31415', 10);
+ const HOST = process.env.HOST || '127.0.0.1';
+ const ZAI_BASE_URL = process.env.ZAI_BASE_URL || 'https://api.z.ai/api/coding/paas/v4';
+ const LOG_LEVEL = process.env.LOG_LEVEL || 'info';
+
+ /**
+  * Logger
+  */
+ function log(level, ...args) {
+   const levels = { debug: 0, info: 1, warn: 2, error: 3 };
+   if (levels[level] >= levels[LOG_LEVEL]) {
+     console.error(`[${level.toUpperCase()}]`, new Date().toISOString(), ...args);
+   }
+ }
+
+ /**
+  * Detect if request body is Responses format or Chat format
+  */
+ function detectFormat(body) {
+   if (body.instructions !== undefined || body.input !== undefined) {
+     return 'responses';
+   }
+   if (body.messages !== undefined) {
+     return 'chat';
+   }
+   return 'unknown';
+ }
+
+ /**
+  * Flatten content parts to string
+  */
+ function flattenContent(content) {
+   if (typeof content === 'string') {
+     return content;
+   }
+   if (Array.isArray(content)) {
+     const textParts = content
+       .filter(part => part.type === 'text' && part.text)
+       .map(part => part.text);
+     return textParts.length > 0 ? textParts.join('\n') : JSON.stringify(content);
+   }
+   return String(content);
+ }
+
+ /**
+  * Translate Responses format to Chat Completions format
+  */
+ function translateResponsesToChat(request) {
+   const messages = [];
+
+   // Add system message from instructions
+   if (request.instructions) {
+     messages.push({
+       role: 'system',
+       content: request.instructions
+     });
+   }
+
+   // Add messages from input array
+   if (request.input && Array.isArray(request.input)) {
+     for (const item of request.input) {
+       const msg = {
+         role: item.role,
+         content: flattenContent(item.content)
+       };
+
+       // Handle tool calls if present
+       if (item.tool_calls && Array.isArray(item.tool_calls)) {
+         msg.tool_calls = item.tool_calls;
+       }
+
+       // Handle tool call ID for tool responses
+       if (item.tool_call_id) {
+         msg.tool_call_id = item.tool_call_id;
+       }
+
+       messages.push(msg);
+     }
+   }
+
+   // Build chat request
+   const chatRequest = {
+     model: request.model,
+     messages: messages,
+     stream: request.stream !== false // default true
+   };
+
+   // Map optional fields
+   if (request.max_output_tokens) {
+     chatRequest.max_tokens = request.max_output_tokens;
+   } else if (request.max_tokens) {
+     chatRequest.max_tokens = request.max_tokens;
+   }
+
+   if (request.temperature !== undefined) {
+     chatRequest.temperature = request.temperature;
+   }
+
+   if (request.top_p !== undefined) {
+     chatRequest.top_p = request.top_p;
+   }
+
+   if (request.tools && Array.isArray(request.tools)) {
+     chatRequest.tools = request.tools;
+   }
+
+   if (request.tool_choice) {
+     chatRequest.tool_choice = request.tool_choice;
+   }
+
+   log('debug', 'Translated Responses->Chat:', {
+     messagesCount: messages.length,
+     model: chatRequest.model,
+     stream: chatRequest.stream
+   });
+
+   return chatRequest;
+ }
+
+ /**
+  * Translate Chat Completions response to Responses format
+  */
+ function translateChatToResponses(chatResponse) {
+   let text = '';
+
+   // Extract content from Chat format
+   if (chatResponse.choices && chatResponse.choices.length > 0) {
+     const choice = chatResponse.choices[0];
+     if (choice.message && choice.message.content) {
+       text = choice.message.content;
+     }
+   }
+
+   // Map usage
+   const usage = {};
+   if (chatResponse.usage) {
+     if (chatResponse.usage.prompt_tokens) {
+       usage.input_tokens = chatResponse.usage.prompt_tokens;
+     }
+     if (chatResponse.usage.completion_tokens) {
+       usage.output_tokens = chatResponse.usage.completion_tokens;
+     }
+     if (chatResponse.usage.total_tokens) {
+       usage.total_tokens = chatResponse.usage.total_tokens;
+     }
+   }
+
+   const response = {
+     output: [{
+       value: text,
+       content_type: 'text'
+     }],
+     status: 'completed',
+     usage: Object.keys(usage).length > 0 ? usage : undefined
+   };
+
+   log('debug', 'Translated Chat->Responses:', {
+     outputLength: text.length,
+     status: response.status
+   });
+
+   return response;
+ }
+
+ /**
+  * Make upstream request to Z.AI
+  */
+ async function makeUpstreamRequest(path, body, headers) {
184
+ const url = new URL(path, ZAI_BASE_URL);
185
+
186
+ const upstreamHeaders = {
187
+ 'Content-Type': 'application/json',
188
+ 'Authorization': headers['authorization'] || headers['Authorization'] || ''
189
+ };
190
+
191
+ log('debug', 'Upstream request:', {
192
+ url: url.href,
193
+ hasAuth: !!upstreamHeaders.Authorization
194
+ });
195
+
196
+ const response = await fetch(url, {
197
+ method: 'POST',
198
+ headers: upstreamHeaders,
199
+ body: JSON.stringify(body)
200
+ });
201
+
202
+ return response;
203
+ }
204
+
205
+ /**
206
+ * Handle streaming response from Z.AI
207
+ */
+ // Note: a plain async function (not an async generator), so that the
+ // `await streamChatToResponses(...)` call below actually runs it to completion.
+ async function streamChatToResponses(stream, res) {
+   const decoder = new TextDecoder();
+   let buffer = '';
+
+   for await (const chunk of stream) {
+     buffer += decoder.decode(chunk, { stream: true });
+     const lines = buffer.split('\n');
+     buffer = lines.pop() || '';
+
+     for (const line of lines) {
+       if (!line.trim() || !line.startsWith('data: ')) continue;
+
+       const data = line.slice(6).trim();
+
+       // Check for stream end
+       if (data === '[DONE]') {
+         res.write(`event: completed\n`);
+         res.write(`data: ${JSON.stringify({ status: 'completed' })}\n\n`);
+         return;
+       }
+
+       try {
+         const parsed = JSON.parse(data);
+         const delta = parsed.choices?.[0]?.delta;
+
+         if (delta?.content) {
+           res.write(`event: output.text.delta\n`);
+           res.write(`data: ${JSON.stringify({ value: delta.content })}\n\n`);
+         }
+       } catch (e) {
+         log('warn', 'Failed to parse SSE chunk:', e.message);
+       }
+     }
+   }
+ }
+
+ /**
+  * Handle POST requests
+  */
+ async function handlePostRequest(req, res) {
+   const path = req.url;
+
+   // Only handle /responses and /v1/responses
+   if (!path.endsWith('/responses') && !path.endsWith('/v1/responses')) {
+     res.writeHead(404, { 'Content-Type': 'application/json' });
+     res.end(JSON.stringify({ error: 'Not Found', path }));
+     return;
+   }
+
+   let body = '';
+   for await (const chunk of req) {
+     body += chunk.toString();
+   }
+
+   let request;
+   try {
+     request = JSON.parse(body);
+   } catch (e) {
+     res.writeHead(400, { 'Content-Type': 'application/json' });
+     res.end(JSON.stringify({ error: 'Invalid JSON' }));
+     return;
+   }
+
+   log('info', 'Incoming request:', {
+     path,
+     format: detectFormat(request),
+     model: request.model
+   });
+
+   let upstreamBody;
+   const format = detectFormat(request);
+
+   if (format === 'responses') {
+     // Translate Responses to Chat
+     upstreamBody = translateResponsesToChat(request);
+   } else if (format === 'chat') {
+     // Pass through Chat format
+     upstreamBody = request;
+   } else {
+     res.writeHead(400, { 'Content-Type': 'application/json' });
+     res.end(JSON.stringify({ error: 'Unknown request format' }));
+     return;
+   }
+
+   try {
+     const upstreamResponse = await makeUpstreamRequest(
+       '/chat/completions',
+       upstreamBody,
+       req.headers
+     );
+
+     if (!upstreamResponse.ok) {
+       const errorBody = await upstreamResponse.text();
+       log('error', 'Upstream error:', {
+         status: upstreamResponse.status,
+         body: errorBody.substring(0, 200)
+       });
+
+       res.writeHead(502, { 'Content-Type': 'application/json' });
+       res.end(JSON.stringify({
+         error: 'Upstream request failed',
+         upstream_status: upstreamResponse.status,
+         upstream_body: errorBody
+       }));
+       return;
+     }
+
+     // Handle streaming response
+     if (upstreamBody.stream) {
+       res.writeHead(200, {
+         'Content-Type': 'text/event-stream; charset=utf-8',
+         'Cache-Control': 'no-cache',
+         'Connection': 'keep-alive'
+       });
+
+       await streamChatToResponses(upstreamResponse.body, res);
+       res.end();
+     } else {
+       // Non-streaming response
+       const chatResponse = await upstreamResponse.json();
+       const response = translateChatToResponses(chatResponse);
+
+       res.writeHead(200, { 'Content-Type': 'application/json' });
+       res.end(JSON.stringify(response));
+     }
+   } catch (error) {
+     log('error', 'Request failed:', error);
+     res.writeHead(500, { 'Content-Type': 'application/json' });
+     res.end(JSON.stringify({ error: error.message }));
+   }
+ }
+
+ /**
+  * Create HTTP server
+  */
+ const server = http.createServer(async (req, res) => {
+   log('debug', 'Request:', req.method, req.url);
+
+   // Health check
+   if (req.url === '/health' && req.method === 'GET') {
+     res.writeHead(200, { 'Content-Type': 'application/json' });
+     res.end(JSON.stringify({ ok: true }));
+     return;
+   }
+
+   // POST /responses
+   if (req.method === 'POST') {
+     await handlePostRequest(req, res);
+     return;
+   }
+
+   // 404
+   res.writeHead(404, { 'Content-Type': 'application/json' });
+   res.end(JSON.stringify({ error: 'Not Found' }));
+ });
+
+ /**
+  * Start server
+  */
+ server.listen(PORT, HOST, () => {
+   log('info', `zai-codex-bridge listening on http://${HOST}:${PORT}`);
+   log('info', `Proxying to Z.AI at: ${ZAI_BASE_URL}`);
+   log('info', `Health check: http://${HOST}:${PORT}/health`);
+ });