mini-coder 0.0.18 → 0.0.19
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +3 -10
- package/dist/mc.js +72 -10
- package/docs/mini-coder.1.md +158 -0
- package/package.json +1 -1
- package/docs/chatgpt-subscription-auth.md +0 -68
package/README.md
CHANGED
|
@@ -6,6 +6,8 @@
|
|
|
6
6
|
|
|
7
7
|
> _Small. Fast. Gets out of your way._
|
|
8
8
|
|
|
9
|
+
[📖 Read the Full Manual](https://sacenox.github.io/mini-coder/)
|
|
10
|
+
|
|
9
11
|
Hey there! I'm **mini-coder** — a CLI coding agent built for developers who want a sharp tool, not a bloated IDE plugin. Think of me as the pocket knife of AI coding assistants: lightweight, reliable, and always ready.
|
|
10
12
|
|
|
11
13
|
---
|
|
@@ -16,16 +18,7 @@ I'm `mc` — your new terminal companion. I live in your shell, speak to large l
|
|
|
16
18
|
|
|
17
19
|
I was built with a simple philosophy: **dev flow first**. No slow startup. No clunky GUI. No vendor lock-in. Just you, your terminal, and an AI that keeps up.
|
|
18
20
|
|
|
19
|
-
|
|
20
|
-
$ mc
|
|
21
|
-
┌─ mini-coder ──────────────────────────────────────────┐
|
|
22
|
-
│ What would you like to work on today? │
|
|
23
|
-
│ │
|
|
24
|
-
│ > _ │
|
|
25
|
-
│ │
|
|
26
|
-
│ [zen/claude-sonnet-4-6] [~/src/my-project] [main] ...│
|
|
27
|
-
└───────────────────────────────────────────────────────┘
|
|
28
|
-
```
|
|
21
|
+

|
|
29
22
|
|
|
30
23
|
---
|
|
31
24
|
|
package/dist/mc.js
CHANGED
|
@@ -88,7 +88,7 @@ var terminal = new TerminalIO;
|
|
|
88
88
|
import * as c from "yoctocolors";
|
|
89
89
|
|
|
90
90
|
// src/cli/error-log.ts
|
|
91
|
-
import { mkdirSync } from "fs";
|
|
91
|
+
import { mkdirSync, writeFileSync } from "fs";
|
|
92
92
|
import { homedir } from "os";
|
|
93
93
|
import { join } from "path";
|
|
94
94
|
var writer = null;
|
|
@@ -98,6 +98,7 @@ function initErrorLog() {
|
|
|
98
98
|
const dirPath = join(homedir(), ".config", "mini-coder");
|
|
99
99
|
const logPath = join(dirPath, "errors.log");
|
|
100
100
|
mkdirSync(dirPath, { recursive: true });
|
|
101
|
+
writeFileSync(logPath, "");
|
|
101
102
|
writer = Bun.file(logPath).writer();
|
|
102
103
|
process.on("uncaughtException", (err) => {
|
|
103
104
|
logError(err, "uncaught");
|
|
@@ -212,6 +213,12 @@ function parseAppError(err) {
|
|
|
212
213
|
hint: "Check network or local server"
|
|
213
214
|
};
|
|
214
215
|
}
|
|
216
|
+
if (code === "ECONNRESET" || message.includes("ECONNRESET") || message.includes("socket connection was closed unexpectedly")) {
|
|
217
|
+
return {
|
|
218
|
+
headline: "Connection lost",
|
|
219
|
+
hint: "The server closed the connection \u2014 retry or switch model with /model"
|
|
220
|
+
};
|
|
221
|
+
}
|
|
215
222
|
const firstLine = message.split(`
|
|
216
223
|
`)[0]?.trim() || "Unknown error";
|
|
217
224
|
return { headline: firstLine };
|
|
@@ -894,7 +901,7 @@ async function renderTurn(events, spinner, opts) {
|
|
|
894
901
|
|
|
895
902
|
// src/cli/output.ts
|
|
896
903
|
var HOME2 = homedir3();
|
|
897
|
-
var PACKAGE_VERSION = "0.0.
|
|
904
|
+
var PACKAGE_VERSION = "0.0.19";
|
|
898
905
|
function tildePath(p) {
|
|
899
906
|
return p.startsWith(HOME2) ? `~${p.slice(HOME2.length)}` : p;
|
|
900
907
|
}
|
|
@@ -1295,6 +1302,21 @@ function getDb() {
|
|
|
1295
1302
|
}
|
|
1296
1303
|
return _db;
|
|
1297
1304
|
}
|
|
1305
|
+
var MAX_SESSIONS = 100;
|
|
1306
|
+
var MAX_PROMPT_HISTORY = 500;
|
|
1307
|
+
function pruneOldData() {
|
|
1308
|
+
const db = getDb();
|
|
1309
|
+
const deletedSessions = db.run(`DELETE FROM sessions WHERE id NOT IN (
|
|
1310
|
+
SELECT id FROM sessions ORDER BY updated_at DESC LIMIT ?
|
|
1311
|
+
)`, [MAX_SESSIONS]).changes;
|
|
1312
|
+
const deletedHistory = db.run(`DELETE FROM prompt_history WHERE id NOT IN (
|
|
1313
|
+
SELECT id FROM prompt_history ORDER BY id DESC LIMIT ?
|
|
1314
|
+
)`, [MAX_PROMPT_HISTORY]).changes;
|
|
1315
|
+
if (deletedSessions > 0 || deletedHistory > 0) {
|
|
1316
|
+
db.exec("VACUUM;");
|
|
1317
|
+
}
|
|
1318
|
+
db.exec("PRAGMA wal_checkpoint(TRUNCATE);");
|
|
1319
|
+
}
|
|
1298
1320
|
// src/session/db/mcp-repo.ts
|
|
1299
1321
|
function listMcpServers() {
|
|
1300
1322
|
return getDb().query("SELECT name, transport, url, command, args, env FROM mcp_servers ORDER BY name").all();
|
|
@@ -2193,18 +2215,47 @@ import { createOpenAI } from "@ai-sdk/openai";
|
|
|
2193
2215
|
import { createOpenAICompatible } from "@ai-sdk/openai-compatible";
|
|
2194
2216
|
|
|
2195
2217
|
// src/llm-api/api-log.ts
|
|
2196
|
-
import { mkdirSync as mkdirSync3 } from "fs";
|
|
2218
|
+
import { mkdirSync as mkdirSync3, writeFileSync as writeFileSync2 } from "fs";
|
|
2197
2219
|
import { homedir as homedir6 } from "os";
|
|
2198
2220
|
import { join as join5 } from "path";
|
|
2199
2221
|
var writer2 = null;
|
|
2222
|
+
var MAX_ENTRY_BYTES = 8 * 1024;
|
|
2200
2223
|
function initApiLog() {
|
|
2201
2224
|
if (writer2)
|
|
2202
2225
|
return;
|
|
2203
2226
|
const dirPath = join5(homedir6(), ".config", "mini-coder");
|
|
2204
2227
|
const logPath = join5(dirPath, "api.log");
|
|
2205
2228
|
mkdirSync3(dirPath, { recursive: true });
|
|
2229
|
+
writeFileSync2(logPath, "");
|
|
2206
2230
|
writer2 = Bun.file(logPath).writer();
|
|
2207
2231
|
}
|
|
2232
|
+
function isObject2(v) {
|
|
2233
|
+
return typeof v === "object" && v !== null;
|
|
2234
|
+
}
|
|
2235
|
+
var LOG_DROP_KEYS = new Set([
|
|
2236
|
+
"requestBodyValues",
|
|
2237
|
+
"responseBody",
|
|
2238
|
+
"responseHeaders",
|
|
2239
|
+
"stack"
|
|
2240
|
+
]);
|
|
2241
|
+
function sanitizeForLog(data) {
|
|
2242
|
+
if (!isObject2(data))
|
|
2243
|
+
return data;
|
|
2244
|
+
const result = {};
|
|
2245
|
+
for (const key in data) {
|
|
2246
|
+
if (LOG_DROP_KEYS.has(key))
|
|
2247
|
+
continue;
|
|
2248
|
+
const value = data[key];
|
|
2249
|
+
if (key === "errors" && Array.isArray(value)) {
|
|
2250
|
+
result[key] = value.map((e) => sanitizeForLog(e));
|
|
2251
|
+
} else if (key === "lastError" && isObject2(value)) {
|
|
2252
|
+
result[key] = sanitizeForLog(value);
|
|
2253
|
+
} else {
|
|
2254
|
+
result[key] = value;
|
|
2255
|
+
}
|
|
2256
|
+
}
|
|
2257
|
+
return result;
|
|
2258
|
+
}
|
|
2208
2259
|
function logApiEvent(event, data) {
|
|
2209
2260
|
if (!writer2)
|
|
2210
2261
|
return;
|
|
@@ -2213,7 +2264,13 @@ function logApiEvent(event, data) {
|
|
|
2213
2264
|
`;
|
|
2214
2265
|
if (data !== undefined) {
|
|
2215
2266
|
try {
|
|
2216
|
-
|
|
2267
|
+
const safe = sanitizeForLog(data);
|
|
2268
|
+
let serialized = JSON.stringify(safe, null, 2);
|
|
2269
|
+
if (serialized.length > MAX_ENTRY_BYTES) {
|
|
2270
|
+
serialized = `${serialized.slice(0, MAX_ENTRY_BYTES)}
|
|
2271
|
+
\u2026truncated`;
|
|
2272
|
+
}
|
|
2273
|
+
entry += serialized.split(`
|
|
2217
2274
|
`).map((line) => ` ${line}`).join(`
|
|
2218
2275
|
`);
|
|
2219
2276
|
entry += `
|
|
@@ -3162,14 +3219,14 @@ function applyContextPruning(messages, mode) {
|
|
|
3162
3219
|
return pruneMessages({
|
|
3163
3220
|
messages,
|
|
3164
3221
|
reasoning: "before-last-message",
|
|
3165
|
-
toolCalls: "before-last-
|
|
3222
|
+
toolCalls: "before-last-20-messages",
|
|
3166
3223
|
emptyMessages: "remove"
|
|
3167
3224
|
});
|
|
3168
3225
|
}
|
|
3169
3226
|
return pruneMessages({
|
|
3170
3227
|
messages,
|
|
3171
3228
|
reasoning: "before-last-message",
|
|
3172
|
-
toolCalls: "before-last-
|
|
3229
|
+
toolCalls: "before-last-40-messages",
|
|
3173
3230
|
emptyMessages: "remove"
|
|
3174
3231
|
});
|
|
3175
3232
|
}
|
|
@@ -5513,7 +5570,7 @@ ${c13.bold("Examples:")}`);
|
|
|
5513
5570
|
}
|
|
5514
5571
|
|
|
5515
5572
|
// src/cli/bootstrap.ts
|
|
5516
|
-
import { existsSync as existsSync5, mkdirSync as mkdirSync5, writeFileSync } from "fs";
|
|
5573
|
+
import { existsSync as existsSync5, mkdirSync as mkdirSync5, writeFileSync as writeFileSync3 } from "fs";
|
|
5517
5574
|
import { homedir as homedir9 } from "os";
|
|
5518
5575
|
import { join as join11 } from "path";
|
|
5519
5576
|
import * as c14 from "yoctocolors";
|
|
@@ -5539,7 +5596,7 @@ function bootstrapGlobalDefaults() {
|
|
|
5539
5596
|
const reviewPath = join11(commandsDir, "review.md");
|
|
5540
5597
|
if (!existsSync5(reviewPath)) {
|
|
5541
5598
|
mkdirSync5(commandsDir, { recursive: true });
|
|
5542
|
-
|
|
5599
|
+
writeFileSync3(reviewPath, REVIEW_COMMAND_CONTENT, "utf-8");
|
|
5543
5600
|
writeln(`${c14.green("\u2713")} created ${c14.dim("~/.agents/commands/review.md")} ${c14.dim("(edit it to customise your reviews)")}`);
|
|
5544
5601
|
}
|
|
5545
5602
|
}
|
|
@@ -6296,7 +6353,9 @@ async function runInputLoop(opts) {
|
|
|
6296
6353
|
}
|
|
6297
6354
|
if (result.type === "inject-user-message") {
|
|
6298
6355
|
const { text: resolvedText, images: refImages } = await resolveFileRefs(result.text, cwd);
|
|
6299
|
-
|
|
6356
|
+
try {
|
|
6357
|
+
await runner.processUserInput(resolvedText, refImages);
|
|
6358
|
+
} catch {}
|
|
6300
6359
|
}
|
|
6301
6360
|
continue;
|
|
6302
6361
|
}
|
|
@@ -6322,7 +6381,9 @@ ${out}
|
|
|
6322
6381
|
const { text: resolvedText, images: refImages } = await resolveFileRefs(input.text, cwd);
|
|
6323
6382
|
const allImages = [...input.images || [], ...refImages];
|
|
6324
6383
|
if (!runner.ralphMode) {
|
|
6325
|
-
|
|
6384
|
+
try {
|
|
6385
|
+
await runner.processUserInput(resolvedText, allImages);
|
|
6386
|
+
} catch {}
|
|
6326
6387
|
continue;
|
|
6327
6388
|
}
|
|
6328
6389
|
if (allImages.length > 0) {
|
|
@@ -6404,6 +6465,7 @@ registerTerminalCleanup();
|
|
|
6404
6465
|
initErrorLog();
|
|
6405
6466
|
initApiLog();
|
|
6406
6467
|
initModelInfoCache();
|
|
6468
|
+
pruneOldData();
|
|
6407
6469
|
refreshModelInfoInBackground().catch(() => {});
|
|
6408
6470
|
async function main() {
|
|
6409
6471
|
const argv = process.argv.slice(2);
|
|
package/docs/mini-coder.1.md
ADDED
@@ -0,0 +1,158 @@
|
|
|
1
|
+
# MINI-CODER(1)
|
|
2
|
+
|
|
3
|
+
## NAME
|
|
4
|
+
**mini-coder** (executable: `mc`) - A small, fast CLI coding agent built for developers.
|
|
5
|
+
|
|
6
|
+
## SYNOPSIS
|
|
7
|
+
`mc [options] [prompt]`
|
|
8
|
+
|
|
9
|
+
## DESCRIPTION
|
|
10
|
+
**mini-coder** is a developer-focused CLI coding agent. It prioritizes developer flow with no slow startup, no clunky GUI, and no vendor lock-in. It uses a minimalist terminal UI restricted to 16 ANSI colors to inherit the user's terminal theme, and is built entirely on Bun.js for maximum performance.
|
|
11
|
+
|
|
12
|
+
## OPTIONS
|
|
13
|
+
**-m, --model <id>**
|
|
14
|
+
: Specify the model to use (e.g., `zen/claude-sonnet-4-6`).
|
|
15
|
+
|
|
16
|
+
**-c, --continue**
|
|
17
|
+
: Continue the most recent session.
|
|
18
|
+
|
|
19
|
+
**-r, --resume <id>**
|
|
20
|
+
: Resume a specific session by its ID.
|
|
21
|
+
|
|
22
|
+
**-l, --list**
|
|
23
|
+
: List recent sessions.
|
|
24
|
+
|
|
25
|
+
**--cwd <path>**
|
|
26
|
+
: Set the working directory (defaults to current directory).
|
|
27
|
+
|
|
28
|
+
**-h, --help**
|
|
29
|
+
: Display help information.
|
|
30
|
+
|
|
31
|
+
**[prompt]**
|
|
32
|
+
: Optional one-shot prompt text before entering interactive mode.
|
|
33
|
+
|
|
34
|
+
## INTERACTIVE COMMANDS
|
|
35
|
+
Inside the interactive session, the following slash commands are available:
|
|
36
|
+
|
|
37
|
+
**/model**
|
|
38
|
+
: List all available models, indicating free models and context sizes.
|
|
39
|
+
|
|
40
|
+
**/model <id>**
|
|
41
|
+
: Switch to a specific model.
|
|
42
|
+
|
|
43
|
+
**/model effort <low|medium|high|xhigh|off>**
|
|
44
|
+
: Configure reasoning effort levels for models that support it.
|
|
45
|
+
|
|
46
|
+
**/reasoning [on|off]**
|
|
47
|
+
: Toggle the display of the model's reasoning/thought process.
|
|
48
|
+
|
|
49
|
+
**/context prune <off|balanced|aggressive>**
|
|
50
|
+
: Configure context window pruning strategies.
|
|
51
|
+
|
|
52
|
+
**/context cap <off|bytes|kb>**
|
|
53
|
+
: Set a hard payload cap size for tool results to avoid blowing out context.
|
|
54
|
+
|
|
55
|
+
**/cache <on|off>**
|
|
56
|
+
: Toggle prompt caching globally.
|
|
57
|
+
|
|
58
|
+
**/cache openai <in_memory|24h>**
|
|
59
|
+
: Set OpenAI prompt cache retention policies.
|
|
60
|
+
|
|
61
|
+
**/cache gemini <off|cachedContents/...>**
|
|
62
|
+
: Attach Google Gemini cached content.
|
|
63
|
+
|
|
64
|
+
**/plan**
|
|
65
|
+
: Toggle read-only planning mode.
|
|
66
|
+
|
|
67
|
+
**/ralph**
|
|
68
|
+
: Toggle autonomous execution looping.
|
|
69
|
+
|
|
70
|
+
**/undo**
|
|
71
|
+
: Revert the last turn and restore files.
|
|
72
|
+
|
|
73
|
+
**/new**
|
|
74
|
+
: Clear context and start a fresh session.
|
|
75
|
+
|
|
76
|
+
**/mcp list**
|
|
77
|
+
: List configured MCP servers.
|
|
78
|
+
|
|
79
|
+
**/mcp add <name> http <url>**
|
|
80
|
+
: Add an MCP server over HTTP.
|
|
81
|
+
|
|
82
|
+
**/mcp add <name> stdio <cmd> [args...]**
|
|
83
|
+
: Add an MCP server over stdio.
|
|
84
|
+
|
|
85
|
+
**/mcp remove <name>** (or **rm**)
|
|
86
|
+
: Remove an MCP server.
|
|
87
|
+
|
|
88
|
+
**/agent [name]**
|
|
89
|
+
: Set or clear an active primary custom agent.
|
|
90
|
+
|
|
91
|
+
**/help**
|
|
92
|
+
: Display command help.
|
|
93
|
+
|
|
94
|
+
**/exit, /quit, /q**
|
|
95
|
+
: Leave the session.
|
|
96
|
+
|
|
97
|
+
## INLINE FEATURES
|
|
98
|
+
**Shell Integration**
|
|
99
|
+
: Prefix user prompts with `!` to run shell commands inline directly into the context.
|
|
100
|
+
|
|
101
|
+
**File & Agent Referencing**
|
|
102
|
+
: Prefix words with `@` to reference files, custom agents, or skills within prompts (supports tab completion).
|
|
103
|
+
|
|
104
|
+
## BUILT-IN TOOLS
|
|
105
|
+
The agent has access to the following tools:
|
|
106
|
+
* **glob**: Discover files by glob pattern across the project.
|
|
107
|
+
* **grep**: Search file contents using regular expressions.
|
|
108
|
+
* **read**: Read file contents with line-range pagination support.
|
|
109
|
+
* **create**: Write a new file or completely overwrite an existing one.
|
|
110
|
+
* **replace**: Replace or delete targeted lines using hashline anchors.
|
|
111
|
+
* **insert**: Insert new lines before/after an anchor without replacing existing content.
|
|
112
|
+
* **shell**: Execute bash commands and capture output.
|
|
113
|
+
* **subagent**: Spawn a focused mini-agent with a prompt.
|
|
114
|
+
* **webSearch**: Search the internet (requires EXA key).
|
|
115
|
+
* **webContent**: Fetch full page content from a URL (requires EXA key).
|
|
116
|
+
|
|
117
|
+
## ENVIRONMENT
|
|
118
|
+
**OPENCODE_API_KEY**
|
|
119
|
+
: OpenCode Zen API key (Recommended provider).
|
|
120
|
+
|
|
121
|
+
**ANTHROPIC_API_KEY**
|
|
122
|
+
: Direct Anthropic API key.
|
|
123
|
+
|
|
124
|
+
**OPENAI_API_KEY**
|
|
125
|
+
: Direct OpenAI API key.
|
|
126
|
+
|
|
127
|
+
**GOOGLE_API_KEY** (or **GEMINI_API_KEY**)
|
|
128
|
+
: Direct Google Gemini API key.
|
|
129
|
+
|
|
130
|
+
**OLLAMA_BASE_URL**
|
|
131
|
+
: Ollama local base URL (Defaults to `http://localhost:11434`).
|
|
132
|
+
|
|
133
|
+
**EXA_API_KEY**
|
|
134
|
+
: Enables built-in `webSearch` and `webContent` tools.
|
|
135
|
+
|
|
136
|
+
## FILES & DIRECTORIES
|
|
137
|
+
**~/.config/mini-coder/**
|
|
138
|
+
: Application data directory. Contains `sessions.db` (SQLite database for session history, tool snapshots, MCP server configs, and model metadata), `api.log`, and `errors.log`.
|
|
139
|
+
|
|
140
|
+
**.agents/ or .claude/ (Local or Global in ~/)**
|
|
141
|
+
: Configuration directories for advanced features:
|
|
142
|
+
* **commands/*.md**: Custom slash commands.
|
|
143
|
+
* **agents/*.md**: Custom behavioral wrappers or subagents.
|
|
144
|
+
* **skills/<name>/SKILL.md**: Isolated context/instruction snippets.
|
|
145
|
+
* **hooks/post-<tool>**: Executable scripts triggered upon tool execution.
|
|
146
|
+
|
|
147
|
+
**AGENTS.md / CLAUDE.md**
|
|
148
|
+
: Auto-loaded system context files for project-specific instructions.
|
|
149
|
+
|
|
150
|
+
## CORE FEATURES & ARCHITECTURE
|
|
151
|
+
* **Multi-Provider LLM Routing**: Automatically discovers API keys to route to OpenCode (Zen), Anthropic, OpenAI, Google/Gemini, or local Ollama instances.
|
|
152
|
+
* **Session Memory**: Persists conversation history in a local SQLite database, allowing users to resume past sessions effortlessly.
|
|
153
|
+
* **Subagent Delegation**: Includes a tool to spawn parallel instances of itself to tackle independent subtasks simultaneously (up to 10 levels deep).
|
|
154
|
+
* **Autonomous Mode (Ralph)**: An autonomous looping mode that runs tasks in an isolated context loop (up to 20 iterations) until completion.
|
|
155
|
+
* **Plan Mode**: A read-only thinking mode utilizing read tools + MCP, safely analyzing code without making mutations or executing shell commands.
|
|
156
|
+
* **Model Context Protocol (MCP)**: Native support for connecting external tools via MCP servers over HTTP or stdio.
|
|
157
|
+
* **Prompt Caching**: Configurable caching behaviors for supported providers (OpenAI, Gemini).
|
|
158
|
+
* **Undo Functionality**: Roll back the last conversation turn, cleanly restoring previous file states and git history via snapshots.
|
package/docs/chatgpt-subscription-auth.md
DELETED
|
@@ -1,68 +0,0 @@
|
|
|
1
|
-
# ChatGPT/Codex subscription auth notes
|
|
2
|
-
|
|
3
|
-
mini-coder does **not** currently support logging in with a ChatGPT Plus/Pro/Codex subscription.
|
|
4
|
-
|
|
5
|
-
## Why
|
|
6
|
-
|
|
7
|
-
We looked at two implementations:
|
|
8
|
-
|
|
9
|
-
- OpenCode in `/tmp/opencode-src`
|
|
10
|
-
- official Codex in `/tmp/openai-codex/codex-rs`
|
|
11
|
-
|
|
12
|
-
Both rely on OpenAI **first-party/private** auth and backend APIs rather than a documented public developer API.
|
|
13
|
-
|
|
14
|
-
## What those implementations do
|
|
15
|
-
|
|
16
|
-
### Auth
|
|
17
|
-
|
|
18
|
-
They use OAuth-like flows against `https://auth.openai.com`, including:
|
|
19
|
-
|
|
20
|
-
- browser login with PKCE and a localhost callback server
|
|
21
|
-
- device-code / headless login
|
|
22
|
-
- refresh tokens via `POST /oauth/token`
|
|
23
|
-
|
|
24
|
-
Both also rely on a hardcoded first-party client id embedded in their source trees.
|
|
25
|
-
|
|
26
|
-
Examples:
|
|
27
|
-
|
|
28
|
-
- official Codex: `/tmp/openai-codex/codex-rs/core/src/auth.rs`
|
|
29
|
-
- OpenCode: `/tmp/opencode-src/packages/opencode/src/plugin/codex.ts`
|
|
30
|
-
|
|
31
|
-
### Runtime API
|
|
32
|
-
|
|
33
|
-
After login, requests are sent to ChatGPT backend endpoints such as:
|
|
34
|
-
|
|
35
|
-
- `https://chatgpt.com/backend-api/codex`
|
|
36
|
-
- `https://chatgpt.com/backend-api/codex/responses`
|
|
37
|
-
|
|
38
|
-
with headers like:
|
|
39
|
-
|
|
40
|
-
- `Authorization: Bearer <oauth access token>`
|
|
41
|
-
- `ChatGPT-Account-Id: <account id>`
|
|
42
|
-
|
|
43
|
-
Examples:
|
|
44
|
-
|
|
45
|
-
- official Codex: `/tmp/openai-codex/codex-rs/core/src/model_provider_info.rs`
|
|
46
|
-
- official Codex headers: `/tmp/openai-codex/codex-rs/backend-client/src/client.rs`
|
|
47
|
-
- OpenCode rewrite layer: `/tmp/opencode-src/packages/opencode/src/plugin/codex.ts`
|
|
48
|
-
|
|
49
|
-
## Why mini-coder is not adopting this
|
|
50
|
-
|
|
51
|
-
- It depends on undocumented/private auth endpoints.
|
|
52
|
-
- It depends on a hardcoded first-party client id.
|
|
53
|
-
- It depends on private ChatGPT backend routes.
|
|
54
|
-
- Browser login would require running a local callback server.
|
|
55
|
-
- Even the official Codex source does not expose a clean public API-based alternative here.
|
|
56
|
-
|
|
57
|
-
## Future stance
|
|
58
|
-
|
|
59
|
-
We may revisit support if OpenAI exposes a stable, documented path for:
|
|
60
|
-
|
|
61
|
-
- ChatGPT subscription login for third-party tools, or
|
|
62
|
-
- a public Codex/ChatGPT backend API intended for external clients
|
|
63
|
-
|
|
64
|
-
Until then, mini-coder only supports providers with clearer public integration paths.
|
|
65
|
-
|
|
66
|
-
## Note
|
|
67
|
-
|
|
68
|
-
If you want a supported hosted integration instead of ChatGPT subscription auth, mini-coder already supports OpenCode Zen via `OPENCODE_API_KEY`. See the existing `zen/<model>` provider path.
|