@elvatis_com/openclaw-cli-bridge-elvatis 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.ai/handoff/CONVENTIONS.md +31 -0
- package/.ai/handoff/DASHBOARD.md +74 -0
- package/.ai/handoff/LOG-ARCHIVE.md +10 -0
- package/.ai/handoff/LOG.md +28 -0
- package/.ai/handoff/MANIFEST.json +35 -0
- package/.ai/handoff/NEXT_ACTIONS.md +37 -0
- package/.ai/handoff/STATUS.md +32 -0
- package/.ai/handoff/TRUST.md +69 -0
- package/.ai/handoff/WORKFLOW.md +152 -0
- package/README.md +84 -0
- package/index.ts +160 -0
- package/openclaw.plugin.json +37 -0
- package/package.json +17 -0
- package/src/cli-runner.ts +193 -0
- package/src/codex-auth.ts +90 -0
- package/src/config-patcher.ts +137 -0
- package/src/proxy-server.ts +280 -0
- package/tsconfig.json +20 -0
package/index.ts
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* openclaw-cli-bridge-elvatis — index.ts
|
|
3
|
+
*
|
|
4
|
+
* Phase 1 (auth bridge): registers openai-codex provider using tokens from
|
|
5
|
+
* ~/.codex/auth.json (Codex CLI is already logged in — no re-login needed).
|
|
6
|
+
*
|
|
7
|
+
* Phase 2 (request bridge): starts a local OpenAI-compatible HTTP proxy server
|
|
8
|
+
* and configures OpenClaw's vllm provider to route through it. Model calls
|
|
9
|
+
* are handled by the Gemini CLI and Claude Code CLI subprocesses.
|
|
10
|
+
*
|
|
11
|
+
* Provider / model naming:
|
|
12
|
+
* vllm/cli-gemini/gemini-2.5-pro → `gemini -m gemini-2.5-pro -p "<prompt>"`
|
|
13
|
+
* vllm/cli-claude/claude-opus-4-6 → `claude -p -m claude-opus-4-6 --output-format text "<prompt>"`
|
|
14
|
+
*/
|
|
15
|
+
|
|
16
|
+
import type {
|
|
17
|
+
OpenClawPluginApi,
|
|
18
|
+
ProviderAuthContext,
|
|
19
|
+
ProviderAuthResult,
|
|
20
|
+
} from "openclaw/plugin-sdk";
|
|
21
|
+
import { buildOauthProviderAuthResult } from "openclaw/plugin-sdk";
|
|
22
|
+
import {
|
|
23
|
+
DEFAULT_CODEX_AUTH_PATH,
|
|
24
|
+
DEFAULT_MODEL as CODEX_DEFAULT_MODEL,
|
|
25
|
+
readCodexCredentials,
|
|
26
|
+
} from "./src/codex-auth.js";
|
|
27
|
+
import { startProxyServer } from "./src/proxy-server.js";
|
|
28
|
+
import { patchOpencllawConfig } from "./src/config-patcher.js";
|
|
29
|
+
|
|
30
|
+
// ──────────────────────────────────────────────────────────────────────────────
// Plugin config type
// ──────────────────────────────────────────────────────────────────────────────

/** Shape of api.pluginConfig; every field is optional and defaulted in register(). */
interface CliPluginConfig {
  // Phase 1: auth bridge
  /** Override path to the Codex CLI auth file (default: ~/.codex/auth.json). */
  codexAuthPath?: string;
  /** Register the openai-codex provider from Codex CLI auth (default: true). */
  enableCodex?: boolean;
  // Phase 2: request proxy
  /** Start the local CLI proxy server (default: true). */
  enableProxy?: boolean;
  /** TCP port for the local proxy server (default: 31337). */
  proxyPort?: number;
  /** Key the vllm provider must present to the proxy (default: "cli-bridge"). */
  proxyApiKey?: string;
  /** Max time to wait for a CLI response, in ms (default: 120000). */
  proxyTimeoutMs?: number;
}

// Fallbacks applied in register() when the corresponding config field is absent.
const DEFAULT_PROXY_PORT = 31337;
const DEFAULT_PROXY_API_KEY = "cli-bridge";
|
|
46
|
+
|
|
47
|
+
// ──────────────────────────────────────────────────────────────────────────────
// Plugin definition
// ──────────────────────────────────────────────────────────────────────────────

/**
 * OpenClaw plugin object (default export).
 *
 * register() runs once at plugin load. Depending on config it:
 *  (1) registers the openai-codex provider backed by the Codex CLI's stored
 *      OAuth tokens, and
 *  (2) starts the local CLI proxy server and patches openclaw.json so the
 *      vllm provider routes through it.
 */
const plugin = {
  id: "openclaw-cli-bridge-elvatis",
  name: "OpenClaw CLI Bridge",
  version: "0.2.0",
  description:
    "Phase 1: openai-codex auth bridge (reads ~/.codex/auth.json). " +
    "Phase 2: HTTP proxy server routing model calls through gemini/claude CLIs.",

  register(api: OpenClawPluginApi) {
    // Resolve config with defaults; both phases are on unless explicitly disabled.
    const cfg = (api.pluginConfig ?? {}) as CliPluginConfig;
    const enableCodex = cfg.enableCodex ?? true;
    const enableProxy = cfg.enableProxy ?? true;
    const port = cfg.proxyPort ?? DEFAULT_PROXY_PORT;
    const apiKey = cfg.proxyApiKey ?? DEFAULT_PROXY_API_KEY;
    const timeoutMs = cfg.proxyTimeoutMs ?? 120_000;
    const codexAuthPath = cfg.codexAuthPath ?? DEFAULT_CODEX_AUTH_PATH;

    // ── Phase 1: openai-codex auth bridge ─────────────────────────────────────
    if (enableCodex) {
      api.registerProvider({
        id: "openai-codex",
        label: "OpenAI Codex (CLI bridge)",
        docsPath: "/providers/openai",
        aliases: ["codex-cli"],

        auth: [
          {
            id: "codex-cli-oauth",
            label: "Codex CLI (existing login)",
            hint: "Reads OAuth tokens from ~/.codex/auth.json — no re-login needed",
            kind: "oauth",

            // Auth flow: read the tokens the Codex CLI already stores on disk —
            // no browser round-trip. Throws (after stopping the spinner) if the
            // auth file is missing or unusable.
            run: async (ctx: ProviderAuthContext): Promise<ProviderAuthResult> => {
              const spin = ctx.prompter.progress("Reading Codex CLI credentials…");
              try {
                const creds = await readCodexCredentials(codexAuthPath);
                spin.stop("Codex CLI credentials loaded");

                return buildOauthProviderAuthResult({
                  providerId: "openai-codex",
                  defaultModel: CODEX_DEFAULT_MODEL,
                  access: creds.accessToken,
                  refresh: creds.refreshToken,
                  expires: creds.expiresAt,
                  email: creds.email,
                  notes: [
                    `Auth read from: ${codexAuthPath}`,
                    "If calls fail, run 'codex login' to refresh, then re-run auth.",
                  ],
                });
              } catch (err) {
                // Stop the spinner before propagating so the UI isn't left hanging.
                spin.stop("Failed to read Codex credentials");
                throw err;
              }
            },
          },
        ],

        // "Refresh" = re-read auth.json: the Codex CLI renews its own tokens,
        // so the file may hold newer tokens than our cached credential.
        // On any read failure the existing credential is returned unchanged.
        refreshOAuth: async (cred) => {
          try {
            const fresh = await readCodexCredentials(codexAuthPath);
            return {
              ...cred,
              access: fresh.accessToken,
              refresh: fresh.refreshToken ?? cred.refresh,
              expires: fresh.expiresAt ?? cred.expires,
            };
          } catch {
            return cred;
          }
        },
      });

      api.logger.info("[cli-bridge] openai-codex provider registered (Codex CLI auth bridge)");
    }

    // ── Phase 2: CLI request proxy ─────────────────────────────────────────────
    // Fire-and-forget: register() stays synchronous; start/patch outcomes are
    // reported via the logger only.
    if (enableProxy) {
      startProxyServer({
        port,
        apiKey,
        timeoutMs,
        log: (msg) => api.logger.info(msg),
        warn: (msg) => api.logger.warn(msg),
      })
        .then(() => {
          api.logger.info(
            `[cli-bridge] proxy ready — vllm/cli-gemini/* and vllm/cli-claude/* available`
          );

          // Auto-patch openclaw.json with vllm provider config (once)
          const result = patchOpencllawConfig(port);
          if (result.patched) {
            api.logger.info(
              `[cli-bridge] openclaw.json patched with vllm provider. ` +
              `Restart gateway to activate cli-gemini/* and cli-claude/* models.`
            );
          } else {
            api.logger.info(`[cli-bridge] config check: ${result.reason}`);
          }
        })
        .catch((err: Error) => {
          api.logger.warn(
            `[cli-bridge] proxy server failed to start on port ${port}: ${err.message}`
          );
        });
    }
  },
};

export default plugin;
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
{
|
|
2
|
+
"id": "openclaw-cli-bridge-elvatis",
|
|
3
|
+
"name": "OpenClaw CLI Bridge",
|
|
4
|
+
"version": "0.2.0",
|
|
5
|
+
"description": "Phase 1: openai-codex auth bridge. Phase 2: local HTTP proxy routing model calls through gemini/claude CLIs (vllm provider).",
|
|
6
|
+
"providers": ["openai-codex"],
|
|
7
|
+
"configSchema": {
|
|
8
|
+
"type": "object",
|
|
9
|
+
"additionalProperties": false,
|
|
10
|
+
"properties": {
|
|
11
|
+
"codexAuthPath": {
|
|
12
|
+
"type": "string",
|
|
13
|
+
"description": "Path to ~/.codex/auth.json (default: auto)"
|
|
14
|
+
},
|
|
15
|
+
"enableCodex": {
|
|
16
|
+
"type": "boolean",
|
|
17
|
+
"description": "Register openai-codex provider from Codex CLI auth (default: true)"
|
|
18
|
+
},
|
|
19
|
+
"enableProxy": {
|
|
20
|
+
"type": "boolean",
|
|
21
|
+
"description": "Start the local CLI proxy server for gemini/claude (default: true)"
|
|
22
|
+
},
|
|
23
|
+
"proxyPort": {
|
|
24
|
+
"type": "number",
|
|
25
|
+
"description": "Port for the local CLI proxy server (default: 31337)"
|
|
26
|
+
},
|
|
27
|
+
"proxyApiKey": {
|
|
28
|
+
"type": "string",
|
|
29
|
+
"description": "API key the vllm provider uses to auth with the proxy (default: cli-bridge)"
|
|
30
|
+
},
|
|
31
|
+
"proxyTimeoutMs": {
|
|
32
|
+
"type": "number",
|
|
33
|
+
"description": "Max time to wait for a CLI response in ms (default: 120000)"
|
|
34
|
+
}
|
|
35
|
+
}
|
|
36
|
+
}
|
|
37
|
+
}
|
package/package.json
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@elvatis_com/openclaw-cli-bridge-elvatis",
|
|
3
|
+
"version": "0.2.0",
|
|
4
|
+
"description": "Bridges gemini, claude, and codex CLI tools as OpenClaw model providers. Reads existing CLI auth without re-login.",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"scripts": {
|
|
7
|
+
"build": "tsc",
|
|
8
|
+
"typecheck": "tsc -p tsconfig.check.json",
|
|
9
|
+
"test": "vitest run",
|
|
10
|
+
"ci": "npm run typecheck && npm run test"
|
|
11
|
+
},
|
|
12
|
+
"devDependencies": {
|
|
13
|
+
"@types/node": "^25.3.2",
|
|
14
|
+
"typescript": "^5.9.3",
|
|
15
|
+
"vitest": "^4.0.18"
|
|
16
|
+
}
|
|
17
|
+
}
|
|
@@ -0,0 +1,193 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* cli-runner.ts
|
|
3
|
+
*
|
|
4
|
+
* Spawns CLI subprocesses (gemini, claude) and captures their output.
|
|
5
|
+
* Input: OpenAI-format messages → formatted prompt string → CLI stdout.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import { spawn } from "node:child_process";
|
|
9
|
+
|
|
10
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
11
|
+
// Message formatting
|
|
12
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
13
|
+
|
|
14
|
+
export interface ChatMessage {
|
|
15
|
+
role: "system" | "user" | "assistant";
|
|
16
|
+
content: string;
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
/**
|
|
20
|
+
* Convert OpenAI messages to a single flat prompt string.
|
|
21
|
+
* Both Gemini and Claude CLIs accept a plain text prompt.
|
|
22
|
+
*/
|
|
23
|
+
export function formatPrompt(messages: ChatMessage[]): string {
|
|
24
|
+
if (messages.length === 0) return "";
|
|
25
|
+
|
|
26
|
+
// If it's just a single user message, send it directly — no wrapping.
|
|
27
|
+
if (messages.length === 1 && messages[0].role === "user") {
|
|
28
|
+
return messages[0].content;
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
return messages
|
|
32
|
+
.map((m) => {
|
|
33
|
+
switch (m.role) {
|
|
34
|
+
case "system":
|
|
35
|
+
return `[System]\n${m.content}`;
|
|
36
|
+
case "assistant":
|
|
37
|
+
return `[Assistant]\n${m.content}`;
|
|
38
|
+
case "user":
|
|
39
|
+
default:
|
|
40
|
+
return `[User]\n${m.content}`;
|
|
41
|
+
}
|
|
42
|
+
})
|
|
43
|
+
.join("\n\n");
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
47
|
+
// Core subprocess runner
|
|
48
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
49
|
+
|
|
50
|
+
export interface CliRunResult {
|
|
51
|
+
stdout: string;
|
|
52
|
+
stderr: string;
|
|
53
|
+
exitCode: number;
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
export function runCli(
|
|
57
|
+
cmd: string,
|
|
58
|
+
args: string[],
|
|
59
|
+
timeoutMs = 120_000
|
|
60
|
+
): Promise<CliRunResult> {
|
|
61
|
+
return new Promise((resolve, reject) => {
|
|
62
|
+
const proc = spawn(cmd, args, {
|
|
63
|
+
timeout: timeoutMs,
|
|
64
|
+
env: { ...process.env, NO_COLOR: "1" }, // strip ANSI codes from output
|
|
65
|
+
});
|
|
66
|
+
|
|
67
|
+
let stdout = "";
|
|
68
|
+
let stderr = "";
|
|
69
|
+
|
|
70
|
+
// Important: some CLIs (notably Claude Code) keep waiting for stdin EOF
|
|
71
|
+
// even when prompt is provided as an argument. Close stdin immediately.
|
|
72
|
+
proc.stdin.end();
|
|
73
|
+
|
|
74
|
+
proc.stdout.on("data", (d: Buffer) => {
|
|
75
|
+
stdout += d.toString();
|
|
76
|
+
});
|
|
77
|
+
proc.stderr.on("data", (d: Buffer) => {
|
|
78
|
+
stderr += d.toString();
|
|
79
|
+
});
|
|
80
|
+
|
|
81
|
+
proc.on("close", (code) => {
|
|
82
|
+
resolve({ stdout: stdout.trim(), stderr: stderr.trim(), exitCode: code ?? 0 });
|
|
83
|
+
});
|
|
84
|
+
|
|
85
|
+
proc.on("error", (err) => {
|
|
86
|
+
reject(new Error(`Failed to spawn '${cmd}': ${err.message}`));
|
|
87
|
+
});
|
|
88
|
+
});
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
92
|
+
// Gemini CLI
|
|
93
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
94
|
+
|
|
95
|
+
/**
|
|
96
|
+
* Run: gemini -m <modelId> -p "<prompt>"
|
|
97
|
+
* Strips the model prefix ("cli-gemini/gemini-2.5-pro" → "gemini-2.5-pro").
|
|
98
|
+
*/
|
|
99
|
+
export async function runGemini(
|
|
100
|
+
prompt: string,
|
|
101
|
+
modelId: string,
|
|
102
|
+
timeoutMs: number
|
|
103
|
+
): Promise<string> {
|
|
104
|
+
const model = stripPrefix(modelId);
|
|
105
|
+
const args = ["-m", model, "-p", prompt];
|
|
106
|
+
const result = await runCli("gemini", args, timeoutMs);
|
|
107
|
+
|
|
108
|
+
if (result.exitCode !== 0 && result.stdout.length === 0) {
|
|
109
|
+
throw new Error(
|
|
110
|
+
`gemini exited ${result.exitCode}: ${result.stderr || "(no output)"}`
|
|
111
|
+
);
|
|
112
|
+
}
|
|
113
|
+
|
|
114
|
+
return result.stdout || result.stderr; // gemini sometimes writes to stderr
|
|
115
|
+
}
|
|
116
|
+
|
|
117
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
118
|
+
// Claude Code CLI
|
|
119
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
120
|
+
|
|
121
|
+
/**
|
|
122
|
+
* Run: claude -p --output-format text -m <modelId> "<prompt>"
|
|
123
|
+
* Strips the model prefix ("cli-claude/claude-opus-4-6" → "claude-opus-4-6").
|
|
124
|
+
*/
|
|
125
|
+
export async function runClaude(
|
|
126
|
+
prompt: string,
|
|
127
|
+
modelId: string,
|
|
128
|
+
timeoutMs: number
|
|
129
|
+
): Promise<string> {
|
|
130
|
+
const model = stripPrefix(modelId);
|
|
131
|
+
const args = [
|
|
132
|
+
"-p",
|
|
133
|
+
"--output-format",
|
|
134
|
+
"text",
|
|
135
|
+
"--permission-mode",
|
|
136
|
+
"plan",
|
|
137
|
+
"--tools",
|
|
138
|
+
"",
|
|
139
|
+
"--model",
|
|
140
|
+
model,
|
|
141
|
+
prompt,
|
|
142
|
+
];
|
|
143
|
+
const result = await runCli("claude", args, timeoutMs);
|
|
144
|
+
|
|
145
|
+
if (result.exitCode !== 0 && result.stdout.length === 0) {
|
|
146
|
+
throw new Error(
|
|
147
|
+
`claude exited ${result.exitCode}: ${result.stderr || "(no output)"}`
|
|
148
|
+
);
|
|
149
|
+
}
|
|
150
|
+
|
|
151
|
+
return result.stdout;
|
|
152
|
+
}
|
|
153
|
+
|
|
154
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
155
|
+
// Router
|
|
156
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
157
|
+
|
|
158
|
+
/**
|
|
159
|
+
* Route a chat completion request to the right CLI based on the model name.
|
|
160
|
+
* Model naming convention:
|
|
161
|
+
* cli-gemini/<id> → gemini CLI
|
|
162
|
+
* cli-claude/<id> → claude CLI
|
|
163
|
+
*/
|
|
164
|
+
export async function routeToCliRunner(
|
|
165
|
+
model: string,
|
|
166
|
+
messages: ChatMessage[],
|
|
167
|
+
timeoutMs: number
|
|
168
|
+
): Promise<string> {
|
|
169
|
+
const prompt = formatPrompt(messages);
|
|
170
|
+
|
|
171
|
+
if (model.startsWith("cli-gemini/")) {
|
|
172
|
+
return runGemini(prompt, model, timeoutMs);
|
|
173
|
+
}
|
|
174
|
+
|
|
175
|
+
if (model.startsWith("cli-claude/")) {
|
|
176
|
+
return runClaude(prompt, model, timeoutMs);
|
|
177
|
+
}
|
|
178
|
+
|
|
179
|
+
throw new Error(
|
|
180
|
+
`Unknown CLI bridge model: "${model}". ` +
|
|
181
|
+
`Use "cli-gemini/<model>" or "cli-claude/<model>".`
|
|
182
|
+
);
|
|
183
|
+
}
|
|
184
|
+
|
|
185
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
186
|
+
// Helpers
|
|
187
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
188
|
+
|
|
189
|
+
/** Strip the "cli-gemini/" or "cli-claude/" prefix from a model ID. */
|
|
190
|
+
function stripPrefix(modelId: string): string {
|
|
191
|
+
const slash = modelId.indexOf("/");
|
|
192
|
+
return slash === -1 ? modelId : modelId.slice(slash + 1);
|
|
193
|
+
}
|
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* codex-auth.ts
|
|
3
|
+
*
|
|
4
|
+
* Reads OAuth credentials stored by the Codex CLI (~/.codex/auth.json)
|
|
5
|
+
* and bridges them into OpenClaw's openai-codex provider registration.
|
|
6
|
+
*
|
|
7
|
+
* The Codex CLI manages its own token lifecycle (auto-refresh). This module
|
|
8
|
+
* reads the stored tokens on demand and re-reads on OAuth refresh to pick up
|
|
9
|
+
* any token the Codex CLI has renewed since last read.
|
|
10
|
+
*/
|
|
11
|
+
|
|
12
|
+
import { readFile } from "node:fs/promises";
|
|
13
|
+
import { homedir } from "node:os";
|
|
14
|
+
import { join } from "node:path";
|
|
15
|
+
|
|
16
|
+
export const DEFAULT_CODEX_AUTH_PATH = join(homedir(), ".codex", "auth.json");
|
|
17
|
+
export const DEFAULT_MODEL = "openai-codex/gpt-5.2";
|
|
18
|
+
|
|
19
|
+
/** Subset of ~/.codex/auth.json we care about */
|
|
20
|
+
interface CodexAuthFile {
|
|
21
|
+
auth_mode: string;
|
|
22
|
+
OPENAI_API_KEY?: string | null;
|
|
23
|
+
tokens?: {
|
|
24
|
+
access_token?: string;
|
|
25
|
+
refresh_token?: string;
|
|
26
|
+
id_token?: string;
|
|
27
|
+
account_id?: string;
|
|
28
|
+
};
|
|
29
|
+
last_refresh?: string;
|
|
30
|
+
}
|
|
31
|
+
|
|
32
|
+
export interface CodexCredentials {
|
|
33
|
+
accessToken: string;
|
|
34
|
+
refreshToken: string | null;
|
|
35
|
+
/** approximate expiry epoch-ms — Codex tokens typically last ~1h */
|
|
36
|
+
expiresAt: number | null;
|
|
37
|
+
email: string | null;
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
/**
|
|
41
|
+
* Read and validate credentials from the Codex auth file.
|
|
42
|
+
* Throws if the file is missing, unreadable, or contains no usable token.
|
|
43
|
+
*/
|
|
44
|
+
export async function readCodexCredentials(
|
|
45
|
+
authPath: string = DEFAULT_CODEX_AUTH_PATH
|
|
46
|
+
): Promise<CodexCredentials> {
|
|
47
|
+
let raw: string;
|
|
48
|
+
try {
|
|
49
|
+
raw = await readFile(authPath, "utf8");
|
|
50
|
+
} catch (err) {
|
|
51
|
+
throw new Error(
|
|
52
|
+
`Cannot read Codex auth file at ${authPath}. ` +
|
|
53
|
+
`Run 'codex login' first, then retry. (${(err as Error).message})`
|
|
54
|
+
);
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
let parsed: CodexAuthFile;
|
|
58
|
+
try {
|
|
59
|
+
parsed = JSON.parse(raw) as CodexAuthFile;
|
|
60
|
+
} catch {
|
|
61
|
+
throw new Error(`Codex auth file at ${authPath} is not valid JSON.`);
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
// Prefer OAuth access_token; fall back to API key stored by --with-api-key login
|
|
65
|
+
const accessToken =
|
|
66
|
+
parsed.tokens?.access_token ?? parsed.OPENAI_API_KEY ?? null;
|
|
67
|
+
|
|
68
|
+
if (!accessToken) {
|
|
69
|
+
throw new Error(
|
|
70
|
+
`No access token found in ${authPath}. ` +
|
|
71
|
+
`Run 'codex login' and sign in with your ChatGPT account, then retry.`
|
|
72
|
+
);
|
|
73
|
+
}
|
|
74
|
+
|
|
75
|
+
// Estimate expiry: last_refresh + 3600s (typical ChatGPT token lifetime)
|
|
76
|
+
let expiresAt: number | null = null;
|
|
77
|
+
if (parsed.last_refresh) {
|
|
78
|
+
const refreshedAt = Date.parse(parsed.last_refresh);
|
|
79
|
+
if (!isNaN(refreshedAt)) {
|
|
80
|
+
expiresAt = refreshedAt + 3600 * 1000;
|
|
81
|
+
}
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
return {
|
|
85
|
+
accessToken,
|
|
86
|
+
refreshToken: parsed.tokens?.refresh_token ?? null,
|
|
87
|
+
expiresAt,
|
|
88
|
+
email: null, // Codex auth.json doesn't expose email directly
|
|
89
|
+
};
|
|
90
|
+
}
|
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* config-patcher.ts
|
|
3
|
+
*
|
|
4
|
+
* Patches ~/.openclaw/openclaw.json to add the vllm provider config
|
|
5
|
+
* pointing at our local CLI proxy server.
|
|
6
|
+
*
|
|
7
|
+
* Only patches if the cli-bridge models are not already present.
|
|
8
|
+
* Always backs up + validates before writing.
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
import fs from "node:fs";
|
|
12
|
+
import path from "node:path";
|
|
13
|
+
import { homedir } from "node:os";
|
|
14
|
+
import { CLI_MODELS } from "./proxy-server.js";
|
|
15
|
+
|
|
16
|
+
const CONFIG_PATH = path.join(homedir(), ".openclaw", "openclaw.json");
|
|
17
|
+
const BACKUPS_DIR = path.join(homedir(), ".openclaw", "backups");
|
|
18
|
+
const CLI_BRIDGE_API_KEY = "cli-bridge";
|
|
19
|
+
|
|
20
|
+
export interface PatchResult {
|
|
21
|
+
patched: boolean;
|
|
22
|
+
reason: string;
|
|
23
|
+
}
|
|
24
|
+
|
|
25
|
+
/**
|
|
26
|
+
* Ensure the vllm provider entry in openclaw.json includes CLI bridge models.
|
|
27
|
+
* Returns {patched: false} if already up to date.
|
|
28
|
+
*/
|
|
29
|
+
export function patchOpencllawConfig(port: number): PatchResult {
|
|
30
|
+
let raw: string;
|
|
31
|
+
try {
|
|
32
|
+
raw = fs.readFileSync(CONFIG_PATH, "utf-8");
|
|
33
|
+
} catch (err) {
|
|
34
|
+
return { patched: false, reason: `Cannot read config: ${(err as Error).message}` };
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
let cfg: Record<string, unknown>;
|
|
38
|
+
try {
|
|
39
|
+
cfg = JSON.parse(raw) as Record<string, unknown>;
|
|
40
|
+
} catch {
|
|
41
|
+
return { patched: false, reason: "Config is not valid JSON — skipping patch." };
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
// Check if already patched (both provider models + agent allowlist)
|
|
45
|
+
const models = (cfg as any)?.models?.providers?.vllm?.models;
|
|
46
|
+
const hasBridgeProviderModels =
|
|
47
|
+
Array.isArray(models) &&
|
|
48
|
+
models.some((m: any) => typeof m?.id === "string" && m.id.startsWith("cli-"));
|
|
49
|
+
|
|
50
|
+
const allowedModels = (cfg as any)?.agents?.defaults?.models ?? {};
|
|
51
|
+
const hasBridgeAllowlist =
|
|
52
|
+
!!allowedModels["vllm/cli-gemini/gemini-2.5-pro"] ||
|
|
53
|
+
!!allowedModels["vllm/cli-claude/claude-sonnet-4-6"];
|
|
54
|
+
|
|
55
|
+
if (hasBridgeProviderModels && hasBridgeAllowlist) {
|
|
56
|
+
return { patched: false, reason: "vllm provider + agent allowlist already include cli-bridge models." };
|
|
57
|
+
}
|
|
58
|
+
|
|
59
|
+
// Backup
|
|
60
|
+
try {
|
|
61
|
+
fs.mkdirSync(BACKUPS_DIR, { recursive: true });
|
|
62
|
+
const ts = new Date().toISOString().replace(/[:.]/g, "-").slice(0, 19) + "Z";
|
|
63
|
+
const backupPath = path.join(BACKUPS_DIR, `openclaw.json.${ts}.bak`);
|
|
64
|
+
fs.copyFileSync(CONFIG_PATH, backupPath);
|
|
65
|
+
} catch {
|
|
66
|
+
// Non-fatal — proceed without backup
|
|
67
|
+
}
|
|
68
|
+
|
|
69
|
+
// Build vllm model list
|
|
70
|
+
const vllmModels = CLI_MODELS.map((m) => ({
|
|
71
|
+
id: m.id,
|
|
72
|
+
name: m.name,
|
|
73
|
+
reasoning: false,
|
|
74
|
+
input: ["text"],
|
|
75
|
+
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
|
76
|
+
contextWindow: m.contextWindow,
|
|
77
|
+
maxTokens: m.maxTokens,
|
|
78
|
+
}));
|
|
79
|
+
|
|
80
|
+
// Merge into config
|
|
81
|
+
const cfgAny = cfg as any;
|
|
82
|
+
cfgAny.models = cfgAny.models ?? {};
|
|
83
|
+
cfgAny.models.providers = cfgAny.models.providers ?? {};
|
|
84
|
+
|
|
85
|
+
const existingVllm = cfgAny.models.providers.vllm ?? {};
|
|
86
|
+
const existingModels: unknown[] = Array.isArray(existingVllm.models)
|
|
87
|
+
? existingVllm.models
|
|
88
|
+
: [];
|
|
89
|
+
|
|
90
|
+
// Merge: keep non-cli-bridge models, add/replace cli-bridge models
|
|
91
|
+
const mergedModels = [
|
|
92
|
+
...existingModels.filter(
|
|
93
|
+
(m: any) => typeof m?.id === "string" && !m.id.startsWith("cli-")
|
|
94
|
+
),
|
|
95
|
+
...vllmModels,
|
|
96
|
+
];
|
|
97
|
+
|
|
98
|
+
cfgAny.models.providers.vllm = {
|
|
99
|
+
...existingVllm,
|
|
100
|
+
baseUrl: `http://127.0.0.1:${port}/v1`,
|
|
101
|
+
apiKey: CLI_BRIDGE_API_KEY,
|
|
102
|
+
api: "openai-completions",
|
|
103
|
+
models: mergedModels,
|
|
104
|
+
};
|
|
105
|
+
|
|
106
|
+
// Also whitelist the full provider/model refs in agents.defaults.models
|
|
107
|
+
// so session model switching and allow checks accept them.
|
|
108
|
+
cfgAny.agents = cfgAny.agents ?? {};
|
|
109
|
+
cfgAny.agents.defaults = cfgAny.agents.defaults ?? {};
|
|
110
|
+
cfgAny.agents.defaults.models = cfgAny.agents.defaults.models ?? {};
|
|
111
|
+
|
|
112
|
+
for (const m of vllmModels) {
|
|
113
|
+
const ref = `vllm/${m.id}`;
|
|
114
|
+
if (!cfgAny.agents.defaults.models[ref]) {
|
|
115
|
+
cfgAny.agents.defaults.models[ref] = {
|
|
116
|
+
alias: m.id.replace(/\//g, "-")
|
|
117
|
+
};
|
|
118
|
+
}
|
|
119
|
+
}
|
|
120
|
+
|
|
121
|
+
// Validate JSON before writing
|
|
122
|
+
let newRaw: string;
|
|
123
|
+
try {
|
|
124
|
+
newRaw = JSON.stringify(cfg, null, 2);
|
|
125
|
+
JSON.parse(newRaw); // validate
|
|
126
|
+
} catch {
|
|
127
|
+
return { patched: false, reason: "Failed to serialize patched config." };
|
|
128
|
+
}
|
|
129
|
+
|
|
130
|
+
// Write
|
|
131
|
+
try {
|
|
132
|
+
fs.writeFileSync(CONFIG_PATH, newRaw, "utf-8");
|
|
133
|
+
return { patched: true, reason: `vllm provider configured at http://127.0.0.1:${port}/v1` };
|
|
134
|
+
} catch (err) {
|
|
135
|
+
return { patched: false, reason: `Cannot write config: ${(err as Error).message}` };
|
|
136
|
+
}
|
|
137
|
+
}
|