free-coding-models 0.1.82 → 0.1.84
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +53 -51
- package/bin/free-coding-models.js +429 -4276
- package/package.json +2 -2
- package/sources.js +3 -2
- package/src/account-manager.js +600 -0
- package/src/analysis.js +197 -0
- package/{lib → src}/config.js +122 -0
- package/src/constants.js +116 -0
- package/src/error-classifier.js +154 -0
- package/src/favorites.js +98 -0
- package/src/key-handler.js +1005 -0
- package/src/log-reader.js +174 -0
- package/src/model-merger.js +78 -0
- package/src/openclaw.js +131 -0
- package/src/opencode-sync.js +159 -0
- package/src/opencode.js +952 -0
- package/src/overlays.js +840 -0
- package/src/ping.js +186 -0
- package/src/provider-metadata.js +218 -0
- package/src/provider-quota-fetchers.js +319 -0
- package/src/proxy-server.js +543 -0
- package/src/quota-capabilities.js +112 -0
- package/src/render-helpers.js +239 -0
- package/src/render-table.js +567 -0
- package/src/request-transformer.js +180 -0
- package/src/setup.js +105 -0
- package/src/telemetry.js +382 -0
- package/src/tier-colors.js +37 -0
- package/src/token-stats.js +310 -0
- package/src/token-usage-reader.js +63 -0
- package/src/updater.js +237 -0
- package/src/usage-reader.js +245 -0
- package/{lib → src}/utils.js +55 -0
|
@@ -0,0 +1,174 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @file lib/log-reader.js
|
|
3
|
+
* @description Pure functions to load recent request-log entries from
|
|
4
|
+
* ~/.free-coding-models/request-log.jsonl, newest-first, bounded by a
|
|
5
|
+
* configurable row limit.
|
|
6
|
+
*
|
|
7
|
+
* Design principles:
|
|
8
|
+
* - Bounded reads only — never slurp the entire log for every TUI repaint.
|
|
9
|
+
* - Tolerates malformed / partially-written JSONL lines by skipping them.
|
|
10
|
+
* - No shared mutable state (pure functions, injectable file path for tests).
|
|
11
|
+
* - No new npm dependencies — uses only Node.js built-ins.
|
|
12
|
+
*
|
|
13
|
+
* Default path:
|
|
14
|
+
* ~/.free-coding-models/request-log.jsonl
|
|
15
|
+
*
|
|
16
|
+
* Row object shape returned from loadRecentLogs():
|
|
17
|
+
* {
|
|
18
|
+
* time: string // ISO timestamp string (from entry.timestamp)
|
|
19
|
+
* requestType: string // e.g. "chat.completions"
|
|
20
|
+
* model: string // e.g. "llama-3.3-70b-instruct"
|
|
21
|
+
* provider: string // e.g. "nvidia"
|
|
22
|
+
* status: string // e.g. "200" | "429" | "error"
|
|
23
|
+
* tokens: number // promptTokens + completionTokens (0 if unknown)
|
|
24
|
+
* latency: number // ms (0 if unknown)
|
|
25
|
+
* }
|
|
26
|
+
*
|
|
27
|
+
* @exports loadRecentLogs
|
|
28
|
+
* @exports parseLogLine
|
|
29
|
+
*/
|
|
30
|
+
|
|
31
|
+
import { existsSync, statSync, openSync, readSync, closeSync } from 'node:fs'
|
|
32
|
+
import { join } from 'node:path'
|
|
33
|
+
import { homedir } from 'node:os'
|
|
34
|
+
|
|
35
|
+
// Default on-disk location of the request log (one JSON object per line).
const DEFAULT_LOG_FILE = join(homedir(), '.free-coding-models', 'request-log.jsonl')

/** Maximum bytes to read from the tail of the file to avoid OOM on large logs. */
const MAX_READ_BYTES = 128 * 1024 // 128 KB
|
|
39
|
+
|
|
40
|
+
/**
 * Convert a raw timestamp value into an ISO-8601 string.
 *
 * Accepts epoch-millisecond numbers, numeric strings (treated as epoch
 * millis), and date strings parseable by `new Date()`.
 *
 * @param {*} raw - Timestamp value from a log entry.
 * @returns {string|null} ISO string, or null when it cannot be interpreted.
 */
function normalizeTimestamp(raw) {
  if (typeof raw === 'number') {
    return Number.isFinite(raw) ? new Date(raw).toISOString() : null
  }
  if (typeof raw !== 'string') return null

  // Numeric strings are epoch milliseconds, not date strings.
  const asNumber = Number(raw)
  if (Number.isFinite(asNumber)) return new Date(asNumber).toISOString()

  const asDate = new Date(raw)
  return Number.isNaN(asDate.getTime()) ? null : asDate.toISOString()
}
|
|
52
|
+
|
|
53
|
+
/**
 * Best-effort provider name for a log entry: an explicit providerKey/provider
 * field wins; otherwise the prefix of a "provider/account" accountId is used.
 *
 * @param {object} entry - Parsed log entry.
 * @returns {string} Provider name, or 'unknown'.
 */
function inferProvider(entry) {
  const hasExplicit = Boolean(entry.providerKey) || Boolean(entry.provider)
  if (hasExplicit) return String(entry.providerKey ?? entry.provider)

  const { accountId } = entry
  if (typeof accountId === 'string' && accountId.includes('/')) {
    const [prefix] = accountId.split('/')
    return prefix || 'unknown'
  }
  return 'unknown'
}
|
|
62
|
+
|
|
63
|
+
/**
 * Derive a display status string for a log entry.
 * Uses statusCode/status when present (even if null), then the boolean
 * success flag, and finally falls back to 'unknown'.
 *
 * @param {object} entry - Parsed log entry.
 * @returns {string} e.g. "200" | "429" | "error" | "unknown"
 */
function inferStatus(entry) {
  // NOTE: the check is deliberately `!== undefined` (not nullish) so an
  // explicit null statusCode still takes this branch, as before.
  const hasCode = entry.statusCode !== undefined || entry.status !== undefined
  if (hasCode) return String(entry.statusCode ?? entry.status)

  if (typeof entry.success === 'boolean') {
    return entry.success ? '200' : 'error'
  }
  return 'unknown'
}
|
|
72
|
+
|
|
73
|
+
/**
 * Derive the request type for a log entry from explicit fields or the URL
 * path, defaulting to 'chat.completions'.
 *
 * @param {object} entry - Parsed log entry.
 * @returns {string} e.g. "chat.completions" | "models"
 */
function inferRequestType(entry) {
  if (entry.requestType !== undefined || entry.type !== undefined) {
    return String(entry.requestType ?? entry.type)
  }

  const url = typeof entry.url === 'string' ? entry.url : ''
  if (url.includes('/chat/completions')) return 'chat.completions'
  if (url.includes('/models')) return 'models'

  // Most traffic is chat, so that is the fallback.
  return 'chat.completions'
}
|
|
83
|
+
|
|
84
|
+
/**
 * Parse a single JSONL line into a normalised log row object.
 *
 * Returns `null` for any line that is blank, not valid JSON, not an object,
 * or missing the required `timestamp` field.
 *
 * @param {string} line - A single text line from the JSONL file.
 * @returns {{ time: string, requestType: string, model: string, provider: string, status: string, tokens: number, latency: number } | null}
 */
export function parseLogLine(line) {
  const text = line.trim()
  if (text === '') return null

  let entry = null
  try {
    entry = JSON.parse(text)
  } catch {
    // Partially-written or corrupt JSONL lines are skipped silently.
    return null
  }
  if (!entry || typeof entry !== 'object' || !entry.timestamp) return null

  const time = normalizeTimestamp(entry.timestamp)
  if (!time) return null

  // Token counts may live under OpenAI-style `usage` or flat fields.
  const promptTokens = Number(entry.usage?.prompt_tokens ?? entry.promptTokens ?? 0)
  const completionTokens = Number(entry.usage?.completion_tokens ?? entry.completionTokens ?? 0)

  return {
    time,
    requestType: inferRequestType(entry),
    model: String(entry.modelId ?? entry.model ?? 'unknown'),
    provider: inferProvider(entry),
    status: inferStatus(entry),
    tokens: (promptTokens + completionTokens) || 0, // NaN collapses to 0
    latency: Number(entry.latencyMs ?? entry.latency ?? 0) || 0,
  }
}
|
|
126
|
+
|
|
127
|
+
/**
 * Load the N most-recent log entries from the JSONL file, newest-first.
 *
 * Only reads up to MAX_READ_BYTES from the end of the file to avoid
 * loading the entire log history. Malformed lines are silently skipped.
 *
 * @param {object} [opts]
 * @param {string} [opts.logFile] - Path to request-log.jsonl (injectable for tests)
 * @param {number} [opts.limit] - Maximum rows to return (default 200)
 * @returns {Array<{ time: string, requestType: string, model: string, provider: string, status: string, tokens: number, latency: number }>}
 */
export function loadRecentLogs({ logFile = DEFAULT_LOG_FILE, limit = 200 } = {}) {
  try {
    if (!existsSync(logFile)) return []

    const fileSize = statSync(logFile).size
    if (fileSize === 0) return []

    // 📖 Read only the tail of the file (bounded by MAX_READ_BYTES) to avoid
    // 📖 reading multi-megabyte logs on every TUI repaint.
    const readBytes = Math.min(fileSize, MAX_READ_BYTES)
    const fileOffset = fileSize - readBytes

    const buf = Buffer.allocUnsafe(readBytes)
    const fd = openSync(logFile, 'r')
    let bytesRead = 0
    try {
      // FIX: capture the actual byte count returned by readSync instead of
      // assuming the full request was satisfied — on a short read the old
      // code decoded uninitialised allocUnsafe() memory as text.
      bytesRead = readSync(fd, buf, 0, readBytes, fileOffset)
    } finally {
      closeSync(fd)
    }

    // Decode only the bytes actually read.
    const text = buf.toString('utf8', 0, bytesRead)

    // 📖 Split on newlines; if we started mid-line (fileOffset > 0), drop
    // 📖 the first (potentially incomplete) line to avoid corrupt JSON.
    const rawLines = text.split('\n')
    const lines = fileOffset > 0 ? rawLines.slice(1) : rawLines

    // Walk backwards so the newest entries come first, stopping at `limit`.
    const rows = []
    for (let i = lines.length - 1; i >= 0 && rows.length < limit; i--) {
      const row = parseLogLine(lines[i])
      if (row) rows.push(row)
    }
    return rows
  } catch {
    // Best-effort: any I/O failure yields an empty table rather than
    // crashing the TUI.
    return []
  }
}
|
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
// Ordinal quality ranking used to pick the "best" tier across providers —
// lower value = better tier (S+ best); tiers missing from this table rank last.
const TIER_RANK = { 'S+': 0, 'S': 1, 'A+': 2, 'A': 3, 'A-': 4, 'B+': 5, 'B': 6, 'C': 7 }
|
|
2
|
+
|
|
3
|
+
/**
 * Parse a context-window label into a count of K tokens.
 * "128k" → 128, "1m" → 1000; empty/unparseable input → 0.
 *
 * @param {string|undefined} ctx - Context-window label.
 * @returns {number} Size in thousands of tokens.
 */
function parseCtxK(ctx) {
  if (!ctx) return 0
  const lowered = ctx.toLowerCase()
  const value = parseFloat(lowered)
  // "m" suffix means millions of tokens, i.e. value * 1000 in K units.
  if (lowered.endsWith('m')) return value * 1000
  return value || 0
}
|
|
9
|
+
|
|
10
|
+
/**
 * Parse a SWE-bench score label (e.g. "70.3" or "70.3%") into a float.
 *
 * @param {string|undefined} swe - Score label.
 * @returns {number} Numeric score, or 0 when unparseable.
 */
function parseSwePercent(swe) {
  const value = parseFloat(swe)
  return value || 0
}
|
|
13
|
+
|
|
14
|
+
/**
 * Generate a unique slug from a label.
 * "DeepSeek V3.2" → "deepseek-v3-2"
 * Appends a numeric suffix on collision and records the chosen slug in
 * `existingSlugs` (mutated in place).
 *
 * @param {string} label - Display label to slugify.
 * @param {Set<string>} existingSlugs - Already-used slugs (will be mutated).
 * @returns {string} Unique URL-safe slug.
 */
function slugify(label, existingSlugs) {
  const base = label
    .toLowerCase()
    .replace(/[^a-z0-9]+/g, '-')
    .replace(/^-|-$/g, '')

  let candidate = base
  for (let suffix = 2; existingSlugs.has(candidate); suffix++) {
    candidate = `${base}-${suffix}`
  }
  existingSlugs.add(candidate)
  return candidate
}
|
|
29
|
+
|
|
30
|
+
/**
 * Build merged model list from flat MODELS array.
 * Groups by display label. Each merged entry contains all providers.
 *
 * @param {Array} models - Flat array of [modelId, label, tier, sweScore, ctx, providerKey]
 * @returns {Array<MergedModel>}
 *
 * MergedModel: {
 *   slug: string,          // unique URL-safe identifier
 *   label: string,         // display name
 *   tier: string,          // best tier across providers
 *   sweScore: string,      // highest SWE score
 *   ctx: string,           // largest context window
 *   providerCount: number,
 *   providers: Array<{ modelId: string, providerKey: string, tier: string }>
 * }
 */
export function buildMergedModels(models) {
  const byLabel = new Map()

  for (const [modelId, label, tier, sweScore, ctx, providerKey] of models) {
    let group = byLabel.get(label)
    if (!group) {
      group = { label, tier, sweScore, ctx, providers: [] }
      byLabel.set(label, group)
    }

    group.providers.push({ modelId, providerKey, tier })

    // Best (lowest-ranked) tier wins; tiers absent from TIER_RANK rank last.
    if ((TIER_RANK[tier] ?? 99) < (TIER_RANK[group.tier] ?? 99)) {
      group.tier = tier
    }
    // Highest SWE score wins.
    if (parseSwePercent(sweScore) > parseSwePercent(group.sweScore)) {
      group.sweScore = sweScore
    }
    // Largest context window wins.
    if (parseCtxK(ctx) > parseCtxK(group.ctx)) {
      group.ctx = ctx
    }
  }

  const usedSlugs = new Set()
  const merged = []
  for (const group of byLabel.values()) {
    merged.push({
      ...group,
      slug: slugify(group.label, usedSlugs),
      providerCount: group.providers.length,
    })
  }
  return merged
}
|
package/src/openclaw.js
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @file openclaw.js
|
|
3
|
+
* @description OpenClaw config helpers for setting NVIDIA NIM defaults.
|
|
4
|
+
*
|
|
5
|
+
* @details
|
|
6
|
+
* This module owns the OpenClaw integration logic:
|
|
7
|
+
* - Read/write ~/.openclaw/openclaw.json
|
|
8
|
+
* - Ensure the NVIDIA provider block exists under models.providers
|
|
9
|
+
* - Patch the OpenClaw allowlist for NVIDIA models when needed
|
|
10
|
+
* - Set the selected model as the default primary model
|
|
11
|
+
*
|
|
12
|
+
* → Functions:
|
|
13
|
+
* - `loadOpenClawConfig` — read OpenClaw config as JSON
|
|
14
|
+
* - `saveOpenClawConfig` — persist OpenClaw config safely
|
|
15
|
+
* - `startOpenClaw` — set NVIDIA model as OpenClaw default
|
|
16
|
+
*
|
|
17
|
+
* @exports { loadOpenClawConfig, saveOpenClawConfig, startOpenClaw }
|
|
18
|
+
* @see ../patch-openclaw-models.js
|
|
19
|
+
*/
|
|
20
|
+
|
|
21
|
+
import chalk from 'chalk'
|
|
22
|
+
import { copyFileSync, existsSync, mkdirSync, readFileSync, writeFileSync } from 'fs'
|
|
23
|
+
import { homedir } from 'os'
|
|
24
|
+
import { join } from 'path'
|
|
25
|
+
import { patchOpenClawModelsJson } from '../patch-openclaw-models.js'
|
|
26
|
+
|
|
27
|
+
// 📖 OpenClaw config: ~/.openclaw/openclaw.json (JSON format, may be JSON5 in newer versions)
// Path is resolved once at module load time from the current home directory.
const OPENCLAW_CONFIG = join(homedir(), '.openclaw', 'openclaw.json')
|
|
29
|
+
|
|
30
|
+
/**
 * Read ~/.openclaw/openclaw.json as a plain object.
 * Returns {} when the file is missing or cannot be parsed as JSON.
 *
 * @returns {object} Parsed OpenClaw config, or {} on any failure.
 */
export function loadOpenClawConfig() {
  if (!existsSync(OPENCLAW_CONFIG)) return {}
  try {
    // 📖 JSON.parse works for standard JSON; OpenClaw may use JSON5 but base config is valid JSON
    const raw = readFileSync(OPENCLAW_CONFIG, 'utf8')
    return JSON.parse(raw)
  } catch {
    return {}
  }
}
|
|
39
|
+
|
|
40
|
+
/**
 * Persist the OpenClaw config to ~/.openclaw/openclaw.json, creating the
 * directory if needed. Writes pretty-printed JSON (2-space indent).
 *
 * @param {object} config - Config object to serialize.
 */
export function saveOpenClawConfig(config) {
  const configDir = join(homedir(), '.openclaw')
  if (!existsSync(configDir)) {
    mkdirSync(configDir, { recursive: true })
  }
  const serialized = JSON.stringify(config, null, 2)
  writeFileSync(OPENCLAW_CONFIG, serialized)
}
|
|
47
|
+
|
|
48
|
+
// 📖 startOpenClaw: sets the selected NVIDIA NIM model as default in OpenClaw config.
// 📖 Also ensures the nvidia provider block is present with the NIM base URL.
// 📖 Does NOT launch OpenClaw — OpenClaw runs as a daemon, so config changes are picked up on restart.
export async function startOpenClaw(model, apiKey) {
  // "provider/model-id" form used everywhere below, e.g. "nvidia/deepseek-ai/deepseek-v3.2"
  const qualifiedId = `nvidia/${model.modelId}`

  // Small helper: replace a falsy child with {} and return it (keeps the
  // original "if (!x) x = {}" semantics for every nested section).
  const ensureObject = (parent, key) => {
    if (!parent[key]) parent[key] = {}
    return parent[key]
  }

  console.log(chalk.rgb(255, 100, 50)(` 🦞 Setting ${chalk.bold(model.label)} as OpenClaw default…`))
  console.log(chalk.dim(` Model: ${qualifiedId}`))
  console.log()

  const config = loadOpenClawConfig()

  // 📖 Backup existing config before touching it
  if (existsSync(OPENCLAW_CONFIG)) {
    const backupPath = `${OPENCLAW_CONFIG}.backup-${Date.now()}`
    copyFileSync(OPENCLAW_CONFIG, backupPath)
    console.log(chalk.dim(` 💾 Backup: ${backupPath}`))
  }

  // 📖 Patch models.json to add all NVIDIA models (fixes "not allowed" errors)
  const patched = patchOpenClawModelsJson()
  if (patched.wasPatched) {
    console.log(chalk.dim(` ✨ Added ${patched.added} NVIDIA models to allowlist (${patched.total} total)`))
    if (patched.backup) {
      console.log(chalk.dim(` 💾 models.json backup: ${patched.backup}`))
    }
  }

  // 📖 Ensure models.providers section exists with nvidia NIM block.
  // 📖 Per OpenClaw docs (docs.openclaw.ai/providers/nvidia), providers MUST be nested under
  // 📖 "models.providers", NOT at the config root. Root-level "providers" is ignored by OpenClaw.
  // 📖 API key is NOT stored in the provider block — it's read from env var NVIDIA_API_KEY.
  const providers = ensureObject(ensureObject(config, 'models'), 'providers')
  if (!providers.nvidia) {
    providers.nvidia = {
      baseUrl: 'https://integrate.api.nvidia.com/v1',
      api: 'openai-completions',
      models: [],
    }
    console.log(chalk.dim(' ➕ Added nvidia provider block to OpenClaw config (models.providers.nvidia)'))
  }
  // 📖 Ensure models array exists even if the provider block was created by an older version
  if (!Array.isArray(providers.nvidia.models)) {
    providers.nvidia.models = []
  }

  // 📖 Store API key in the root "env" section so OpenClaw can read it as NVIDIA_API_KEY env var.
  // 📖 Only writes if not already set to avoid overwriting an existing key.
  const resolvedKey = apiKey || process.env.NVIDIA_API_KEY
  if (resolvedKey) {
    const envSection = ensureObject(config, 'env')
    if (!envSection.NVIDIA_API_KEY) {
      envSection.NVIDIA_API_KEY = resolvedKey
      console.log(chalk.dim(' 🔑 Stored NVIDIA_API_KEY in config env section'))
    }
  }

  // 📖 Set as the default primary model for all agents ("provider/model-id" format).
  const agentDefaults = ensureObject(ensureObject(config, 'agents'), 'defaults')
  ensureObject(agentDefaults, 'model').primary = qualifiedId

  // 📖 REQUIRED: OpenClaw requires the model to be explicitly listed in agents.defaults.models
  // 📖 (the allowlist). Without this entry, OpenClaw rejects the model with "not allowed".
  // 📖 See: https://docs.openclaw.ai/gateway/configuration-reference
  ensureObject(agentDefaults, 'models')[qualifiedId] = {}

  saveOpenClawConfig(config)

  console.log(chalk.rgb(255, 140, 0)(` ✓ Default model set to: ${qualifiedId}`))
  console.log()
  console.log(chalk.dim(' 📄 Config updated: ' + OPENCLAW_CONFIG))
  console.log()
  // 📖 "openclaw restart" does NOT exist. The gateway auto-reloads on config file changes.
  // 📖 See: https://docs.openclaw.ai/gateway/configuration
  console.log(chalk.dim(' 💡 OpenClaw will reload config automatically (gateway.reload.mode).'))
  console.log(chalk.dim(` To apply manually: openclaw models set ${qualifiedId}`))
  console.log(chalk.dim(' Or run the setup wizard: openclaw configure'))
  console.log()
}
|
|
@@ -0,0 +1,159 @@
|
|
|
1
|
+
import { readFileSync, writeFileSync, copyFileSync, existsSync, mkdirSync } from 'node:fs'
|
|
2
|
+
import { join } from 'node:path'
|
|
3
|
+
import { homedir } from 'node:os'
|
|
4
|
+
import { randomBytes } from 'node:crypto'
|
|
5
|
+
|
|
6
|
+
// OpenCode config lives at ~/.config/opencode/opencode.json; a single .bak
// backup file is kept alongside it.
const OC_CONFIG_DIR = join(homedir(), '.config', 'opencode')
const OC_CONFIG_PATH = join(OC_CONFIG_DIR, 'opencode.json')
const OC_BACKUP_PATH = join(OC_CONFIG_DIR, 'opencode.json.bak')
// Provider id under which FCM registers itself in OpenCode's provider map.
const FCM_PROVIDER_ID = 'fcm-proxy'
// Fallback proxy base URL used when neither a runtime port nor a persisted
// baseURL is available.
const DEFAULT_PROXY_BASE_URL = 'http://127.0.0.1:8045/v1'
|
|
11
|
+
|
|
12
|
+
/**
 * Create a fresh bearer token for the local proxy: "fcm_" + 48 hex chars
 * (24 random bytes).
 *
 * @returns {string} New proxy token.
 */
function generateProxyToken() {
  const entropy = randomBytes(24).toString('hex')
  return `fcm_${entropy}`
}
|
|
15
|
+
|
|
16
|
+
/**
 * Normalise a base URL so it ends in "/v1" with no trailing slashes.
 * Missing or empty input falls back to DEFAULT_PROXY_BASE_URL.
 *
 * @param {string|undefined} baseURL - Candidate base URL.
 * @returns {string} URL guaranteed to end in "/v1".
 */
function ensureV1BaseUrl(baseURL) {
  const isNonEmptyString = typeof baseURL === 'string' && baseURL.length > 0
  if (!isNonEmptyString) return DEFAULT_PROXY_BASE_URL

  const stripped = baseURL.replace(/\/+$/, '')
  if (stripped.endsWith('/v1')) return stripped
  return `${stripped}/v1`
}
|
|
23
|
+
|
|
24
|
+
/**
 * Load existing OpenCode config, or return empty object.
 * Both a missing file and unparseable JSON yield {} (best-effort read).
 *
 * @returns {object} Parsed OpenCode config, or {}.
 */
export function loadOpenCodeConfig() {
  try {
    if (existsSync(OC_CONFIG_PATH)) {
      const raw = readFileSync(OC_CONFIG_PATH, 'utf8')
      return JSON.parse(raw)
    }
  } catch {}
  return {}
}
|
|
35
|
+
|
|
36
|
+
/**
 * Save OpenCode config with automatic backup.
 * Snapshots the current file to the .bak path before overwriting, then
 * writes pretty-printed JSON with a trailing newline.
 *
 * @param {object} config - Config object to serialize.
 */
export function saveOpenCodeConfig(config) {
  mkdirSync(OC_CONFIG_DIR, { recursive: true })
  if (existsSync(OC_CONFIG_PATH)) {
    copyFileSync(OC_CONFIG_PATH, OC_BACKUP_PATH)
  }
  const serialized = `${JSON.stringify(config, null, 2)}\n`
  writeFileSync(OC_CONFIG_PATH, serialized)
}
|
|
48
|
+
|
|
49
|
+
/**
 * Restore OpenCode config from backup.
 * @returns {boolean} true if restored, false if no backup exists
 */
export function restoreOpenCodeBackup() {
  const hasBackup = existsSync(OC_BACKUP_PATH)
  if (hasBackup) {
    copyFileSync(OC_BACKUP_PATH, OC_CONFIG_PATH)
  }
  return hasBackup
}
|
|
58
|
+
|
|
59
|
+
/**
 * Pure merge: apply the FCM provider entry into an existing OpenCode config
 * object. No filesystem I/O, so it can be unit-tested in isolation; used
 * internally by syncToOpenCode.
 *
 * CRITICAL: this ONLY adds/updates the fcm-proxy provider entry. All other
 * providers and all other top-level keys ($schema, mcp, plugin, command,
 * model) are preserved untouched.
 *
 * proxyInfo should only carry runtime port/token while the proxy is actually
 * running. Callers MUST NOT pass stale values from a stopped proxy — omit the
 * fields so the previously persisted provider options are reused.
 *
 * @param {Object} ocConfig - Existing OpenCode config object (mutated in-place)
 * @param {Array} mergedModels - Output of buildMergedModels()
 * @param {{ proxyPort?: number, proxyToken?: string, availableModelSlugs?: Set<string>|string[] }} proxyInfo
 *        availableModelSlugs: when provided, only models whose slug is in this
 *        set are written to the OpenCode catalog (prevents "ghost" entries for
 *        models with no API keys).
 * @returns {Object} The mutated ocConfig
 */
export function mergeOcConfig(ocConfig, mergedModels, proxyInfo = {}) {
  if (!ocConfig.provider) ocConfig.provider = {}

  const prior = ocConfig.provider[FCM_PROVIDER_ID] || {}
  const priorOptions = prior.options || {}

  const { proxyPort, proxyToken, availableModelSlugs } = proxyInfo

  // The runtime port is trusted only when it is a positive integer. A
  // null/undefined/0 port means the proxy is not running — reuse the
  // persisted baseURL so we never write a broken URL.
  const portIsUsable = Number.isInteger(proxyPort) && proxyPort > 0
  const baseURL = ensureV1BaseUrl(
    portIsUsable ? `http://127.0.0.1:${proxyPort}` : priorOptions.baseURL
  )

  // Token precedence: runtime token → persisted real token → freshly minted.
  // The legacy placeholder 'fcm-proxy-token' never counts as a real token.
  const tokenFromRuntime = typeof proxyToken === 'string' && proxyToken.length > 0
  const tokenFromDisk =
    typeof priorOptions.apiKey === 'string' &&
    priorOptions.apiKey.length > 0 &&
    priorOptions.apiKey !== 'fcm-proxy-token'
  let apiKey
  if (tokenFromRuntime) {
    apiKey = proxyToken
  } else if (tokenFromDisk) {
    apiKey = priorOptions.apiKey
  } else {
    apiKey = generateProxyToken()
  }

  const slugFilter = availableModelSlugs ? new Set(availableModelSlugs) : null

  const models = {}
  for (const entry of mergedModels) {
    if (slugFilter !== null && !slugFilter.has(entry.slug)) continue
    models[entry.slug] = { name: entry.label }
  }

  ocConfig.provider[FCM_PROVIDER_ID] = {
    npm: '@ai-sdk/openai-compatible',
    name: 'FCM Rotation Proxy',
    options: {
      ...priorOptions,
      baseURL,
      apiKey,
    },
    models,
  }

  return ocConfig
}
|
|
130
|
+
|
|
131
|
+
/**
 * MERGE the single FCM proxy provider into OpenCode config on disk.
 *
 * CRITICAL: only the fcm-proxy provider entry is added/updated; every other
 * provider and top-level key in the user's config is preserved.
 *
 * proxyInfo should only carry runtime port/token while the proxy is actively
 * running — callers MUST NOT pass stale values from a stopped proxy (omit the
 * fields instead so persisted provider options are reused).
 *
 * @param {Object} fcmConfig - FCM config (from loadConfig())
 * @param {Object} _sources - PROVIDERS object from sources.js (unused, kept for signature compatibility)
 * @param {Array} mergedModels - Output of buildMergedModels()
 * @param {{ proxyPort?: number, proxyToken?: string, availableModelSlugs?: Set<string>|string[] }} proxyInfo
 *        availableModelSlugs: slugs of models that have real API key accounts;
 *        when provided, only those models appear in the OpenCode catalog.
 * @returns {{ providerKey: string, modelCount: number, path: string }}
 */
export function syncToOpenCode(fcmConfig, _sources, mergedModels, proxyInfo = {}) {
  const ocConfig = mergeOcConfig(loadOpenCodeConfig(), mergedModels, proxyInfo)
  saveOpenCodeConfig(ocConfig)

  const writtenModels = ocConfig.provider[FCM_PROVIDER_ID].models
  return {
    providerKey: FCM_PROVIDER_ID,
    modelCount: Object.keys(writtenModels).length,
    path: OC_CONFIG_PATH,
  }
}
|