codeblog-app 2.2.6 → 2.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/package.json +9 -7
  2. package/src/ai/__tests__/chat.test.ts +11 -2
  3. package/src/ai/__tests__/compat.test.ts +46 -0
  4. package/src/ai/__tests__/home.ai-stream.integration.test.ts +77 -0
  5. package/src/ai/__tests__/provider-registry.test.ts +61 -0
  6. package/src/ai/__tests__/provider.test.ts +58 -18
  7. package/src/ai/__tests__/stream-events.test.ts +152 -0
  8. package/src/ai/chat.ts +200 -88
  9. package/src/ai/configure.ts +13 -4
  10. package/src/ai/models.ts +26 -0
  11. package/src/ai/provider-registry.ts +150 -0
  12. package/src/ai/provider.ts +99 -137
  13. package/src/ai/stream-events.ts +64 -0
  14. package/src/ai/tools.ts +10 -6
  15. package/src/ai/types.ts +105 -0
  16. package/src/auth/index.ts +3 -1
  17. package/src/auth/oauth.ts +17 -2
  18. package/src/cli/__tests__/commands.test.ts +6 -2
  19. package/src/cli/cmd/ai.ts +10 -0
  20. package/src/cli/cmd/setup.ts +275 -5
  21. package/src/cli/ui.ts +131 -24
  22. package/src/config/index.ts +38 -1
  23. package/src/index.ts +4 -1
  24. package/src/mcp/__tests__/client.test.ts +2 -2
  25. package/src/mcp/__tests__/e2e.ts +10 -6
  26. package/src/mcp/client.ts +33 -63
  27. package/src/storage/chat.ts +3 -1
  28. package/src/tui/__tests__/input-intent.test.ts +27 -0
  29. package/src/tui/__tests__/stream-assembler.test.ts +33 -0
  30. package/src/tui/ai-stream.ts +28 -0
  31. package/src/tui/app.tsx +27 -1
  32. package/src/tui/commands.ts +41 -7
  33. package/src/tui/context/theme.tsx +2 -1
  34. package/src/tui/input-intent.ts +26 -0
  35. package/src/tui/routes/home.tsx +590 -190
  36. package/src/tui/routes/setup.tsx +20 -8
  37. package/src/tui/stream-assembler.ts +49 -0
  38. package/src/util/log.ts +3 -1
  39. package/tsconfig.json +1 -1
package/src/ai/chat.ts CHANGED
@@ -2,6 +2,7 @@ import { streamText, stepCountIs } from "ai"
  import { AIProvider } from "./provider"
  import { getChatTools } from "./tools"
  import { Log } from "../util/log"
+ import { createRunEventFactory, type StreamEvent } from "./stream-events"

  const log = Log.create({ service: "ai-chat" })

@@ -26,8 +27,9 @@ CRITICAL: When using tools, ALWAYS use the EXACT data returned by previous tool
  Write casually like a dev talking to another dev. Be specific, opinionated, and genuine.
  Use code examples when relevant. Think Juejin / HN / Linux.do vibes — not a conference paper.`

- const IDLE_TIMEOUT_MS = 15_000 // 15s without any stream event → abort
- const DEFAULT_MAX_STEPS = 10 // Allow AI to retry tools up to 10 steps (each tool call + result = 1 step)
+ const IDLE_TIMEOUT_MS = 60_000
+ const TOOL_TIMEOUT_MS = 45_000
+ const DEFAULT_MAX_STEPS = 10

  export namespace AIChat {
  export interface Message {
@@ -39,79 +41,141 @@ export namespace AIChat {
  onToken?: (token: string) => void
  onFinish?: (text: string) => void
  onError?: (error: Error) => void
- onToolCall?: (name: string, args: unknown) => void
- onToolResult?: (name: string, result: unknown) => void
+ onToolCall?: (name: string, args: unknown, callID: string) => void
+ onToolResult?: (name: string, result: unknown, callID: string) => void
  }

  export interface StreamOptions {
  maxSteps?: number
+ runId?: string
+ idleTimeoutMs?: number
+ toolTimeoutMs?: number
  }

- export async function stream(
+ export async function* streamEvents(
  messages: Message[],
- callbacks: StreamCallbacks,
  modelID?: string,
  signal?: AbortSignal,
- options?: StreamOptions
- ) {
- const model = await AIProvider.getModel(modelID)
- const tools = await getChatTools()
- const maxSteps = options?.maxSteps ?? DEFAULT_MAX_STEPS
- log.info("streaming", { model: modelID || AIProvider.DEFAULT_MODEL, messages: messages.length, toolCount: Object.keys(tools).length, maxSteps })
-
+ options?: StreamOptions,
+ ): AsyncGenerator<StreamEvent> {
  const history = messages
  .filter((m) => m.role === "user" || m.role === "assistant")
  .map((m) => ({ role: m.role as "user" | "assistant", content: m.content }))
+
+ const routeCompat = await AIProvider.resolveModelCompat(modelID).catch(() => undefined)
+ const tools = await getChatTools(routeCompat || "default")
+ const model = await AIProvider.getModel(modelID)
+ const maxSteps = options?.maxSteps ?? DEFAULT_MAX_STEPS
+ const idleTimeoutMs = options?.idleTimeoutMs ?? IDLE_TIMEOUT_MS
+ const toolTimeoutMs = options?.toolTimeoutMs ?? TOOL_TIMEOUT_MS
+
+ const run = createRunEventFactory(options?.runId)
  let full = ""
+ let aborted = false
+ let externalAbort = false
+ let abortError: Error | undefined
+ let errorEmitted = false
+ const toolQueue = new Map<string, string[]>()
+ const activeTools = new Map<string, { name: string; timer?: ReturnType<typeof setTimeout> }>()

- // Create an internal AbortController that we can trigger on idle timeout
  const internalAbort = new AbortController()
- const onExternalAbort = () => {
- log.info("external abort signal received")
+ const abortRun = (error?: Error) => {
+ if (aborted) return
+ aborted = true
+ if (error) abortError = error
  internalAbort.abort()
  }
+ const onExternalAbort = () => {
+ externalAbort = true
+ abortRun()
+ }
  signal?.addEventListener("abort", onExternalAbort)

- const result = streamText({
- model,
- system: SYSTEM_PROMPT,
- messages: history,
- tools,
- stopWhen: stepCountIs(maxSteps),
- toolChoice: "auto",
- abortSignal: internalAbort.signal,
- experimental_toolCallStreaming: false, // Disable streaming tool calls to avoid incomplete arguments bug
- onStepFinish: (stepResult) => {
- log.info("onStepFinish", {
- stepNumber: stepResult.stepNumber,
- finishReason: stepResult.finishReason,
- textLength: stepResult.text?.length ?? 0,
- toolCallsCount: stepResult.toolCalls?.length ?? 0,
- toolResultsCount: stepResult.toolResults?.length ?? 0,
- })
- },
+ yield run.next("run-start", {
+ modelID: modelID || AIProvider.DEFAULT_MODEL,
+ messageCount: history.length,
  })

- let partCount = 0
- let toolExecuting = false
- try {
- // Idle timeout: if no stream events arrive for IDLE_TIMEOUT_MS, abort.
- // Paused during tool execution (tools can take longer than 15s).
- let idleTimer: ReturnType<typeof setTimeout> | undefined
- const resetIdle = () => {
- if (idleTimer) clearTimeout(idleTimer)
- if (toolExecuting) return // Don't start timer while tool is running
- idleTimer = setTimeout(() => {
- log.info("IDLE TIMEOUT FIRED", { partCount, fullLength: full.length })
- internalAbort.abort()
- }, IDLE_TIMEOUT_MS)
+ let idleTimer: ReturnType<typeof setTimeout> | undefined
+ const clearAllToolTimers = () => {
+ for (const entry of activeTools.values()) {
+ if (entry.timer) clearTimeout(entry.timer)
  }
- resetIdle()
+ }

+ const pushToolID = (name: string, callID: string) => {
+ const queue = toolQueue.get(name)
+ if (!queue) {
+ toolQueue.set(name, [callID])
+ return
+ }
+ queue.push(callID)
+ }
+
+ const shiftToolID = (name: string) => {
+ const queue = toolQueue.get(name)
+ if (!queue || queue.length === 0) return undefined
+ const callID = queue.shift()
+ if (queue.length === 0) toolQueue.delete(name)
+ return callID
+ }
+
+ const dropToolID = (name: string, callID: string) => {
+ const queue = toolQueue.get(name)
+ if (!queue || queue.length === 0) return
+ const next = queue.filter((id) => id !== callID)
+ if (next.length === 0) {
+ toolQueue.delete(name)
+ return
+ }
+ toolQueue.set(name, next)
+ }
+
+ const armToolTimeout = (name: string, callID: string) => {
+ if (toolTimeoutMs <= 0) return
+ const timer = setTimeout(() => {
+ abortRun(new Error(`Tool call "${name}" timed out after ${toolTimeoutMs}ms`))
+ }, toolTimeoutMs)
+ const active = activeTools.get(callID)
+ if (!active) return
+ if (active.timer) clearTimeout(active.timer)
+ active.timer = timer
+ }
+
+ const startTool = (name: string, callID: string) => {
+ activeTools.set(callID, { name })
+ armToolTimeout(name, callID)
+ }
+
+ const finishTool = (callID?: string) => {
+ if (!callID) return
+ const active = activeTools.get(callID)
+ if (!active) return
+ if (active.timer) clearTimeout(active.timer)
+ activeTools.delete(callID)
+ }
+
+ const resetIdle = () => {
+ if (idleTimer) clearTimeout(idleTimer)
+ if (activeTools.size > 0) return
+ idleTimer = setTimeout(() => {
+ abortRun(new Error(`Stream idle timeout after ${idleTimeoutMs}ms`))
+ }, idleTimeoutMs)
+ }
+
+ try {
+ const result = streamText({
+ model,
+ system: SYSTEM_PROMPT,
+ messages: history,
+ tools,
+ stopWhen: stepCountIs(maxSteps),
+ toolChoice: "auto",
+ abortSignal: internalAbort.signal,
+ })
+ resetIdle()
  for await (const part of result.fullStream) {
- partCount++
  if (internalAbort.signal.aborted) {
- log.info("abort detected in loop, breaking", { partCount })
  break
  }
  resetIdle()
@@ -119,70 +183,118 @@
  switch (part.type) {
  case "text-delta": {
  const delta = (part as any).text ?? (part as any).textDelta ?? ""
- if (delta) { full += delta; callbacks.onToken?.(delta) }
+ if (!delta) break
+ full += delta
+ yield run.next("text-delta", { text: delta })
  break
  }
  case "tool-call": {
- const toolName = (part as any).toolName
- const toolArgs = (part as any).args ?? (part as any).input ?? {}
- log.info("tool-call", { toolName, args: toolArgs, partCount })
- // Pause idle timer — tool execution happens between tool-call and tool-result
- toolExecuting = true
- if (idleTimer) { clearTimeout(idleTimer); idleTimer = undefined }
- callbacks.onToolCall?.(toolName, toolArgs)
+ if (idleTimer) {
+ clearTimeout(idleTimer)
+ idleTimer = undefined
+ }
+ const name = (part as any).toolName || "unknown"
+ const args = (part as any).args ?? (part as any).input ?? {}
+ const callID = (part as any).toolCallId || (part as any).id || `${run.runId}:tool:${crypto.randomUUID()}`
+ pushToolID(name, callID)
+ startTool(name, callID)
+ yield run.next("tool-start", { callID, name, args })
  break
  }
  case "tool-result": {
- log.info("tool-result", { toolName: (part as any).toolName, partCount })
- toolExecuting = false
- callbacks.onToolResult?.((part as any).toolName, (part as any).output ?? (part as any).result ?? {})
+ const name = (part as any).toolName || "unknown"
+ const callID = (part as any).toolCallId || (part as any).id || shiftToolID(name) || `${run.runId}:tool:${crypto.randomUUID()}`
+ dropToolID(name, callID)
+ finishTool(callID)
+ resetIdle()
+ const result = (part as any).output ?? (part as any).result ?? {}
+ yield run.next("tool-result", { callID, name, result })
  break
  }
  case "tool-error" as any: {
- const errorMsg = String((part as any).error).slice(0, 500)
- log.error("tool-error", { toolName: (part as any).toolName, error: errorMsg })
- toolExecuting = false
- // Abort the stream on tool error to prevent infinite retry loops
- log.info("aborting stream due to tool error")
- internalAbort.abort()
+ const name = (part as any).toolName || "unknown"
+ const callID = (part as any).toolCallId || (part as any).id || shiftToolID(name)
+ if (callID) {
+ dropToolID(name, callID)
+ finishTool(callID)
+ }
+ resetIdle()
+ const error = new Error(String((part as any).error || "tool error"))
+ errorEmitted = true
+ yield run.next("error", { error })
+ abortRun(error)
  break
  }
  case "error": {
- const msg = (part as any).error instanceof Error ? (part as any).error.message : String((part as any).error)
- log.error("stream part error", { error: msg })
- callbacks.onError?.((part as any).error instanceof Error ? (part as any).error : new Error(msg))
+ const err = (part as any).error
+ errorEmitted = true
+ yield run.next("error", { error: err instanceof Error ? err : new Error(String(err)) })
  break
  }
  default:
  break
  }
  }
-
- if (idleTimer) clearTimeout(idleTimer)
- log.info("for-await loop exited normally", { partCount, fullLength: full.length })
  } catch (err) {
  const error = err instanceof Error ? err : new Error(String(err))
- log.info("catch block entered", { name: error.name, message: error.message.slice(0, 200), partCount })
- // Don't treat abort as a real error
- if (error.name !== "AbortError") {
- log.error("stream error (non-abort)", { error: error.message })
- if (callbacks.onError) callbacks.onError(error)
- else throw error
+ if (error.name === "AbortError") {
+ if (abortError && !externalAbort) {
+ errorEmitted = true
+ yield run.next("error", { error: abortError })
+ }
  } else {
- log.info("AbortError caught treating as normal completion")
+ log.error("stream error", { error: error.message })
+ errorEmitted = true
+ yield run.next("error", { error })
  }
- // On abort or error, still call onFinish so UI cleans up
- log.info("calling onFinish from catch", { fullLength: full.length })
- callbacks.onFinish?.(full || "(No response)")
- return full
  } finally {
- log.info("finally block", { partCount, fullLength: full.length })
+ if (idleTimer) clearTimeout(idleTimer)
+ clearAllToolTimers()
  signal?.removeEventListener("abort", onExternalAbort)
+ if (abortError && !externalAbort && !errorEmitted) {
+ yield run.next("error", { error: abortError })
+ }
+ yield run.next("run-finish", { text: full, aborted })
  }
+ }

- log.info("calling onFinish from normal path", { fullLength: full.length })
- callbacks.onFinish?.(full || "(No response)")
- return full
+ export async function stream(
+ messages: Message[],
+ callbacks: StreamCallbacks,
+ modelID?: string,
+ signal?: AbortSignal,
+ options?: StreamOptions,
+ ) {
+ let full = ""
+ try {
+ for await (const event of streamEvents(messages, modelID, signal, options)) {
+ switch (event.type) {
+ case "text-delta":
+ full += event.text
+ callbacks.onToken?.(event.text)
+ break
+ case "tool-start":
+ callbacks.onToolCall?.(event.name, event.args, event.callID)
+ break
+ case "tool-result":
+ callbacks.onToolResult?.(event.name, event.result, event.callID)
+ break
+ case "error":
+ callbacks.onError?.(event.error)
+ break
+ case "run-finish":
+ callbacks.onFinish?.(event.text || "(No response)")
+ return event.text || "(No response)"
+ }
+ }
+ callbacks.onFinish?.(full || "(No response)")
+ return full || "(No response)"
+ } catch (err) {
+ const error = err instanceof Error ? err : new Error(String(err))
+ callbacks.onError?.(error)
+ callbacks.onFinish?.(full || "(No response)")
+ return full || "(No response)"
+ }
  }

  export async function generate(prompt: string, modelID?: string) {
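
The net effect of the chat.ts rewrite: streaming is now exposed as an async generator of typed events (`streamEvents`), with the old callback-based `stream` kept as a thin adapter over it, and the hardcoded 15s idle abort replaced by configurable idle and per-tool timeouts. Here is a minimal consumption sketch. It assumes `StreamEvent` is a discriminated union on `type` (which the `run.next(...)` calls suggest) and that `Message` carries `role`/`content` as the history mapping implies; the import path is illustrative.

```ts
import { AIChat } from "codeblog-app/src/ai/chat" // illustrative path

async function demo(signal: AbortSignal) {
  const messages = [{ role: "user", content: "hi" }] as AIChat.Message[]
  // Timeouts are per-run options now, not just module constants.
  const options = { runId: "demo", idleTimeoutMs: 60_000, toolTimeoutMs: 45_000 }

  for await (const event of AIChat.streamEvents(messages, undefined, signal, options)) {
    switch (event.type) {
      case "text-delta":
        process.stdout.write(event.text) // incremental assistant text
        break
      case "tool-start": // callID correlates this with the matching tool-result
        console.error(`[${event.callID}] ${event.name} started`)
        break
      case "tool-result":
        console.error(`[${event.callID}] ${event.name} done`)
        break
      case "error": // tool errors and both timeouts surface here as events
        console.error(event.error.message)
        break
      case "run-finish": // always emitted (from finally), even after an abort
        return { text: event.text, aborted: event.aborted }
    }
  }
}
```

The callback wrapper at the bottom of the hunk runs essentially this same loop internally, which is why `onToolCall`/`onToolResult` now receive the extra `callID` argument.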
package/src/ai/configure.ts CHANGED
@@ -91,10 +91,15 @@ export async function saveProvider(url: string, key: string): Promise<{ provider

  const cfg = await Config.load()
  const providers = cfg.providers || {}
- providers[provider] = { api_key: key, base_url: url }
+ providers[provider] = {
+ api_key: key,
+ base_url: url,
+ api: detected === "anthropic" ? "anthropic" : "openai-compatible",
+ compat_profile: detected === "anthropic" ? "anthropic" : "openai-compatible",
+ }

  // Auto-set model if not already configured
- const update: Record<string, unknown> = { providers }
+ const update: Record<string, unknown> = { providers, default_provider: provider }
  if (!cfg.model) {
  if (detected === "anthropic") {
  update.model = "claude-sonnet-4-20250514"
@@ -114,10 +119,14 @@ export async function saveProvider(url: string, key: string): Promise<{ provider

  const cfg = await Config.load()
  const providers = cfg.providers || {}
- providers[provider] = { api_key: key }
+ providers[provider] = {
+ api_key: key,
+ api: provider === "anthropic" ? "anthropic" : provider === "google" ? "google" : provider === "openai" ? "openai" : "openai-compatible",
+ compat_profile: provider === "anthropic" ? "anthropic" : provider === "google" ? "google" : provider === "openai" ? "openai" : "openai-compatible",
+ }

  // Auto-set model for known providers
- const update: Record<string, unknown> = { providers }
+ const update: Record<string, unknown> = { providers, default_provider: provider }
  if (!cfg.model) {
  const { AIProvider } = await import("./provider")
  const models = Object.values(AIProvider.BUILTIN_MODELS).filter((m) => m.providerID === provider)
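
Both configure paths now stamp an explicit `api` and `compat_profile` onto the provider entry and record the provider as `default_provider`, which is what lets the new registry (below) fall back to it for unrecognized model IDs. Roughly the config object these hunks appear to produce after saving an Anthropic-style endpoint; the field names come from the diff, while the surrounding layout and placeholder values are assumptions:

```ts
// Hypothetical result of saveProvider("https://api.anthropic.com", "sk-ant-...")
// when no model was previously configured.
const cfg = {
  model: "claude-sonnet-4-20250514", // auto-set because cfg.model was empty
  default_provider: "anthropic",     // now written on every save
  providers: {
    anthropic: {
      api_key: "sk-ant-...",
      base_url: "https://api.anthropic.com",
      api: "anthropic",              // presumably: which wire protocol to speak
      compat_profile: "anthropic",   // presumably: which compat quirks to apply
    },
  },
}
```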
package/src/ai/models.ts ADDED
@@ -0,0 +1,26 @@
+ export interface ModelInfo {
+ id: string
+ providerID: string
+ name: string
+ contextWindow: number
+ outputTokens: number
+ }
+
+ export const BUILTIN_MODELS: Record<string, ModelInfo> = {
+ "claude-sonnet-4-20250514": { id: "claude-sonnet-4-20250514", providerID: "anthropic", name: "Claude Sonnet 4", contextWindow: 200000, outputTokens: 16384 },
+ "claude-3-5-haiku-20241022": { id: "claude-3-5-haiku-20241022", providerID: "anthropic", name: "Claude 3.5 Haiku", contextWindow: 200000, outputTokens: 8192 },
+ "gpt-4o": { id: "gpt-4o", providerID: "openai", name: "GPT-4o", contextWindow: 128000, outputTokens: 16384 },
+ "gpt-4o-mini": { id: "gpt-4o-mini", providerID: "openai", name: "GPT-4o Mini", contextWindow: 128000, outputTokens: 16384 },
+ "o3-mini": { id: "o3-mini", providerID: "openai", name: "o3-mini", contextWindow: 200000, outputTokens: 100000 },
+ "gemini-2.5-flash": { id: "gemini-2.5-flash", providerID: "google", name: "Gemini 2.5 Flash", contextWindow: 1048576, outputTokens: 65536 },
+ "gemini-2.5-pro": { id: "gemini-2.5-pro", providerID: "google", name: "Gemini 2.5 Pro", contextWindow: 1048576, outputTokens: 65536 },
+ }
+
+ export const DEFAULT_MODEL = "claude-sonnet-4-20250514"
+
+ export function inferProviderByModelPrefix(modelID: string): string | undefined {
+ if (modelID.startsWith("claude-")) return "anthropic"
+ if (modelID.startsWith("gpt-") || modelID.startsWith("o1-") || modelID.startsWith("o3-")) return "openai"
+ if (modelID.startsWith("gemini-")) return "google"
+ return undefined
+ }
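
models.ts is a new dependency-free catalog plus a prefix matcher that the provider registry (next file) uses as a routing fallback. A quick sketch of the behavior as written, with illustrative calls:

```ts
import { BUILTIN_MODELS, DEFAULT_MODEL, inferProviderByModelPrefix } from "./models"

console.log(inferProviderByModelPrefix("claude-opus-next")) // "anthropic" (any "claude-" id)
console.log(inferProviderByModelPrefix("o1-preview"))       // "openai" ("gpt-", "o1-", "o3-")
console.log(inferProviderByModelPrefix("gemini-2.5-flash")) // "google"
console.log(inferProviderByModelPrefix("qwen-max"))         // undefined -> caller must fall back
console.log(BUILTIN_MODELS[DEFAULT_MODEL]?.name)            // "Claude Sonnet 4"
```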
package/src/ai/provider-registry.ts ADDED
@@ -0,0 +1,150 @@
+ import { Config } from "../config"
+ import { Log } from "../util/log"
+ import { BUILTIN_MODELS, DEFAULT_MODEL, inferProviderByModelPrefix } from "./models"
+ import { type ModelCompatConfig, resolveCompat } from "./types"
+
+ const log = Log.create({ service: "ai-provider-registry" })
+
+ export const PROVIDER_ENV: Record<string, string[]> = {
+ anthropic: ["ANTHROPIC_API_KEY", "ANTHROPIC_AUTH_TOKEN"],
+ openai: ["OPENAI_API_KEY"],
+ google: ["GOOGLE_GENERATIVE_AI_API_KEY", "GOOGLE_API_KEY"],
+ "openai-compatible": ["OPENAI_COMPATIBLE_API_KEY"],
+ }
+
+ export const PROVIDER_BASE_URL_ENV: Record<string, string[]> = {
+ anthropic: ["ANTHROPIC_BASE_URL"],
+ openai: ["OPENAI_BASE_URL", "OPENAI_API_BASE"],
+ google: ["GOOGLE_API_BASE_URL"],
+ "openai-compatible": ["OPENAI_COMPATIBLE_BASE_URL"],
+ }
+
+ export interface ProviderRuntimeConfig {
+ id: string
+ apiKey?: string
+ baseURL?: string
+ config?: Config.ProviderConfig
+ }
+
+ export interface ProviderRegistryView {
+ providers: Record<string, ProviderRuntimeConfig>
+ defaultProvider?: string
+ }
+
+ export interface ModelRoute {
+ requestedModel: string
+ providerID: string
+ modelID: string
+ apiKey: string
+ baseURL?: string
+ compat: ModelCompatConfig
+ }
+
+ function readFirstEnv(keys: string[]): string | undefined {
+ for (const key of keys) {
+ if (process.env[key]) return process.env[key]
+ }
+ return undefined
+ }
+
+ export async function loadProviders(cfgInput?: Config.CodeblogConfig): Promise<ProviderRegistryView> {
+ const cfg = cfgInput || await Config.load()
+ const user = cfg.providers || {}
+ const ids = new Set<string>([
+ ...Object.keys(PROVIDER_ENV),
+ ...Object.keys(user),
+ ])
+
+ const providers: Record<string, ProviderRuntimeConfig> = {}
+
+ for (const id of ids) {
+ const config = user[id]
+ providers[id] = {
+ id,
+ config,
+ apiKey: readFirstEnv(PROVIDER_ENV[id] || []) || config?.api_key,
+ baseURL: readFirstEnv(PROVIDER_BASE_URL_ENV[id] || []) || config?.base_url,
+ }
+ }
+
+ return { providers, defaultProvider: cfg.default_provider }
+ }
+
+ function availableProvidersWithKeys(providers: Record<string, ProviderRuntimeConfig>): string[] {
+ return Object.values(providers)
+ .filter((p) => p.apiKey)
+ .map((p) => p.id)
+ .sort()
+ }
+
+ function unknownModelError(modelID: string, providers: Record<string, ProviderRuntimeConfig>): Error {
+ const available = availableProvidersWithKeys(providers)
+ const base = `Unknown model "${modelID}".`
+ if (available.length === 0) {
+ return new Error(`${base} No AI providers are configured. Run: codeblog ai setup`)
+ }
+ return new Error(`${base} Available providers with keys: ${available.join(", ")}. Try: codeblog config --model <provider>/<model>`)
+ }
+
+ function noKeyError(providerID: string, modelID: string): Error {
+ const envKeys = PROVIDER_ENV[providerID] || []
+ const envHint = envKeys[0] || `${providerID.toUpperCase().replace(/-/g, "_")}_API_KEY`
+ return new Error(`No API key for ${providerID} (model: ${modelID}). Set ${envHint} or run: codeblog config --provider ${providerID} --api-key <key>`)
+ }
+
+ function routeViaProvider(
+ providers: Record<string, ProviderRuntimeConfig>,
+ requestedModel: string,
+ providerID: string,
+ modelID: string,
+ ): ModelRoute {
+ const provider = providers[providerID]
+ if (!provider) throw unknownModelError(requestedModel, providers)
+ if (!provider.apiKey) throw noKeyError(providerID, modelID)
+
+ const compat = resolveCompat({ providerID, modelID, providerConfig: provider.config })
+ return {
+ requestedModel,
+ providerID,
+ modelID,
+ apiKey: provider.apiKey,
+ baseURL: provider.baseURL,
+ compat,
+ }
+ }
+
+ export async function routeModel(inputModel?: string, cfgInput?: Config.CodeblogConfig): Promise<ModelRoute> {
+ const cfg = cfgInput || await Config.load()
+ const requestedModel = inputModel || cfg.model || DEFAULT_MODEL
+ const loaded = await loadProviders(cfg)
+ const providers = loaded.providers
+
+ if (requestedModel.includes("/")) {
+ const [providerID, ...rest] = requestedModel.split("/")
+ const modelID = rest.join("/")
+ return routeViaProvider(providers, requestedModel, providerID!, modelID)
+ }
+
+ if (BUILTIN_MODELS[requestedModel]) {
+ const providerID = BUILTIN_MODELS[requestedModel]!.providerID
+ return routeViaProvider(providers, requestedModel, providerID, requestedModel)
+ }
+
+ const prefixed = inferProviderByModelPrefix(requestedModel)
+ if (prefixed) {
+ return routeViaProvider(providers, requestedModel, prefixed, requestedModel)
+ }
+
+ if (loaded.defaultProvider) {
+ return routeViaProvider(providers, requestedModel, loaded.defaultProvider, requestedModel)
+ }
+
+ log.warn("route failed", { requestedModel })
+ throw unknownModelError(requestedModel, providers)
+ }
+
+ export async function resolveProviderCompat(providerID: string, modelID: string, cfgInput?: Config.CodeblogConfig): Promise<ModelCompatConfig> {
+ const loaded = await loadProviders(cfgInput)
+ const provider = loaded.providers[providerID]
+ return resolveCompat({ providerID, modelID, providerConfig: provider?.config })
+ }
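
Reading `routeModel` top to bottom gives the full resolution order: an explicit `provider/model` string wins, then the `BUILTIN_MODELS` catalog, then prefix inference, then `default_provider`, and only then the actionable `Unknown model` error. A sketch of what each branch does, assuming matching API keys are configured (without them the calls throw the `No API key` error instead); the import path is illustrative:

```ts
import { loadProviders, routeModel } from "./provider-registry"

// 1. Provider-qualified IDs are split on the first "/" and routed directly.
const viaSlash = await routeModel("openai-compatible/qwen-max")

// 2. Bare IDs found in BUILTIN_MODELS route via the catalog's providerID.
const viaCatalog = await routeModel("gpt-4o-mini") // providerID: "openai"

// 3. Unknown IDs with a known prefix use inferProviderByModelPrefix.
const viaPrefix = await routeModel("claude-newer-than-the-catalog") // providerID: "anthropic"

// 4. Anything else falls back to cfg.default_provider (set by configure.ts above).

// Env vars beat config values for both keys and base URLs, e.g. ANTHROPIC_API_KEY
// overrides providers.anthropic.api_key:
const view = await loadProviders()
console.log(Object.keys(view.providers)) // built-in provider IDs plus any user-defined ones
```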