codeblog-app 2.3.1 → 2.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. package/package.json +8 -73
  2. package/drizzle/0000_init.sql +0 -34
  3. package/drizzle/meta/_journal.json +0 -13
  4. package/drizzle.config.ts +0 -10
  5. package/src/ai/__tests__/chat.test.ts +0 -188
  6. package/src/ai/__tests__/compat.test.ts +0 -46
  7. package/src/ai/__tests__/home.ai-stream.integration.test.ts +0 -77
  8. package/src/ai/__tests__/provider-registry.test.ts +0 -61
  9. package/src/ai/__tests__/provider.test.ts +0 -238
  10. package/src/ai/__tests__/stream-events.test.ts +0 -152
  11. package/src/ai/__tests__/tools.test.ts +0 -93
  12. package/src/ai/chat.ts +0 -336
  13. package/src/ai/configure.ts +0 -143
  14. package/src/ai/models.ts +0 -26
  15. package/src/ai/provider-registry.ts +0 -150
  16. package/src/ai/provider.ts +0 -264
  17. package/src/ai/stream-events.ts +0 -64
  18. package/src/ai/tools.ts +0 -118
  19. package/src/ai/types.ts +0 -105
  20. package/src/auth/index.ts +0 -49
  21. package/src/auth/oauth.ts +0 -123
  22. package/src/cli/__tests__/commands.test.ts +0 -229
  23. package/src/cli/cmd/agent.ts +0 -97
  24. package/src/cli/cmd/ai.ts +0 -10
  25. package/src/cli/cmd/chat.ts +0 -190
  26. package/src/cli/cmd/comment.ts +0 -67
  27. package/src/cli/cmd/config.ts +0 -153
  28. package/src/cli/cmd/feed.ts +0 -53
  29. package/src/cli/cmd/forum.ts +0 -106
  30. package/src/cli/cmd/login.ts +0 -45
  31. package/src/cli/cmd/logout.ts +0 -12
  32. package/src/cli/cmd/me.ts +0 -188
  33. package/src/cli/cmd/post.ts +0 -25
  34. package/src/cli/cmd/publish.ts +0 -64
  35. package/src/cli/cmd/scan.ts +0 -78
  36. package/src/cli/cmd/search.ts +0 -35
  37. package/src/cli/cmd/setup.ts +0 -622
  38. package/src/cli/cmd/tui.ts +0 -20
  39. package/src/cli/cmd/uninstall.ts +0 -281
  40. package/src/cli/cmd/update.ts +0 -123
  41. package/src/cli/cmd/vote.ts +0 -50
  42. package/src/cli/cmd/whoami.ts +0 -18
  43. package/src/cli/mcp-print.ts +0 -6
  44. package/src/cli/ui.ts +0 -357
  45. package/src/config/index.ts +0 -92
  46. package/src/flag/index.ts +0 -23
  47. package/src/global/index.ts +0 -38
  48. package/src/id/index.ts +0 -20
  49. package/src/index.ts +0 -203
  50. package/src/mcp/__tests__/client.test.ts +0 -149
  51. package/src/mcp/__tests__/e2e.ts +0 -331
  52. package/src/mcp/__tests__/integration.ts +0 -148
  53. package/src/mcp/client.ts +0 -118
  54. package/src/server/index.ts +0 -48
  55. package/src/storage/chat.ts +0 -73
  56. package/src/storage/db.ts +0 -85
  57. package/src/storage/schema.sql.ts +0 -39
  58. package/src/storage/schema.ts +0 -1
  59. package/src/tui/__tests__/input-intent.test.ts +0 -27
  60. package/src/tui/__tests__/stream-assembler.test.ts +0 -33
  61. package/src/tui/ai-stream.ts +0 -28
  62. package/src/tui/app.tsx +0 -210
  63. package/src/tui/commands.ts +0 -220
  64. package/src/tui/context/exit.tsx +0 -15
  65. package/src/tui/context/helper.tsx +0 -25
  66. package/src/tui/context/route.tsx +0 -24
  67. package/src/tui/context/theme.tsx +0 -471
  68. package/src/tui/input-intent.ts +0 -26
  69. package/src/tui/routes/home.tsx +0 -1060
  70. package/src/tui/routes/model.tsx +0 -210
  71. package/src/tui/routes/notifications.tsx +0 -87
  72. package/src/tui/routes/post.tsx +0 -102
  73. package/src/tui/routes/search.tsx +0 -105
  74. package/src/tui/routes/setup.tsx +0 -267
  75. package/src/tui/routes/trending.tsx +0 -107
  76. package/src/tui/stream-assembler.ts +0 -49
  77. package/src/util/__tests__/context.test.ts +0 -31
  78. package/src/util/__tests__/lazy.test.ts +0 -37
  79. package/src/util/context.ts +0 -23
  80. package/src/util/error.ts +0 -46
  81. package/src/util/lazy.ts +0 -18
  82. package/src/util/log.ts +0 -144
  83. package/tsconfig.json +0 -11
package/src/ai/chat.ts DELETED
@@ -1,336 +0,0 @@
1
- import { streamText, stepCountIs } from "ai"
2
- import { AIProvider } from "./provider"
3
- import { getChatTools } from "./tools"
4
- import { Log } from "../util/log"
5
- import { createRunEventFactory, type StreamEvent } from "./stream-events"
6
-
7
- const log = Log.create({ service: "ai-chat" })
8
-
9
- const SYSTEM_PROMPT = `You are CodeBlog AI — an assistant for the CodeBlog developer forum (codeblog.ai).
10
-
11
- You help developers with everything on the platform:
12
- - Scan and analyze their local IDE coding sessions
13
- - Write and publish blog posts from coding sessions
14
- - Browse, search, read, comment, vote on forum posts
15
- - Manage bookmarks, notifications, debates, tags, trending topics
16
- - Manage agents, view dashboard, follow users
17
- - Generate weekly digests
18
-
19
- You have 20+ tools. Use them whenever the user's request matches. Chain multiple tools if needed.
20
- After a tool returns results, summarize them naturally for the user.
21
-
22
- CRITICAL: When using tools, ALWAYS use the EXACT data returned by previous tool calls.
23
- - If scan_sessions returns a path like "/Users/zhaoyifei/...", use that EXACT path
24
- - NEVER modify, guess, or infer file paths — use them exactly as returned
25
- - If a tool call fails with "file not found", the path is wrong — check the scan results again
26
-
27
- Write casually like a dev talking to another dev. Be specific, opinionated, and genuine.
28
- Use code examples when relevant. Think Juejin / HN / Linux.do vibes — not a conference paper.`
29
-
30
- const IDLE_TIMEOUT_MS = 60_000
31
- const TOOL_TIMEOUT_MS = 45_000
32
- const DEFAULT_MAX_STEPS = 10
33
-
34
- export namespace AIChat {
35
- export interface Message {
36
- role: "user" | "assistant" | "system"
37
- content: string
38
- }
39
-
40
- export interface StreamCallbacks {
41
- onToken?: (token: string) => void
42
- onFinish?: (text: string) => void
43
- onError?: (error: Error) => void
44
- onToolCall?: (name: string, args: unknown, callID: string) => void
45
- onToolResult?: (name: string, result: unknown, callID: string) => void
46
- }
47
-
48
- export interface StreamOptions {
49
- maxSteps?: number
50
- runId?: string
51
- idleTimeoutMs?: number
52
- toolTimeoutMs?: number
53
- }
54
-
55
- export async function* streamEvents(
56
- messages: Message[],
57
- modelID?: string,
58
- signal?: AbortSignal,
59
- options?: StreamOptions,
60
- ): AsyncGenerator<StreamEvent> {
61
- const history = messages
62
- .filter((m) => m.role === "user" || m.role === "assistant")
63
- .map((m) => ({ role: m.role as "user" | "assistant", content: m.content }))
64
-
65
- const routeCompat = await AIProvider.resolveModelCompat(modelID).catch(() => undefined)
66
- const tools = await getChatTools(routeCompat || "default")
67
- const model = await AIProvider.getModel(modelID)
68
- const maxSteps = options?.maxSteps ?? DEFAULT_MAX_STEPS
69
- const idleTimeoutMs = options?.idleTimeoutMs ?? IDLE_TIMEOUT_MS
70
- const toolTimeoutMs = options?.toolTimeoutMs ?? TOOL_TIMEOUT_MS
71
-
72
- const run = createRunEventFactory(options?.runId)
73
- let full = ""
74
- let aborted = false
75
- let externalAbort = false
76
- let abortError: Error | undefined
77
- let errorEmitted = false
78
- const toolQueue = new Map<string, string[]>()
79
- const activeTools = new Map<string, { name: string; timer?: ReturnType<typeof setTimeout> }>()
80
-
81
- const internalAbort = new AbortController()
82
- const abortRun = (error?: Error) => {
83
- if (aborted) return
84
- aborted = true
85
- if (error) abortError = error
86
- internalAbort.abort()
87
- }
88
- const onExternalAbort = () => {
89
- externalAbort = true
90
- abortRun()
91
- }
92
- signal?.addEventListener("abort", onExternalAbort)
93
-
94
- yield run.next("run-start", {
95
- modelID: modelID || AIProvider.DEFAULT_MODEL,
96
- messageCount: history.length,
97
- })
98
-
99
- let idleTimer: ReturnType<typeof setTimeout> | undefined
100
- const clearAllToolTimers = () => {
101
- for (const entry of activeTools.values()) {
102
- if (entry.timer) clearTimeout(entry.timer)
103
- }
104
- }
105
-
106
- const pushToolID = (name: string, callID: string) => {
107
- const queue = toolQueue.get(name)
108
- if (!queue) {
109
- toolQueue.set(name, [callID])
110
- return
111
- }
112
- queue.push(callID)
113
- }
114
-
115
- const shiftToolID = (name: string) => {
116
- const queue = toolQueue.get(name)
117
- if (!queue || queue.length === 0) return undefined
118
- const callID = queue.shift()
119
- if (queue.length === 0) toolQueue.delete(name)
120
- return callID
121
- }
122
-
123
- const dropToolID = (name: string, callID: string) => {
124
- const queue = toolQueue.get(name)
125
- if (!queue || queue.length === 0) return
126
- const next = queue.filter((id) => id !== callID)
127
- if (next.length === 0) {
128
- toolQueue.delete(name)
129
- return
130
- }
131
- toolQueue.set(name, next)
132
- }
133
-
134
- const armToolTimeout = (name: string, callID: string) => {
135
- if (toolTimeoutMs <= 0) return
136
- const timer = setTimeout(() => {
137
- abortRun(new Error(`Tool call "${name}" timed out after ${toolTimeoutMs}ms`))
138
- }, toolTimeoutMs)
139
- const active = activeTools.get(callID)
140
- if (!active) return
141
- if (active.timer) clearTimeout(active.timer)
142
- active.timer = timer
143
- }
144
-
145
- const startTool = (name: string, callID: string) => {
146
- activeTools.set(callID, { name })
147
- armToolTimeout(name, callID)
148
- }
149
-
150
- const finishTool = (callID?: string) => {
151
- if (!callID) return
152
- const active = activeTools.get(callID)
153
- if (!active) return
154
- if (active.timer) clearTimeout(active.timer)
155
- activeTools.delete(callID)
156
- }
157
-
158
- const resetIdle = () => {
159
- if (idleTimer) clearTimeout(idleTimer)
160
- if (activeTools.size > 0) return
161
- idleTimer = setTimeout(() => {
162
- abortRun(new Error(`Stream idle timeout after ${idleTimeoutMs}ms`))
163
- }, idleTimeoutMs)
164
- }
165
-
166
- try {
167
- const result = streamText({
168
- model,
169
- system: SYSTEM_PROMPT,
170
- messages: history,
171
- tools,
172
- stopWhen: stepCountIs(maxSteps),
173
- toolChoice: "auto",
174
- abortSignal: internalAbort.signal,
175
- })
176
- resetIdle()
177
- for await (const part of result.fullStream) {
178
- if (internalAbort.signal.aborted) {
179
- break
180
- }
181
- resetIdle()
182
-
183
- switch (part.type) {
184
- case "text-delta": {
185
- const delta = (part as any).text ?? (part as any).textDelta ?? ""
186
- if (!delta) break
187
- full += delta
188
- yield run.next("text-delta", { text: delta })
189
- break
190
- }
191
- case "tool-call": {
192
- if (idleTimer) {
193
- clearTimeout(idleTimer)
194
- idleTimer = undefined
195
- }
196
- const name = (part as any).toolName || "unknown"
197
- const args = (part as any).args ?? (part as any).input ?? {}
198
- const callID = (part as any).toolCallId || (part as any).id || `${run.runId}:tool:${crypto.randomUUID()}`
199
- pushToolID(name, callID)
200
- startTool(name, callID)
201
- yield run.next("tool-start", { callID, name, args })
202
- break
203
- }
204
- case "tool-result": {
205
- const name = (part as any).toolName || "unknown"
206
- const callID = (part as any).toolCallId || (part as any).id || shiftToolID(name) || `${run.runId}:tool:${crypto.randomUUID()}`
207
- dropToolID(name, callID)
208
- finishTool(callID)
209
- resetIdle()
210
- const result = (part as any).output ?? (part as any).result ?? {}
211
- yield run.next("tool-result", { callID, name, result })
212
- break
213
- }
214
- case "tool-error" as any: {
215
- const name = (part as any).toolName || "unknown"
216
- const callID = (part as any).toolCallId || (part as any).id || shiftToolID(name)
217
- if (callID) {
218
- dropToolID(name, callID)
219
- finishTool(callID)
220
- }
221
- resetIdle()
222
- const error = new Error(String((part as any).error || "tool error"))
223
- errorEmitted = true
224
- yield run.next("error", { error })
225
- abortRun(error)
226
- break
227
- }
228
- case "error": {
229
- const err = (part as any).error
230
- errorEmitted = true
231
- yield run.next("error", { error: err instanceof Error ? err : new Error(String(err)) })
232
- break
233
- }
234
- default:
235
- break
236
- }
237
- }
238
- } catch (err) {
239
- const error = err instanceof Error ? err : new Error(String(err))
240
- if (error.name === "AbortError") {
241
- if (abortError && !externalAbort) {
242
- errorEmitted = true
243
- yield run.next("error", { error: abortError })
244
- }
245
- } else {
246
- log.error("stream error", { error: error.message })
247
- errorEmitted = true
248
- yield run.next("error", { error })
249
- }
250
- } finally {
251
- if (idleTimer) clearTimeout(idleTimer)
252
- clearAllToolTimers()
253
- signal?.removeEventListener("abort", onExternalAbort)
254
- if (abortError && !externalAbort && !errorEmitted) {
255
- yield run.next("error", { error: abortError })
256
- }
257
- yield run.next("run-finish", { text: full, aborted })
258
- }
259
- }
260
-
261
- export async function stream(
262
- messages: Message[],
263
- callbacks: StreamCallbacks,
264
- modelID?: string,
265
- signal?: AbortSignal,
266
- options?: StreamOptions,
267
- ) {
268
- let full = ""
269
- try {
270
- for await (const event of streamEvents(messages, modelID, signal, options)) {
271
- switch (event.type) {
272
- case "text-delta":
273
- full += event.text
274
- callbacks.onToken?.(event.text)
275
- break
276
- case "tool-start":
277
- callbacks.onToolCall?.(event.name, event.args, event.callID)
278
- break
279
- case "tool-result":
280
- callbacks.onToolResult?.(event.name, event.result, event.callID)
281
- break
282
- case "error":
283
- callbacks.onError?.(event.error)
284
- break
285
- case "run-finish":
286
- callbacks.onFinish?.(event.text || "(No response)")
287
- return event.text || "(No response)"
288
- }
289
- }
290
- callbacks.onFinish?.(full || "(No response)")
291
- return full || "(No response)"
292
- } catch (err) {
293
- const error = err instanceof Error ? err : new Error(String(err))
294
- callbacks.onError?.(error)
295
- callbacks.onFinish?.(full || "(No response)")
296
- return full || "(No response)"
297
- }
298
- }
299
-
300
- export async function generate(prompt: string, modelID?: string) {
301
- let result = ""
302
- await stream([{ role: "user", content: prompt }], { onFinish: (text) => (result = text) }, modelID)
303
- return result
304
- }
305
-
306
- export async function analyzeAndPost(sessionContent: string, modelID?: string) {
307
- const prompt = `Analyze this coding session and write a blog post about it.
308
-
309
- The post should:
310
- - Have a catchy, dev-friendly title (like HN or Juejin)
311
- - Tell a story: what you were doing, what went wrong/right, what you learned
312
- - Include relevant code snippets
313
- - Be casual and genuine, written in first person
314
- - End with key takeaways
315
-
316
- Also provide:
317
- - 3-8 relevant tags (lowercase, hyphenated)
318
- - A one-line summary/hook
319
-
320
- Session content:
321
- ${sessionContent.slice(0, 50000)}
322
-
323
- Respond in this exact JSON format:
324
- {
325
- "title": "...",
326
- "content": "... (markdown)",
327
- "tags": ["tag1", "tag2"],
328
- "summary": "..."
329
- }`
330
-
331
- const raw = await generate(prompt, modelID)
332
- const jsonMatch = raw.match(/\{[\s\S]*\}/)
333
- if (!jsonMatch) throw new Error("AI did not return valid JSON")
334
- return JSON.parse(jsonMatch[0])
335
- }
336
- }
@@ -1,143 +0,0 @@
1
- // AI provider auto-detection and configuration
2
-
3
- function looksLikeApi(r: Response) {
4
- const ct = r.headers.get("content-type") || ""
5
- return ct.includes("json") || ct.includes("text/plain")
6
- }
7
-
8
- export async function probe(base: string, key: string): Promise<"openai" | "anthropic" | null> {
9
- const clean = base.replace(/\/+$/, "")
10
- try {
11
- const r = await fetch(`${clean}/v1/models`, {
12
- headers: { Authorization: `Bearer ${key}` },
13
- signal: AbortSignal.timeout(8000),
14
- })
15
- if (r.ok || ((r.status === 401 || r.status === 403) && looksLikeApi(r))) return "openai"
16
- } catch {}
17
- try {
18
- const r = await fetch(`${clean}/v1/messages`, {
19
- method: "POST",
20
- headers: { "x-api-key": key, "anthropic-version": "2023-06-01", "content-type": "application/json" },
21
- body: JSON.stringify({ model: "test", max_tokens: 1, messages: [] }),
22
- signal: AbortSignal.timeout(8000),
23
- })
24
- if (r.status !== 404 && looksLikeApi(r)) return "anthropic"
25
- } catch {}
26
- return null
27
- }
28
-
29
- const KEY_PREFIX_MAP: Record<string, string> = {
30
- "sk-ant-": "anthropic",
31
- "AIza": "google",
32
- "xai-": "xai",
33
- "gsk_": "groq",
34
- "sk-or-": "openrouter",
35
- "pplx-": "perplexity",
36
- }
37
-
38
- const ENV_MAP: Record<string, string> = {
39
- anthropic: "ANTHROPIC_API_KEY",
40
- openai: "OPENAI_API_KEY",
41
- google: "GOOGLE_GENERATIVE_AI_API_KEY",
42
- xai: "XAI_API_KEY",
43
- groq: "GROQ_API_KEY",
44
- openrouter: "OPENROUTER_API_KEY",
45
- perplexity: "PERPLEXITY_API_KEY",
46
- "openai-compatible": "OPENAI_COMPATIBLE_API_KEY",
47
- }
48
-
49
- async function fetchFirstModel(base: string, key: string): Promise<string | null> {
50
- try {
51
- const clean = base.replace(/\/+$/, "")
52
- const r = await fetch(`${clean}/v1/models`, {
53
- headers: { Authorization: `Bearer ${key}` },
54
- signal: AbortSignal.timeout(8000),
55
- })
56
- if (!r.ok) return null
57
- const data = await r.json() as { data?: Array<{ id: string }> }
58
- if (!data.data || data.data.length === 0) return null
59
-
60
- // Prefer capable models: claude-sonnet > gpt-4o > claude-opus > first available
61
- const ids = data.data.map((m) => m.id)
62
- const preferred = [/^claude-sonnet-4/, /^gpt-4o$/, /^claude-opus-4/, /^gpt-4o-mini$/, /^gemini-2\.5-flash$/]
63
- for (const pattern of preferred) {
64
- const match = ids.find((id) => pattern.test(id))
65
- if (match) return match
66
- }
67
- return ids[0] ?? null
68
- } catch {}
69
- return null
70
- }
71
-
72
- export function detectProvider(key: string) {
73
- for (const [prefix, provider] of Object.entries(KEY_PREFIX_MAP)) {
74
- if (key.startsWith(prefix)) return provider
75
- }
76
- return "openai"
77
- }
78
-
79
- export async function saveProvider(url: string, key: string): Promise<{ provider: string; error?: string }> {
80
- const { Config } = await import("../config")
81
-
82
- if (url) {
83
- const detected = await probe(url, key)
84
- if (!detected) return { provider: "", error: "Could not connect. Check URL and key." }
85
-
86
- const provider = detected === "anthropic" ? "anthropic" : "openai-compatible"
87
- const envKey = detected === "anthropic" ? "ANTHROPIC_API_KEY" : "OPENAI_COMPATIBLE_API_KEY"
88
- const envBase = detected === "anthropic" ? "ANTHROPIC_BASE_URL" : "OPENAI_COMPATIBLE_BASE_URL"
89
- process.env[envKey] = key
90
- process.env[envBase] = url
91
-
92
- const cfg = await Config.load()
93
- const providers = cfg.providers || {}
94
- providers[provider] = {
95
- api_key: key,
96
- base_url: url,
97
- api: detected === "anthropic" ? "anthropic" : "openai-compatible",
98
- compat_profile: detected === "anthropic" ? "anthropic" : "openai-compatible",
99
- }
100
-
101
- // Auto-set model if not already configured
102
- const update: Record<string, unknown> = { providers, default_provider: provider }
103
- if (!cfg.model) {
104
- if (detected === "anthropic") {
105
- update.model = "claude-sonnet-4-20250514"
106
- } else {
107
- // For openai-compatible with custom URL, try to fetch available models
108
- const model = await fetchFirstModel(url, key)
109
- if (model) update.model = `openai-compatible/${model}`
110
- }
111
- }
112
-
113
- await Config.save(update)
114
- return { provider: `${detected} format` }
115
- }
116
-
117
- const provider = detectProvider(key)
118
- if (ENV_MAP[provider]) process.env[ENV_MAP[provider]] = key
119
-
120
- const cfg = await Config.load()
121
- const providers = cfg.providers || {}
122
- providers[provider] = {
123
- api_key: key,
124
- api: provider === "anthropic" ? "anthropic" : provider === "google" ? "google" : provider === "openai" ? "openai" : "openai-compatible",
125
- compat_profile: provider === "anthropic" ? "anthropic" : provider === "google" ? "google" : provider === "openai" ? "openai" : "openai-compatible",
126
- }
127
-
128
- // Auto-set model for known providers
129
- const update: Record<string, unknown> = { providers, default_provider: provider }
130
- if (!cfg.model) {
131
- const { AIProvider } = await import("./provider")
132
- const models = Object.values(AIProvider.BUILTIN_MODELS).filter((m) => m.providerID === provider)
133
- if (models.length > 0) update.model = models[0]!.id
134
- }
135
-
136
- await Config.save(update)
137
- return { provider }
138
- }
139
-
140
- export function mask(s: string) {
141
- if (s.length <= 8) return s
142
- return s.slice(0, 4) + "\u2022".repeat(Math.min(s.length - 8, 20)) + s.slice(-4)
143
- }
package/src/ai/models.ts DELETED
@@ -1,26 +0,0 @@
1
- export interface ModelInfo {
2
- id: string
3
- providerID: string
4
- name: string
5
- contextWindow: number
6
- outputTokens: number
7
- }
8
-
9
- export const BUILTIN_MODELS: Record<string, ModelInfo> = {
10
- "claude-sonnet-4-20250514": { id: "claude-sonnet-4-20250514", providerID: "anthropic", name: "Claude Sonnet 4", contextWindow: 200000, outputTokens: 16384 },
11
- "claude-3-5-haiku-20241022": { id: "claude-3-5-haiku-20241022", providerID: "anthropic", name: "Claude 3.5 Haiku", contextWindow: 200000, outputTokens: 8192 },
12
- "gpt-4o": { id: "gpt-4o", providerID: "openai", name: "GPT-4o", contextWindow: 128000, outputTokens: 16384 },
13
- "gpt-4o-mini": { id: "gpt-4o-mini", providerID: "openai", name: "GPT-4o Mini", contextWindow: 128000, outputTokens: 16384 },
14
- "o3-mini": { id: "o3-mini", providerID: "openai", name: "o3-mini", contextWindow: 200000, outputTokens: 100000 },
15
- "gemini-2.5-flash": { id: "gemini-2.5-flash", providerID: "google", name: "Gemini 2.5 Flash", contextWindow: 1048576, outputTokens: 65536 },
16
- "gemini-2.5-pro": { id: "gemini-2.5-pro", providerID: "google", name: "Gemini 2.5 Pro", contextWindow: 1048576, outputTokens: 65536 },
17
- }
18
-
19
- export const DEFAULT_MODEL = "claude-sonnet-4-20250514"
20
-
21
- export function inferProviderByModelPrefix(modelID: string): string | undefined {
22
- if (modelID.startsWith("claude-")) return "anthropic"
23
- if (modelID.startsWith("gpt-") || modelID.startsWith("o1-") || modelID.startsWith("o3-")) return "openai"
24
- if (modelID.startsWith("gemini-")) return "google"
25
- return undefined
26
- }
@@ -1,150 +0,0 @@
1
- import { Config } from "../config"
2
- import { Log } from "../util/log"
3
- import { BUILTIN_MODELS, DEFAULT_MODEL, inferProviderByModelPrefix } from "./models"
4
- import { type ModelCompatConfig, resolveCompat } from "./types"
5
-
6
- const log = Log.create({ service: "ai-provider-registry" })
7
-
8
- export const PROVIDER_ENV: Record<string, string[]> = {
9
- anthropic: ["ANTHROPIC_API_KEY", "ANTHROPIC_AUTH_TOKEN"],
10
- openai: ["OPENAI_API_KEY"],
11
- google: ["GOOGLE_GENERATIVE_AI_API_KEY", "GOOGLE_API_KEY"],
12
- "openai-compatible": ["OPENAI_COMPATIBLE_API_KEY"],
13
- }
14
-
15
- export const PROVIDER_BASE_URL_ENV: Record<string, string[]> = {
16
- anthropic: ["ANTHROPIC_BASE_URL"],
17
- openai: ["OPENAI_BASE_URL", "OPENAI_API_BASE"],
18
- google: ["GOOGLE_API_BASE_URL"],
19
- "openai-compatible": ["OPENAI_COMPATIBLE_BASE_URL"],
20
- }
21
-
22
- export interface ProviderRuntimeConfig {
23
- id: string
24
- apiKey?: string
25
- baseURL?: string
26
- config?: Config.ProviderConfig
27
- }
28
-
29
- export interface ProviderRegistryView {
30
- providers: Record<string, ProviderRuntimeConfig>
31
- defaultProvider?: string
32
- }
33
-
34
- export interface ModelRoute {
35
- requestedModel: string
36
- providerID: string
37
- modelID: string
38
- apiKey: string
39
- baseURL?: string
40
- compat: ModelCompatConfig
41
- }
42
-
43
- function readFirstEnv(keys: string[]): string | undefined {
44
- for (const key of keys) {
45
- if (process.env[key]) return process.env[key]
46
- }
47
- return undefined
48
- }
49
-
50
- export async function loadProviders(cfgInput?: Config.CodeblogConfig): Promise<ProviderRegistryView> {
51
- const cfg = cfgInput || await Config.load()
52
- const user = cfg.providers || {}
53
- const ids = new Set<string>([
54
- ...Object.keys(PROVIDER_ENV),
55
- ...Object.keys(user),
56
- ])
57
-
58
- const providers: Record<string, ProviderRuntimeConfig> = {}
59
-
60
- for (const id of ids) {
61
- const config = user[id]
62
- providers[id] = {
63
- id,
64
- config,
65
- apiKey: readFirstEnv(PROVIDER_ENV[id] || []) || config?.api_key,
66
- baseURL: readFirstEnv(PROVIDER_BASE_URL_ENV[id] || []) || config?.base_url,
67
- }
68
- }
69
-
70
- return { providers, defaultProvider: cfg.default_provider }
71
- }
72
-
73
- function availableProvidersWithKeys(providers: Record<string, ProviderRuntimeConfig>): string[] {
74
- return Object.values(providers)
75
- .filter((p) => p.apiKey)
76
- .map((p) => p.id)
77
- .sort()
78
- }
79
-
80
- function unknownModelError(modelID: string, providers: Record<string, ProviderRuntimeConfig>): Error {
81
- const available = availableProvidersWithKeys(providers)
82
- const base = `Unknown model "${modelID}".`
83
- if (available.length === 0) {
84
- return new Error(`${base} No AI providers are configured. Run: codeblog ai setup`)
85
- }
86
- return new Error(`${base} Available providers with keys: ${available.join(", ")}. Try: codeblog config --model <provider>/<model>`)
87
- }
88
-
89
- function noKeyError(providerID: string, modelID: string): Error {
90
- const envKeys = PROVIDER_ENV[providerID] || []
91
- const envHint = envKeys[0] || `${providerID.toUpperCase().replace(/-/g, "_")}_API_KEY`
92
- return new Error(`No API key for ${providerID} (model: ${modelID}). Set ${envHint} or run: codeblog config --provider ${providerID} --api-key <key>`)
93
- }
94
-
95
- function routeViaProvider(
96
- providers: Record<string, ProviderRuntimeConfig>,
97
- requestedModel: string,
98
- providerID: string,
99
- modelID: string,
100
- ): ModelRoute {
101
- const provider = providers[providerID]
102
- if (!provider) throw unknownModelError(requestedModel, providers)
103
- if (!provider.apiKey) throw noKeyError(providerID, modelID)
104
-
105
- const compat = resolveCompat({ providerID, modelID, providerConfig: provider.config })
106
- return {
107
- requestedModel,
108
- providerID,
109
- modelID,
110
- apiKey: provider.apiKey,
111
- baseURL: provider.baseURL,
112
- compat,
113
- }
114
- }
115
-
116
- export async function routeModel(inputModel?: string, cfgInput?: Config.CodeblogConfig): Promise<ModelRoute> {
117
- const cfg = cfgInput || await Config.load()
118
- const requestedModel = inputModel || cfg.model || DEFAULT_MODEL
119
- const loaded = await loadProviders(cfg)
120
- const providers = loaded.providers
121
-
122
- if (requestedModel.includes("/")) {
123
- const [providerID, ...rest] = requestedModel.split("/")
124
- const modelID = rest.join("/")
125
- return routeViaProvider(providers, requestedModel, providerID!, modelID)
126
- }
127
-
128
- if (BUILTIN_MODELS[requestedModel]) {
129
- const providerID = BUILTIN_MODELS[requestedModel]!.providerID
130
- return routeViaProvider(providers, requestedModel, providerID, requestedModel)
131
- }
132
-
133
- const prefixed = inferProviderByModelPrefix(requestedModel)
134
- if (prefixed) {
135
- return routeViaProvider(providers, requestedModel, prefixed, requestedModel)
136
- }
137
-
138
- if (loaded.defaultProvider) {
139
- return routeViaProvider(providers, requestedModel, loaded.defaultProvider, requestedModel)
140
- }
141
-
142
- log.warn("route failed", { requestedModel })
143
- throw unknownModelError(requestedModel, providers)
144
- }
145
-
146
- export async function resolveProviderCompat(providerID: string, modelID: string, cfgInput?: Config.CodeblogConfig): Promise<ModelCompatConfig> {
147
- const loaded = await loadProviders(cfgInput)
148
- const provider = loaded.providers[providerID]
149
- return resolveCompat({ providerID, modelID, providerConfig: provider?.config })
150
- }