codeblog-app 2.3.2 → 2.3.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/drizzle/0000_init.sql +34 -0
- package/drizzle/meta/_journal.json +13 -0
- package/drizzle.config.ts +10 -0
- package/package.json +73 -8
- package/src/ai/__tests__/chat.test.ts +188 -0
- package/src/ai/__tests__/compat.test.ts +46 -0
- package/src/ai/__tests__/home.ai-stream.integration.test.ts +77 -0
- package/src/ai/__tests__/provider-registry.test.ts +98 -0
- package/src/ai/__tests__/provider.test.ts +239 -0
- package/src/ai/__tests__/stream-events.test.ts +152 -0
- package/src/ai/__tests__/tools.test.ts +93 -0
- package/src/ai/chat.ts +336 -0
- package/src/ai/configure.ts +144 -0
- package/src/ai/models.ts +67 -0
- package/src/ai/provider-registry.ts +150 -0
- package/src/ai/provider.ts +264 -0
- package/src/ai/stream-events.ts +64 -0
- package/src/ai/tools.ts +118 -0
- package/src/ai/types.ts +105 -0
- package/src/auth/index.ts +49 -0
- package/src/auth/oauth.ts +146 -0
- package/src/cli/__tests__/commands.test.ts +229 -0
- package/src/cli/cmd/agent.ts +97 -0
- package/src/cli/cmd/ai.ts +10 -0
- package/src/cli/cmd/chat.ts +190 -0
- package/src/cli/cmd/comment.ts +67 -0
- package/src/cli/cmd/config.ts +154 -0
- package/src/cli/cmd/feed.ts +53 -0
- package/src/cli/cmd/forum.ts +106 -0
- package/src/cli/cmd/login.ts +45 -0
- package/src/cli/cmd/logout.ts +14 -0
- package/src/cli/cmd/me.ts +188 -0
- package/src/cli/cmd/post.ts +25 -0
- package/src/cli/cmd/publish.ts +64 -0
- package/src/cli/cmd/scan.ts +78 -0
- package/src/cli/cmd/search.ts +35 -0
- package/src/cli/cmd/setup.ts +845 -0
- package/src/cli/cmd/tui.ts +20 -0
- package/src/cli/cmd/uninstall.ts +281 -0
- package/src/cli/cmd/update.ts +139 -0
- package/src/cli/cmd/vote.ts +50 -0
- package/src/cli/cmd/whoami.ts +18 -0
- package/src/cli/mcp-print.ts +6 -0
- package/src/cli/ui.ts +410 -0
- package/src/config/index.ts +125 -0
- package/src/flag/index.ts +23 -0
- package/src/global/index.ts +38 -0
- package/src/id/index.ts +20 -0
- package/src/index.ts +212 -0
- package/src/mcp/__tests__/client.test.ts +149 -0
- package/src/mcp/__tests__/e2e.ts +331 -0
- package/src/mcp/__tests__/integration.ts +148 -0
- package/src/mcp/client.ts +118 -0
- package/src/server/index.ts +48 -0
- package/src/storage/chat.ts +73 -0
- package/src/storage/db.ts +85 -0
- package/src/storage/schema.sql.ts +39 -0
- package/src/storage/schema.ts +1 -0
- package/src/tui/__tests__/input-intent.test.ts +27 -0
- package/src/tui/__tests__/stream-assembler.test.ts +33 -0
- package/src/tui/ai-stream.ts +28 -0
- package/src/tui/app.tsx +224 -0
- package/src/tui/commands.ts +224 -0
- package/src/tui/context/exit.tsx +15 -0
- package/src/tui/context/helper.tsx +25 -0
- package/src/tui/context/route.tsx +24 -0
- package/src/tui/context/theme.tsx +471 -0
- package/src/tui/input-intent.ts +26 -0
- package/src/tui/routes/home.tsx +1053 -0
- package/src/tui/routes/model.tsx +213 -0
- package/src/tui/routes/notifications.tsx +87 -0
- package/src/tui/routes/post.tsx +102 -0
- package/src/tui/routes/search.tsx +105 -0
- package/src/tui/routes/setup.tsx +267 -0
- package/src/tui/routes/trending.tsx +107 -0
- package/src/tui/stream-assembler.ts +49 -0
- package/src/util/__tests__/context.test.ts +31 -0
- package/src/util/__tests__/lazy.test.ts +37 -0
- package/src/util/context.ts +23 -0
- package/src/util/error.ts +46 -0
- package/src/util/lazy.ts +18 -0
- package/src/util/log.ts +144 -0
- package/tsconfig.json +11 -0
package/src/ai/chat.ts
ADDED
|
@@ -0,0 +1,336 @@
|
|
|
1
|
+
import { streamText, stepCountIs } from "ai"
|
|
2
|
+
import { AIProvider } from "./provider"
|
|
3
|
+
import { getChatTools } from "./tools"
|
|
4
|
+
import { Log } from "../util/log"
|
|
5
|
+
import { createRunEventFactory, type StreamEvent } from "./stream-events"
|
|
6
|
+
|
|
7
|
+
// Module-scoped logger for the chat streaming pipeline.
const log = Log.create({ service: "ai-chat" })

// System prompt injected into every chat run (streamText `system`).
// NOTE: this is a runtime string sent to the model — edit with care.
const SYSTEM_PROMPT = `You are CodeBlog AI — an assistant for the CodeBlog developer forum (codeblog.ai).

You help developers with everything on the platform:
- Scan and analyze their local IDE coding sessions
- Write and publish blog posts from coding sessions
- Browse, search, read, comment, vote on forum posts
- Manage bookmarks, notifications, debates, tags, trending topics
- Manage agents, view dashboard, follow users
- Generate weekly digests

You have 20+ tools. Use them whenever the user's request matches. Chain multiple tools if needed.
After a tool returns results, summarize them naturally for the user.

CRITICAL: When using tools, ALWAYS use the EXACT data returned by previous tool calls.
- If scan_sessions returns a path like "/Users/zhaoyifei/...", use that EXACT path
- NEVER modify, guess, or infer file paths — use them exactly as returned
- If a tool call fails with "file not found", the path is wrong — check the scan results again

Write casually like a dev talking to another dev. Be specific, opinionated, and genuine.
Use code examples when relevant. Think Juejin / HN / Linux.do vibes — not a conference paper.`

// Abort the run when no stream part arrives for this long (tools excluded).
const IDLE_TIMEOUT_MS = 60_000
// Abort the run when a single tool call runs longer than this.
const TOOL_TIMEOUT_MS = 45_000
// Default cap on model/tool round-trips per run (streamText stopWhen).
const DEFAULT_MAX_STEPS = 10
|
|
33
|
+
|
|
34
|
+
export namespace AIChat {
|
|
35
|
+
// A single chat turn. "system" entries are accepted but filtered out before
// the model call — the system prompt is owned by SYSTEM_PROMPT.
export interface Message {
  role: "user" | "assistant" | "system"
  content: string
}

// Observer hooks for the callback-based stream() wrapper around streamEvents().
// All hooks are optional; callID correlates onToolCall with onToolResult.
export interface StreamCallbacks {
  onToken?: (token: string) => void
  onFinish?: (text: string) => void
  onError?: (error: Error) => void
  onToolCall?: (name: string, args: unknown, callID: string) => void
  onToolResult?: (name: string, result: unknown, callID: string) => void
}

// Per-run tuning knobs; unset fields fall back to the module constants above.
export interface StreamOptions {
  maxSteps?: number
  runId?: string
  idleTimeoutMs?: number
  toolTimeoutMs?: number
}
|
|
54
|
+
|
|
55
|
+
export async function* streamEvents(
  messages: Message[],
  modelID?: string,
  signal?: AbortSignal,
  options?: StreamOptions,
): AsyncGenerator<StreamEvent> {
  // Core chat loop: drives streamText() and re-emits its fullStream parts as
  // normalized StreamEvents (run-start, text-delta, tool-start, tool-result,
  // error, run-finish). Two watchdogs guard the run: an idle timeout (no
  // parts arriving while no tool is active) and a per-tool-call timeout;
  // either one aborts via the internal AbortController. An abort coming from
  // the caller's `signal` ends the run silently (no error event); a
  // run-finish event is always emitted last, even after errors.

  // Forward only user/assistant turns; system content comes from SYSTEM_PROMPT.
  const history = messages
    .filter((m) => m.role === "user" || m.role === "assistant")
    .map((m) => ({ role: m.role as "user" | "assistant", content: m.content }))

  // Compat profile selects the tool flavor; fall back to "default" so chat
  // still works when compat resolution fails.
  const routeCompat = await AIProvider.resolveModelCompat(modelID).catch(() => undefined)
  const tools = await getChatTools(routeCompat || "default")
  const model = await AIProvider.getModel(modelID)
  const maxSteps = options?.maxSteps ?? DEFAULT_MAX_STEPS
  const idleTimeoutMs = options?.idleTimeoutMs ?? IDLE_TIMEOUT_MS
  const toolTimeoutMs = options?.toolTimeoutMs ?? TOOL_TIMEOUT_MS

  const run = createRunEventFactory(options?.runId)
  let full = ""                       // accumulated assistant text for run-finish
  let aborted = false                 // set exactly once, by abortRun()
  let externalAbort = false           // true when the caller's signal fired
  let abortError: Error | undefined   // timeout/tool error that triggered the abort
  let errorEmitted = false            // prevents a duplicate error event in finally
  // Pending callIDs per tool name — FIFO fallback for pairing results with
  // calls when the SDK part carries no id.
  const toolQueue = new Map<string, string[]>()
  // Currently-running tool calls and their timeout timers.
  const activeTools = new Map<string, { name: string; timer?: ReturnType<typeof setTimeout> }>()

  const internalAbort = new AbortController()
  // Idempotent abort; remembers only the FIRST triggering error.
  const abortRun = (error?: Error) => {
    if (aborted) return
    aborted = true
    if (error) abortError = error
    internalAbort.abort()
  }
  const onExternalAbort = () => {
    externalAbort = true
    abortRun()
  }
  signal?.addEventListener("abort", onExternalAbort)

  yield run.next("run-start", {
    modelID: modelID || AIProvider.DEFAULT_MODEL,
    messageCount: history.length,
  })

  let idleTimer: ReturnType<typeof setTimeout> | undefined
  const clearAllToolTimers = () => {
    for (const entry of activeTools.values()) {
      if (entry.timer) clearTimeout(entry.timer)
    }
  }

  // Append a callID to the FIFO for its tool name.
  const pushToolID = (name: string, callID: string) => {
    const queue = toolQueue.get(name)
    if (!queue) {
      toolQueue.set(name, [callID])
      return
    }
    queue.push(callID)
  }

  // Pop the oldest pending callID for a tool name, or undefined.
  const shiftToolID = (name: string) => {
    const queue = toolQueue.get(name)
    if (!queue || queue.length === 0) return undefined
    const callID = queue.shift()
    if (queue.length === 0) toolQueue.delete(name)
    return callID
  }

  // Remove a specific callID from its tool's queue (result arrived with an id).
  const dropToolID = (name: string, callID: string) => {
    const queue = toolQueue.get(name)
    if (!queue || queue.length === 0) return
    const next = queue.filter((id) => id !== callID)
    if (next.length === 0) {
      toolQueue.delete(name)
      return
    }
    toolQueue.set(name, next)
  }

  // Start (or restart) the per-call timeout; firing aborts the whole run.
  // toolTimeoutMs <= 0 disables the watchdog.
  const armToolTimeout = (name: string, callID: string) => {
    if (toolTimeoutMs <= 0) return
    const timer = setTimeout(() => {
      abortRun(new Error(`Tool call "${name}" timed out after ${toolTimeoutMs}ms`))
    }, toolTimeoutMs)
    const active = activeTools.get(callID)
    if (!active) return
    if (active.timer) clearTimeout(active.timer)
    active.timer = timer
  }

  const startTool = (name: string, callID: string) => {
    activeTools.set(callID, { name })
    armToolTimeout(name, callID)
  }

  // Clear a finished call's timer and drop it from the active set.
  const finishTool = (callID?: string) => {
    if (!callID) return
    const active = activeTools.get(callID)
    if (!active) return
    if (active.timer) clearTimeout(active.timer)
    activeTools.delete(callID)
  }

  // Re-arm the idle timeout; suppressed while any tool is running (the tool
  // timeout covers that window instead).
  const resetIdle = () => {
    if (idleTimer) clearTimeout(idleTimer)
    if (activeTools.size > 0) return
    idleTimer = setTimeout(() => {
      abortRun(new Error(`Stream idle timeout after ${idleTimeoutMs}ms`))
    }, idleTimeoutMs)
  }

  try {
    const result = streamText({
      model,
      system: SYSTEM_PROMPT,
      messages: history,
      tools,
      stopWhen: stepCountIs(maxSteps),
      toolChoice: "auto",
      abortSignal: internalAbort.signal,
    })
    resetIdle()
    for await (const part of result.fullStream) {
      if (internalAbort.signal.aborted) {
        break
      }
      resetIdle()

      switch (part.type) {
        case "text-delta": {
          // Accept both `text` and `textDelta` field spellings on the part.
          const delta = (part as any).text ?? (part as any).textDelta ?? ""
          if (!delta) break
          full += delta
          yield run.next("text-delta", { text: delta })
          break
        }
        case "tool-call": {
          // Idle watchdog off while the tool runs; tool timeout takes over.
          if (idleTimer) {
            clearTimeout(idleTimer)
            idleTimer = undefined
          }
          const name = (part as any).toolName || "unknown"
          const args = (part as any).args ?? (part as any).input ?? {}
          const callID = (part as any).toolCallId || (part as any).id || `${run.runId}:tool:${crypto.randomUUID()}`
          pushToolID(name, callID)
          startTool(name, callID)
          yield run.next("tool-start", { callID, name, args })
          break
        }
        case "tool-result": {
          const name = (part as any).toolName || "unknown"
          // Pair by id when present, else FIFO order per tool name, else a
          // synthetic id so the event is still well-formed.
          const callID = (part as any).toolCallId || (part as any).id || shiftToolID(name) || `${run.runId}:tool:${crypto.randomUUID()}`
          dropToolID(name, callID)
          finishTool(callID)
          resetIdle()
          const result = (part as any).output ?? (part as any).result ?? {}
          yield run.next("tool-result", { callID, name, result })
          break
        }
        case "tool-error" as any: {
          // NOTE(review): "tool-error" is asserted past the SDK's declared
          // part-type union — confirm against the installed `ai` version.
          const name = (part as any).toolName || "unknown"
          const callID = (part as any).toolCallId || (part as any).id || shiftToolID(name)
          if (callID) {
            dropToolID(name, callID)
            finishTool(callID)
          }
          resetIdle()
          const error = new Error(String((part as any).error || "tool error"))
          errorEmitted = true
          yield run.next("error", { error })
          // A failed tool ends the whole run.
          abortRun(error)
          break
        }
        case "error": {
          const err = (part as any).error
          errorEmitted = true
          yield run.next("error", { error: err instanceof Error ? err : new Error(String(err)) })
          break
        }
        default:
          break
      }
    }
  } catch (err) {
    const error = err instanceof Error ? err : new Error(String(err))
    if (error.name === "AbortError") {
      // Internal aborts (timeouts/tool failures) surface their cause;
      // caller-initiated aborts are intentionally silent.
      if (abortError && !externalAbort) {
        errorEmitted = true
        yield run.next("error", { error: abortError })
      }
    } else {
      log.error("stream error", { error: error.message })
      errorEmitted = true
      yield run.next("error", { error })
    }
  } finally {
    if (idleTimer) clearTimeout(idleTimer)
    clearAllToolTimers()
    signal?.removeEventListener("abort", onExternalAbort)
    // An internal abort that never reached the catch path still gets reported.
    if (abortError && !externalAbort && !errorEmitted) {
      yield run.next("error", { error: abortError })
    }
    // Always close the run so consumers can settle.
    yield run.next("run-finish", { text: full, aborted })
  }
}
|
|
260
|
+
|
|
261
|
+
export async function stream(
|
|
262
|
+
messages: Message[],
|
|
263
|
+
callbacks: StreamCallbacks,
|
|
264
|
+
modelID?: string,
|
|
265
|
+
signal?: AbortSignal,
|
|
266
|
+
options?: StreamOptions,
|
|
267
|
+
) {
|
|
268
|
+
let full = ""
|
|
269
|
+
try {
|
|
270
|
+
for await (const event of streamEvents(messages, modelID, signal, options)) {
|
|
271
|
+
switch (event.type) {
|
|
272
|
+
case "text-delta":
|
|
273
|
+
full += event.text
|
|
274
|
+
callbacks.onToken?.(event.text)
|
|
275
|
+
break
|
|
276
|
+
case "tool-start":
|
|
277
|
+
callbacks.onToolCall?.(event.name, event.args, event.callID)
|
|
278
|
+
break
|
|
279
|
+
case "tool-result":
|
|
280
|
+
callbacks.onToolResult?.(event.name, event.result, event.callID)
|
|
281
|
+
break
|
|
282
|
+
case "error":
|
|
283
|
+
callbacks.onError?.(event.error)
|
|
284
|
+
break
|
|
285
|
+
case "run-finish":
|
|
286
|
+
callbacks.onFinish?.(event.text || "(No response)")
|
|
287
|
+
return event.text || "(No response)"
|
|
288
|
+
}
|
|
289
|
+
}
|
|
290
|
+
callbacks.onFinish?.(full || "(No response)")
|
|
291
|
+
return full || "(No response)"
|
|
292
|
+
} catch (err) {
|
|
293
|
+
const error = err instanceof Error ? err : new Error(String(err))
|
|
294
|
+
callbacks.onError?.(error)
|
|
295
|
+
callbacks.onFinish?.(full || "(No response)")
|
|
296
|
+
return full || "(No response)"
|
|
297
|
+
}
|
|
298
|
+
}
|
|
299
|
+
|
|
300
|
+
export async function generate(prompt: string, modelID?: string) {
|
|
301
|
+
let result = ""
|
|
302
|
+
await stream([{ role: "user", content: prompt }], { onFinish: (text) => (result = text) }, modelID)
|
|
303
|
+
return result
|
|
304
|
+
}
|
|
305
|
+
|
|
306
|
+
export async function analyzeAndPost(sessionContent: string, modelID?: string) {
|
|
307
|
+
const prompt = `Analyze this coding session and write a blog post about it.
|
|
308
|
+
|
|
309
|
+
The post should:
|
|
310
|
+
- Have a catchy, dev-friendly title (like HN or Juejin)
|
|
311
|
+
- Tell a story: what you were doing, what went wrong/right, what you learned
|
|
312
|
+
- Include relevant code snippets
|
|
313
|
+
- Be casual and genuine, written in first person
|
|
314
|
+
- End with key takeaways
|
|
315
|
+
|
|
316
|
+
Also provide:
|
|
317
|
+
- 3-8 relevant tags (lowercase, hyphenated)
|
|
318
|
+
- A one-line summary/hook
|
|
319
|
+
|
|
320
|
+
Session content:
|
|
321
|
+
${sessionContent.slice(0, 50000)}
|
|
322
|
+
|
|
323
|
+
Respond in this exact JSON format:
|
|
324
|
+
{
|
|
325
|
+
"title": "...",
|
|
326
|
+
"content": "... (markdown)",
|
|
327
|
+
"tags": ["tag1", "tag2"],
|
|
328
|
+
"summary": "..."
|
|
329
|
+
}`
|
|
330
|
+
|
|
331
|
+
const raw = await generate(prompt, modelID)
|
|
332
|
+
const jsonMatch = raw.match(/\{[\s\S]*\}/)
|
|
333
|
+
if (!jsonMatch) throw new Error("AI did not return valid JSON")
|
|
334
|
+
return JSON.parse(jsonMatch[0])
|
|
335
|
+
}
|
|
336
|
+
}
|
|
@@ -0,0 +1,144 @@
|
|
|
1
|
+
// AI provider auto-detection and configuration
|
|
2
|
+
|
|
3
|
+
function looksLikeApi(r: Response) {
|
|
4
|
+
const ct = r.headers.get("content-type") || ""
|
|
5
|
+
return ct.includes("json") || ct.includes("text/plain")
|
|
6
|
+
}
|
|
7
|
+
|
|
8
|
+
export async function probe(base: string, key: string): Promise<"openai" | "anthropic" | null> {
|
|
9
|
+
const clean = base.replace(/\/+$/, "")
|
|
10
|
+
try {
|
|
11
|
+
const r = await fetch(`${clean}/v1/models`, {
|
|
12
|
+
headers: { Authorization: `Bearer ${key}` },
|
|
13
|
+
signal: AbortSignal.timeout(8000),
|
|
14
|
+
})
|
|
15
|
+
if (r.ok || ((r.status === 401 || r.status === 403) && looksLikeApi(r))) return "openai"
|
|
16
|
+
} catch {}
|
|
17
|
+
try {
|
|
18
|
+
const r = await fetch(`${clean}/v1/messages`, {
|
|
19
|
+
method: "POST",
|
|
20
|
+
headers: { "x-api-key": key, "anthropic-version": "2023-06-01", "content-type": "application/json" },
|
|
21
|
+
body: JSON.stringify({ model: "test", max_tokens: 1, messages: [] }),
|
|
22
|
+
signal: AbortSignal.timeout(8000),
|
|
23
|
+
})
|
|
24
|
+
if (r.status !== 404 && looksLikeApi(r)) return "anthropic"
|
|
25
|
+
} catch {}
|
|
26
|
+
return null
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
// Vendor-specific API-key prefixes → provider id, checked in order by
// detectProvider(). Generic "sk-..." keys (OpenAI) intentionally have no
// entry and fall through to the "openai" default.
const KEY_PREFIX_MAP: Record<string, string> = {
  "sk-ant-": "anthropic",
  "AIza": "google",
  "xai-": "xai",
  "gsk_": "groq",
  "sk-or-": "openrouter",
  "pplx-": "perplexity",
}

// Provider id → environment variable that saveProvider() exports the key
// into for the current process.
const ENV_MAP: Record<string, string> = {
  anthropic: "ANTHROPIC_API_KEY",
  openai: "OPENAI_API_KEY",
  google: "GOOGLE_GENERATIVE_AI_API_KEY",
  xai: "XAI_API_KEY",
  groq: "GROQ_API_KEY",
  openrouter: "OPENROUTER_API_KEY",
  perplexity: "PERPLEXITY_API_KEY",
  "openai-compatible": "OPENAI_COMPATIBLE_API_KEY",
}
|
|
48
|
+
|
|
49
|
+
async function fetchFirstModel(base: string, key: string): Promise<string | null> {
|
|
50
|
+
try {
|
|
51
|
+
const clean = base.replace(/\/+$/, "")
|
|
52
|
+
const r = await fetch(`${clean}/v1/models`, {
|
|
53
|
+
headers: { Authorization: `Bearer ${key}` },
|
|
54
|
+
signal: AbortSignal.timeout(8000),
|
|
55
|
+
})
|
|
56
|
+
if (!r.ok) return null
|
|
57
|
+
const data = await r.json() as { data?: Array<{ id: string }> }
|
|
58
|
+
if (!data.data || data.data.length === 0) return null
|
|
59
|
+
|
|
60
|
+
// Prefer stable defaults first; avoid niche/legacy IDs unless explicitly chosen.
|
|
61
|
+
const ids = data.data.map((m) => m.id)
|
|
62
|
+
const preferred = [/^gpt-5\.2$/, /^claude-sonnet-4(?:-5)?/, /^gpt-5(?:\.|$|-)/, /^gpt-4o$/, /^claude-opus-4/, /^gpt-4o-mini$/, /^gemini-2\.5-flash$/]
|
|
63
|
+
for (const pattern of preferred) {
|
|
64
|
+
const match = ids.find((id) => pattern.test(id))
|
|
65
|
+
if (match) return match
|
|
66
|
+
}
|
|
67
|
+
return ids[0] ?? null
|
|
68
|
+
} catch {}
|
|
69
|
+
return null
|
|
70
|
+
}
|
|
71
|
+
|
|
72
|
+
export function detectProvider(key: string) {
|
|
73
|
+
for (const [prefix, provider] of Object.entries(KEY_PREFIX_MAP)) {
|
|
74
|
+
if (key.startsWith(prefix)) return provider
|
|
75
|
+
}
|
|
76
|
+
return "openai"
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
// Persist AI provider credentials into the user config and the current
// process environment. Two paths:
//   - url given: probe the endpoint to learn its wire format (openai vs
//     anthropic) and store it as a custom/compatible provider;
//   - no url: infer the vendor from the key prefix and store it directly.
// Also picks a default model when none is configured yet. Returns the saved
// provider label, or an `error` string when the probe fails.
export async function saveProvider(url: string, key: string): Promise<{ provider: string; error?: string }> {
  // Lazy import keeps config loading out of the module's import graph.
  const { Config } = await import("../config")

  if (url) {
    const detected = await probe(url, key)
    if (!detected) return { provider: "", error: "Could not connect. Check URL and key." }

    // Custom anthropic endpoints keep the "anthropic" id; everything else is
    // stored under the generic "openai-compatible" provider.
    const provider = detected === "anthropic" ? "anthropic" : "openai-compatible"
    const envKey = detected === "anthropic" ? "ANTHROPIC_API_KEY" : "OPENAI_COMPATIBLE_API_KEY"
    const envBase = detected === "anthropic" ? "ANTHROPIC_BASE_URL" : "OPENAI_COMPATIBLE_BASE_URL"
    // Make the credentials visible to this process immediately.
    process.env[envKey] = key
    process.env[envBase] = url

    const cfg = await Config.load()
    const providers = cfg.providers || {}
    providers[provider] = {
      api_key: key,
      base_url: url,
      api: detected === "anthropic" ? "anthropic" : "openai-compatible",
      compat_profile: detected === "anthropic" ? "anthropic" : "openai-compatible",
    }

    // Auto-set model if not already configured
    const update: Record<string, unknown> = { providers, default_provider: provider }
    if (!cfg.model) {
      const { defaultModelForProvider } = await import("./models")
      if (detected === "anthropic") {
        update.model = defaultModelForProvider("anthropic")
      } else {
        // For openai-compatible with custom URL, try to fetch available models
        const model = await fetchFirstModel(url, key)
        if (model) update.model = `openai-compatible/${model}`
        else update.model = `openai-compatible/${defaultModelForProvider("openai-compatible")}`
      }
    }

    await Config.save(update)
    return { provider: `${detected} format` }
  }

  // No URL: classify the key by its prefix and export the matching env var.
  const provider = detectProvider(key)
  if (ENV_MAP[provider]) process.env[ENV_MAP[provider]] = key

  const cfg = await Config.load()
  const providers = cfg.providers || {}
  providers[provider] = {
    api_key: key,
    api: provider === "anthropic" ? "anthropic" : provider === "google" ? "google" : provider === "openai" ? "openai" : "openai-compatible",
    compat_profile: provider === "anthropic" ? "anthropic" : provider === "google" ? "google" : provider === "openai" ? "openai" : "openai-compatible",
  }

  // Auto-set model for known providers
  const update: Record<string, unknown> = { providers, default_provider: provider }
  if (!cfg.model) {
    const { defaultModelForProvider } = await import("./models")
    update.model = defaultModelForProvider(provider)
  }

  await Config.save(update)
  return { provider }
}
|
|
140
|
+
|
|
141
|
+
export function mask(s: string) {
|
|
142
|
+
if (s.length <= 8) return s
|
|
143
|
+
return s.slice(0, 4) + "\u2022".repeat(Math.min(s.length - 8, 20)) + s.slice(-4)
|
|
144
|
+
}
|
package/src/ai/models.ts
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
// Static metadata for a chat model the CLI knows about out of the box.
// contextWindow/outputTokens are token counts as published by the vendors.
export interface ModelInfo {
  id: string
  providerID: string
  name: string
  contextWindow: number
  outputTokens: number
}

// Curated catalog of built-in models, keyed by bare model id.
export const BUILTIN_MODELS: Record<string, ModelInfo> = {
  "claude-sonnet-4-20250514": { id: "claude-sonnet-4-20250514", providerID: "anthropic", name: "Claude Sonnet 4", contextWindow: 200000, outputTokens: 16384 },
  "claude-3-5-haiku-20241022": { id: "claude-3-5-haiku-20241022", providerID: "anthropic", name: "Claude 3.5 Haiku", contextWindow: 200000, outputTokens: 8192 },
  "gpt-5.2": { id: "gpt-5.2", providerID: "openai", name: "GPT-5.2", contextWindow: 400000, outputTokens: 128000 },
  "gpt-4o": { id: "gpt-4o", providerID: "openai", name: "GPT-4o", contextWindow: 128000, outputTokens: 16384 },
  "gpt-4o-mini": { id: "gpt-4o-mini", providerID: "openai", name: "GPT-4o Mini", contextWindow: 128000, outputTokens: 16384 },
  "o3-mini": { id: "o3-mini", providerID: "openai", name: "o3-mini", contextWindow: 200000, outputTokens: 100000 },
  "gemini-2.5-flash": { id: "gemini-2.5-flash", providerID: "google", name: "Gemini 2.5 Flash", contextWindow: 1048576, outputTokens: 65536 },
  "gemini-2.5-pro": { id: "gemini-2.5-pro", providerID: "google", name: "Gemini 2.5 Pro", contextWindow: 1048576, outputTokens: 65536 },
}

// Global fallback model when neither config nor provider suggests one.
export const DEFAULT_MODEL = "claude-sonnet-4-20250514"
// Historical aliases accepted from old configs; rewritten by normalizeModelID().
const LEGACY_MODEL_MAP: Record<string, string> = {
  "4.0Ultra": "gpt-5.2",
  "4.0ultra": "gpt-5.2",
}

// Per-provider default model used when the user hasn't picked one.
const PROVIDER_DEFAULT_MODEL: Record<string, string> = {
  anthropic: "claude-sonnet-4-20250514",
  openai: "gpt-5.2",
  google: "gemini-2.5-flash",
  "openai-compatible": "gpt-5.2",
}
|
|
32
|
+
|
|
33
|
+
export function normalizeModelID(modelID?: string): string | undefined {
|
|
34
|
+
if (!modelID) return undefined
|
|
35
|
+
const trimmed = modelID.trim()
|
|
36
|
+
if (!trimmed) return undefined
|
|
37
|
+
if (LEGACY_MODEL_MAP[trimmed]) return LEGACY_MODEL_MAP[trimmed]
|
|
38
|
+
if (!trimmed.includes("/")) return trimmed
|
|
39
|
+
const [providerID, ...rest] = trimmed.split("/")
|
|
40
|
+
const raw = rest.join("/")
|
|
41
|
+
if (!raw) return trimmed
|
|
42
|
+
const mapped = LEGACY_MODEL_MAP[raw]
|
|
43
|
+
if (!mapped) return trimmed
|
|
44
|
+
return `${providerID}/${mapped}`
|
|
45
|
+
}
|
|
46
|
+
|
|
47
|
+
export function defaultModelForProvider(providerID?: string): string {
|
|
48
|
+
if (!providerID) return DEFAULT_MODEL
|
|
49
|
+
return PROVIDER_DEFAULT_MODEL[providerID] || DEFAULT_MODEL
|
|
50
|
+
}
|
|
51
|
+
|
|
52
|
+
export function resolveModelFromConfig(cfg: { model?: string; default_provider?: string }): string {
|
|
53
|
+
const model = normalizeModelID(cfg.model)
|
|
54
|
+
if (model) return model
|
|
55
|
+
const fallback = defaultModelForProvider(cfg.default_provider)
|
|
56
|
+
if (cfg.default_provider === "openai-compatible" && !fallback.includes("/")) {
|
|
57
|
+
return `openai-compatible/${fallback}`
|
|
58
|
+
}
|
|
59
|
+
return fallback
|
|
60
|
+
}
|
|
61
|
+
|
|
62
|
+
export function inferProviderByModelPrefix(modelID: string): string | undefined {
|
|
63
|
+
if (modelID.startsWith("claude-")) return "anthropic"
|
|
64
|
+
if (modelID.startsWith("gpt-") || modelID.startsWith("o1-") || modelID.startsWith("o3-")) return "openai"
|
|
65
|
+
if (modelID.startsWith("gemini-")) return "google"
|
|
66
|
+
return undefined
|
|
67
|
+
}
|
|
@@ -0,0 +1,150 @@
|
|
|
1
|
+
import { Config } from "../config"
|
|
2
|
+
import { Log } from "../util/log"
|
|
3
|
+
import { BUILTIN_MODELS, inferProviderByModelPrefix, resolveModelFromConfig, normalizeModelID } from "./models"
|
|
4
|
+
import { type ModelCompatConfig, resolveCompat } from "./types"
|
|
5
|
+
|
|
6
|
+
const log = Log.create({ service: "ai-provider-registry" })
|
|
7
|
+
|
|
8
|
+
// Environment variables consulted, in order, for each provider's API key.
export const PROVIDER_ENV: Record<string, string[]> = {
  anthropic: ["ANTHROPIC_API_KEY", "ANTHROPIC_AUTH_TOKEN"],
  openai: ["OPENAI_API_KEY"],
  google: ["GOOGLE_GENERATIVE_AI_API_KEY", "GOOGLE_API_KEY"],
  "openai-compatible": ["OPENAI_COMPATIBLE_API_KEY"],
}

// Environment variables consulted, in order, for each provider's base URL override.
export const PROVIDER_BASE_URL_ENV: Record<string, string[]> = {
  anthropic: ["ANTHROPIC_BASE_URL"],
  openai: ["OPENAI_BASE_URL", "OPENAI_API_BASE"],
  google: ["GOOGLE_API_BASE_URL"],
  "openai-compatible": ["OPENAI_COMPATIBLE_BASE_URL"],
}

// A provider after env/config resolution: the key and base URL actually in
// effect (env wins over persisted config). Missing apiKey means unusable.
export interface ProviderRuntimeConfig {
  id: string
  apiKey?: string
  baseURL?: string
  config?: Config.ProviderConfig
}

// Snapshot of all known providers plus the user's configured default.
export interface ProviderRegistryView {
  providers: Record<string, ProviderRuntimeConfig>
  defaultProvider?: string
}

// Result of routing a requested model to a concrete provider endpoint.
// requestedModel is what the caller asked for (possibly "provider/model");
// modelID is the bare id sent to the provider.
export interface ModelRoute {
  requestedModel: string
  providerID: string
  modelID: string
  apiKey: string
  baseURL?: string
  compat: ModelCompatConfig
}
|
|
42
|
+
|
|
43
|
+
function readFirstEnv(keys: string[]): string | undefined {
|
|
44
|
+
for (const key of keys) {
|
|
45
|
+
if (process.env[key]) return process.env[key]
|
|
46
|
+
}
|
|
47
|
+
return undefined
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
export async function loadProviders(cfgInput?: Config.CodeblogConfig): Promise<ProviderRegistryView> {
|
|
51
|
+
const cfg = cfgInput || await Config.load()
|
|
52
|
+
const user = cfg.providers || {}
|
|
53
|
+
const ids = new Set<string>([
|
|
54
|
+
...Object.keys(PROVIDER_ENV),
|
|
55
|
+
...Object.keys(user),
|
|
56
|
+
])
|
|
57
|
+
|
|
58
|
+
const providers: Record<string, ProviderRuntimeConfig> = {}
|
|
59
|
+
|
|
60
|
+
for (const id of ids) {
|
|
61
|
+
const config = user[id]
|
|
62
|
+
providers[id] = {
|
|
63
|
+
id,
|
|
64
|
+
config,
|
|
65
|
+
apiKey: readFirstEnv(PROVIDER_ENV[id] || []) || config?.api_key,
|
|
66
|
+
baseURL: readFirstEnv(PROVIDER_BASE_URL_ENV[id] || []) || config?.base_url,
|
|
67
|
+
}
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
return { providers, defaultProvider: cfg.default_provider }
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
function availableProvidersWithKeys(providers: Record<string, ProviderRuntimeConfig>): string[] {
|
|
74
|
+
return Object.values(providers)
|
|
75
|
+
.filter((p) => p.apiKey)
|
|
76
|
+
.map((p) => p.id)
|
|
77
|
+
.sort()
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
function unknownModelError(modelID: string, providers: Record<string, ProviderRuntimeConfig>): Error {
|
|
81
|
+
const available = availableProvidersWithKeys(providers)
|
|
82
|
+
const base = `Unknown model "${modelID}".`
|
|
83
|
+
if (available.length === 0) {
|
|
84
|
+
return new Error(`${base} No AI providers are configured. Run: codeblog ai setup`)
|
|
85
|
+
}
|
|
86
|
+
return new Error(`${base} Available providers with keys: ${available.join(", ")}. Try: codeblog config --model <provider>/<model>`)
|
|
87
|
+
}
|
|
88
|
+
|
|
89
|
+
function noKeyError(providerID: string, modelID: string): Error {
|
|
90
|
+
const envKeys = PROVIDER_ENV[providerID] || []
|
|
91
|
+
const envHint = envKeys[0] || `${providerID.toUpperCase().replace(/-/g, "_")}_API_KEY`
|
|
92
|
+
return new Error(`No API key for ${providerID} (model: ${modelID}). Set ${envHint} or run: codeblog config --provider ${providerID} --api-key <key>`)
|
|
93
|
+
}
|
|
94
|
+
|
|
95
|
+
// Final routing step: validate that the chosen provider exists and has a
// resolved API key, then attach the compat profile for the provider/model
// pair. Throws (rather than returns) the user-facing errors built above.
function routeViaProvider(
  providers: Record<string, ProviderRuntimeConfig>,
  requestedModel: string,
  providerID: string,
  modelID: string,
): ModelRoute {
  const provider = providers[providerID]
  if (!provider) throw unknownModelError(requestedModel, providers)
  if (!provider.apiKey) throw noKeyError(providerID, modelID)

  const compat = resolveCompat({ providerID, modelID, providerConfig: provider.config })
  return {
    requestedModel,
    providerID,
    modelID,
    apiKey: provider.apiKey,
    baseURL: provider.baseURL,
    compat,
  }
}
|
|
115
|
+
|
|
116
|
+
// Resolve a (possibly omitted) model id to a concrete provider route.
// Resolution order:
//   1. explicit "provider/model" prefix in the id
//   2. built-in model catalog (BUILTIN_MODELS)
//   3. well-known model-name prefix (claude-/gpt-/o1-/o3-/gemini-)
//   4. the configured default provider
// Throws unknownModelError when nothing matches.
export async function routeModel(inputModel?: string, cfgInput?: Config.CodeblogConfig): Promise<ModelRoute> {
  const cfg = cfgInput || await Config.load()
  // Fall back to the configured model (or provider default) when no id given.
  const requestedModel = normalizeModelID(inputModel) || resolveModelFromConfig(cfg)
  const loaded = await loadProviders(cfg)
  const providers = loaded.providers

  // 1. Explicit provider prefix wins; the rest of the id may itself contain "/".
  if (requestedModel.includes("/")) {
    const [providerID, ...rest] = requestedModel.split("/")
    const modelID = rest.join("/")
    return routeViaProvider(providers, requestedModel, providerID!, modelID)
  }

  // 2. Known built-in model: route to its catalog provider.
  if (BUILTIN_MODELS[requestedModel]) {
    const providerID = BUILTIN_MODELS[requestedModel]!.providerID
    return routeViaProvider(providers, requestedModel, providerID, requestedModel)
  }

  // 3. Infer the provider from the model-name prefix.
  const prefixed = inferProviderByModelPrefix(requestedModel)
  if (prefixed) {
    return routeViaProvider(providers, requestedModel, prefixed, requestedModel)
  }

  // 4. Last resort: send the raw id to the configured default provider.
  if (loaded.defaultProvider) {
    return routeViaProvider(providers, requestedModel, loaded.defaultProvider, requestedModel)
  }

  log.warn("route failed", { requestedModel })
  throw unknownModelError(requestedModel, providers)
}
|
|
145
|
+
|
|
146
|
+
export async function resolveProviderCompat(providerID: string, modelID: string, cfgInput?: Config.CodeblogConfig): Promise<ModelCompatConfig> {
|
|
147
|
+
const loaded = await loadProviders(cfgInput)
|
|
148
|
+
const provider = loaded.providers[providerID]
|
|
149
|
+
return resolveCompat({ providerID, modelID, providerConfig: provider?.config })
|
|
150
|
+
}
|