codeblog-app 2.0.2 → 2.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/drizzle/0000_init.sql +34 -0
- package/drizzle/meta/_journal.json +13 -0
- package/drizzle.config.ts +10 -0
- package/package.json +71 -8
- package/src/ai/__tests__/chat.test.ts +110 -0
- package/src/ai/__tests__/provider.test.ts +184 -0
- package/src/ai/__tests__/tools.test.ts +90 -0
- package/src/ai/chat.ts +169 -0
- package/src/ai/configure.ts +134 -0
- package/src/ai/provider.ts +238 -0
- package/src/ai/tools.ts +336 -0
- package/src/auth/index.ts +47 -0
- package/src/auth/oauth.ts +94 -0
- package/src/cli/__tests__/commands.test.ts +225 -0
- package/src/cli/cmd/agent.ts +102 -0
- package/src/cli/cmd/chat.ts +190 -0
- package/src/cli/cmd/comment.ts +70 -0
- package/src/cli/cmd/config.ts +153 -0
- package/src/cli/cmd/feed.ts +57 -0
- package/src/cli/cmd/forum.ts +123 -0
- package/src/cli/cmd/login.ts +45 -0
- package/src/cli/cmd/logout.ts +12 -0
- package/src/cli/cmd/me.ts +202 -0
- package/src/cli/cmd/post.ts +29 -0
- package/src/cli/cmd/publish.ts +70 -0
- package/src/cli/cmd/scan.ts +80 -0
- package/src/cli/cmd/search.ts +40 -0
- package/src/cli/cmd/setup.ts +273 -0
- package/src/cli/cmd/tui.ts +20 -0
- package/src/cli/cmd/update.ts +78 -0
- package/src/cli/cmd/vote.ts +50 -0
- package/src/cli/cmd/whoami.ts +21 -0
- package/src/cli/ui.ts +195 -0
- package/src/config/index.ts +54 -0
- package/src/flag/index.ts +23 -0
- package/src/global/index.ts +38 -0
- package/src/id/index.ts +20 -0
- package/src/index.ts +197 -0
- package/src/mcp/__tests__/client.test.ts +149 -0
- package/src/mcp/__tests__/e2e.ts +327 -0
- package/src/mcp/__tests__/integration.ts +148 -0
- package/src/mcp/client.ts +148 -0
- package/src/server/index.ts +48 -0
- package/src/storage/chat.ts +92 -0
- package/src/storage/db.ts +85 -0
- package/src/storage/schema.sql.ts +39 -0
- package/src/storage/schema.ts +1 -0
- package/src/tui/app.tsx +163 -0
- package/src/tui/commands.ts +187 -0
- package/src/tui/context/exit.tsx +15 -0
- package/src/tui/context/helper.tsx +25 -0
- package/src/tui/context/route.tsx +24 -0
- package/src/tui/context/theme.tsx +470 -0
- package/src/tui/routes/home.tsx +508 -0
- package/src/tui/routes/model.tsx +209 -0
- package/src/tui/routes/notifications.tsx +85 -0
- package/src/tui/routes/post.tsx +108 -0
- package/src/tui/routes/search.tsx +104 -0
- package/src/tui/routes/setup.tsx +255 -0
- package/src/tui/routes/trending.tsx +107 -0
- package/src/util/__tests__/context.test.ts +31 -0
- package/src/util/__tests__/lazy.test.ts +37 -0
- package/src/util/context.ts +23 -0
- package/src/util/error.ts +46 -0
- package/src/util/lazy.ts +18 -0
- package/src/util/log.ts +142 -0
- package/tsconfig.json +11 -0
package/src/ai/chat.ts
ADDED
|
@@ -0,0 +1,169 @@
|
|
|
1
|
+
import { streamText, type ModelMessage } from "ai"
|
|
2
|
+
import { AIProvider } from "./provider"
|
|
3
|
+
import { chatTools } from "./tools"
|
|
4
|
+
import { Log } from "../util/log"
|
|
5
|
+
|
|
6
|
+
// Scoped logger for this module.
const log = Log.create({ service: "ai-chat" })

// System prompt sent with every model step in AIChat.stream(). Defines the
// assistant persona, the platform capabilities it should claim, and the
// tool-usage policy. This is runtime text delivered to the model verbatim.
const SYSTEM_PROMPT = `You are CodeBlog AI — an assistant for the CodeBlog developer forum (codeblog.ai).

You help developers with everything on the platform:
- Scan and analyze their local IDE coding sessions
- Write and publish blog posts from coding sessions
- Browse, search, read, comment, vote on forum posts
- Manage bookmarks, notifications, debates, tags, trending topics
- Manage agents, view dashboard, follow users
- Generate weekly digests

You have 20+ tools. Use them whenever the user's request matches. Chain multiple tools if needed.
After a tool returns results, summarize them naturally for the user.

Write casually like a dev talking to another dev. Be specific, opinionated, and genuine.
Use code examples when relevant. Think Juejin / HN / Linux.do vibes — not a conference paper.`
|
|
23
|
+
|
|
24
|
+
export namespace AIChat {
  /** A single turn in the chat transcript. */
  export interface Message {
    role: "user" | "assistant" | "system"
    content: string
  }

  /** Hooks fired while a response streams; all are optional. */
  export interface StreamCallbacks {
    // Fired once per text fragment as it arrives.
    onToken?: (token: string) => void
    // Fired once at the end of a successful stream with the accumulated text.
    onFinish?: (text: string) => void
    // Fired on stream errors; when absent, stream() rethrows instead.
    onError?: (error: Error) => void
    // Fired when the model requests a tool invocation.
    onToolCall?: (name: string, args: unknown) => void
    // Fired when a tool invocation completes.
    onToolResult?: (name: string, result: unknown) => void
  }

  /**
   * Stream a chat completion, driving the tool loop manually: each outer
   * iteration performs exactly one model step (maxSteps: 1). If that step
   * produced tool calls, the calls and their results are appended to the
   * history and another step runs — up to 5 steps total. A step with no
   * tool calls ends the loop.
   *
   * Returns the accumulated assistant text. On a caught stream error the
   * partial text is returned (or the error is rethrown when no onError
   * callback was supplied).
   */
  export async function stream(messages: Message[], callbacks: StreamCallbacks, modelID?: string, signal?: AbortSignal) {
    const model = await AIProvider.getModel(modelID)
    log.info("streaming", { model: modelID || AIProvider.DEFAULT_MODEL, messages: messages.length })

    // Build history: only user/assistant text (tool context is added per-step below)
    const history: ModelMessage[] = messages
      .filter((m) => m.role === "user" || m.role === "assistant")
      .map((m) => ({ role: m.role as "user" | "assistant", content: m.content }))
    let full = ""

    // Up to 5 model steps; each may request tool calls that feed the next.
    for (let step = 0; step < 5; step++) {
      if (signal?.aborted) break

      const result = streamText({
        model,
        system: SYSTEM_PROMPT,
        messages: history,
        tools: chatTools,
        maxSteps: 1,
        abortSignal: signal,
      } as any)

      // Tool calls observed in this step; output is filled in when the
      // matching tool-result part arrives.
      const calls: Array<{ id: string; name: string; input: unknown; output: unknown }> = []

      try {
        log.info("starting fullStream iteration")
        for await (const part of (result as any).fullStream) {
          log.info("stream part", { type: part.type })
          if (signal?.aborted) break
          switch (part.type) {
            case "text-delta": {
              // Accepts both field spellings — presumably to cover
              // different AI SDK versions (text vs textDelta); confirm
              // against the installed "ai" package.
              const delta = part.text ?? part.textDelta ?? ""
              if (delta) { full += delta; callbacks.onToken?.(delta) }
              break
            }
            case "tool-call": {
              const input = part.input ?? part.args
              callbacks.onToolCall?.(part.toolName, input)
              calls.push({ id: part.toolCallId, name: part.toolName, input, output: undefined })
              break
            }
            case "tool-result": {
              const output = part.output ?? part.result ?? {}
              const name = part.toolName
              callbacks.onToolResult?.(name, output)
              // Pair the result with the first still-unanswered call that
              // shares its toolCallId.
              const match = calls.find((c: any) => c.id === part.toolCallId && c.output === undefined)
              if (match) match.output = output
              break
            }
            case "error": {
              // In-stream error part: report it but keep consuming the stream.
              const msg = part.error instanceof Error ? part.error.message : String(part.error)
              log.error("stream part error", { error: msg })
              callbacks.onError?.(part.error instanceof Error ? part.error : new Error(msg))
              break
            }
          }
        }
      } catch (err) {
        const error = err instanceof Error ? err : new Error(String(err))
        log.error("stream error", { error: error.message })
        if (callbacks.onError) callbacks.onError(error)
        else throw error
        // NOTE(review): this early return skips onFinish, so a caller that
        // only listens to onFinish never sees the partial text — confirm
        // that is intended.
        return full
      }

      // No tool calls this step: the model is done talking.
      if (calls.length === 0) break

      // AI SDK v6 ModelMessage format
      history.push({
        role: "assistant",
        content: calls.map((c) => ({
          type: "tool-call" as const,
          toolCallId: c.id,
          toolName: c.name,
          input: c.input,
        })),
      } as ModelMessage)

      history.push({
        role: "tool",
        content: calls.map((c) => ({
          type: "tool-result" as const,
          toolCallId: c.id,
          toolName: c.name,
          output: { type: "json" as const, value: c.output ?? {} },
        })),
      } as ModelMessage)

      log.info("tool step done", { step, tools: calls.map((c) => c.name) })
    }

    // NOTE(review): the "(No response)" placeholder also reaches generate(),
    // whose callers (e.g. analyzeAndPost) then see it as model output —
    // verify this fallback is wanted there.
    callbacks.onFinish?.(full || "(No response)")
    return full
  }

  /**
   * One-shot helper: send a single user prompt and resolve with the full
   * response text (as delivered to onFinish).
   */
  export async function generate(prompt: string, modelID?: string) {
    let result = ""
    await stream([{ role: "user", content: prompt }], { onFinish: (text) => (result = text) }, modelID)
    return result
  }

  /**
   * Turn a raw coding-session transcript into blog-post material. The
   * session content is truncated to 50k characters, the model is asked for
   * a strict JSON object, and the first {...} span in the reply is parsed.
   *
   * Throws when no JSON object can be found in the response; JSON.parse
   * may additionally throw on a malformed object.
   */
  export async function analyzeAndPost(sessionContent: string, modelID?: string) {
    const prompt = `Analyze this coding session and write a blog post about it.

The post should:
- Have a catchy, dev-friendly title (like HN or Juejin)
- Tell a story: what you were doing, what went wrong/right, what you learned
- Include relevant code snippets
- Be casual and genuine, written in first person
- End with key takeaways

Also provide:
- 3-8 relevant tags (lowercase, hyphenated)
- A one-line summary/hook

Session content:
${sessionContent.slice(0, 50000)}

Respond in this exact JSON format:
{
"title": "...",
"content": "... (markdown)",
"tags": ["tag1", "tag2"],
"summary": "..."
}`

    const raw = await generate(prompt, modelID)
    // Greedy match grabs from the first "{" to the last "}" in the reply,
    // tolerating prose the model may wrap around the JSON.
    const jsonMatch = raw.match(/\{[\s\S]*\}/)
    if (!jsonMatch) throw new Error("AI did not return valid JSON")
    return JSON.parse(jsonMatch[0])
  }
}
|
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
// AI provider auto-detection and configuration
|
|
2
|
+
|
|
3
|
+
function looksLikeApi(r: Response) {
|
|
4
|
+
const ct = r.headers.get("content-type") || ""
|
|
5
|
+
return ct.includes("json") || ct.includes("text/plain")
|
|
6
|
+
}
|
|
7
|
+
|
|
8
|
+
/**
 * Probe a custom base URL to figure out which wire format it speaks.
 * Tries the OpenAI-style GET /v1/models endpoint first, then the
 * Anthropic-style POST /v1/messages endpoint. Returns null when neither
 * responds like an API within the 8-second timeout.
 */
export async function probe(base: string, key: string): Promise<"openai" | "anthropic" | null> {
  // Strip trailing slashes so path concatenation below is clean.
  const clean = base.replace(/\/+$/, "")
  try {
    const r = await fetch(`${clean}/v1/models`, {
      headers: { Authorization: `Bearer ${key}` },
      signal: AbortSignal.timeout(8000),
    })
    // A 401/403 with an API-shaped body still proves an OpenAI-style
    // endpoint exists (the key may simply be wrong); HTML pages are rejected.
    if (r.ok || ((r.status === 401 || r.status === 403) && looksLikeApi(r))) return "openai"
  } catch {}
  try {
    const r = await fetch(`${clean}/v1/messages`, {
      method: "POST",
      headers: { "x-api-key": key, "anthropic-version": "2023-06-01", "content-type": "application/json" },
      // Deliberately minimal/invalid body: any API-shaped reply (even a
      // validation error) identifies an Anthropic endpoint.
      body: JSON.stringify({ model: "test", max_tokens: 1, messages: [] }),
      signal: AbortSignal.timeout(8000),
    })
    if (r.status !== 404 && looksLikeApi(r)) return "anthropic"
  } catch {}
  return null
}
|
|
28
|
+
|
|
29
|
+
// Well-known API-key prefixes → provider id. Checked in insertion order by
// detectProvider(); keys matching none of these fall back to "openai".
const KEY_PREFIX_MAP: Record<string, string> = {
  "sk-ant-": "anthropic",
  "AIza": "google",
  "xai-": "xai",
  "gsk_": "groq",
  "sk-or-": "openrouter",
  "pplx-": "perplexity",
}

// Provider id → environment variable used to expose its API key to the
// underlying SDKs for the current process.
const ENV_MAP: Record<string, string> = {
  anthropic: "ANTHROPIC_API_KEY",
  openai: "OPENAI_API_KEY",
  google: "GOOGLE_GENERATIVE_AI_API_KEY",
  xai: "XAI_API_KEY",
  groq: "GROQ_API_KEY",
  openrouter: "OPENROUTER_API_KEY",
  perplexity: "PERPLEXITY_API_KEY",
  "openai-compatible": "OPENAI_COMPATIBLE_API_KEY",
}
|
|
48
|
+
|
|
49
|
+
/**
 * Ask an OpenAI-compatible endpoint for its model list and pick a sensible
 * default: well-known capable models first, otherwise the first id the
 * endpoint returned. Returns null on any failure (network error, non-2xx,
 * missing or empty list).
 */
async function fetchFirstModel(base: string, key: string): Promise<string | null> {
  try {
    const clean = base.replace(/\/+$/, "")
    const r = await fetch(`${clean}/v1/models`, {
      headers: { Authorization: `Bearer ${key}` },
      signal: AbortSignal.timeout(8000),
    })
    if (!r.ok) return null
    // NOTE(review): response shape is assumed, not validated — a server
    // returning a different schema yields null via the guards below.
    const data = await r.json() as { data?: Array<{ id: string }> }
    if (!data.data || data.data.length === 0) return null

    // Prefer capable models: claude-sonnet > gpt-4o > claude-opus > first available
    const ids = data.data.map((m) => m.id)
    const preferred = [/^claude-sonnet-4/, /^gpt-4o$/, /^claude-opus-4/, /^gpt-4o-mini$/, /^gemini-2\.5-flash$/]
    for (const pattern of preferred) {
      const match = ids.find((id) => pattern.test(id))
      if (match) return match
    }
    return ids[0] ?? null
  } catch {}
  return null
}
|
|
71
|
+
|
|
72
|
+
export function detectProvider(key: string) {
|
|
73
|
+
for (const [prefix, provider] of Object.entries(KEY_PREFIX_MAP)) {
|
|
74
|
+
if (key.startsWith(prefix)) return provider
|
|
75
|
+
}
|
|
76
|
+
return "openai"
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
/**
 * Persist a provider credential (and optional custom base URL) to both the
 * current process environment and the on-disk config.
 *
 * With a URL: probes the endpoint to detect its wire format, stores the
 * credential under "anthropic" or the generic "openai-compatible"
 * provider, and auto-selects a model when none is configured yet.
 * Without a URL: infers the hosted provider from the key prefix.
 *
 * Returns { provider } on success, or { provider: "", error } when the
 * probe could not reach the endpoint.
 */
export async function saveProvider(url: string, key: string): Promise<{ provider: string; error?: string }> {
  // Lazy import — avoids pulling config I/O in at module load time.
  const { Config } = await import("../config")

  if (url) {
    const detected = await probe(url, key)
    if (!detected) return { provider: "", error: "Could not connect. Check URL and key." }

    // Anthropic keeps its own provider id; any OpenAI-style endpoint is
    // filed under the generic "openai-compatible" provider.
    const provider = detected === "anthropic" ? "anthropic" : "openai-compatible"
    const envKey = detected === "anthropic" ? "ANTHROPIC_API_KEY" : "OPENAI_COMPATIBLE_API_KEY"
    const envBase = detected === "anthropic" ? "ANTHROPIC_BASE_URL" : "OPENAI_COMPATIBLE_BASE_URL"
    // Make the credential visible to SDKs in this process immediately.
    process.env[envKey] = key
    process.env[envBase] = url

    const cfg = await Config.load()
    const providers = cfg.providers || {}
    providers[provider] = { api_key: key, base_url: url }

    // Auto-set model if not already configured
    const update: Record<string, unknown> = { providers }
    if (!cfg.model) {
      if (detected === "anthropic") {
        update.model = "claude-sonnet-4-20250514"
      } else {
        // For openai-compatible with custom URL, try to fetch available models
        const model = await fetchFirstModel(url, key)
        if (model) update.model = `openai-compatible/${model}`
      }
    }

    await Config.save(update)
    return { provider: `${detected} format` }
  }

  // No URL: a hosted provider — identify it from the key prefix alone.
  const provider = detectProvider(key)
  if (ENV_MAP[provider]) process.env[ENV_MAP[provider]] = key

  const cfg = await Config.load()
  const providers = cfg.providers || {}
  providers[provider] = { api_key: key }

  // Auto-set model for known providers
  const update: Record<string, unknown> = { providers }
  if (!cfg.model) {
    // Lazy import to avoid a circular dependency between configure and provider.
    const { AIProvider } = await import("./provider")
    const models = Object.values(AIProvider.BUILTIN_MODELS).filter((m) => m.providerID === provider)
    if (models.length > 0) update.model = models[0]!.id
  }

  await Config.save(update)
  return { provider }
}
|
|
130
|
+
|
|
131
|
+
export function mask(s: string) {
|
|
132
|
+
if (s.length <= 8) return s
|
|
133
|
+
return s.slice(0, 4) + "\u2022".repeat(Math.min(s.length - 8, 20)) + s.slice(-4)
|
|
134
|
+
}
|
|
@@ -0,0 +1,238 @@
|
|
|
1
|
+
import { createAnthropic } from "@ai-sdk/anthropic"
|
|
2
|
+
import { createOpenAI } from "@ai-sdk/openai"
|
|
3
|
+
import { createGoogleGenerativeAI } from "@ai-sdk/google"
|
|
4
|
+
import { createOpenAICompatible } from "@ai-sdk/openai-compatible"
|
|
5
|
+
import { type LanguageModel, type Provider as SDK } from "ai"
|
|
6
|
+
import { Config } from "../config"
|
|
7
|
+
import { Log } from "../util/log"
|
|
8
|
+
|
|
9
|
+
// Scoped logger for this module.
const log = Log.create({ service: "ai-provider" })
|
|
10
|
+
|
|
11
|
+
export namespace AIProvider {
|
|
12
|
+
// ---------------------------------------------------------------------------
|
|
13
|
+
// Bundled providers (4 core)
|
|
14
|
+
// ---------------------------------------------------------------------------
|
|
15
|
+
const BUNDLED_PROVIDERS: Record<string, (options: any) => SDK> = {
|
|
16
|
+
"@ai-sdk/anthropic": createAnthropic as any,
|
|
17
|
+
"@ai-sdk/openai": createOpenAI as any,
|
|
18
|
+
"@ai-sdk/google": createGoogleGenerativeAI as any,
|
|
19
|
+
"@ai-sdk/openai-compatible": createOpenAICompatible as any,
|
|
20
|
+
}
|
|
21
|
+
|
|
22
|
+
// ---------------------------------------------------------------------------
|
|
23
|
+
// Provider env key mapping
|
|
24
|
+
// ---------------------------------------------------------------------------
|
|
25
|
+
const PROVIDER_ENV: Record<string, string[]> = {
|
|
26
|
+
anthropic: ["ANTHROPIC_API_KEY", "ANTHROPIC_AUTH_TOKEN"],
|
|
27
|
+
openai: ["OPENAI_API_KEY"],
|
|
28
|
+
google: ["GOOGLE_GENERATIVE_AI_API_KEY", "GOOGLE_API_KEY"],
|
|
29
|
+
"openai-compatible": ["OPENAI_COMPATIBLE_API_KEY"],
|
|
30
|
+
}
|
|
31
|
+
|
|
32
|
+
// ---------------------------------------------------------------------------
|
|
33
|
+
// Provider base URL env mapping
|
|
34
|
+
// ---------------------------------------------------------------------------
|
|
35
|
+
const PROVIDER_BASE_URL_ENV: Record<string, string[]> = {
|
|
36
|
+
anthropic: ["ANTHROPIC_BASE_URL"],
|
|
37
|
+
openai: ["OPENAI_BASE_URL", "OPENAI_API_BASE"],
|
|
38
|
+
google: ["GOOGLE_API_BASE_URL"],
|
|
39
|
+
"openai-compatible": ["OPENAI_COMPATIBLE_BASE_URL"],
|
|
40
|
+
}
|
|
41
|
+
|
|
42
|
+
// ---------------------------------------------------------------------------
|
|
43
|
+
// Provider → npm package mapping
|
|
44
|
+
// ---------------------------------------------------------------------------
|
|
45
|
+
const PROVIDER_NPM: Record<string, string> = {
|
|
46
|
+
anthropic: "@ai-sdk/anthropic",
|
|
47
|
+
openai: "@ai-sdk/openai",
|
|
48
|
+
google: "@ai-sdk/google",
|
|
49
|
+
"openai-compatible": "@ai-sdk/openai-compatible",
|
|
50
|
+
}
|
|
51
|
+
|
|
52
|
+
// ---------------------------------------------------------------------------
|
|
53
|
+
// Model info type
|
|
54
|
+
// ---------------------------------------------------------------------------
|
|
55
|
+
export interface ModelInfo {
|
|
56
|
+
id: string
|
|
57
|
+
providerID: string
|
|
58
|
+
name: string
|
|
59
|
+
contextWindow: number
|
|
60
|
+
outputTokens: number
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
// ---------------------------------------------------------------------------
|
|
64
|
+
// Built-in model list
|
|
65
|
+
// ---------------------------------------------------------------------------
|
|
66
|
+
export const BUILTIN_MODELS: Record<string, ModelInfo> = {
|
|
67
|
+
"claude-sonnet-4-20250514": { id: "claude-sonnet-4-20250514", providerID: "anthropic", name: "Claude Sonnet 4", contextWindow: 200000, outputTokens: 16384 },
|
|
68
|
+
"claude-3-5-haiku-20241022": { id: "claude-3-5-haiku-20241022", providerID: "anthropic", name: "Claude 3.5 Haiku", contextWindow: 200000, outputTokens: 8192 },
|
|
69
|
+
"gpt-4o": { id: "gpt-4o", providerID: "openai", name: "GPT-4o", contextWindow: 128000, outputTokens: 16384 },
|
|
70
|
+
"gpt-4o-mini": { id: "gpt-4o-mini", providerID: "openai", name: "GPT-4o Mini", contextWindow: 128000, outputTokens: 16384 },
|
|
71
|
+
"o3-mini": { id: "o3-mini", providerID: "openai", name: "o3-mini", contextWindow: 200000, outputTokens: 100000 },
|
|
72
|
+
"gemini-2.5-flash": { id: "gemini-2.5-flash", providerID: "google", name: "Gemini 2.5 Flash", contextWindow: 1048576, outputTokens: 65536 },
|
|
73
|
+
"gemini-2.5-pro": { id: "gemini-2.5-pro", providerID: "google", name: "Gemini 2.5 Pro", contextWindow: 1048576, outputTokens: 65536 },
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
export const DEFAULT_MODEL = "claude-sonnet-4-20250514"
|
|
77
|
+
|
|
78
|
+
// ---------------------------------------------------------------------------
|
|
79
|
+
// Get API key for a provider
|
|
80
|
+
// ---------------------------------------------------------------------------
|
|
81
|
+
export async function getApiKey(providerID: string): Promise<string | undefined> {
|
|
82
|
+
const envKeys = PROVIDER_ENV[providerID] || []
|
|
83
|
+
for (const key of envKeys) {
|
|
84
|
+
if (process.env[key]) return process.env[key]
|
|
85
|
+
}
|
|
86
|
+
const cfg = await Config.load()
|
|
87
|
+
return cfg.providers?.[providerID]?.api_key
|
|
88
|
+
}
|
|
89
|
+
|
|
90
|
+
// ---------------------------------------------------------------------------
|
|
91
|
+
// Get base URL for a provider
|
|
92
|
+
// ---------------------------------------------------------------------------
|
|
93
|
+
export async function getBaseUrl(providerID: string): Promise<string | undefined> {
|
|
94
|
+
const envKeys = PROVIDER_BASE_URL_ENV[providerID] || []
|
|
95
|
+
for (const key of envKeys) {
|
|
96
|
+
if (process.env[key]) return process.env[key]
|
|
97
|
+
}
|
|
98
|
+
const cfg = await Config.load()
|
|
99
|
+
return cfg.providers?.[providerID]?.base_url
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
// ---------------------------------------------------------------------------
|
|
103
|
+
// List all available providers
|
|
104
|
+
// ---------------------------------------------------------------------------
|
|
105
|
+
export async function listProviders(): Promise<Record<string, { name: string; models: string[]; hasKey: boolean }>> {
|
|
106
|
+
const result: Record<string, { name: string; models: string[]; hasKey: boolean }> = {}
|
|
107
|
+
for (const model of Object.values(BUILTIN_MODELS)) {
|
|
108
|
+
if (!result[model.providerID]) {
|
|
109
|
+
const key = await getApiKey(model.providerID)
|
|
110
|
+
result[model.providerID] = { name: model.providerID, models: [], hasKey: !!key }
|
|
111
|
+
}
|
|
112
|
+
if (!result[model.providerID]!.models.includes(model.id)) {
|
|
113
|
+
result[model.providerID]!.models.push(model.id)
|
|
114
|
+
}
|
|
115
|
+
}
|
|
116
|
+
const compatKey = await getApiKey("openai-compatible")
|
|
117
|
+
if (compatKey) {
|
|
118
|
+
result["openai-compatible"] = { name: "OpenAI Compatible", models: [], hasKey: true }
|
|
119
|
+
}
|
|
120
|
+
return result
|
|
121
|
+
}
|
|
122
|
+
|
|
123
|
+
// ---------------------------------------------------------------------------
|
|
124
|
+
// Get a LanguageModel instance
|
|
125
|
+
// ---------------------------------------------------------------------------
|
|
126
|
+
const sdkCache = new Map<string, SDK>()
|
|
127
|
+
|
|
128
|
+
export async function getModel(modelID?: string): Promise<LanguageModel> {
|
|
129
|
+
const id = modelID || (await getConfiguredModel()) || DEFAULT_MODEL
|
|
130
|
+
|
|
131
|
+
const builtin = BUILTIN_MODELS[id]
|
|
132
|
+
if (builtin) {
|
|
133
|
+
const apiKey = await getApiKey(builtin.providerID)
|
|
134
|
+
if (!apiKey) throw noKeyError(builtin.providerID)
|
|
135
|
+
const base = await getBaseUrl(builtin.providerID)
|
|
136
|
+
return getLanguageModel(builtin.providerID, id, apiKey, undefined, base)
|
|
137
|
+
}
|
|
138
|
+
|
|
139
|
+
if (id.includes("/")) {
|
|
140
|
+
const [providerID, ...rest] = id.split("/")
|
|
141
|
+
const mid = rest.join("/")
|
|
142
|
+
const apiKey = await getApiKey(providerID!)
|
|
143
|
+
if (!apiKey) throw noKeyError(providerID!)
|
|
144
|
+
const base = await getBaseUrl(providerID!)
|
|
145
|
+
return getLanguageModel(providerID!, mid, apiKey, undefined, base)
|
|
146
|
+
}
|
|
147
|
+
|
|
148
|
+
const cfg = await Config.load()
|
|
149
|
+
if (cfg.providers) {
|
|
150
|
+
for (const [providerID, p] of Object.entries(cfg.providers)) {
|
|
151
|
+
if (!p.api_key) continue
|
|
152
|
+
const base = p.base_url || (await getBaseUrl(providerID))
|
|
153
|
+
if (base) {
|
|
154
|
+
log.info("fallback: sending unknown model to provider with base_url", { provider: providerID, model: id })
|
|
155
|
+
return getLanguageModel(providerID, id, p.api_key, undefined, base)
|
|
156
|
+
}
|
|
157
|
+
}
|
|
158
|
+
}
|
|
159
|
+
|
|
160
|
+
throw new Error(`Unknown model: ${id}. Run: codeblog config --list`)
|
|
161
|
+
}
|
|
162
|
+
|
|
163
|
+
function getLanguageModel(providerID: string, modelID: string, apiKey: string, npm?: string, baseURL?: string): LanguageModel {
|
|
164
|
+
const pkg = npm || PROVIDER_NPM[providerID] || "@ai-sdk/openai-compatible"
|
|
165
|
+
const cacheKey = `${providerID}:${pkg}:${apiKey.slice(0, 8)}`
|
|
166
|
+
|
|
167
|
+
log.info("loading model", { provider: providerID, model: modelID, pkg })
|
|
168
|
+
|
|
169
|
+
let sdk = sdkCache.get(cacheKey)
|
|
170
|
+
if (!sdk) {
|
|
171
|
+
const createFn = BUNDLED_PROVIDERS[pkg]
|
|
172
|
+
if (!createFn) throw new Error(`No bundled provider for ${pkg}. Use openai-compatible with a base URL instead.`)
|
|
173
|
+
const opts: Record<string, unknown> = { apiKey, name: providerID }
|
|
174
|
+
if (baseURL) {
|
|
175
|
+
const clean = baseURL.replace(/\/+$/, "")
|
|
176
|
+
opts.baseURL = clean.endsWith("/v1") ? clean : `${clean}/v1`
|
|
177
|
+
}
|
|
178
|
+
sdk = createFn(opts)
|
|
179
|
+
sdkCache.set(cacheKey, sdk)
|
|
180
|
+
}
|
|
181
|
+
|
|
182
|
+
if (pkg === "@ai-sdk/openai-compatible" && typeof (sdk as any).chatModel === "function") {
|
|
183
|
+
return (sdk as any).chatModel(modelID)
|
|
184
|
+
}
|
|
185
|
+
if (typeof (sdk as any).languageModel === "function") {
|
|
186
|
+
return (sdk as any).languageModel(modelID)
|
|
187
|
+
}
|
|
188
|
+
return (sdk as any)(modelID)
|
|
189
|
+
}
|
|
190
|
+
|
|
191
|
+
function noKeyError(providerID: string): Error {
|
|
192
|
+
const envKeys = PROVIDER_ENV[providerID] || []
|
|
193
|
+
const envHint = envKeys[0] || `${providerID.toUpperCase().replace(/-/g, "_")}_API_KEY`
|
|
194
|
+
return new Error(`No API key for ${providerID}. Set ${envHint} or run: codeblog config --provider ${providerID} --api-key <key>`)
|
|
195
|
+
}
|
|
196
|
+
|
|
197
|
+
async function getConfiguredModel(): Promise<string | undefined> {
|
|
198
|
+
const cfg = await Config.load()
|
|
199
|
+
return cfg.model
|
|
200
|
+
}
|
|
201
|
+
|
|
202
|
+
// ---------------------------------------------------------------------------
|
|
203
|
+
// Check if any AI provider has a key configured
|
|
204
|
+
// ---------------------------------------------------------------------------
|
|
205
|
+
export async function hasAnyKey(): Promise<boolean> {
|
|
206
|
+
for (const providerID of Object.keys(PROVIDER_ENV)) {
|
|
207
|
+
const key = await getApiKey(providerID)
|
|
208
|
+
if (key) return true
|
|
209
|
+
}
|
|
210
|
+
const cfg = await Config.load()
|
|
211
|
+
if (cfg.providers) {
|
|
212
|
+
for (const p of Object.values(cfg.providers)) {
|
|
213
|
+
if (p.api_key) return true
|
|
214
|
+
}
|
|
215
|
+
}
|
|
216
|
+
return false
|
|
217
|
+
}
|
|
218
|
+
|
|
219
|
+
// ---------------------------------------------------------------------------
|
|
220
|
+
// List available models with key status
|
|
221
|
+
// ---------------------------------------------------------------------------
|
|
222
|
+
export async function available(): Promise<Array<{ model: ModelInfo; hasKey: boolean }>> {
|
|
223
|
+
const result: Array<{ model: ModelInfo; hasKey: boolean }> = []
|
|
224
|
+
for (const model of Object.values(BUILTIN_MODELS)) {
|
|
225
|
+
const apiKey = await getApiKey(model.providerID)
|
|
226
|
+
result.push({ model, hasKey: !!apiKey })
|
|
227
|
+
}
|
|
228
|
+
return result
|
|
229
|
+
}
|
|
230
|
+
|
|
231
|
+
// ---------------------------------------------------------------------------
|
|
232
|
+
// Parse provider/model format
|
|
233
|
+
// ---------------------------------------------------------------------------
|
|
234
|
+
export function parseModel(model: string) {
|
|
235
|
+
const [providerID, ...rest] = model.split("/")
|
|
236
|
+
return { providerID, modelID: rest.join("/") }
|
|
237
|
+
}
|
|
238
|
+
}
|