codeblog-app 2.2.6 → 2.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. package/package.json +8 -71
  2. package/drizzle/0000_init.sql +0 -34
  3. package/drizzle/meta/_journal.json +0 -13
  4. package/drizzle.config.ts +0 -10
  5. package/src/ai/__tests__/chat.test.ts +0 -179
  6. package/src/ai/__tests__/provider.test.ts +0 -198
  7. package/src/ai/__tests__/tools.test.ts +0 -93
  8. package/src/ai/chat.ts +0 -224
  9. package/src/ai/configure.ts +0 -134
  10. package/src/ai/provider.ts +0 -302
  11. package/src/ai/tools.ts +0 -114
  12. package/src/auth/index.ts +0 -47
  13. package/src/auth/oauth.ts +0 -108
  14. package/src/cli/__tests__/commands.test.ts +0 -225
  15. package/src/cli/cmd/agent.ts +0 -97
  16. package/src/cli/cmd/chat.ts +0 -190
  17. package/src/cli/cmd/comment.ts +0 -67
  18. package/src/cli/cmd/config.ts +0 -153
  19. package/src/cli/cmd/feed.ts +0 -53
  20. package/src/cli/cmd/forum.ts +0 -106
  21. package/src/cli/cmd/login.ts +0 -45
  22. package/src/cli/cmd/logout.ts +0 -12
  23. package/src/cli/cmd/me.ts +0 -188
  24. package/src/cli/cmd/post.ts +0 -25
  25. package/src/cli/cmd/publish.ts +0 -64
  26. package/src/cli/cmd/scan.ts +0 -78
  27. package/src/cli/cmd/search.ts +0 -35
  28. package/src/cli/cmd/setup.ts +0 -352
  29. package/src/cli/cmd/tui.ts +0 -20
  30. package/src/cli/cmd/uninstall.ts +0 -281
  31. package/src/cli/cmd/update.ts +0 -123
  32. package/src/cli/cmd/vote.ts +0 -50
  33. package/src/cli/cmd/whoami.ts +0 -18
  34. package/src/cli/mcp-print.ts +0 -6
  35. package/src/cli/ui.ts +0 -250
  36. package/src/config/index.ts +0 -55
  37. package/src/flag/index.ts +0 -23
  38. package/src/global/index.ts +0 -38
  39. package/src/id/index.ts +0 -20
  40. package/src/index.ts +0 -200
  41. package/src/mcp/__tests__/client.test.ts +0 -149
  42. package/src/mcp/__tests__/e2e.ts +0 -327
  43. package/src/mcp/__tests__/integration.ts +0 -148
  44. package/src/mcp/client.ts +0 -148
  45. package/src/server/index.ts +0 -48
  46. package/src/storage/chat.ts +0 -71
  47. package/src/storage/db.ts +0 -85
  48. package/src/storage/schema.sql.ts +0 -39
  49. package/src/storage/schema.ts +0 -1
  50. package/src/tui/app.tsx +0 -184
  51. package/src/tui/commands.ts +0 -186
  52. package/src/tui/context/exit.tsx +0 -15
  53. package/src/tui/context/helper.tsx +0 -25
  54. package/src/tui/context/route.tsx +0 -24
  55. package/src/tui/context/theme.tsx +0 -470
  56. package/src/tui/routes/home.tsx +0 -660
  57. package/src/tui/routes/model.tsx +0 -210
  58. package/src/tui/routes/notifications.tsx +0 -87
  59. package/src/tui/routes/post.tsx +0 -102
  60. package/src/tui/routes/search.tsx +0 -105
  61. package/src/tui/routes/setup.tsx +0 -255
  62. package/src/tui/routes/trending.tsx +0 -107
  63. package/src/util/__tests__/context.test.ts +0 -31
  64. package/src/util/__tests__/lazy.test.ts +0 -37
  65. package/src/util/context.ts +0 -23
  66. package/src/util/error.ts +0 -46
  67. package/src/util/lazy.ts +0 -18
  68. package/src/util/log.ts +0 -142
  69. package/tsconfig.json +0 -11
package/src/ai/chat.ts DELETED
@@ -1,224 +0,0 @@
1
- import { streamText, stepCountIs } from "ai"
2
- import { AIProvider } from "./provider"
3
- import { getChatTools } from "./tools"
4
- import { Log } from "../util/log"
5
-
6
- const log = Log.create({ service: "ai-chat" })
7
-
8
- const SYSTEM_PROMPT = `You are CodeBlog AI — an assistant for the CodeBlog developer forum (codeblog.ai).
9
-
10
- You help developers with everything on the platform:
11
- - Scan and analyze their local IDE coding sessions
12
- - Write and publish blog posts from coding sessions
13
- - Browse, search, read, comment, vote on forum posts
14
- - Manage bookmarks, notifications, debates, tags, trending topics
15
- - Manage agents, view dashboard, follow users
16
- - Generate weekly digests
17
-
18
- You have 20+ tools. Use them whenever the user's request matches. Chain multiple tools if needed.
19
- After a tool returns results, summarize them naturally for the user.
20
-
21
- CRITICAL: When using tools, ALWAYS use the EXACT data returned by previous tool calls.
22
- - If scan_sessions returns a path like "/Users/zhaoyifei/...", use that EXACT path
23
- - NEVER modify, guess, or infer file paths — use them exactly as returned
24
- - If a tool call fails with "file not found", the path is wrong — check the scan results again
25
-
26
- Write casually like a dev talking to another dev. Be specific, opinionated, and genuine.
27
- Use code examples when relevant. Think Juejin / HN / Linux.do vibes — not a conference paper.`
28
-
29
- const IDLE_TIMEOUT_MS = 15_000 // 15s without any stream event → abort
30
- const DEFAULT_MAX_STEPS = 10 // Allow AI to retry tools up to 10 steps (each tool call + result = 1 step)
31
-
32
- export namespace AIChat {
33
- export interface Message {
34
- role: "user" | "assistant" | "system"
35
- content: string
36
- }
37
-
38
- export interface StreamCallbacks {
39
- onToken?: (token: string) => void
40
- onFinish?: (text: string) => void
41
- onError?: (error: Error) => void
42
- onToolCall?: (name: string, args: unknown) => void
43
- onToolResult?: (name: string, result: unknown) => void
44
- }
45
-
46
- export interface StreamOptions {
47
- maxSteps?: number
48
- }
49
-
50
- export async function stream(
51
- messages: Message[],
52
- callbacks: StreamCallbacks,
53
- modelID?: string,
54
- signal?: AbortSignal,
55
- options?: StreamOptions
56
- ) {
57
- const model = await AIProvider.getModel(modelID)
58
- const tools = await getChatTools()
59
- const maxSteps = options?.maxSteps ?? DEFAULT_MAX_STEPS
60
- log.info("streaming", { model: modelID || AIProvider.DEFAULT_MODEL, messages: messages.length, toolCount: Object.keys(tools).length, maxSteps })
61
-
62
- const history = messages
63
- .filter((m) => m.role === "user" || m.role === "assistant")
64
- .map((m) => ({ role: m.role as "user" | "assistant", content: m.content }))
65
- let full = ""
66
-
67
- // Create an internal AbortController that we can trigger on idle timeout
68
- const internalAbort = new AbortController()
69
- const onExternalAbort = () => {
70
- log.info("external abort signal received")
71
- internalAbort.abort()
72
- }
73
- signal?.addEventListener("abort", onExternalAbort)
74
-
75
- const result = streamText({
76
- model,
77
- system: SYSTEM_PROMPT,
78
- messages: history,
79
- tools,
80
- stopWhen: stepCountIs(maxSteps),
81
- toolChoice: "auto",
82
- abortSignal: internalAbort.signal,
83
- experimental_toolCallStreaming: false, // Disable streaming tool calls to avoid incomplete arguments bug
84
- onStepFinish: (stepResult) => {
85
- log.info("onStepFinish", {
86
- stepNumber: stepResult.stepNumber,
87
- finishReason: stepResult.finishReason,
88
- textLength: stepResult.text?.length ?? 0,
89
- toolCallsCount: stepResult.toolCalls?.length ?? 0,
90
- toolResultsCount: stepResult.toolResults?.length ?? 0,
91
- })
92
- },
93
- })
94
-
95
- let partCount = 0
96
- let toolExecuting = false
97
- try {
98
- // Idle timeout: if no stream events arrive for IDLE_TIMEOUT_MS, abort.
99
- // Paused during tool execution (tools can take longer than 15s).
100
- let idleTimer: ReturnType<typeof setTimeout> | undefined
101
- const resetIdle = () => {
102
- if (idleTimer) clearTimeout(idleTimer)
103
- if (toolExecuting) return // Don't start timer while tool is running
104
- idleTimer = setTimeout(() => {
105
- log.info("IDLE TIMEOUT FIRED", { partCount, fullLength: full.length })
106
- internalAbort.abort()
107
- }, IDLE_TIMEOUT_MS)
108
- }
109
- resetIdle()
110
-
111
- for await (const part of result.fullStream) {
112
- partCount++
113
- if (internalAbort.signal.aborted) {
114
- log.info("abort detected in loop, breaking", { partCount })
115
- break
116
- }
117
- resetIdle()
118
-
119
- switch (part.type) {
120
- case "text-delta": {
121
- const delta = (part as any).text ?? (part as any).textDelta ?? ""
122
- if (delta) { full += delta; callbacks.onToken?.(delta) }
123
- break
124
- }
125
- case "tool-call": {
126
- const toolName = (part as any).toolName
127
- const toolArgs = (part as any).args ?? (part as any).input ?? {}
128
- log.info("tool-call", { toolName, args: toolArgs, partCount })
129
- // Pause idle timer — tool execution happens between tool-call and tool-result
130
- toolExecuting = true
131
- if (idleTimer) { clearTimeout(idleTimer); idleTimer = undefined }
132
- callbacks.onToolCall?.(toolName, toolArgs)
133
- break
134
- }
135
- case "tool-result": {
136
- log.info("tool-result", { toolName: (part as any).toolName, partCount })
137
- toolExecuting = false
138
- callbacks.onToolResult?.((part as any).toolName, (part as any).output ?? (part as any).result ?? {})
139
- break
140
- }
141
- case "tool-error" as any: {
142
- const errorMsg = String((part as any).error).slice(0, 500)
143
- log.error("tool-error", { toolName: (part as any).toolName, error: errorMsg })
144
- toolExecuting = false
145
- // Abort the stream on tool error to prevent infinite retry loops
146
- log.info("aborting stream due to tool error")
147
- internalAbort.abort()
148
- break
149
- }
150
- case "error": {
151
- const msg = (part as any).error instanceof Error ? (part as any).error.message : String((part as any).error)
152
- log.error("stream part error", { error: msg })
153
- callbacks.onError?.((part as any).error instanceof Error ? (part as any).error : new Error(msg))
154
- break
155
- }
156
- default:
157
- break
158
- }
159
- }
160
-
161
- if (idleTimer) clearTimeout(idleTimer)
162
- log.info("for-await loop exited normally", { partCount, fullLength: full.length })
163
- } catch (err) {
164
- const error = err instanceof Error ? err : new Error(String(err))
165
- log.info("catch block entered", { name: error.name, message: error.message.slice(0, 200), partCount })
166
- // Don't treat abort as a real error
167
- if (error.name !== "AbortError") {
168
- log.error("stream error (non-abort)", { error: error.message })
169
- if (callbacks.onError) callbacks.onError(error)
170
- else throw error
171
- } else {
172
- log.info("AbortError caught — treating as normal completion")
173
- }
174
- // On abort or error, still call onFinish so UI cleans up
175
- log.info("calling onFinish from catch", { fullLength: full.length })
176
- callbacks.onFinish?.(full || "(No response)")
177
- return full
178
- } finally {
179
- log.info("finally block", { partCount, fullLength: full.length })
180
- signal?.removeEventListener("abort", onExternalAbort)
181
- }
182
-
183
- log.info("calling onFinish from normal path", { fullLength: full.length })
184
- callbacks.onFinish?.(full || "(No response)")
185
- return full
186
- }
187
-
188
- export async function generate(prompt: string, modelID?: string) {
189
- let result = ""
190
- await stream([{ role: "user", content: prompt }], { onFinish: (text) => (result = text) }, modelID)
191
- return result
192
- }
193
-
194
- export async function analyzeAndPost(sessionContent: string, modelID?: string) {
195
- const prompt = `Analyze this coding session and write a blog post about it.
196
-
197
- The post should:
198
- - Have a catchy, dev-friendly title (like HN or Juejin)
199
- - Tell a story: what you were doing, what went wrong/right, what you learned
200
- - Include relevant code snippets
201
- - Be casual and genuine, written in first person
202
- - End with key takeaways
203
-
204
- Also provide:
205
- - 3-8 relevant tags (lowercase, hyphenated)
206
- - A one-line summary/hook
207
-
208
- Session content:
209
- ${sessionContent.slice(0, 50000)}
210
-
211
- Respond in this exact JSON format:
212
- {
213
- "title": "...",
214
- "content": "... (markdown)",
215
- "tags": ["tag1", "tag2"],
216
- "summary": "..."
217
- }`
218
-
219
- const raw = await generate(prompt, modelID)
220
- const jsonMatch = raw.match(/\{[\s\S]*\}/)
221
- if (!jsonMatch) throw new Error("AI did not return valid JSON")
222
- return JSON.parse(jsonMatch[0])
223
- }
224
- }
package/src/ai/configure.ts DELETED
@@ -1,134 +0,0 @@
1
- // AI provider auto-detection and configuration
2
-
3
- function looksLikeApi(r: Response) {
4
- const ct = r.headers.get("content-type") || ""
5
- return ct.includes("json") || ct.includes("text/plain")
6
- }
7
-
8
- export async function probe(base: string, key: string): Promise<"openai" | "anthropic" | null> {
9
- const clean = base.replace(/\/+$/, "")
10
- try {
11
- const r = await fetch(`${clean}/v1/models`, {
12
- headers: { Authorization: `Bearer ${key}` },
13
- signal: AbortSignal.timeout(8000),
14
- })
15
- if (r.ok || ((r.status === 401 || r.status === 403) && looksLikeApi(r))) return "openai"
16
- } catch {}
17
- try {
18
- const r = await fetch(`${clean}/v1/messages`, {
19
- method: "POST",
20
- headers: { "x-api-key": key, "anthropic-version": "2023-06-01", "content-type": "application/json" },
21
- body: JSON.stringify({ model: "test", max_tokens: 1, messages: [] }),
22
- signal: AbortSignal.timeout(8000),
23
- })
24
- if (r.status !== 404 && looksLikeApi(r)) return "anthropic"
25
- } catch {}
26
- return null
27
- }
28
-
29
- const KEY_PREFIX_MAP: Record<string, string> = {
30
- "sk-ant-": "anthropic",
31
- "AIza": "google",
32
- "xai-": "xai",
33
- "gsk_": "groq",
34
- "sk-or-": "openrouter",
35
- "pplx-": "perplexity",
36
- }
37
-
38
- const ENV_MAP: Record<string, string> = {
39
- anthropic: "ANTHROPIC_API_KEY",
40
- openai: "OPENAI_API_KEY",
41
- google: "GOOGLE_GENERATIVE_AI_API_KEY",
42
- xai: "XAI_API_KEY",
43
- groq: "GROQ_API_KEY",
44
- openrouter: "OPENROUTER_API_KEY",
45
- perplexity: "PERPLEXITY_API_KEY",
46
- "openai-compatible": "OPENAI_COMPATIBLE_API_KEY",
47
- }
48
-
49
- async function fetchFirstModel(base: string, key: string): Promise<string | null> {
50
- try {
51
- const clean = base.replace(/\/+$/, "")
52
- const r = await fetch(`${clean}/v1/models`, {
53
- headers: { Authorization: `Bearer ${key}` },
54
- signal: AbortSignal.timeout(8000),
55
- })
56
- if (!r.ok) return null
57
- const data = await r.json() as { data?: Array<{ id: string }> }
58
- if (!data.data || data.data.length === 0) return null
59
-
60
- // Prefer capable models: claude-sonnet > gpt-4o > claude-opus > first available
61
- const ids = data.data.map((m) => m.id)
62
- const preferred = [/^claude-sonnet-4/, /^gpt-4o$/, /^claude-opus-4/, /^gpt-4o-mini$/, /^gemini-2\.5-flash$/]
63
- for (const pattern of preferred) {
64
- const match = ids.find((id) => pattern.test(id))
65
- if (match) return match
66
- }
67
- return ids[0] ?? null
68
- } catch {}
69
- return null
70
- }
71
-
72
- export function detectProvider(key: string) {
73
- for (const [prefix, provider] of Object.entries(KEY_PREFIX_MAP)) {
74
- if (key.startsWith(prefix)) return provider
75
- }
76
- return "openai"
77
- }
78
-
79
- export async function saveProvider(url: string, key: string): Promise<{ provider: string; error?: string }> {
80
- const { Config } = await import("../config")
81
-
82
- if (url) {
83
- const detected = await probe(url, key)
84
- if (!detected) return { provider: "", error: "Could not connect. Check URL and key." }
85
-
86
- const provider = detected === "anthropic" ? "anthropic" : "openai-compatible"
87
- const envKey = detected === "anthropic" ? "ANTHROPIC_API_KEY" : "OPENAI_COMPATIBLE_API_KEY"
88
- const envBase = detected === "anthropic" ? "ANTHROPIC_BASE_URL" : "OPENAI_COMPATIBLE_BASE_URL"
89
- process.env[envKey] = key
90
- process.env[envBase] = url
91
-
92
- const cfg = await Config.load()
93
- const providers = cfg.providers || {}
94
- providers[provider] = { api_key: key, base_url: url }
95
-
96
- // Auto-set model if not already configured
97
- const update: Record<string, unknown> = { providers }
98
- if (!cfg.model) {
99
- if (detected === "anthropic") {
100
- update.model = "claude-sonnet-4-20250514"
101
- } else {
102
- // For openai-compatible with custom URL, try to fetch available models
103
- const model = await fetchFirstModel(url, key)
104
- if (model) update.model = `openai-compatible/${model}`
105
- }
106
- }
107
-
108
- await Config.save(update)
109
- return { provider: `${detected} format` }
110
- }
111
-
112
- const provider = detectProvider(key)
113
- if (ENV_MAP[provider]) process.env[ENV_MAP[provider]] = key
114
-
115
- const cfg = await Config.load()
116
- const providers = cfg.providers || {}
117
- providers[provider] = { api_key: key }
118
-
119
- // Auto-set model for known providers
120
- const update: Record<string, unknown> = { providers }
121
- if (!cfg.model) {
122
- const { AIProvider } = await import("./provider")
123
- const models = Object.values(AIProvider.BUILTIN_MODELS).filter((m) => m.providerID === provider)
124
- if (models.length > 0) update.model = models[0]!.id
125
- }
126
-
127
- await Config.save(update)
128
- return { provider }
129
- }
130
-
131
- export function mask(s: string) {
132
- if (s.length <= 8) return s
133
- return s.slice(0, 4) + "\u2022".repeat(Math.min(s.length - 8, 20)) + s.slice(-4)
134
- }
package/src/ai/provider.ts DELETED
@@ -1,302 +0,0 @@
1
- import { createAnthropic } from "@ai-sdk/anthropic"
2
- import { createOpenAI } from "@ai-sdk/openai"
3
- import { createGoogleGenerativeAI } from "@ai-sdk/google"
4
- import { createOpenAICompatible } from "@ai-sdk/openai-compatible"
5
- import { type LanguageModel, type Provider as SDK } from "ai"
6
- import { Config } from "../config"
7
- import { Log } from "../util/log"
8
-
9
- const log = Log.create({ service: "ai-provider" })
10
-
11
- export namespace AIProvider {
12
- // ---------------------------------------------------------------------------
13
- // Bundled providers (4 core)
14
- // ---------------------------------------------------------------------------
15
- const BUNDLED_PROVIDERS: Record<string, (options: any) => SDK> = {
16
- "@ai-sdk/anthropic": createAnthropic as any,
17
- "@ai-sdk/openai": createOpenAI as any,
18
- "@ai-sdk/google": createGoogleGenerativeAI as any,
19
- "@ai-sdk/openai-compatible": createOpenAICompatible as any,
20
- }
21
-
22
- // ---------------------------------------------------------------------------
23
- // Provider env key mapping
24
- // ---------------------------------------------------------------------------
25
- const PROVIDER_ENV: Record<string, string[]> = {
26
- anthropic: ["ANTHROPIC_API_KEY", "ANTHROPIC_AUTH_TOKEN"],
27
- openai: ["OPENAI_API_KEY"],
28
- google: ["GOOGLE_GENERATIVE_AI_API_KEY", "GOOGLE_API_KEY"],
29
- "openai-compatible": ["OPENAI_COMPATIBLE_API_KEY"],
30
- }
31
-
32
- // ---------------------------------------------------------------------------
33
- // Provider base URL env mapping
34
- // ---------------------------------------------------------------------------
35
- const PROVIDER_BASE_URL_ENV: Record<string, string[]> = {
36
- anthropic: ["ANTHROPIC_BASE_URL"],
37
- openai: ["OPENAI_BASE_URL", "OPENAI_API_BASE"],
38
- google: ["GOOGLE_API_BASE_URL"],
39
- "openai-compatible": ["OPENAI_COMPATIBLE_BASE_URL"],
40
- }
41
-
42
- // ---------------------------------------------------------------------------
43
- // Provider → npm package mapping
44
- // ---------------------------------------------------------------------------
45
- const PROVIDER_NPM: Record<string, string> = {
46
- anthropic: "@ai-sdk/anthropic",
47
- openai: "@ai-sdk/openai",
48
- google: "@ai-sdk/google",
49
- "openai-compatible": "@ai-sdk/openai-compatible",
50
- }
51
-
52
- // ---------------------------------------------------------------------------
53
- // Model info type
54
- // ---------------------------------------------------------------------------
55
- export interface ModelInfo {
56
- id: string
57
- providerID: string
58
- name: string
59
- contextWindow: number
60
- outputTokens: number
61
- }
62
-
63
- // ---------------------------------------------------------------------------
64
- // Built-in model list
65
- // ---------------------------------------------------------------------------
66
- export const BUILTIN_MODELS: Record<string, ModelInfo> = {
67
- "claude-sonnet-4-20250514": { id: "claude-sonnet-4-20250514", providerID: "anthropic", name: "Claude Sonnet 4", contextWindow: 200000, outputTokens: 16384 },
68
- "claude-3-5-haiku-20241022": { id: "claude-3-5-haiku-20241022", providerID: "anthropic", name: "Claude 3.5 Haiku", contextWindow: 200000, outputTokens: 8192 },
69
- "gpt-4o": { id: "gpt-4o", providerID: "openai", name: "GPT-4o", contextWindow: 128000, outputTokens: 16384 },
70
- "gpt-4o-mini": { id: "gpt-4o-mini", providerID: "openai", name: "GPT-4o Mini", contextWindow: 128000, outputTokens: 16384 },
71
- "o3-mini": { id: "o3-mini", providerID: "openai", name: "o3-mini", contextWindow: 200000, outputTokens: 100000 },
72
- "gemini-2.5-flash": { id: "gemini-2.5-flash", providerID: "google", name: "Gemini 2.5 Flash", contextWindow: 1048576, outputTokens: 65536 },
73
- "gemini-2.5-pro": { id: "gemini-2.5-pro", providerID: "google", name: "Gemini 2.5 Pro", contextWindow: 1048576, outputTokens: 65536 },
74
- }
75
-
76
- export const DEFAULT_MODEL = "claude-sonnet-4-20250514"
77
-
78
- // ---------------------------------------------------------------------------
79
- // Get API key for a provider
80
- // ---------------------------------------------------------------------------
81
- export async function getApiKey(providerID: string): Promise<string | undefined> {
82
- const envKeys = PROVIDER_ENV[providerID] || []
83
- for (const key of envKeys) {
84
- if (process.env[key]) return process.env[key]
85
- }
86
- const cfg = await Config.load()
87
- return cfg.providers?.[providerID]?.api_key
88
- }
89
-
90
- // ---------------------------------------------------------------------------
91
- // Get base URL for a provider
92
- // ---------------------------------------------------------------------------
93
- export async function getBaseUrl(providerID: string): Promise<string | undefined> {
94
- const envKeys = PROVIDER_BASE_URL_ENV[providerID] || []
95
- for (const key of envKeys) {
96
- if (process.env[key]) return process.env[key]
97
- }
98
- const cfg = await Config.load()
99
- return cfg.providers?.[providerID]?.base_url
100
- }
101
-
102
- // ---------------------------------------------------------------------------
103
- // List all available providers
104
- // ---------------------------------------------------------------------------
105
- export async function listProviders(): Promise<Record<string, { name: string; models: string[]; hasKey: boolean }>> {
106
- const result: Record<string, { name: string; models: string[]; hasKey: boolean }> = {}
107
- for (const model of Object.values(BUILTIN_MODELS)) {
108
- if (!result[model.providerID]) {
109
- const key = await getApiKey(model.providerID)
110
- result[model.providerID] = { name: model.providerID, models: [], hasKey: !!key }
111
- }
112
- if (!result[model.providerID]!.models.includes(model.id)) {
113
- result[model.providerID]!.models.push(model.id)
114
- }
115
- }
116
- const compatKey = await getApiKey("openai-compatible")
117
- if (compatKey) {
118
- const compatBase = await getBaseUrl("openai-compatible")
119
- const remoteModels = compatBase ? await fetchRemoteModels(compatBase, compatKey) : []
120
- result["openai-compatible"] = { name: "OpenAI Compatible", models: remoteModels, hasKey: true }
121
- }
122
- return result
123
- }
124
-
125
- // ---------------------------------------------------------------------------
126
- // Get a LanguageModel instance
127
- // ---------------------------------------------------------------------------
128
- const sdkCache = new Map<string, SDK>()
129
-
130
- export async function getModel(modelID?: string): Promise<LanguageModel> {
131
- const id = modelID || (await getConfiguredModel()) || DEFAULT_MODEL
132
-
133
- const builtin = BUILTIN_MODELS[id]
134
- if (builtin) {
135
- const apiKey = await getApiKey(builtin.providerID)
136
- if (!apiKey) throw noKeyError(builtin.providerID)
137
- const base = await getBaseUrl(builtin.providerID)
138
- return getLanguageModel(builtin.providerID, id, apiKey, undefined, base)
139
- }
140
-
141
- if (id.includes("/")) {
142
- const [providerID, ...rest] = id.split("/")
143
- const mid = rest.join("/")
144
- const apiKey = await getApiKey(providerID!)
145
- if (!apiKey) throw noKeyError(providerID!)
146
- const base = await getBaseUrl(providerID!)
147
- return getLanguageModel(providerID!, mid, apiKey, undefined, base)
148
- }
149
-
150
- const cfg = await Config.load()
151
- if (cfg.providers) {
152
- for (const [providerID, p] of Object.entries(cfg.providers)) {
153
- if (!p.api_key) continue
154
- const base = p.base_url || (await getBaseUrl(providerID))
155
- if (base) {
156
- log.info("fallback: sending unknown model to provider with base_url", { provider: providerID, model: id })
157
- return getLanguageModel(providerID, id, p.api_key, undefined, base)
158
- }
159
- }
160
- }
161
-
162
- throw new Error(`Unknown model: ${id}. Run: codeblog config --list`)
163
- }
164
-
165
- function getLanguageModel(providerID: string, modelID: string, apiKey: string, npm?: string, baseURL?: string): LanguageModel {
166
- // Auto-detect Anthropic models and use @ai-sdk/anthropic instead of openai-compatible
167
- // This fixes streaming tool call argument parsing issues with openai-compatible provider
168
- let pkg = npm || PROVIDER_NPM[providerID]
169
-
170
- // Force Anthropic SDK for Claude models, even if provider is openai-compatible
171
- if (modelID.startsWith("claude-") && pkg === "@ai-sdk/openai-compatible") {
172
- pkg = "@ai-sdk/anthropic"
173
- log.info("auto-detected Claude model, switching from openai-compatible to @ai-sdk/anthropic", { model: modelID })
174
- }
175
-
176
- if (!pkg) {
177
- pkg = "@ai-sdk/openai-compatible"
178
- }
179
-
180
- const cacheKey = `${providerID}:${pkg}:${apiKey.slice(0, 8)}`
181
-
182
- log.info("loading model", { provider: providerID, model: modelID, pkg })
183
-
184
- let sdk = sdkCache.get(cacheKey)
185
- if (!sdk) {
186
- const createFn = BUNDLED_PROVIDERS[pkg]
187
- if (!createFn) throw new Error(`No bundled provider for ${pkg}. Use openai-compatible with a base URL instead.`)
188
- const opts: Record<string, unknown> = { apiKey, name: providerID }
189
- if (baseURL) {
190
- const clean = baseURL.replace(/\/+$/, "")
191
- opts.baseURL = clean.endsWith("/v1") ? clean : `${clean}/v1`
192
- }
193
- // For openai-compatible providers, normalize request body for broader compatibility
194
- if (pkg === "@ai-sdk/openai-compatible") {
195
- opts.transformRequestBody = (body: Record<string, any>) => {
196
- // Remove parallel_tool_calls — many proxies/providers don't support it
197
- delete body.parallel_tool_calls
198
-
199
- // Ensure all tool schemas have type: "object" (required by DeepSeek/Qwen/etc.)
200
- if (Array.isArray(body.tools)) {
201
- for (const t of body.tools) {
202
- const params = t?.function?.parameters
203
- if (params && !params.type) {
204
- params.type = "object"
205
- if (!params.properties) params.properties = {}
206
- }
207
- }
208
- }
209
-
210
- return body
211
- }
212
- }
213
- sdk = createFn(opts)
214
- sdkCache.set(cacheKey, sdk)
215
- }
216
-
217
- if (pkg === "@ai-sdk/openai-compatible" && typeof (sdk as any).chatModel === "function") {
218
- return (sdk as any).chatModel(modelID)
219
- }
220
- if (typeof (sdk as any).languageModel === "function") {
221
- return (sdk as any).languageModel(modelID)
222
- }
223
- return (sdk as any)(modelID)
224
- }
225
-
226
- async function fetchRemoteModels(base: string, key: string): Promise<string[]> {
227
- try {
228
- const clean = base.replace(/\/+$/, "")
229
- const url = clean.endsWith("/v1") ? `${clean}/models` : `${clean}/v1/models`
230
- const r = await fetch(url, {
231
- headers: { Authorization: `Bearer ${key}` },
232
- signal: AbortSignal.timeout(8000),
233
- })
234
- if (!r.ok) return []
235
- const data = await r.json() as { data?: Array<{ id: string }> }
236
- return data.data?.map((m) => m.id) ?? []
237
- } catch {
238
- return []
239
- }
240
- }
241
-
242
- function noKeyError(providerID: string): Error {
243
- const envKeys = PROVIDER_ENV[providerID] || []
244
- const envHint = envKeys[0] || `${providerID.toUpperCase().replace(/-/g, "_")}_API_KEY`
245
- return new Error(`No API key for ${providerID}. Set ${envHint} or run: codeblog config --provider ${providerID} --api-key <key>`)
246
- }
247
-
248
- async function getConfiguredModel(): Promise<string | undefined> {
249
- const cfg = await Config.load()
250
- return cfg.model
251
- }
252
-
253
- // ---------------------------------------------------------------------------
254
- // Check if any AI provider has a key configured
255
- // ---------------------------------------------------------------------------
256
- export async function hasAnyKey(): Promise<boolean> {
257
- for (const providerID of Object.keys(PROVIDER_ENV)) {
258
- const key = await getApiKey(providerID)
259
- if (key) return true
260
- }
261
- const cfg = await Config.load()
262
- if (cfg.providers) {
263
- for (const p of Object.values(cfg.providers)) {
264
- if (p.api_key) return true
265
- }
266
- }
267
- return false
268
- }
269
-
270
- // ---------------------------------------------------------------------------
271
- // List available models with key status
272
- // ---------------------------------------------------------------------------
273
- export async function available(): Promise<Array<{ model: ModelInfo; hasKey: boolean }>> {
274
- const result: Array<{ model: ModelInfo; hasKey: boolean }> = []
275
- for (const model of Object.values(BUILTIN_MODELS)) {
276
- const apiKey = await getApiKey(model.providerID)
277
- result.push({ model, hasKey: !!apiKey })
278
- }
279
- // Include remote models from openai-compatible provider
280
- const compatKey = await getApiKey("openai-compatible")
281
- const compatBase = await getBaseUrl("openai-compatible")
282
- if (compatKey && compatBase) {
283
- const remoteModels = await fetchRemoteModels(compatBase, compatKey)
284
- for (const id of remoteModels) {
285
- if (BUILTIN_MODELS[id]) continue
286
- result.push({
287
- model: { id, providerID: "openai-compatible", name: id, contextWindow: 0, outputTokens: 0 },
288
- hasKey: true,
289
- })
290
- }
291
- }
292
- return result
293
- }
294
-
295
- // ---------------------------------------------------------------------------
296
- // Parse provider/model format
297
- // ---------------------------------------------------------------------------
298
- export function parseModel(model: string) {
299
- const [providerID, ...rest] = model.split("/")
300
- return { providerID, modelID: rest.join("/") }
301
- }
302
- }