codeblog-app 2.0.1 → 2.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. package/package.json +8 -71
  2. package/drizzle/0000_init.sql +0 -34
  3. package/drizzle/meta/_journal.json +0 -13
  4. package/drizzle.config.ts +0 -10
  5. package/src/ai/__tests__/chat.test.ts +0 -110
  6. package/src/ai/__tests__/provider.test.ts +0 -184
  7. package/src/ai/__tests__/tools.test.ts +0 -90
  8. package/src/ai/chat.ts +0 -169
  9. package/src/ai/configure.ts +0 -89
  10. package/src/ai/provider.ts +0 -238
  11. package/src/ai/tools.ts +0 -321
  12. package/src/auth/index.ts +0 -47
  13. package/src/auth/oauth.ts +0 -93
  14. package/src/cli/__tests__/commands.test.ts +0 -225
  15. package/src/cli/__tests__/setup.test.ts +0 -57
  16. package/src/cli/cmd/agent.ts +0 -102
  17. package/src/cli/cmd/chat.ts +0 -190
  18. package/src/cli/cmd/comment.ts +0 -70
  19. package/src/cli/cmd/config.ts +0 -153
  20. package/src/cli/cmd/feed.ts +0 -57
  21. package/src/cli/cmd/forum.ts +0 -123
  22. package/src/cli/cmd/login.ts +0 -45
  23. package/src/cli/cmd/logout.ts +0 -12
  24. package/src/cli/cmd/me.ts +0 -202
  25. package/src/cli/cmd/post.ts +0 -29
  26. package/src/cli/cmd/publish.ts +0 -70
  27. package/src/cli/cmd/scan.ts +0 -80
  28. package/src/cli/cmd/search.ts +0 -40
  29. package/src/cli/cmd/setup.ts +0 -201
  30. package/src/cli/cmd/tui.ts +0 -20
  31. package/src/cli/cmd/update.ts +0 -78
  32. package/src/cli/cmd/vote.ts +0 -50
  33. package/src/cli/cmd/whoami.ts +0 -21
  34. package/src/cli/ui.ts +0 -124
  35. package/src/config/index.ts +0 -54
  36. package/src/flag/index.ts +0 -23
  37. package/src/global/index.ts +0 -38
  38. package/src/id/index.ts +0 -20
  39. package/src/index.ts +0 -186
  40. package/src/mcp/__tests__/client.test.ts +0 -149
  41. package/src/mcp/__tests__/e2e.ts +0 -327
  42. package/src/mcp/__tests__/integration.ts +0 -148
  43. package/src/mcp/client.ts +0 -148
  44. package/src/server/index.ts +0 -48
  45. package/src/storage/chat.ts +0 -92
  46. package/src/storage/db.ts +0 -85
  47. package/src/storage/schema.sql.ts +0 -39
  48. package/src/storage/schema.ts +0 -1
  49. package/src/tui/app.tsx +0 -163
  50. package/src/tui/commands.ts +0 -177
  51. package/src/tui/context/exit.tsx +0 -15
  52. package/src/tui/context/helper.tsx +0 -25
  53. package/src/tui/context/route.tsx +0 -24
  54. package/src/tui/context/theme.tsx +0 -470
  55. package/src/tui/routes/home.tsx +0 -490
  56. package/src/tui/routes/model.tsx +0 -209
  57. package/src/tui/routes/notifications.tsx +0 -85
  58. package/src/tui/routes/post.tsx +0 -108
  59. package/src/tui/routes/search.tsx +0 -104
  60. package/src/tui/routes/setup.tsx +0 -255
  61. package/src/tui/routes/trending.tsx +0 -107
  62. package/src/util/__tests__/context.test.ts +0 -31
  63. package/src/util/__tests__/lazy.test.ts +0 -37
  64. package/src/util/context.ts +0 -23
  65. package/src/util/error.ts +0 -46
  66. package/src/util/lazy.ts +0 -18
  67. package/src/util/log.ts +0 -142
  68. package/tsconfig.json +0 -11
package/src/ai/chat.ts DELETED
@@ -1,169 +0,0 @@
1
- import { streamText, type ModelMessage } from "ai"
2
- import { AIProvider } from "./provider"
3
- import { chatTools } from "./tools"
4
- import { Log } from "../util/log"
5
-
6
- const log = Log.create({ service: "ai-chat" })
7
-
8
- const SYSTEM_PROMPT = `You are CodeBlog AI — an assistant for the CodeBlog developer forum (codeblog.ai).
9
-
10
- You help developers with everything on the platform:
11
- - Scan and analyze their local IDE coding sessions
12
- - Write and publish blog posts from coding sessions
13
- - Browse, search, read, comment, vote on forum posts
14
- - Manage bookmarks, notifications, debates, tags, trending topics
15
- - Manage agents, view dashboard, follow users
16
- - Generate weekly digests
17
-
18
- You have 20+ tools. Use them whenever the user's request matches. Chain multiple tools if needed.
19
- After a tool returns results, summarize them naturally for the user.
20
-
21
- Write casually like a dev talking to another dev. Be specific, opinionated, and genuine.
22
- Use code examples when relevant. Think Juejin / HN / Linux.do vibes — not a conference paper.`
23
-
24
- export namespace AIChat {
25
- export interface Message {
26
- role: "user" | "assistant" | "system"
27
- content: string
28
- }
29
-
30
- export interface StreamCallbacks {
31
- onToken?: (token: string) => void
32
- onFinish?: (text: string) => void
33
- onError?: (error: Error) => void
34
- onToolCall?: (name: string, args: unknown) => void
35
- onToolResult?: (name: string, result: unknown) => void
36
- }
37
-
38
- export async function stream(messages: Message[], callbacks: StreamCallbacks, modelID?: string, signal?: AbortSignal) {
39
- const model = await AIProvider.getModel(modelID)
40
- log.info("streaming", { model: modelID || AIProvider.DEFAULT_MODEL, messages: messages.length })
41
-
42
- // Build history: only user/assistant text (tool context is added per-step below)
43
- const history: ModelMessage[] = messages
44
- .filter((m) => m.role === "user" || m.role === "assistant")
45
- .map((m) => ({ role: m.role as "user" | "assistant", content: m.content }))
46
- let full = ""
47
-
48
- for (let step = 0; step < 5; step++) {
49
- if (signal?.aborted) break
50
-
51
- const result = streamText({
52
- model,
53
- system: SYSTEM_PROMPT,
54
- messages: history,
55
- tools: chatTools,
56
- maxSteps: 1,
57
- abortSignal: signal,
58
- } as any)
59
-
60
- const calls: Array<{ id: string; name: string; input: unknown; output: unknown }> = []
61
-
62
- try {
63
- log.info("starting fullStream iteration")
64
- for await (const part of (result as any).fullStream) {
65
- log.info("stream part", { type: part.type })
66
- if (signal?.aborted) break
67
- switch (part.type) {
68
- case "text-delta": {
69
- const delta = part.text ?? part.textDelta ?? ""
70
- if (delta) { full += delta; callbacks.onToken?.(delta) }
71
- break
72
- }
73
- case "tool-call": {
74
- const input = part.input ?? part.args
75
- callbacks.onToolCall?.(part.toolName, input)
76
- calls.push({ id: part.toolCallId, name: part.toolName, input, output: undefined })
77
- break
78
- }
79
- case "tool-result": {
80
- const output = part.output ?? part.result ?? {}
81
- const name = part.toolName
82
- callbacks.onToolResult?.(name, output)
83
- const match = calls.find((c: any) => c.id === part.toolCallId && c.output === undefined)
84
- if (match) match.output = output
85
- break
86
- }
87
- case "error": {
88
- const msg = part.error instanceof Error ? part.error.message : String(part.error)
89
- log.error("stream part error", { error: msg })
90
- callbacks.onError?.(part.error instanceof Error ? part.error : new Error(msg))
91
- break
92
- }
93
- }
94
- }
95
- } catch (err) {
96
- const error = err instanceof Error ? err : new Error(String(err))
97
- log.error("stream error", { error: error.message })
98
- if (callbacks.onError) callbacks.onError(error)
99
- else throw error
100
- return full
101
- }
102
-
103
- if (calls.length === 0) break
104
-
105
- // AI SDK v6 ModelMessage format
106
- history.push({
107
- role: "assistant",
108
- content: calls.map((c) => ({
109
- type: "tool-call" as const,
110
- toolCallId: c.id,
111
- toolName: c.name,
112
- input: c.input,
113
- })),
114
- } as ModelMessage)
115
-
116
- history.push({
117
- role: "tool",
118
- content: calls.map((c) => ({
119
- type: "tool-result" as const,
120
- toolCallId: c.id,
121
- toolName: c.name,
122
- output: { type: "json" as const, value: c.output ?? {} },
123
- })),
124
- } as ModelMessage)
125
-
126
- log.info("tool step done", { step, tools: calls.map((c) => c.name) })
127
- }
128
-
129
- callbacks.onFinish?.(full || "(No response)")
130
- return full
131
- }
132
-
133
- export async function generate(prompt: string, modelID?: string) {
134
- let result = ""
135
- await stream([{ role: "user", content: prompt }], { onFinish: (text) => (result = text) }, modelID)
136
- return result
137
- }
138
-
139
- export async function analyzeAndPost(sessionContent: string, modelID?: string) {
140
- const prompt = `Analyze this coding session and write a blog post about it.
141
-
142
- The post should:
143
- - Have a catchy, dev-friendly title (like HN or Juejin)
144
- - Tell a story: what you were doing, what went wrong/right, what you learned
145
- - Include relevant code snippets
146
- - Be casual and genuine, written in first person
147
- - End with key takeaways
148
-
149
- Also provide:
150
- - 3-8 relevant tags (lowercase, hyphenated)
151
- - A one-line summary/hook
152
-
153
- Session content:
154
- ${sessionContent.slice(0, 50000)}
155
-
156
- Respond in this exact JSON format:
157
- {
158
- "title": "...",
159
- "content": "... (markdown)",
160
- "tags": ["tag1", "tag2"],
161
- "summary": "..."
162
- }`
163
-
164
- const raw = await generate(prompt, modelID)
165
- const jsonMatch = raw.match(/\{[\s\S]*\}/)
166
- if (!jsonMatch) throw new Error("AI did not return valid JSON")
167
- return JSON.parse(jsonMatch[0])
168
- }
169
- }
package/src/ai/configure.ts DELETED
@@ -1,89 +0,0 @@
1
- // AI provider auto-detection and configuration
2
-
3
- function looksLikeApi(r: Response) {
4
- const ct = r.headers.get("content-type") || ""
5
- return ct.includes("json") || ct.includes("text/plain")
6
- }
7
-
8
- export async function probe(base: string, key: string): Promise<"openai" | "anthropic" | null> {
9
- const clean = base.replace(/\/+$/, "")
10
- try {
11
- const r = await fetch(`${clean}/v1/models`, {
12
- headers: { Authorization: `Bearer ${key}` },
13
- signal: AbortSignal.timeout(8000),
14
- })
15
- if (r.ok || ((r.status === 401 || r.status === 403) && looksLikeApi(r))) return "openai"
16
- } catch {}
17
- try {
18
- const r = await fetch(`${clean}/v1/messages`, {
19
- method: "POST",
20
- headers: { "x-api-key": key, "anthropic-version": "2023-06-01", "content-type": "application/json" },
21
- body: JSON.stringify({ model: "test", max_tokens: 1, messages: [] }),
22
- signal: AbortSignal.timeout(8000),
23
- })
24
- if (r.status !== 404 && looksLikeApi(r)) return "anthropic"
25
- } catch {}
26
- return null
27
- }
28
-
29
- const KEY_PREFIX_MAP: Record<string, string> = {
30
- "sk-ant-": "anthropic",
31
- "AIza": "google",
32
- "xai-": "xai",
33
- "gsk_": "groq",
34
- "sk-or-": "openrouter",
35
- "pplx-": "perplexity",
36
- }
37
-
38
- const ENV_MAP: Record<string, string> = {
39
- anthropic: "ANTHROPIC_API_KEY",
40
- openai: "OPENAI_API_KEY",
41
- google: "GOOGLE_GENERATIVE_AI_API_KEY",
42
- xai: "XAI_API_KEY",
43
- groq: "GROQ_API_KEY",
44
- openrouter: "OPENROUTER_API_KEY",
45
- perplexity: "PERPLEXITY_API_KEY",
46
- "openai-compatible": "OPENAI_COMPATIBLE_API_KEY",
47
- }
48
-
49
- export function detectProvider(key: string) {
50
- for (const [prefix, provider] of Object.entries(KEY_PREFIX_MAP)) {
51
- if (key.startsWith(prefix)) return provider
52
- }
53
- return "openai"
54
- }
55
-
56
- export async function saveProvider(url: string, key: string): Promise<{ provider: string; error?: string }> {
57
- const { Config } = await import("../config")
58
-
59
- if (url) {
60
- const detected = await probe(url, key)
61
- if (!detected) return { provider: "", error: "Could not connect. Check URL and key." }
62
-
63
- const provider = detected === "anthropic" ? "anthropic" : "openai-compatible"
64
- const envKey = detected === "anthropic" ? "ANTHROPIC_API_KEY" : "OPENAI_COMPATIBLE_API_KEY"
65
- const envBase = detected === "anthropic" ? "ANTHROPIC_BASE_URL" : "OPENAI_COMPATIBLE_BASE_URL"
66
- process.env[envKey] = key
67
- process.env[envBase] = url
68
-
69
- const cfg = await Config.load()
70
- const providers = cfg.providers || {}
71
- providers[provider] = { api_key: key, base_url: url }
72
- await Config.save({ providers })
73
- return { provider: `${detected} format` }
74
- }
75
-
76
- const provider = detectProvider(key)
77
- if (ENV_MAP[provider]) process.env[ENV_MAP[provider]] = key
78
-
79
- const cfg = await Config.load()
80
- const providers = cfg.providers || {}
81
- providers[provider] = { api_key: key }
82
- await Config.save({ providers })
83
- return { provider }
84
- }
85
-
86
- export function mask(s: string) {
87
- if (s.length <= 8) return s
88
- return s.slice(0, 4) + "\u2022".repeat(Math.min(s.length - 8, 20)) + s.slice(-4)
89
- }
package/src/ai/provider.ts DELETED
@@ -1,238 +0,0 @@
1
- import { createAnthropic } from "@ai-sdk/anthropic"
2
- import { createOpenAI } from "@ai-sdk/openai"
3
- import { createGoogleGenerativeAI } from "@ai-sdk/google"
4
- import { createOpenAICompatible } from "@ai-sdk/openai-compatible"
5
- import { type LanguageModel, type Provider as SDK } from "ai"
6
- import { Config } from "../config"
7
- import { Log } from "../util/log"
8
-
9
- const log = Log.create({ service: "ai-provider" })
10
-
11
- export namespace AIProvider {
12
- // ---------------------------------------------------------------------------
13
- // Bundled providers (4 core)
14
- // ---------------------------------------------------------------------------
15
- const BUNDLED_PROVIDERS: Record<string, (options: any) => SDK> = {
16
- "@ai-sdk/anthropic": createAnthropic as any,
17
- "@ai-sdk/openai": createOpenAI as any,
18
- "@ai-sdk/google": createGoogleGenerativeAI as any,
19
- "@ai-sdk/openai-compatible": createOpenAICompatible as any,
20
- }
21
-
22
- // ---------------------------------------------------------------------------
23
- // Provider env key mapping
24
- // ---------------------------------------------------------------------------
25
- const PROVIDER_ENV: Record<string, string[]> = {
26
- anthropic: ["ANTHROPIC_API_KEY", "ANTHROPIC_AUTH_TOKEN"],
27
- openai: ["OPENAI_API_KEY"],
28
- google: ["GOOGLE_GENERATIVE_AI_API_KEY", "GOOGLE_API_KEY"],
29
- "openai-compatible": ["OPENAI_COMPATIBLE_API_KEY"],
30
- }
31
-
32
- // ---------------------------------------------------------------------------
33
- // Provider base URL env mapping
34
- // ---------------------------------------------------------------------------
35
- const PROVIDER_BASE_URL_ENV: Record<string, string[]> = {
36
- anthropic: ["ANTHROPIC_BASE_URL"],
37
- openai: ["OPENAI_BASE_URL", "OPENAI_API_BASE"],
38
- google: ["GOOGLE_API_BASE_URL"],
39
- "openai-compatible": ["OPENAI_COMPATIBLE_BASE_URL"],
40
- }
41
-
42
- // ---------------------------------------------------------------------------
43
- // Provider → npm package mapping
44
- // ---------------------------------------------------------------------------
45
- const PROVIDER_NPM: Record<string, string> = {
46
- anthropic: "@ai-sdk/anthropic",
47
- openai: "@ai-sdk/openai",
48
- google: "@ai-sdk/google",
49
- "openai-compatible": "@ai-sdk/openai-compatible",
50
- }
51
-
52
- // ---------------------------------------------------------------------------
53
- // Model info type
54
- // ---------------------------------------------------------------------------
55
- export interface ModelInfo {
56
- id: string
57
- providerID: string
58
- name: string
59
- contextWindow: number
60
- outputTokens: number
61
- }
62
-
63
- // ---------------------------------------------------------------------------
64
- // Built-in model list
65
- // ---------------------------------------------------------------------------
66
- export const BUILTIN_MODELS: Record<string, ModelInfo> = {
67
- "claude-sonnet-4-20250514": { id: "claude-sonnet-4-20250514", providerID: "anthropic", name: "Claude Sonnet 4", contextWindow: 200000, outputTokens: 16384 },
68
- "claude-3-5-haiku-20241022": { id: "claude-3-5-haiku-20241022", providerID: "anthropic", name: "Claude 3.5 Haiku", contextWindow: 200000, outputTokens: 8192 },
69
- "gpt-4o": { id: "gpt-4o", providerID: "openai", name: "GPT-4o", contextWindow: 128000, outputTokens: 16384 },
70
- "gpt-4o-mini": { id: "gpt-4o-mini", providerID: "openai", name: "GPT-4o Mini", contextWindow: 128000, outputTokens: 16384 },
71
- "o3-mini": { id: "o3-mini", providerID: "openai", name: "o3-mini", contextWindow: 200000, outputTokens: 100000 },
72
- "gemini-2.5-flash": { id: "gemini-2.5-flash", providerID: "google", name: "Gemini 2.5 Flash", contextWindow: 1048576, outputTokens: 65536 },
73
- "gemini-2.5-pro": { id: "gemini-2.5-pro", providerID: "google", name: "Gemini 2.5 Pro", contextWindow: 1048576, outputTokens: 65536 },
74
- }
75
-
76
- export const DEFAULT_MODEL = "claude-sonnet-4-20250514"
77
-
78
- // ---------------------------------------------------------------------------
79
- // Get API key for a provider
80
- // ---------------------------------------------------------------------------
81
- export async function getApiKey(providerID: string): Promise<string | undefined> {
82
- const envKeys = PROVIDER_ENV[providerID] || []
83
- for (const key of envKeys) {
84
- if (process.env[key]) return process.env[key]
85
- }
86
- const cfg = await Config.load()
87
- return cfg.providers?.[providerID]?.api_key
88
- }
89
-
90
- // ---------------------------------------------------------------------------
91
- // Get base URL for a provider
92
- // ---------------------------------------------------------------------------
93
- export async function getBaseUrl(providerID: string): Promise<string | undefined> {
94
- const envKeys = PROVIDER_BASE_URL_ENV[providerID] || []
95
- for (const key of envKeys) {
96
- if (process.env[key]) return process.env[key]
97
- }
98
- const cfg = await Config.load()
99
- return cfg.providers?.[providerID]?.base_url
100
- }
101
-
102
- // ---------------------------------------------------------------------------
103
- // List all available providers
104
- // ---------------------------------------------------------------------------
105
- export async function listProviders(): Promise<Record<string, { name: string; models: string[]; hasKey: boolean }>> {
106
- const result: Record<string, { name: string; models: string[]; hasKey: boolean }> = {}
107
- for (const model of Object.values(BUILTIN_MODELS)) {
108
- if (!result[model.providerID]) {
109
- const key = await getApiKey(model.providerID)
110
- result[model.providerID] = { name: model.providerID, models: [], hasKey: !!key }
111
- }
112
- if (!result[model.providerID]!.models.includes(model.id)) {
113
- result[model.providerID]!.models.push(model.id)
114
- }
115
- }
116
- const compatKey = await getApiKey("openai-compatible")
117
- if (compatKey) {
118
- result["openai-compatible"] = { name: "OpenAI Compatible", models: [], hasKey: true }
119
- }
120
- return result
121
- }
122
-
123
- // ---------------------------------------------------------------------------
124
- // Get a LanguageModel instance
125
- // ---------------------------------------------------------------------------
126
- const sdkCache = new Map<string, SDK>()
127
-
128
- export async function getModel(modelID?: string): Promise<LanguageModel> {
129
- const id = modelID || (await getConfiguredModel()) || DEFAULT_MODEL
130
-
131
- const builtin = BUILTIN_MODELS[id]
132
- if (builtin) {
133
- const apiKey = await getApiKey(builtin.providerID)
134
- if (!apiKey) throw noKeyError(builtin.providerID)
135
- const base = await getBaseUrl(builtin.providerID)
136
- return getLanguageModel(builtin.providerID, id, apiKey, undefined, base)
137
- }
138
-
139
- if (id.includes("/")) {
140
- const [providerID, ...rest] = id.split("/")
141
- const mid = rest.join("/")
142
- const apiKey = await getApiKey(providerID!)
143
- if (!apiKey) throw noKeyError(providerID!)
144
- const base = await getBaseUrl(providerID!)
145
- return getLanguageModel(providerID!, mid, apiKey, undefined, base)
146
- }
147
-
148
- const cfg = await Config.load()
149
- if (cfg.providers) {
150
- for (const [providerID, p] of Object.entries(cfg.providers)) {
151
- if (!p.api_key) continue
152
- const base = p.base_url || (await getBaseUrl(providerID))
153
- if (base) {
154
- log.info("fallback: sending unknown model to provider with base_url", { provider: providerID, model: id })
155
- return getLanguageModel(providerID, id, p.api_key, undefined, base)
156
- }
157
- }
158
- }
159
-
160
- throw new Error(`Unknown model: ${id}. Run: codeblog config --list`)
161
- }
162
-
163
- function getLanguageModel(providerID: string, modelID: string, apiKey: string, npm?: string, baseURL?: string): LanguageModel {
164
- const pkg = npm || PROVIDER_NPM[providerID] || "@ai-sdk/openai-compatible"
165
- const cacheKey = `${providerID}:${pkg}:${apiKey.slice(0, 8)}`
166
-
167
- log.info("loading model", { provider: providerID, model: modelID, pkg })
168
-
169
- let sdk = sdkCache.get(cacheKey)
170
- if (!sdk) {
171
- const createFn = BUNDLED_PROVIDERS[pkg]
172
- if (!createFn) throw new Error(`No bundled provider for ${pkg}. Use openai-compatible with a base URL instead.`)
173
- const opts: Record<string, unknown> = { apiKey, name: providerID }
174
- if (baseURL) {
175
- const clean = baseURL.replace(/\/+$/, "")
176
- opts.baseURL = clean.endsWith("/v1") ? clean : `${clean}/v1`
177
- }
178
- sdk = createFn(opts)
179
- sdkCache.set(cacheKey, sdk)
180
- }
181
-
182
- if (pkg === "@ai-sdk/openai-compatible" && typeof (sdk as any).chatModel === "function") {
183
- return (sdk as any).chatModel(modelID)
184
- }
185
- if (typeof (sdk as any).languageModel === "function") {
186
- return (sdk as any).languageModel(modelID)
187
- }
188
- return (sdk as any)(modelID)
189
- }
190
-
191
- function noKeyError(providerID: string): Error {
192
- const envKeys = PROVIDER_ENV[providerID] || []
193
- const envHint = envKeys[0] || `${providerID.toUpperCase().replace(/-/g, "_")}_API_KEY`
194
- return new Error(`No API key for ${providerID}. Set ${envHint} or run: codeblog config --provider ${providerID} --api-key <key>`)
195
- }
196
-
197
- async function getConfiguredModel(): Promise<string | undefined> {
198
- const cfg = await Config.load()
199
- return cfg.model
200
- }
201
-
202
- // ---------------------------------------------------------------------------
203
- // Check if any AI provider has a key configured
204
- // ---------------------------------------------------------------------------
205
- export async function hasAnyKey(): Promise<boolean> {
206
- for (const providerID of Object.keys(PROVIDER_ENV)) {
207
- const key = await getApiKey(providerID)
208
- if (key) return true
209
- }
210
- const cfg = await Config.load()
211
- if (cfg.providers) {
212
- for (const p of Object.values(cfg.providers)) {
213
- if (p.api_key) return true
214
- }
215
- }
216
- return false
217
- }
218
-
219
- // ---------------------------------------------------------------------------
220
- // List available models with key status
221
- // ---------------------------------------------------------------------------
222
- export async function available(): Promise<Array<{ model: ModelInfo; hasKey: boolean }>> {
223
- const result: Array<{ model: ModelInfo; hasKey: boolean }> = []
224
- for (const model of Object.values(BUILTIN_MODELS)) {
225
- const apiKey = await getApiKey(model.providerID)
226
- result.push({ model, hasKey: !!apiKey })
227
- }
228
- return result
229
- }
230
-
231
- // ---------------------------------------------------------------------------
232
- // Parse provider/model format
233
- // ---------------------------------------------------------------------------
234
- export function parseModel(model: string) {
235
- const [providerID, ...rest] = model.split("/")
236
- return { providerID, modelID: rest.join("/") }
237
- }
238
- }