codeblog-app 2.2.6 → 2.3.1

Files changed (39)
  1. package/package.json +9 -7
  2. package/src/ai/__tests__/chat.test.ts +11 -2
  3. package/src/ai/__tests__/compat.test.ts +46 -0
  4. package/src/ai/__tests__/home.ai-stream.integration.test.ts +77 -0
  5. package/src/ai/__tests__/provider-registry.test.ts +61 -0
  6. package/src/ai/__tests__/provider.test.ts +58 -18
  7. package/src/ai/__tests__/stream-events.test.ts +152 -0
  8. package/src/ai/chat.ts +200 -88
  9. package/src/ai/configure.ts +13 -4
  10. package/src/ai/models.ts +26 -0
  11. package/src/ai/provider-registry.ts +150 -0
  12. package/src/ai/provider.ts +99 -137
  13. package/src/ai/stream-events.ts +64 -0
  14. package/src/ai/tools.ts +10 -6
  15. package/src/ai/types.ts +105 -0
  16. package/src/auth/index.ts +3 -1
  17. package/src/auth/oauth.ts +17 -2
  18. package/src/cli/__tests__/commands.test.ts +6 -2
  19. package/src/cli/cmd/ai.ts +10 -0
  20. package/src/cli/cmd/setup.ts +275 -5
  21. package/src/cli/ui.ts +131 -24
  22. package/src/config/index.ts +38 -1
  23. package/src/index.ts +4 -1
  24. package/src/mcp/__tests__/client.test.ts +2 -2
  25. package/src/mcp/__tests__/e2e.ts +10 -6
  26. package/src/mcp/client.ts +33 -63
  27. package/src/storage/chat.ts +3 -1
  28. package/src/tui/__tests__/input-intent.test.ts +27 -0
  29. package/src/tui/__tests__/stream-assembler.test.ts +33 -0
  30. package/src/tui/ai-stream.ts +28 -0
  31. package/src/tui/app.tsx +27 -1
  32. package/src/tui/commands.ts +41 -7
  33. package/src/tui/context/theme.tsx +2 -1
  34. package/src/tui/input-intent.ts +26 -0
  35. package/src/tui/routes/home.tsx +590 -190
  36. package/src/tui/routes/setup.tsx +20 -8
  37. package/src/tui/stream-assembler.ts +49 -0
  38. package/src/util/log.ts +3 -1
  39. package/tsconfig.json +1 -1
package/src/ai/provider.ts CHANGED
@@ -5,13 +5,13 @@ import { createOpenAICompatible } from "@ai-sdk/openai-compatible"
  import { type LanguageModel, type Provider as SDK } from "ai"
  import { Config } from "../config"
  import { Log } from "../util/log"
+ import { BUILTIN_MODELS as CORE_MODELS, DEFAULT_MODEL as CORE_DEFAULT_MODEL, type ModelInfo as CoreModelInfo } from "./models"
+ import { loadProviders, PROVIDER_BASE_URL_ENV, PROVIDER_ENV, routeModel } from "./provider-registry"
+ import { patchRequestByCompat, resolveCompat, type ModelApi, type ModelCompatConfig } from "./types"

  const log = Log.create({ service: "ai-provider" })

  export namespace AIProvider {
-   // ---------------------------------------------------------------------------
-   // Bundled providers (4 core)
-   // ---------------------------------------------------------------------------
    const BUNDLED_PROVIDERS: Record<string, (options: any) => SDK> = {
      "@ai-sdk/anthropic": createAnthropic as any,
      "@ai-sdk/openai": createOpenAI as any,
@@ -19,65 +19,17 @@ export namespace AIProvider {
      "@ai-sdk/openai-compatible": createOpenAICompatible as any,
    }

-   // ---------------------------------------------------------------------------
-   // Provider env key mapping
-   // ---------------------------------------------------------------------------
-   const PROVIDER_ENV: Record<string, string[]> = {
-     anthropic: ["ANTHROPIC_API_KEY", "ANTHROPIC_AUTH_TOKEN"],
-     openai: ["OPENAI_API_KEY"],
-     google: ["GOOGLE_GENERATIVE_AI_API_KEY", "GOOGLE_API_KEY"],
-     "openai-compatible": ["OPENAI_COMPATIBLE_API_KEY"],
-   }
-
-   // ---------------------------------------------------------------------------
-   // Provider base URL env mapping
-   // ---------------------------------------------------------------------------
-   const PROVIDER_BASE_URL_ENV: Record<string, string[]> = {
-     anthropic: ["ANTHROPIC_BASE_URL"],
-     openai: ["OPENAI_BASE_URL", "OPENAI_API_BASE"],
-     google: ["GOOGLE_API_BASE_URL"],
-     "openai-compatible": ["OPENAI_COMPATIBLE_BASE_URL"],
-   }
-
-   // ---------------------------------------------------------------------------
-   // Provider → npm package mapping
-   // ---------------------------------------------------------------------------
-   const PROVIDER_NPM: Record<string, string> = {
+   const PROVIDER_NPM: Record<ModelApi, string> = {
      anthropic: "@ai-sdk/anthropic",
      openai: "@ai-sdk/openai",
      google: "@ai-sdk/google",
      "openai-compatible": "@ai-sdk/openai-compatible",
    }

-   // ---------------------------------------------------------------------------
-   // Model info type
-   // ---------------------------------------------------------------------------
-   export interface ModelInfo {
-     id: string
-     providerID: string
-     name: string
-     contextWindow: number
-     outputTokens: number
-   }
+   export const BUILTIN_MODELS = CORE_MODELS
+   export const DEFAULT_MODEL = CORE_DEFAULT_MODEL
+   export type ModelInfo = CoreModelInfo

-   // ---------------------------------------------------------------------------
-   // Built-in model list
-   // ---------------------------------------------------------------------------
-   export const BUILTIN_MODELS: Record<string, ModelInfo> = {
-     "claude-sonnet-4-20250514": { id: "claude-sonnet-4-20250514", providerID: "anthropic", name: "Claude Sonnet 4", contextWindow: 200000, outputTokens: 16384 },
-     "claude-3-5-haiku-20241022": { id: "claude-3-5-haiku-20241022", providerID: "anthropic", name: "Claude 3.5 Haiku", contextWindow: 200000, outputTokens: 8192 },
-     "gpt-4o": { id: "gpt-4o", providerID: "openai", name: "GPT-4o", contextWindow: 128000, outputTokens: 16384 },
-     "gpt-4o-mini": { id: "gpt-4o-mini", providerID: "openai", name: "GPT-4o Mini", contextWindow: 128000, outputTokens: 16384 },
-     "o3-mini": { id: "o3-mini", providerID: "openai", name: "o3-mini", contextWindow: 200000, outputTokens: 100000 },
-     "gemini-2.5-flash": { id: "gemini-2.5-flash", providerID: "google", name: "Gemini 2.5 Flash", contextWindow: 1048576, outputTokens: 65536 },
-     "gemini-2.5-pro": { id: "gemini-2.5-pro", providerID: "google", name: "Gemini 2.5 Pro", contextWindow: 1048576, outputTokens: 65536 },
-   }
-
-   export const DEFAULT_MODEL = "claude-sonnet-4-20250514"
-
-   // ---------------------------------------------------------------------------
-   // Get API key for a provider
-   // ---------------------------------------------------------------------------
    export async function getApiKey(providerID: string): Promise<string | undefined> {
      const envKeys = PROVIDER_ENV[providerID] || []
      for (const key of envKeys) {
@@ -87,9 +39,6 @@ export namespace AIProvider {
      return cfg.providers?.[providerID]?.api_key
    }

-   // ---------------------------------------------------------------------------
-   // Get base URL for a provider
-   // ---------------------------------------------------------------------------
    export async function getBaseUrl(providerID: string): Promise<string | undefined> {
      const envKeys = PROVIDER_BASE_URL_ENV[providerID] || []
      for (const key of envKeys) {
@@ -99,9 +48,6 @@ export namespace AIProvider {
      return cfg.providers?.[providerID]?.base_url
    }

-   // ---------------------------------------------------------------------------
-   // List all available providers
-   // ---------------------------------------------------------------------------
    export async function listProviders(): Promise<Record<string, { name: string; models: string[]; hasKey: boolean }>> {
      const result: Record<string, { name: string; models: string[]; hasKey: boolean }> = {}
      for (const model of Object.values(BUILTIN_MODELS)) {
@@ -113,102 +59,136 @@ export namespace AIProvider {
          result[model.providerID]!.models.push(model.id)
        }
      }
+
      const compatKey = await getApiKey("openai-compatible")
-     if (compatKey) {
-       const compatBase = await getBaseUrl("openai-compatible")
-       const remoteModels = compatBase ? await fetchRemoteModels(compatBase, compatKey) : []
+     const compatBase = await getBaseUrl("openai-compatible")
+     if (compatKey && compatBase) {
+       const remoteModels = await fetchRemoteModels(compatBase, compatKey)
        result["openai-compatible"] = { name: "OpenAI Compatible", models: remoteModels, hasKey: true }
      }
+
+     const loaded = await loadProviders()
+     for (const provider of Object.values(loaded.providers)) {
+       if (result[provider.id]) continue
+       if (!provider.apiKey) continue
+       result[provider.id] = { name: provider.id, models: [], hasKey: true }
+     }
+
      return result
    }

-   // ---------------------------------------------------------------------------
-   // Get a LanguageModel instance
-   // ---------------------------------------------------------------------------
    const sdkCache = new Map<string, SDK>()

    export async function getModel(modelID?: string): Promise<LanguageModel> {
-     const id = modelID || (await getConfiguredModel()) || DEFAULT_MODEL
+     const useRegistry = await Config.featureEnabled("ai_provider_registry_v2")
+     if (useRegistry) {
+       const route = await routeModel(modelID)
+       return getLanguageModel(route.providerID, route.modelID, route.apiKey, undefined, route.baseURL, route.compat)
+     }
+     return getModelLegacy(modelID)
+   }
+
+   export async function resolveModelCompat(modelID?: string): Promise<ModelCompatConfig> {
+     const useRegistry = await Config.featureEnabled("ai_provider_registry_v2")
+     if (useRegistry) return (await routeModel(modelID)).compat
+     return (await resolveLegacyRoute(modelID)).compat
+   }
+
+   async function getModelLegacy(modelID?: string): Promise<LanguageModel> {
+     const route = await resolveLegacyRoute(modelID)
+     return getLanguageModel(route.providerID, route.modelID, route.apiKey, undefined, route.baseURL, route.compat)
+   }
+
+   async function resolveLegacyRoute(modelID?: string): Promise<{
+     providerID: string
+     modelID: string
+     apiKey: string
+     baseURL?: string
+     compat: ModelCompatConfig
+   }> {
+     const requested = modelID || (await getConfiguredModel()) || DEFAULT_MODEL
+     const cfg = await Config.load()

-     const builtin = BUILTIN_MODELS[id]
+     const builtin = BUILTIN_MODELS[requested]
      if (builtin) {
        const apiKey = await getApiKey(builtin.providerID)
        if (!apiKey) throw noKeyError(builtin.providerID)
-       const base = await getBaseUrl(builtin.providerID)
-       return getLanguageModel(builtin.providerID, id, apiKey, undefined, base)
+       const baseURL = await getBaseUrl(builtin.providerID)
+       return {
+         providerID: builtin.providerID,
+         modelID: requested,
+         apiKey,
+         baseURL,
+         compat: resolveCompat({ providerID: builtin.providerID, modelID: requested, providerConfig: cfg.providers?.[builtin.providerID] }),
+       }
      }

-     if (id.includes("/")) {
-       const [providerID, ...rest] = id.split("/")
-       const mid = rest.join("/")
+     if (requested.includes("/")) {
+       const [providerID, ...rest] = requested.split("/")
+       const modelID = rest.join("/")
        const apiKey = await getApiKey(providerID!)
        if (!apiKey) throw noKeyError(providerID!)
-       const base = await getBaseUrl(providerID!)
-       return getLanguageModel(providerID!, mid, apiKey, undefined, base)
+       const baseURL = await getBaseUrl(providerID!)
+       return {
+         providerID: providerID!,
+         modelID,
+         apiKey,
+         baseURL,
+         compat: resolveCompat({ providerID: providerID!, modelID, providerConfig: cfg.providers?.[providerID!] }),
+       }
      }

-     const cfg = await Config.load()
      if (cfg.providers) {
        for (const [providerID, p] of Object.entries(cfg.providers)) {
          if (!p.api_key) continue
-         const base = p.base_url || (await getBaseUrl(providerID))
-         if (base) {
-           log.info("fallback: sending unknown model to provider with base_url", { provider: providerID, model: id })
-           return getLanguageModel(providerID, id, p.api_key, undefined, base)
+         const baseURL = p.base_url || (await getBaseUrl(providerID))
+         if (!baseURL) continue
+         log.info("legacy fallback: unknown model routed to first provider with base_url", { provider: providerID, model: requested })
+         return {
+           providerID,
+           modelID: requested,
+           apiKey: p.api_key,
+           baseURL,
+           compat: resolveCompat({ providerID, modelID: requested, providerConfig: p }),
          }
        }
      }

-     throw new Error(`Unknown model: ${id}. Run: codeblog config --list`)
+     throw new Error(`Unknown model: ${requested}. Run: codeblog config --list`)
    }

-   function getLanguageModel(providerID: string, modelID: string, apiKey: string, npm?: string, baseURL?: string): LanguageModel {
-     // Auto-detect Anthropic models and use @ai-sdk/anthropic instead of openai-compatible
-     // This fixes streaming tool call argument parsing issues with openai-compatible provider
-     let pkg = npm || PROVIDER_NPM[providerID]
-
-     // Force Anthropic SDK for Claude models, even if provider is openai-compatible
-     if (modelID.startsWith("claude-") && pkg === "@ai-sdk/openai-compatible") {
+   function packageForCompat(compat: ModelCompatConfig): string {
+     let pkg = PROVIDER_NPM[compat.api]
+     if (compat.modelID.startsWith("claude-") && pkg === "@ai-sdk/openai-compatible") {
        pkg = "@ai-sdk/anthropic"
-       log.info("auto-detected Claude model, switching from openai-compatible to @ai-sdk/anthropic", { model: modelID })
+       log.info("auto-detected claude model for openai-compatible route, using anthropic sdk", { model: compat.modelID })
      }
+     return pkg
+   }

-     if (!pkg) {
-       pkg = "@ai-sdk/openai-compatible"
-     }
-
-     const cacheKey = `${providerID}:${pkg}:${apiKey.slice(0, 8)}`
-
-     log.info("loading model", { provider: providerID, model: modelID, pkg })
+   function getLanguageModel(
+     providerID: string,
+     modelID: string,
+     apiKey: string,
+     npm?: string,
+     baseURL?: string,
+     providedCompat?: ModelCompatConfig,
+   ): LanguageModel {
+     const compat = providedCompat || resolveCompat({ providerID, modelID })
+     const pkg = npm || packageForCompat(compat)
+     const cacheKey = `${providerID}:${pkg}:${compat.cacheKey}:${apiKey.slice(0, 8)}:${baseURL || ""}`

      let sdk = sdkCache.get(cacheKey)
      if (!sdk) {
        const createFn = BUNDLED_PROVIDERS[pkg]
-       if (!createFn) throw new Error(`No bundled provider for ${pkg}. Use openai-compatible with a base URL instead.`)
+       if (!createFn) throw new Error(`No bundled provider for ${pkg}.`)
        const opts: Record<string, unknown> = { apiKey, name: providerID }
        if (baseURL) {
          const clean = baseURL.replace(/\/+$/, "")
          opts.baseURL = clean.endsWith("/v1") ? clean : `${clean}/v1`
        }
-       // For openai-compatible providers, normalize request body for broader compatibility
        if (pkg === "@ai-sdk/openai-compatible") {
-         opts.transformRequestBody = (body: Record<string, any>) => {
-           // Remove parallel_tool_calls — many proxies/providers don't support it
-           delete body.parallel_tool_calls
-
-           // Ensure all tool schemas have type: "object" (required by DeepSeek/Qwen/etc.)
-           if (Array.isArray(body.tools)) {
-             for (const t of body.tools) {
-               const params = t?.function?.parameters
-               if (params && !params.type) {
-                 params.type = "object"
-                 if (!params.properties) params.properties = {}
-               }
-             }
-           }
-
-           return body
-         }
+         opts.transformRequestBody = (body: Record<string, any>) => patchRequestByCompat(compat, body)
        }
        sdk = createFn(opts)
        sdkCache.set(cacheKey, sdk)
@@ -250,33 +230,18 @@ export namespace AIProvider {
      return cfg.model
    }

-   // ---------------------------------------------------------------------------
-   // Check if any AI provider has a key configured
-   // ---------------------------------------------------------------------------
    export async function hasAnyKey(): Promise<boolean> {
-     for (const providerID of Object.keys(PROVIDER_ENV)) {
-       const key = await getApiKey(providerID)
-       if (key) return true
-     }
-     const cfg = await Config.load()
-     if (cfg.providers) {
-       for (const p of Object.values(cfg.providers)) {
-         if (p.api_key) return true
-       }
-     }
-     return false
+     const loaded = await loadProviders()
+     return Object.values(loaded.providers).some((p) => !!p.apiKey)
    }

-   // ---------------------------------------------------------------------------
-   // List available models with key status
-   // ---------------------------------------------------------------------------
    export async function available(): Promise<Array<{ model: ModelInfo; hasKey: boolean }>> {
      const result: Array<{ model: ModelInfo; hasKey: boolean }> = []
      for (const model of Object.values(BUILTIN_MODELS)) {
        const apiKey = await getApiKey(model.providerID)
        result.push({ model, hasKey: !!apiKey })
      }
-     // Include remote models from openai-compatible provider
+
      const compatKey = await getApiKey("openai-compatible")
      const compatBase = await getBaseUrl("openai-compatible")
      if (compatKey && compatBase) {
@@ -292,9 +257,6 @@ export namespace AIProvider {
      return result
    }

-   // ---------------------------------------------------------------------------
-   // Parse provider/model format
-   // ---------------------------------------------------------------------------
    export function parseModel(model: string) {
      const [providerID, ...rest] = model.split("/")
      return { providerID, modelID: rest.join("/") }
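
Note: the refactor above gates routing behind the ai_provider_registry_v2 feature flag, so getModel() consults the new registry when it is enabled and otherwise falls back to the legacy resolver. A minimal caller sketch using only the exports shown in this diff (the model IDs are illustrative):

// Sketch: resolving a model through the refactored entry points.
import { AIProvider } from "./provider"

// A bare built-in ID or the "provider/model" form both resolve through the same logic.
const model = await AIProvider.getModel("anthropic/claude-3-5-haiku-20241022")

// Compat settings can now be inspected without instantiating an SDK client.
const compat = await AIProvider.resolveModelCompat("gpt-4o-mini")
console.log(compat.compatProfile, compat.stripParallelToolCalls)
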
package/src/ai/stream-events.ts ADDED
@@ -0,0 +1,64 @@
+ export type StreamEventType =
+   | "run-start"
+   | "text-delta"
+   | "tool-start"
+   | "tool-result"
+   | "error"
+   | "run-finish"
+
+ interface StreamEventBase {
+   type: StreamEventType
+   runId: string
+   seq: number
+ }
+
+ export interface RunStartEvent extends StreamEventBase {
+   type: "run-start"
+   modelID: string
+   messageCount: number
+ }
+
+ export interface TextDeltaEvent extends StreamEventBase {
+   type: "text-delta"
+   text: string
+ }
+
+ export interface ToolStartEvent extends StreamEventBase {
+   type: "tool-start"
+   callID: string
+   name: string
+   args: unknown
+ }
+
+ export interface ToolResultEvent extends StreamEventBase {
+   type: "tool-result"
+   callID: string
+   name: string
+   result: unknown
+ }
+
+ export interface ErrorEvent extends StreamEventBase {
+   type: "error"
+   error: Error
+ }
+
+ export interface RunFinishEvent extends StreamEventBase {
+   type: "run-finish"
+   text: string
+   aborted: boolean
+ }
+
+ export type StreamEvent =
+   | RunStartEvent
+   | TextDeltaEvent
+   | ToolStartEvent
+   | ToolResultEvent
+   | ErrorEvent
+   | RunFinishEvent
+
+ export function createRunEventFactory(runId: string = crypto.randomUUID()) {
+   let seq = 0
+   const next = <T extends StreamEventType, P extends Record<string, unknown>>(type: T, payload: P) =>
+     ({ type, runId, seq: ++seq, ...payload }) as unknown as Extract<StreamEvent, { type: T }>
+   return { runId, next }
+ }
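
Note: the factory stamps every event in a run with a shared runId and a monotonically increasing seq (starting at 1), so consumers can order and de-duplicate events. A usage sketch based on the code above (model ID illustrative):

// Sketch: emitting ordered events for one run.
import { createRunEventFactory } from "./stream-events"

const run = createRunEventFactory() // runId defaults to crypto.randomUUID()
const start = run.next("run-start", { modelID: "gpt-4o-mini", messageCount: 2 }) // seq 1
const delta = run.next("text-delta", { text: "Hello" })                          // seq 2
const done = run.next("run-finish", { text: "Hello", aborted: false })           // seq 3
// start.runId === delta.runId === done.runId
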
package/src/ai/tools.ts CHANGED
@@ -1,6 +1,7 @@
  import { tool, jsonSchema } from "ai"
  import { McpBridge } from "../mcp/client"
  import { Log } from "../util/log"
+ import type { ModelCompatConfig } from "./types"

  const log = Log.create({ service: "ai-tools" })

@@ -68,14 +69,17 @@ function normalizeToolSchema(schema: Record<string, unknown>): Record<string, un
  // ---------------------------------------------------------------------------
  // Dynamic tool discovery from MCP server
  // ---------------------------------------------------------------------------
- let _cached: Record<string, any> | null = null
+ const cache = new Map<string, Record<string, any>>()

  /**
   * Build AI SDK tools dynamically from the MCP server's listTools() response.
   * Results are cached after the first successful call.
   */
- export async function getChatTools(): Promise<Record<string, any>> {
-   if (_cached) return _cached
+ export async function getChatTools(compat?: ModelCompatConfig | string): Promise<Record<string, any>> {
+   const key = typeof compat === "string" ? compat : compat?.cacheKey || "default"
+   const normalizeSchema = typeof compat === "string" ? true : (compat?.normalizeToolSchema ?? true)
+   const cached = cache.get(key)
+   if (cached) return cached

    const { tools: mcpTools } = await McpBridge.listTools()
    log.info("discovered MCP tools", { count: mcpTools.length, names: mcpTools.map((t) => t.name) })
@@ -88,7 +92,7 @@ export async function getChatTools(): Promise<Record<string, any>> {

    tools[name] = tool({
      description: t.description || name,
-     inputSchema: jsonSchema(normalizeToolSchema(rawSchema)),
+     inputSchema: jsonSchema(normalizeSchema ? normalizeToolSchema(rawSchema) : rawSchema),
      execute: async (args: any) => {
        log.info("execute tool", { name, args })
        const result = await mcp(name, clean(args))
@@ -104,11 +108,11 @@ export async function getChatTools(): Promise<Record<string, any>> {
    })
  }

- _cached = tools
+ cache.set(key, tools)
  return tools
}

/** Clear the cached tools (useful for testing or reconnection). */
export function clearChatToolsCache(): void {
- _cached = null
+ cache.clear()
}
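
Note: getChatTools now keys its cache by compat profile rather than caching one global tool set, so the same MCP tools can be materialized separately per compatibility mode. A usage sketch (the compat value comes from resolveModelCompat, shown in provider.ts above; model ID illustrative):

// Sketch: per-compat tool caching.
import { AIProvider } from "./provider"
import { getChatTools, clearChatToolsCache } from "./tools"

const compat = await AIProvider.resolveModelCompat("gpt-4o-mini")
const tools = await getChatTools(compat)          // cached under compat.cacheKey
const same = await getChatTools(compat.cacheKey)  // the string form addresses the same entry
clearChatToolsCache()                             // drops every entry, e.g. after an MCP reconnect
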
package/src/ai/types.ts ADDED
@@ -0,0 +1,105 @@
+ import type { Config } from "../config"
+
+ export type ModelApi = "anthropic" | "openai" | "google" | "openai-compatible"
+ export type CompatProfile = "anthropic" | "openai" | "openai-compatible" | "google"
+
+ export interface ModelCompatConfig {
+   providerID: string
+   modelID: string
+   api: ModelApi
+   compatProfile: CompatProfile
+   cacheKey: string
+   stripParallelToolCalls: boolean
+   normalizeToolSchema: boolean
+ }
+
+ export const COMPAT_PRESETS: Record<CompatProfile, Omit<ModelCompatConfig, "providerID" | "modelID" | "cacheKey">> = {
+   anthropic: {
+     api: "anthropic",
+     compatProfile: "anthropic",
+     stripParallelToolCalls: false,
+     normalizeToolSchema: false,
+   },
+   openai: {
+     api: "openai",
+     compatProfile: "openai",
+     stripParallelToolCalls: false,
+     normalizeToolSchema: false,
+   },
+   google: {
+     api: "google",
+     compatProfile: "google",
+     stripParallelToolCalls: false,
+     normalizeToolSchema: false,
+   },
+   "openai-compatible": {
+     api: "openai-compatible",
+     compatProfile: "openai-compatible",
+     stripParallelToolCalls: true,
+     normalizeToolSchema: true,
+   },
+ }
+
+ function isOfficialOpenAIBase(baseURL?: string): boolean {
+   if (!baseURL) return false
+   try {
+     return new URL(baseURL).hostname === "api.openai.com"
+   } catch {
+     return false
+   }
+ }
+
+ function resolveApiFromProvider(providerID: string, cfg?: Config.ProviderConfig): ModelApi {
+   if (providerID === "openai" && cfg?.base_url && !isOfficialOpenAIBase(cfg.base_url)) {
+     return "openai-compatible"
+   }
+   if (cfg?.api) return cfg.api
+   if (providerID === "anthropic") return "anthropic"
+   if (providerID === "openai") return "openai"
+   if (providerID === "google") return "google"
+   if (providerID === "openai-compatible") return "openai-compatible"
+   return "openai-compatible"
+ }
+
+ function defaultCompatForApi(api: ModelApi): CompatProfile {
+   if (api === "anthropic") return "anthropic"
+   if (api === "openai") return "openai"
+   if (api === "google") return "google"
+   return "openai-compatible"
+ }
+
+ export function resolveCompat(args: {
+   providerID: string
+   modelID: string
+   providerConfig?: Config.ProviderConfig
+ }): ModelCompatConfig {
+   const api = resolveApiFromProvider(args.providerID, args.providerConfig)
+   const configured = args.providerConfig?.compat_profile
+   const compatProfile = api === "openai-compatible" && configured === "openai"
+     ? "openai-compatible"
+     : configured || defaultCompatForApi(api)
+   const preset = COMPAT_PRESETS[compatProfile]
+   return {
+     ...preset,
+     providerID: args.providerID,
+     modelID: args.modelID,
+     cacheKey: `${api}:${compatProfile}`,
+   }
+ }
+
+ export function patchRequestByCompat(compat: ModelCompatConfig, body: Record<string, any>): Record<string, any> {
+   if (compat.stripParallelToolCalls) {
+     delete body.parallel_tool_calls
+   }
+
+   if (compat.normalizeToolSchema && Array.isArray(body.tools)) {
+     for (const t of body.tools) {
+       const params = t?.function?.parameters
+       if (!params || typeof params !== "object") continue
+       if (!params.type) params.type = "object"
+       if (params.type === "object" && !params.properties) params.properties = {}
+     }
+   }
+
+   return body
+ }
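
Note: only the openai-compatible preset opts into request patching. The sketch below traces resolveCompat and patchRequestByCompat for a third-party endpoint, using only the functions defined above (provider and model IDs are illustrative):

// Sketch: what the openai-compatible preset does to an outgoing request body.
import { patchRequestByCompat, resolveCompat } from "./types"

const compat = resolveCompat({ providerID: "openai-compatible", modelID: "deepseek-chat" })
// compat.stripParallelToolCalls === true, compat.normalizeToolSchema === true

const body = patchRequestByCompat(compat, {
  parallel_tool_calls: true,
  tools: [{ function: { parameters: {} } }],
})
// body.parallel_tool_calls is deleted; the empty schema becomes { type: "object", properties: {} }
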
package/src/auth/index.ts CHANGED
@@ -1,4 +1,5 @@
  import path from "path"
+ import { chmod, writeFile } from "fs/promises"
  import { Global } from "../global"
  import z from "zod"

@@ -25,7 +26,8 @@ export namespace Auth {
    }

    export async function set(token: Token) {
-     await Bun.write(Bun.file(filepath, { mode: 0o600 }), JSON.stringify(token, null, 2))
+     await writeFile(filepath, JSON.stringify(token, null, 2))
+     await chmod(filepath, 0o600).catch(() => {})
    }

    export async function remove() {
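
Note: replacing Bun.write with writeFile plus a best-effort chmod makes token storage portable, but it leaves a brief window where a newly created file has default permissions. If that window matters, the mode can instead be set at creation time, since Node's fs/promises writeFile accepts a mode option:

// Alternative sketch: restrict permissions at creation instead of after the write.
// The mode option applies only when the file is created; for a pre-existing file
// the trailing chmod is still what tightens permissions.
await writeFile(filepath, JSON.stringify(token, null, 2), { mode: 0o600 })
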
package/src/auth/oauth.ts CHANGED
@@ -91,9 +91,24 @@ p{font-size:15px;color:#6a737c;line-height:1.5}
      },
    })

-   Server.start(wrapped, port)
+   const picks = [port, port + 1, port + 2, 0]
+   let started: ReturnType<typeof Server.start> | null = null

-   const authUrl = `${base}/auth/cli?port=${port}`
+   for (const p of picks) {
+     if (started) break
+     try {
+       started = Server.start(wrapped, p)
+     } catch (err) {
+       log.warn("failed to start callback server on port", { port: p, error: String(err) })
+     }
+   }
+
+   if (!started) {
+     reject(new Error(`Failed to start callback server on ports ${picks.slice(0, 3).join(", ")}`))
+     return
+   }
+
+   const authUrl = `${base}/auth/cli?port=${started.port ?? port}`
    log.info("opening browser", { url: authUrl })
    if (options?.onUrl) options.onUrl(authUrl)
    open(authUrl)
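
Note: the retry loop assumes Server.start throws synchronously when a port is busy and returns a handle exposing the port actually bound (the diff reads started.port). Under that assumed contract, the final fallback of 0 asks the OS for any free ephemeral port:

// Sketch under the stated assumptions about Server.start.
const started = Server.start(wrapped, 0)       // port 0: OS picks a free port
const authUrl = `${base}/auth/cli?port=${started.port}` // URL uses the bound port, not the requested one
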
package/src/cli/__tests__/commands.test.ts CHANGED
@@ -1,4 +1,4 @@
- import { describe, test, expect, mock, beforeEach } from "bun:test"
+ import { describe, test, expect, mock, beforeEach, afterEach } from "bun:test"

  // ---------------------------------------------------------------------------
  // Mock dependencies shared by all CLI commands
@@ -53,7 +53,11 @@ describe("CLI Commands", () => {
    mockCallToolJSON.mockClear()
    mockError.mockClear()
    mockInfo.mockClear()
-   process.exitCode = undefined as any
+   process.exitCode = 0
+ })
+
+ afterEach(() => {
+   process.exitCode = 0
  })

  // ---------------------------------------------------------------------------
package/src/cli/cmd/ai.ts ADDED
@@ -0,0 +1,10 @@
+ import type { CommandModule } from "yargs"
+ import { runAISetupWizard } from "./setup"
+
+ export const AISetupCommand: CommandModule = {
+   command: "ai setup",
+   describe: "Run full AI onboarding wizard",
+   handler: async () => {
+     await runAISetupWizard("command")
+   },
+ }
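
Note: the new module is a standard yargs CommandModule, so wiring it up is a one-liner wherever the CLI registers its other commands. A hypothetical registration sketch (the CLI entry-point file is not shown in this diff):

// Hypothetical sketch: registering the new command alongside the existing ones.
import yargs from "yargs"
import { hideBin } from "yargs/helpers"
import { AISetupCommand } from "./cmd/ai"

yargs(hideBin(process.argv)).command(AISetupCommand).parse()
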