@swarmclawai/swarmclaw 1.5.45 → 1.5.46

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -396,6 +396,16 @@ Operational docs: https://swarmclaw.ai/docs/observability
396
396
 
397
397
  ## Releases
398
398
 
399
+ ### v1.5.46 Highlights
400
+
401
+ - **Custom base URL for built-in OpenAI and Anthropic providers**: the Endpoint field in provider settings now works for the built-in OpenAI and Anthropic providers (marked as `optionalEndpoint`). Point them at a proxy, gateway, or self-hosted endpoint and the URL persists, auto-resolves on connection test, and flows through both the live chat path and the LangGraph agent path (`ChatAnthropic` now receives `anthropicApiUrl`). Existing installs with no custom URL keep using the defaults.
402
+ - **Test-model selector in provider settings**: when you hit "Test Connection", a new dropdown lets you pick a specific model (for example `gpt-4.1-mini` or `claude-haiku-4-5`) or leave it on Auto-detect. Useful for verifying a specific model is reachable on a given endpoint.
403
+ - **Auto-resolution of credentials and endpoints in the connection test**: the test route now looks up the saved credential and base URL for the provider when they are not explicitly supplied, so the provider sheet's "Test" button works without the client having to re-send the saved configuration.
404
+ - **Anthropic streaming refactor**: the streaming handler moved from Node's `https.request()` to `fetch()`. Same behavior, cleaner cancellation, and it now respects `session.apiEndpoint` as a full base URL instead of a hostname.
405
+ - **Connection test body**: Ollama and OpenAI-compatible test requests now send `max_completion_tokens` instead of the legacy `max_tokens`, matching current OpenAI conventions and working correctly with reasoning models that reject `max_tokens`.
406
+
407
+ Thanks to @Llugaes for the contribution.
408
+
399
409
  ### v1.5.45 Highlights
400
410
 
401
411
  - **SwarmVault MCP preset**: a new "SwarmVault" Quick Setup chip in the MCP server sheet pre-fills `npx -y @swarmvaultai/cli mcp` over `stdio` and prompts for the vault directory. One click registers a SwarmVault knowledge vault as an MCP server; agents pick it up via the existing per-agent MCP server selector. SwarmVault docs: https://swarmvault.ai
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@swarmclawai/swarmclaw",
3
- "version": "1.5.45",
3
+ "version": "1.5.46",
4
4
  "description": "Build and run autonomous AI agents with OpenClaw, Hermes, multiple model providers, orchestration, delegation, memory, skills, schedules, and chat connectors.",
5
5
  "main": "electron-dist/main.js",
6
6
  "license": "MIT",
@@ -31,7 +31,7 @@ export async function PUT(req: Request, { params }: { params: Promise<{ id: stri
31
31
  id,
32
32
  name: builtin.name,
33
33
  type: 'builtin',
34
- baseUrl: builtin.defaultEndpoint || '',
34
+ baseUrl: (typeof body.baseUrl === 'string' ? body.baseUrl : builtin.defaultEndpoint) || '',
35
35
  models: [...builtin.models],
36
36
  requiresApiKey: builtin.requiresApiKey,
37
37
  credentialId: null,
@@ -1,5 +1,6 @@
1
1
  import { NextResponse } from 'next/server'
2
- import { loadCredentials, decryptKey } from '@/lib/server/storage'
2
+ import { loadCredentials, decryptKey, loadProviderConfigs } from '@/lib/server/storage'
3
+ import { listCredentialIdsByProvider } from '@/lib/server/credentials/credential-service'
3
4
  import { getDeviceId, wsConnect, rpcOnConnectedGateway } from '@/lib/providers/openclaw'
4
5
  import { buildCliEnv, probeCliAuth, resolveCliBinary } from '@/lib/providers/cli-utils'
5
6
  import { OPENAI_COMPATIBLE_DEFAULTS } from '@/lib/server/provider-health'
@@ -109,7 +110,7 @@ async function checkOpenAiCompatible(
109
110
  },
110
111
  body: JSON.stringify({
111
112
  model: testModel,
112
- max_tokens: 8,
113
+ max_completion_tokens: 8,
113
114
  messages: [{ role: 'user', content: 'Reply OK' }],
114
115
  }),
115
116
  signal: AbortSignal.timeout(15_000),
@@ -126,9 +127,10 @@ async function checkOpenAiCompatible(
126
127
  }
127
128
  }
128
129
 
129
- async function checkAnthropic(apiKey: string, modelRaw: string): Promise<{ ok: boolean; message: string }> {
130
+ async function checkAnthropic(apiKey: string, endpointRaw: string, modelRaw: string): Promise<{ ok: boolean; message: string }> {
130
131
  const model = modelRaw || 'claude-sonnet-4-6'
131
- const res = await fetch('https://api.anthropic.com/v1/messages', {
132
+ const baseUrl = (endpointRaw || 'https://api.anthropic.com').replace(/\/+$/, '')
133
+ const res = await fetch(`${baseUrl}/v1/messages`, {
132
134
  method: 'POST',
133
135
  headers: {
134
136
  'x-api-key': apiKey,
@@ -221,7 +223,7 @@ async function checkOllama(params: {
221
223
  // Test the chat endpoint
222
224
  const label = runtime.useCloud ? 'Ollama Cloud' : 'Ollama'
223
225
  const chatEndpoint = `${normalizedEndpoint}/v1/chat/completions`
224
- const chatBody = JSON.stringify({ model: testModel, max_tokens: 8, messages: [{ role: 'user', content: 'Reply OK' }] })
226
+ const chatBody = JSON.stringify({ model: testModel, max_completion_tokens: 8, messages: [{ role: 'user', content: 'Reply OK' }] })
225
227
 
226
228
  const chatRes = await fetch(chatEndpoint, {
227
229
  method: 'POST',
@@ -312,7 +314,7 @@ export async function POST(req: Request) {
312
314
  const provider = clean(body.provider) as SetupProvider
313
315
  let apiKey = clean(body.apiKey)
314
316
  const credentialId = clean(body.credentialId)
315
- const endpoint = clean(body.endpoint)
317
+ let endpoint = clean(body.endpoint)
316
318
  const model = clean(body.model)
317
319
  const CLI_PROVIDERS = new Set<CliSetupProvider>(['claude-cli', 'codex-cli', 'opencode-cli', 'gemini-cli', 'copilot-cli', 'droid-cli', 'cursor-cli', 'qwen-code-cli', 'goose'])
318
320
 
@@ -329,6 +331,30 @@ export async function POST(req: Request) {
329
331
  }
330
332
  }
331
333
 
334
+ // Auto-resolve credential by provider when no explicit credentialId
335
+ if (!apiKey && !credentialId && provider) {
336
+ try {
337
+ const credIds = listCredentialIdsByProvider(provider)
338
+ if (credIds.length > 0) {
339
+ const creds = loadCredentials()
340
+ for (const cid of credIds) {
341
+ if (creds[cid]?.encryptedKey) {
342
+ try { apiKey = decryptKey(creds[cid].encryptedKey); break } catch { /* skip */ }
343
+ }
344
+ }
345
+ }
346
+ } catch { /* best effort */ }
347
+ }
348
+
349
+ // Auto-resolve endpoint from provider config when not explicitly provided
350
+ if (!endpoint && provider) {
351
+ try {
352
+ const pConfigs = loadProviderConfigs()
353
+ const pConfig = pConfigs[provider]
354
+ if (pConfig?.baseUrl) endpoint = pConfig.baseUrl
355
+ } catch { /* best effort */ }
356
+ }
357
+
332
358
  if (CLI_PROVIDERS.has(provider as CliSetupProvider)) {
333
359
  const result = checkCliProvider(provider as CliSetupProvider)
334
360
  return NextResponse.json(result)
@@ -354,7 +380,7 @@ export async function POST(req: Request) {
354
380
  }
355
381
  case 'anthropic': {
356
382
  if (!apiKey) return NextResponse.json({ ok: false, message: 'Anthropic API key is required.' })
357
- const result = await checkAnthropic(apiKey, model)
383
+ const result = await checkAnthropic(apiKey, endpoint, model)
358
384
  return NextResponse.json(result)
359
385
  }
360
386
  case 'google':
@@ -1656,7 +1656,7 @@ export function AgentSheet() {
1656
1656
  </div>
1657
1657
  )}
1658
1658
 
1659
- {currentProvider?.requiresEndpoint && (provider !== 'ollama' || ollamaMode === 'local') && (
1659
+ {(currentProvider?.requiresEndpoint || currentProvider?.optionalEndpoint) && (provider !== 'ollama' || ollamaMode === 'local') && (
1660
1660
  <div className="mb-8">
1661
1661
  <SectionLabel>{provider === 'openclaw' ? 'OpenClaw Endpoint' : provider === 'hermes' ? 'Hermes API Endpoint' : 'Endpoint'}</SectionLabel>
1662
1662
  <input type="text" value={apiEndpoint || ''} onChange={(e) => setApiEndpoint(e.target.value || null)} placeholder={currentProvider.defaultEndpoint || 'http://localhost:11434'} className={`${inputClass} font-mono text-[14px]`} />
@@ -53,6 +53,7 @@ export function ProviderSheet() {
53
53
  // Test connection state
54
54
  const [testStatus, setTestStatus] = useState<'idle' | 'testing' | 'pass' | 'fail'>('idle')
55
55
  const [testMessage, setTestMessage] = useState('')
56
+ const [testModel, setTestModel] = useState('')
56
57
 
57
58
  const [liveModels, setLiveModels] = useState<string[]>([])
58
59
  const [liveLoading, setLiveLoading] = useState(false)
@@ -85,7 +86,7 @@ export function ProviderSheet() {
85
86
  setIsEnabled(editingCustom.isEnabled)
86
87
  } else if (editingBuiltin) {
87
88
  setName(editingBuiltin.name)
88
- setBaseUrl(editingBuiltin.defaultEndpoint || '')
89
+ setBaseUrl(editingBuiltinOverride?.baseUrl || editingBuiltin.defaultEndpoint || '')
89
90
  setModels(editingBuiltin.models.join(', '))
90
91
  setRequiresApiKey(editingBuiltin.requiresApiKey)
91
92
  // Default to existing credential for this provider
@@ -113,6 +114,7 @@ export function ProviderSheet() {
113
114
  setLiveModels([])
114
115
  setLiveMessage('')
115
116
  setLiveCached(false)
117
+ setTestModel('')
116
118
  }, [editingId, credentialId, baseUrl, requiresApiKey])
117
119
 
118
120
  const handleTestConnection = async () => {
@@ -124,6 +126,7 @@ export function ProviderSheet() {
124
126
  provider: editingId || 'custom',
125
127
  credentialId,
126
128
  endpoint: baseUrl,
129
+ model: testModel || undefined,
127
130
  })
128
131
  if (result.ok) {
129
132
  setTestStatus('pass')
@@ -157,6 +160,7 @@ export function ProviderSheet() {
157
160
  id: editingId || '',
158
161
  models: modelList,
159
162
  isEnabled,
163
+ baseUrl: baseUrl.trim() || undefined,
160
164
  })
161
165
  toast.success('Built-in provider updated')
162
166
  onClose()
@@ -290,7 +294,7 @@ export function ProviderSheet() {
290
294
  </div>
291
295
 
292
296
  {/* Base URL — for custom providers and built-ins with endpoints (Ollama, OpenClaw) */}
293
- {(!isBuiltin || editingBuiltin?.requiresEndpoint) && (
297
+ {(!isBuiltin || editingBuiltin?.requiresEndpoint || editingBuiltin?.optionalEndpoint) && (
294
298
  <div className="mb-8">
295
299
  <label className="block font-display text-[12px] font-600 text-text-2 uppercase tracking-[0.08em] mb-3">
296
300
  {isBuiltin ? 'Endpoint' : 'Base URL'}
@@ -514,6 +518,27 @@ export function ProviderSheet() {
514
518
  </div>
515
519
  )}
516
520
 
521
+ {/* Test model selector */}
522
+ {showTestButton && (
523
+ <div className="mb-4">
524
+ <label className="block font-display text-[12px] font-600 text-text-2 uppercase tracking-[0.08em] mb-3">
525
+ Test Model
526
+ <span className="normal-case tracking-normal font-normal text-text-3 ml-1">(optional)</span>
527
+ </label>
528
+ <select
529
+ value={testModel}
530
+ onChange={(e) => { setTestModel(e.target.value); setTestStatus('idle'); setTestMessage('') }}
531
+ className={`${inputClass} appearance-none cursor-pointer`}
532
+ style={{ fontFamily: 'inherit' }}
533
+ >
534
+ <option value="">Auto-detect</option>
535
+ {modelList.map((m) => (
536
+ <option key={m} value={m}>{m}</option>
537
+ ))}
538
+ </select>
539
+ </div>
540
+ )}
541
+
517
542
  {/* Test connection result */}
518
543
  {isBuiltin && testStatus === 'fail' && (
519
544
  <div className="mb-4 p-3 rounded-[12px] bg-red-500/[0.08] border border-red-500/20">
@@ -25,6 +25,7 @@ interface SaveBuiltinProviderInput {
25
25
  id: string
26
26
  models: string[]
27
27
  isEnabled: boolean
28
+ baseUrl?: string
28
29
  }
29
30
 
30
31
  interface SaveCustomProviderInput {
@@ -36,6 +37,7 @@ interface CheckProviderConnectionInput {
36
37
  provider: string
37
38
  credentialId?: string | null
38
39
  endpoint?: string | null
40
+ model?: string | null
39
41
  }
40
42
 
41
43
  async function invalidateProviderQueries(queryClient: ReturnType<typeof useQueryClient>) {
@@ -80,11 +82,12 @@ export function useToggleProviderMutation() {
80
82
  export function useSaveBuiltinProviderMutation() {
81
83
  const queryClient = useQueryClient()
82
84
  return useMutation({
83
- mutationFn: async ({ id, models, isEnabled }: SaveBuiltinProviderInput) => {
85
+ mutationFn: async ({ id, models, isEnabled, baseUrl }: SaveBuiltinProviderInput) => {
84
86
  await api('PUT', `/providers/${id}/models`, { models })
85
87
  return api('PUT', `/providers/${id}`, {
86
88
  type: 'builtin',
87
89
  isEnabled,
90
+ ...(baseUrl ? { baseUrl } : {}),
88
91
  })
89
92
  },
90
93
  onSuccess: async () => {
@@ -126,11 +129,12 @@ export function useResetProviderModelsMutation() {
126
129
 
127
130
  export function useCheckProviderConnectionMutation() {
128
131
  return useMutation({
129
- mutationFn: ({ provider, credentialId, endpoint }: CheckProviderConnectionInput) =>
132
+ mutationFn: ({ provider, credentialId, endpoint, model }: CheckProviderConnectionInput) =>
130
133
  api<{ ok: boolean; message: string }>('POST', '/setup/check-provider', {
131
134
  provider,
132
135
  credentialId,
133
136
  endpoint,
137
+ model,
134
138
  }),
135
139
  })
136
140
  }
@@ -9,6 +9,7 @@ export interface AgentSelectableProvider {
9
9
  requiresApiKey: boolean
10
10
  optionalApiKey?: boolean
11
11
  requiresEndpoint: boolean
12
+ optionalEndpoint?: boolean
12
13
  defaultEndpoint?: string
13
14
  credentialId?: string | null
14
15
  type: 'builtin' | 'custom'
@@ -1,5 +1,4 @@
1
1
  import fs from 'fs'
2
- import https from 'https'
3
2
  import type { StreamChatOptions } from './index'
4
3
  import { PROVIDER_DEFAULTS, IMAGE_EXTS, TEXT_EXTS, ANTHROPIC_MAX_TOKENS, MAX_HISTORY_MESSAGES, writeSSE } from './provider-defaults'
5
4
  import { log } from '@/lib/server/logger'
@@ -45,55 +44,66 @@ export function streamAnthropicChat({ session, message, imagePath, apiKey, syste
45
44
  }
46
45
 
47
46
  const payload = JSON.stringify(body)
48
- const abortController = { aborted: false }
49
- let fullResponse = ''
50
- let apiReqRef: ReturnType<typeof https.request> | null = null
51
47
 
48
+ // Support custom base URL (e.g. proxy / gateway)
49
+ const baseUrl = (session.apiEndpoint || PROVIDER_DEFAULTS.anthropic).replace(/\/+$/, '')
50
+ const url = `${baseUrl}/v1/messages`
51
+
52
+ const abortController = new AbortController()
52
53
  if (signal) {
53
- if (signal.aborted) {
54
- abortController.aborted = true
55
- } else {
56
- signal.addEventListener('abort', () => {
57
- abortController.aborted = true
58
- apiReqRef?.destroy()
59
- }, { once: true })
60
- }
54
+ if (signal.aborted) abortController.abort()
55
+ else signal.addEventListener('abort', () => abortController.abort(), { once: true })
61
56
  }
57
+ active.set(session.id, { kill: () => abortController.abort() })
62
58
 
63
- const apiReq = https.request({
64
- hostname: PROVIDER_DEFAULTS.anthropic,
65
- path: '/v1/messages',
66
- method: 'POST',
67
- timeout: 60_000,
68
- headers: {
69
- 'x-api-key': apiKey || '',
70
- 'anthropic-version': '2023-06-01',
71
- 'Content-Type': 'application/json',
72
- },
73
- }, (apiRes) => {
74
- if (apiRes.statusCode !== 200) {
75
- let errBody = ''
76
- apiRes.on('data', (c: Buffer) => errBody += c)
77
- apiRes.on('end', () => {
78
- const msg = `Anthropic error ${apiRes.statusCode}: ${errBody.slice(0, 200)}`
79
- log.error(TAG, `[${session.id}] ${msg}`)
80
- let errMsg = `Anthropic API error (${apiRes.statusCode})`
81
- try {
82
- const parsed = JSON.parse(errBody)
83
- if (parsed.error?.message) errMsg = parsed.error.message
84
- } catch {}
85
- writeSSE(write, 'err', errMsg)
86
- active.delete(session.id)
87
- reject(new Error(msg))
88
- })
59
+ let fullResponse = ''
60
+
61
+ try {
62
+ const res = await fetch(url, {
63
+ method: 'POST',
64
+ headers: {
65
+ 'x-api-key': apiKey || '',
66
+ 'anthropic-version': '2023-06-01',
67
+ 'Content-Type': 'application/json',
68
+ },
69
+ body: payload,
70
+ signal: abortController.signal,
71
+ })
72
+
73
+ if (!res.ok) {
74
+ const errBody = await res.text().catch(() => '')
75
+ const msg = `Anthropic error ${res.status}: ${errBody.slice(0, 200)}`
76
+ log.error(TAG, `[${session.id}] ${msg}`)
77
+ let errMsg = `Anthropic API error (${res.status})`
78
+ try {
79
+ const parsed = JSON.parse(errBody)
80
+ if (parsed.error?.message) errMsg = parsed.error.message
81
+ } catch {}
82
+ writeSSE(write, 'err', errMsg)
83
+ active.delete(session.id)
84
+ reject(new Error(msg))
85
+ return
86
+ }
87
+
88
+ if (!res.body) {
89
+ const msg = `No response body from ${baseUrl}`
90
+ log.error(TAG, `[${session.id}] ${msg}`)
91
+ active.delete(session.id)
92
+ reject(new Error(msg))
89
93
  return
90
94
  }
91
95
 
96
+ const reader = res.body.getReader()
97
+ const decoder = new TextDecoder()
92
98
  let buf = ''
93
99
  let malformedChunkLogged = false
94
- apiRes.on('data', (chunk: Buffer) => {
95
- if (abortController.aborted) return
96
- buf += chunk.toString()
100
+
101
+ while (true) {
102
+ const { done, value } = await reader.read()
103
+ if (done) break
104
+ if (abortController.signal.aborted) break
105
+
106
+ buf += decoder.decode(value, { stream: true })
97
107
  const lines = buf.split('\n')
98
108
  buf = lines.pop()!
99
109
 
@@ -122,33 +132,21 @@ export function streamAnthropicChat({ session, message, imagePath, apiKey, syste
122
132
  }
123
133
  }
124
134
  }
125
- })
126
-
127
- apiRes.on('end', () => {
128
- if (onUsage && (usageInput > 0 || usageOutput > 0)) {
129
- onUsage({ inputTokens: usageInput, outputTokens: usageOutput })
130
- }
131
- active.delete(session.id)
132
- resolve(fullResponse)
133
- })
134
- })
135
-
136
- apiReqRef = apiReq
137
- active.set(session.id, { kill: () => { abortController.aborted = true; apiReq.destroy() } })
138
-
139
- apiReq.on('timeout', () => {
140
- log.error(TAG, `[${session.id}] anthropic request timed out after 60s`)
141
- apiReq.destroy(new Error('Request timed out after 60s'))
142
- })
135
+ }
143
136
 
144
- apiReq.on('error', (e) => {
145
- log.error(TAG, `[${session.id}] anthropic request error:`, e.message)
146
- writeSSE(write, 'err', e.message)
147
- active.delete(session.id)
148
- reject(e)
149
- })
137
+ if (onUsage && (usageInput > 0 || usageOutput > 0)) {
138
+ onUsage({ inputTokens: usageInput, outputTokens: usageOutput })
139
+ }
140
+ } catch (err: unknown) {
141
+ const errObj = err as { name?: string; message?: string }
142
+ if (errObj.name !== 'AbortError') {
143
+ log.error(TAG, `[${session.id}] anthropic fetch error:`, errObj.message || '')
144
+ writeSSE(write, 'err', errObj.message || 'Anthropic request failed')
145
+ }
146
+ }
150
147
 
151
- apiReq.end(payload)
148
+ active.delete(session.id)
149
+ resolve(fullResponse)
152
150
  } catch (err) { reject(err) }
153
151
  })()
154
152
  })
@@ -74,6 +74,8 @@ export const PROVIDERS: Record<string, BuiltinProviderConfig> = {
74
74
  models: ['gpt-5.4', 'gpt-5.4-mini', 'gpt-5.4-nano', 'gpt-5.3', 'o3-mini', 'gpt-4.1', 'gpt-4.1-mini'],
75
75
  requiresApiKey: true,
76
76
  requiresEndpoint: false,
77
+ optionalEndpoint: true,
78
+ defaultEndpoint: 'https://api.openai.com/v1',
77
79
  handler: { streamChat: streamOpenAiChat },
78
80
  },
79
81
  openrouter: {
@@ -107,7 +109,17 @@ export const PROVIDERS: Record<string, BuiltinProviderConfig> = {
107
109
  models: ['claude-opus-4-6', 'claude-sonnet-4-6', 'claude-haiku-4-5'],
108
110
  requiresApiKey: true,
109
111
  requiresEndpoint: false,
110
- handler: { streamChat: streamAnthropicChat },
112
+ optionalEndpoint: true,
113
+ defaultEndpoint: 'https://api.anthropic.com',
114
+ handler: {
115
+ streamChat: (opts) => {
116
+ const patchedSession = {
117
+ ...opts.session,
118
+ apiEndpoint: opts.session.apiEndpoint || 'https://api.anthropic.com',
119
+ }
120
+ return streamAnthropicChat({ ...opts, session: patchedSession })
121
+ },
122
+ },
111
123
  },
112
124
  openclaw: {
113
125
  id: 'openclaw',
@@ -518,7 +530,28 @@ function buildCustomProviderConfig(custom: CustomProviderConfig): BuiltinProvide
518
530
  }
519
531
 
520
532
  export function getProvider(id: string): BuiltinProviderConfig | null {
521
- if (PROVIDERS[id]) return PROVIDERS[id]
533
+ // Check builtin providers — inject custom baseUrl from provider config if set
534
+ const builtin = PROVIDERS[id]
535
+ if (builtin) {
536
+ const pConfigs = loadProviderConfigs()
537
+ const pConfig = pConfigs[id]
538
+ if (pConfig?.baseUrl && pConfig.baseUrl !== builtin.defaultEndpoint) {
539
+ const originalHandler = builtin.handler
540
+ return {
541
+ ...builtin,
542
+ handler: {
543
+ streamChat: (opts) => {
544
+ const patchedSession = {
545
+ ...opts.session,
546
+ apiEndpoint: opts.session.apiEndpoint || pConfig.baseUrl,
547
+ }
548
+ return originalHandler.streamChat({ ...opts, session: patchedSession })
549
+ },
550
+ },
551
+ }
552
+ }
553
+ return builtin
554
+ }
522
555
 
523
556
  // Check custom providers
524
557
  const customs = getCustomProviders()
@@ -1,7 +1,7 @@
1
1
  /** Default base URLs for built-in LLM providers */
2
2
  export const PROVIDER_DEFAULTS = {
3
3
  openai: 'https://api.openai.com/v1',
4
- anthropic: 'api.anthropic.com',
4
+ anthropic: 'https://api.anthropic.com',
5
5
  ollama: 'http://localhost:11434',
6
6
  ollamaCloud: 'https://ollama.com',
7
7
  } as const
@@ -79,6 +79,7 @@ export function buildChatModel(opts: {
79
79
  const anthropicOpts: Record<string, unknown> = {
80
80
  model: model || 'claude-sonnet-4-6',
81
81
  anthropicApiKey: resolvedApiKey || undefined,
82
+ ...(endpoint ? { anthropicApiUrl: endpoint } : {}),
82
83
  maxTokens: 8192,
83
84
  maxRetries: OPENAI_COMPAT_MODEL_MAX_RETRIES,
84
85
  }
@@ -3,6 +3,7 @@ import { getProvider } from '@/lib/providers'
3
3
  import { loadCredential } from '@/lib/server/credentials/credential-repository'
4
4
  import { listCredentialIdsByProvider, resolveCredentialSecret } from '@/lib/server/credentials/credential-service'
5
5
  import { resolveOllamaRuntimeConfig } from '@/lib/server/ollama-runtime'
6
+ import { loadProviderConfigs } from '@/lib/server/storage'
6
7
 
7
8
  function clean(value: string | null | undefined): string | null {
8
9
  if (typeof value !== 'string') return null
@@ -16,7 +17,23 @@ export function resolveProviderCredentialId(input: {
16
17
  credentialId?: string | null
17
18
  }): string | null {
18
19
  const normalizedId = clean(input.credentialId)
19
- if (!normalizedId) return null
20
+
21
+ // When no credentialId provided, auto-match by provider
22
+ if (!normalizedId) {
23
+ const provider = clean(input.provider)
24
+ if (!provider) return null
25
+ const byProvider = listCredentialIdsByProvider(provider)
26
+ .map((id) => [id, loadCredential(id)] as const)
27
+ .filter(([, cred]) => Boolean(cred))
28
+ if (byProvider.length === 1) return byProvider[0][0]
29
+ if (byProvider.length > 1) {
30
+ // Pick the most recently created credential
31
+ return [...byProvider]
32
+ .sort((a, b) => ((b[1]?.createdAt as number) || 0) - ((a[1]?.createdAt as number) || 0))[0]?.[0] || null
33
+ }
34
+ return null
35
+ }
36
+
20
37
  if (loadCredential(normalizedId)) return normalizedId
21
38
 
22
39
  const provider = clean(input.provider)
@@ -71,6 +88,15 @@ export function resolveProviderApiEndpoint(input: {
71
88
  return normalizeProviderEndpoint(provider, runtime.endpoint) || runtime.endpoint.replace(/\/+$/, '')
72
89
  }
73
90
 
91
+ // Prefer provider config's custom baseUrl over the hardcoded defaultEndpoint
92
+ const pConfigs = loadProviderConfigs()
93
+ const pConfig = pConfigs[provider]
94
+ if (pConfig?.baseUrl) {
95
+ const customNormalized = normalizeProviderEndpoint(provider, pConfig.baseUrl)
96
+ if (customNormalized) return customNormalized
97
+ return pConfig.baseUrl.replace(/\/+$/, '')
98
+ }
99
+
74
100
  const providerInfo = getProvider(provider)
75
101
  if (!providerInfo?.defaultEndpoint) return null
76
102
  return normalizeProviderEndpoint(provider, providerInfo.defaultEndpoint) || providerInfo.defaultEndpoint.replace(/\/+$/, '')
@@ -10,6 +10,8 @@ export interface ProviderInfo {
10
10
  requiresApiKey: boolean
11
11
  optionalApiKey?: boolean
12
12
  requiresEndpoint: boolean
13
+ /** When true, shows an optional Base URL field in provider settings (e.g. for proxies). */
14
+ optionalEndpoint?: boolean
13
15
  defaultEndpoint?: string
14
16
  }
15
17