@khanglvm/ai-router 1.0.2 → 1.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -32,11 +32,10 @@ ai-router setup
32
32
  ```
33
33
 
34
34
  This command:
35
- - asks for provider name/base URL/API key
35
+ - asks for provider name + API key, then endpoint candidates + model list
36
36
  - requires a provider id (slug/camelCase, e.g. `openrouter` or `myProvider`)
37
- - probes the provider with live requests
38
- - auto-detects supported format(s): `openai` and/or `claude`
39
- - tries to discover model list
37
+ - probes endpoint(s) x model(s) with live requests
38
+ - auto-detects supported format(s) per endpoint and model support per format
40
39
  - saves config to `~/.ai-router.json`
41
40
 
42
41
  ### Non-interactive setup
@@ -46,12 +45,14 @@ npx ai-router-proxy setup \
46
45
  --operation=upsert-provider \
47
46
  --provider-id=openrouter \
48
47
  --name=OpenRouter \
49
- --base-url=https://openrouter.ai/api/v1 \
50
48
  --api-key=sk-... \
49
+ --endpoints=https://openrouter.ai/api/v1 \
51
50
  --models=gpt-4o,claude-3-5-sonnet-latest \
52
51
  --headers='{"User-Agent":"Mozilla/5.0"}'
53
52
  ```
54
53
 
54
+ If `--headers` is omitted, setup saves a default `User-Agent` header to reduce provider compatibility issues. To remove it explicitly, set `--headers='{\"User-Agent\":null}'`.
55
+
55
56
  ### Start local proxy (default command)
56
57
 
57
58
  ```bash
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@khanglvm/ai-router",
3
- "version": "1.0.2",
3
+ "version": "1.0.3",
4
4
  "description": "Generic AI Router Proxy (local + Cloudflare Worker)",
5
5
  "type": "module",
6
6
  "main": "src/index.js",
@@ -14,11 +14,12 @@ import {
14
14
  removeProvider,
15
15
  writeConfigFile
16
16
  } from "../node/config-store.js";
17
- import { probeProvider } from "../node/provider-probe.js";
17
+ import { probeProvider, probeProviderEndpointMatrix } from "../node/provider-probe.js";
18
18
  import { runStartCommand } from "../node/start-command.js";
19
19
  import { installStartup, startupStatus, uninstallStartup } from "../node/startup-manager.js";
20
20
  import {
21
21
  configHasProvider,
22
+ DEFAULT_PROVIDER_USER_AGENT,
22
23
  maskSecret,
23
24
  PROVIDER_ID_PATTERN,
24
25
  sanitizeConfigForDisplay
@@ -68,6 +69,29 @@ function parseJsonObjectArg(value, fieldName) {
68
69
  }
69
70
  }
70
71
 
72
+ function hasHeaderName(headers, name) {
73
+ const lower = String(name).toLowerCase();
74
+ return Object.keys(headers || {}).some((key) => key.toLowerCase() === lower);
75
+ }
76
+
77
+ function applyDefaultHeaders(headers, { force = true } = {}) {
78
+ const source = headers && typeof headers === "object" && !Array.isArray(headers) ? headers : {};
79
+ const next = { ...source };
80
+ if (force && !hasHeaderName(next, "user-agent")) {
81
+ next["User-Agent"] = DEFAULT_PROVIDER_USER_AGENT;
82
+ }
83
+ return next;
84
+ }
85
+
86
+ function providerEndpointsFromConfig(provider) {
87
+ const values = [
88
+ provider?.baseUrlByFormat?.openai,
89
+ provider?.baseUrlByFormat?.claude,
90
+ provider?.baseUrl
91
+ ];
92
+ return parseModelListInput(values.filter(Boolean).join(","));
93
+ }
94
+
71
95
  async function promptProviderFormat(context, {
72
96
  message = "Primary provider format",
73
97
  initialFormat = ""
@@ -215,6 +239,11 @@ async function resolveUpsertInput(context, existingConfig) {
215
239
  const baseProviderId = argProviderId || selectedExisting?.id || "";
216
240
  const baseName = String(readArg(args, ["name"], selectedExisting?.name || "") || "");
217
241
  const baseUrl = String(readArg(args, ["base-url", "baseUrl"], selectedExisting?.baseUrl || "") || "");
242
+ const baseEndpoints = parseModelListInput(readArg(
243
+ args,
244
+ ["endpoints"],
245
+ providerEndpointsFromConfig(selectedExisting).join(",")
246
+ ));
218
247
  const baseOpenAIBaseUrl = String(readArg(
219
248
  args,
220
249
  ["openai-base-url", "openaiBaseUrl"],
@@ -229,11 +258,15 @@ async function resolveUpsertInput(context, existingConfig) {
229
258
  const baseModels = String(readArg(args, ["models"], (selectedExisting?.models || []).map((m) => m.id).join(",")) || "");
230
259
  const baseFormat = String(readArg(args, ["format"], selectedExisting?.format || "") || "");
231
260
  const baseFormats = parseModelListInput(readArg(args, ["formats"], (selectedExisting?.formats || []).join(",")));
261
+ const hasHeadersArg = args.headers !== undefined;
232
262
  const baseHeaders = readArg(args, ["headers"], selectedExisting?.headers ? JSON.stringify(selectedExisting.headers) : "");
233
263
  const shouldProbe = !toBoolean(readArg(args, ["skip-probe", "skipProbe"], false), false);
234
264
  const setMasterKeyFlag = toBoolean(readArg(args, ["set-master-key", "setMasterKey"], false), false);
235
265
  const providedMasterKey = String(readArg(args, ["master-key", "masterKey"], "") || "");
236
- const parsedHeaders = parseJsonObjectArg(baseHeaders, "--headers");
266
+ const parsedHeaders = applyDefaultHeaders(
267
+ parseJsonObjectArg(baseHeaders, "--headers"),
268
+ { force: !hasHeadersArg }
269
+ );
237
270
 
238
271
  if (!canPrompt()) {
239
272
  return {
@@ -241,6 +274,7 @@ async function resolveUpsertInput(context, existingConfig) {
241
274
  providerId: baseProviderId || slugifyId(baseName || "provider"),
242
275
  name: baseName,
243
276
  baseUrl,
277
+ endpoints: baseEndpoints,
244
278
  openaiBaseUrl: baseOpenAIBaseUrl,
245
279
  claudeBaseUrl: baseClaudeBaseUrl,
246
280
  apiKey: baseApiKey || selectedExisting?.apiKey || "",
@@ -266,22 +300,6 @@ async function resolveUpsertInput(context, existingConfig) {
266
300
  initialValue: slugifyId(name)
267
301
  });
268
302
 
269
- const url = baseUrl || await context.prompts.text({
270
- message: "Provider base URL (shared fallback, optional)",
271
- required: false,
272
- placeholder: "https://api.example.com/v1"
273
- });
274
-
275
- const openaiBaseUrl = String(await context.prompts.text({
276
- message: "OpenAI base URL override (optional)",
277
- initialValue: baseOpenAIBaseUrl
278
- }) || "");
279
-
280
- const claudeBaseUrl = String(await context.prompts.text({
281
- message: "Anthropic/Claude base URL override (optional)",
282
- initialValue: baseClaudeBaseUrl
283
- }) || "");
284
-
285
303
  const askReplaceKey = selectedExisting?.apiKey ? await context.prompts.confirm({
286
304
  message: "Replace saved API key?",
287
305
  initialValue: false
@@ -292,14 +310,30 @@ async function resolveUpsertInput(context, existingConfig) {
292
310
  required: true
293
311
  });
294
312
 
313
+ const endpointsInput = await context.prompts.text({
314
+ message: "Provider endpoints (comma separated)",
315
+ required: true,
316
+ initialValue: baseEndpoints.join(",")
317
+ });
318
+ const endpoints = parseModelListInput(endpointsInput);
319
+
320
+ const modelsInput = await context.prompts.text({
321
+ message: "Provider models (comma separated)",
322
+ required: true,
323
+ initialValue: baseModels
324
+ });
325
+
295
326
  const headersInput = await context.prompts.text({
296
- message: "Custom headers JSON (optional)",
297
- initialValue: String(baseHeaders || "")
327
+ message: "Custom headers JSON (optional; default User-Agent included)",
328
+ initialValue: JSON.stringify(applyDefaultHeaders(
329
+ parseJsonObjectArg(baseHeaders, "Custom headers"),
330
+ { force: true }
331
+ ))
298
332
  });
299
333
  const interactiveHeaders = parseJsonObjectArg(headersInput, "Custom headers");
300
334
 
301
335
  const probe = await context.prompts.confirm({
302
- message: "Auto-detect format and models via live probe?",
336
+ message: "Auto-detect endpoint formats and model support via live probe?",
303
337
  initialValue: shouldProbe
304
338
  });
305
339
 
@@ -311,11 +345,6 @@ async function resolveUpsertInput(context, existingConfig) {
311
345
  });
312
346
  }
313
347
 
314
- const modelsInput = await context.prompts.text({
315
- message: "Model list (comma separated, leave empty to use probe discovery)",
316
- initialValue: baseModels
317
- });
318
-
319
348
  const setMasterKey = setMasterKeyFlag || await context.prompts.confirm({
320
349
  message: "Set/update worker master key?",
321
350
  initialValue: false
@@ -332,9 +361,10 @@ async function resolveUpsertInput(context, existingConfig) {
332
361
  configPath,
333
362
  providerId,
334
363
  name,
335
- baseUrl: url,
336
- openaiBaseUrl,
337
- claudeBaseUrl,
364
+ baseUrl,
365
+ endpoints,
366
+ openaiBaseUrl: baseOpenAIBaseUrl,
367
+ claudeBaseUrl: baseClaudeBaseUrl,
338
368
  apiKey,
339
369
  models: parseModelListInput(modelsInput),
340
370
  format: probe ? "" : manualFormat,
@@ -351,13 +381,19 @@ async function doUpsertProvider(context) {
351
381
  const existingConfig = await readConfigFile(configPath);
352
382
  const input = await resolveUpsertInput(context, existingConfig);
353
383
 
354
- const hasAnyEndpoint = Boolean(input.baseUrl || input.openaiBaseUrl || input.claudeBaseUrl);
384
+ const endpointCandidates = parseModelListInput([
385
+ ...(input.endpoints || []),
386
+ input.openaiBaseUrl,
387
+ input.claudeBaseUrl,
388
+ input.baseUrl
389
+ ].filter(Boolean).join(","));
390
+ const hasAnyEndpoint = endpointCandidates.length > 0;
355
391
  if (!input.name || !hasAnyEndpoint || !input.apiKey) {
356
392
  return {
357
393
  ok: false,
358
394
  mode: context.mode,
359
395
  exitCode: EXIT_VALIDATION,
360
- errorMessage: "Missing provider inputs: provider-id, name, api-key, and at least one endpoint (base-url/openai-base-url/claude-base-url)."
396
+ errorMessage: "Missing provider inputs: provider-id, name, api-key, and at least one endpoint."
361
397
  };
362
398
  }
363
399
 
@@ -372,21 +408,59 @@ async function doUpsertProvider(context) {
372
408
 
373
409
  let probe = null;
374
410
  let selectedFormat = String(input.format || "").trim();
411
+ let effectiveBaseUrl = String(input.baseUrl || "").trim();
412
+ let effectiveOpenAIBaseUrl = String(input.openaiBaseUrl || "").trim();
413
+ let effectiveClaudeBaseUrl = String(input.claudeBaseUrl || "").trim();
414
+ let effectiveModels = [...(input.models || [])];
415
+
416
+ if (input.shouldProbe && endpointCandidates.length > 0 && effectiveModels.length === 0) {
417
+ return {
418
+ ok: false,
419
+ mode: context.mode,
420
+ exitCode: EXIT_VALIDATION,
421
+ errorMessage: "Model list is required for endpoint-model probe. Provide --models=modelA,modelB."
422
+ };
423
+ }
424
+
375
425
  if (input.shouldProbe) {
376
- const probeBaseUrlByFormat = {};
377
- if (input.openaiBaseUrl) probeBaseUrlByFormat.openai = input.openaiBaseUrl;
378
- if (input.claudeBaseUrl) probeBaseUrlByFormat.claude = input.claudeBaseUrl;
379
-
380
- probe = await probeProvider({
381
- baseUrl: input.baseUrl,
382
- baseUrlByFormat: Object.keys(probeBaseUrlByFormat).length > 0 ? probeBaseUrlByFormat : undefined,
383
- apiKey: input.apiKey,
384
- headers: input.headers
385
- });
426
+ const canRunMatrixProbe = endpointCandidates.length > 0 && effectiveModels.length > 0;
427
+ if (canRunMatrixProbe) {
428
+ probe = await probeProviderEndpointMatrix({
429
+ endpoints: endpointCandidates,
430
+ models: effectiveModels,
431
+ apiKey: input.apiKey,
432
+ headers: input.headers
433
+ });
434
+ effectiveOpenAIBaseUrl = probe.baseUrlByFormat?.openai || effectiveOpenAIBaseUrl;
435
+ effectiveClaudeBaseUrl = probe.baseUrlByFormat?.claude || effectiveClaudeBaseUrl;
436
+ effectiveBaseUrl =
437
+ (probe.preferredFormat && probe.baseUrlByFormat?.[probe.preferredFormat]) ||
438
+ effectiveOpenAIBaseUrl ||
439
+ effectiveClaudeBaseUrl ||
440
+ endpointCandidates[0] ||
441
+ effectiveBaseUrl;
442
+ if ((probe.models || []).length > 0) {
443
+ effectiveModels = effectiveModels.length > 0
444
+ ? effectiveModels.filter((model) => (probe.models || []).includes(model))
445
+ : [...probe.models];
446
+ }
447
+ } else {
448
+ const probeBaseUrlByFormat = {};
449
+ if (effectiveOpenAIBaseUrl) probeBaseUrlByFormat.openai = effectiveOpenAIBaseUrl;
450
+ if (effectiveClaudeBaseUrl) probeBaseUrlByFormat.claude = effectiveClaudeBaseUrl;
451
+
452
+ probe = await probeProvider({
453
+ baseUrl: effectiveBaseUrl || endpointCandidates[0],
454
+ baseUrlByFormat: Object.keys(probeBaseUrlByFormat).length > 0 ? probeBaseUrlByFormat : undefined,
455
+ apiKey: input.apiKey,
456
+ headers: input.headers
457
+ });
458
+ }
459
+
386
460
  if (!probe.ok) {
387
461
  if (canPrompt()) {
388
462
  const continueWithoutProbe = await context.prompts.confirm({
389
- message: "Probe failed to confirm a working format. Save provider anyway?",
463
+ message: "Probe failed to confirm working endpoint/model support. Save provider anyway?",
390
464
  initialValue: false
391
465
  });
392
466
  if (!continueWithoutProbe) {
@@ -407,7 +481,7 @@ async function doUpsertProvider(context) {
407
481
  ok: false,
408
482
  mode: context.mode,
409
483
  exitCode: EXIT_FAILURE,
410
- errorMessage: "Provider probe failed. Use --skip-probe=true to force save."
484
+ errorMessage: "Provider probe failed. Provide valid endpoints/models or use --skip-probe=true to force save."
411
485
  };
412
486
  }
413
487
  } else {
@@ -415,16 +489,34 @@ async function doUpsertProvider(context) {
415
489
  }
416
490
  }
417
491
 
492
+ if (!input.shouldProbe) {
493
+ if (!effectiveBaseUrl && endpointCandidates.length > 0) {
494
+ effectiveBaseUrl = endpointCandidates[0];
495
+ }
496
+ if (!effectiveOpenAIBaseUrl && !effectiveClaudeBaseUrl && endpointCandidates.length === 1 && selectedFormat) {
497
+ if (selectedFormat === "openai") effectiveOpenAIBaseUrl = endpointCandidates[0];
498
+ if (selectedFormat === "claude") effectiveClaudeBaseUrl = endpointCandidates[0];
499
+ }
500
+ if (!effectiveOpenAIBaseUrl && !effectiveClaudeBaseUrl && endpointCandidates.length > 1) {
501
+ return {
502
+ ok: false,
503
+ mode: context.mode,
504
+ exitCode: EXIT_VALIDATION,
505
+ errorMessage: "Multiple endpoints require probe mode (recommended) or explicit --openai-base-url/--claude-base-url."
506
+ };
507
+ }
508
+ }
509
+
418
510
  const effectiveFormat = selectedFormat || (input.shouldProbe ? "" : "openai");
419
511
 
420
512
  const provider = buildProviderFromSetupInput({
421
513
  providerId: input.providerId,
422
514
  name: input.name,
423
- baseUrl: input.baseUrl,
424
- openaiBaseUrl: input.openaiBaseUrl,
425
- claudeBaseUrl: input.claudeBaseUrl,
515
+ baseUrl: effectiveBaseUrl,
516
+ openaiBaseUrl: effectiveOpenAIBaseUrl,
517
+ claudeBaseUrl: effectiveClaudeBaseUrl,
426
518
  apiKey: input.apiKey,
427
- models: input.models,
519
+ models: effectiveModels,
428
520
  format: effectiveFormat,
429
521
  formats: input.formats,
430
522
  headers: input.headers,
@@ -456,8 +548,10 @@ async function doUpsertProvider(context) {
456
548
  probe
457
549
  ? `probe preferred=${probe.preferredFormat || "(none)"} working=${(probe.workingFormats || []).join(",") || "(none)"}`
458
550
  : "probe=skipped",
551
+ provider.baseUrlByFormat?.openai ? `openaiBaseUrl=${provider.baseUrlByFormat.openai}` : "",
552
+ provider.baseUrlByFormat?.claude ? `claudeBaseUrl=${provider.baseUrlByFormat.claude}` : "",
459
553
  `formats=${(provider.formats || []).join(", ") || provider.format || "unknown"}`,
460
- `models=${provider.models.map((m) => m.id).join(", ")}`,
554
+ `models=${provider.models.map((m) => `${m.id}${m.formats?.length ? `[${m.formats.join("|")}]` : ""}`).join(", ")}`,
461
555
  `masterKey=${nextConfig.masterKey ? maskSecret(nextConfig.masterKey) : "(not set)"}`
462
556
  ].join("\n")
463
557
  };
@@ -1076,6 +1170,7 @@ const routerModule = {
1076
1170
  "config",
1077
1171
  "provider-id",
1078
1172
  "name",
1173
+ "endpoints",
1079
1174
  "base-url",
1080
1175
  "openai-base-url",
1081
1176
  "claude-base-url",
@@ -1102,6 +1197,7 @@ const routerModule = {
1102
1197
  { name: "operation", required: false, description: "Setup operation (optional; prompts if omitted).", example: "--operation=upsert-provider" },
1103
1198
  { name: "provider-id", required: false, description: "Provider id (slug/camelCase).", example: "--provider-id=openrouter" },
1104
1199
  { name: "name", required: false, description: "Provider display name.", example: "--name=OpenRouter" },
1200
+ { name: "endpoints", required: false, description: "Comma-separated provider endpoint candidates for auto-probe.", example: "--endpoints=https://ramclouds.me,https://ramclouds.me/v1" },
1105
1201
  { name: "base-url", required: false, description: "Provider base URL.", example: "--base-url=https://openrouter.ai/api/v1" },
1106
1202
  { name: "openai-base-url", required: false, description: "OpenAI endpoint base URL (format-specific override).", example: "--openai-base-url=https://ramclouds.me/v1" },
1107
1203
  { name: "claude-base-url", required: false, description: "Anthropic endpoint base URL (format-specific override).", example: "--claude-base-url=https://ramclouds.me" },
@@ -1109,8 +1205,8 @@ const routerModule = {
1109
1205
  { name: "models", required: false, description: "Comma-separated model list.", example: "--models=gpt-4o,claude-3-5-sonnet-latest" },
1110
1206
  { name: "model", required: false, description: "Single model id (used by remove-model).", example: "--model=gpt-4o" },
1111
1207
  { name: "format", required: false, description: "Manual format if probe is skipped.", example: "--format=openai" },
1112
- { name: "headers", required: false, description: "Custom provider headers as JSON object.", example: "--headers={\"User-Agent\":\"Mozilla/5.0\"}" },
1113
- { name: "skip-probe", required: false, description: "Skip live provider probe.", example: "--skip-probe=true" },
1208
+ { name: "headers", required: false, description: "Custom provider headers as JSON object (default User-Agent applied when omitted).", example: "--headers={\"User-Agent\":\"Mozilla/5.0\"}" },
1209
+ { name: "skip-probe", required: false, description: "Skip live endpoint/model probe.", example: "--skip-probe=true" },
1114
1210
  { name: "master-key", required: false, description: "Worker auth token.", example: "--master-key=my-token" },
1115
1211
  { name: "watch-binary", required: false, description: "For startup-install: detect ai-router upgrades and auto-relaunch under OS startup.", example: "--watch-binary=true" },
1116
1212
  { name: "require-auth", required: false, description: "Require masterKey auth for local start/startup-install.", example: "--require-auth=true" },
@@ -1118,7 +1214,7 @@ const routerModule = {
1118
1214
  ],
1119
1215
  examples: [
1120
1216
  "ai-router setup",
1121
- "ai-router setup --operation=upsert-provider --provider-id=openrouter --name=OpenRouter --base-url=https://openrouter.ai/api/v1 --api-key=sk-...",
1217
+ "ai-router setup --operation=upsert-provider --provider-id=ramclouds --name=RamClouds --api-key=sk-... --endpoints=https://ramclouds.me,https://ramclouds.me/v1 --models=claude-opus-4-6-thinking,gpt-5.3-codex",
1122
1218
  "ai-router setup --operation=remove-model --provider-id=openrouter --model=gpt-4o",
1123
1219
  "ai-router setup --operation=startup-install"
1124
1220
  ],
@@ -41,14 +41,53 @@ export function parseModelListInput(raw) {
41
41
  }
42
42
 
43
43
  function normalizeModelArray(models) {
44
- return dedupe(models).map((id) => ({ id }));
44
+ const rows = Array.isArray(models) ? models : dedupe(models).map((id) => ({ id }));
45
+ return rows
46
+ .map((entry) => {
47
+ if (typeof entry === "string") return { id: entry };
48
+ if (!entry || typeof entry !== "object") return null;
49
+ const id = String(entry.id || entry.name || "").trim();
50
+ if (!id) return null;
51
+ const formats = dedupe(entry.formats || entry.format || []).filter((value) => value === "openai" || value === "claude");
52
+ return {
53
+ id,
54
+ ...(formats.length > 0 ? { formats } : {})
55
+ };
56
+ })
57
+ .filter(Boolean);
58
+ }
59
+
60
+ function buildModelsWithSupport(modelIds, modelSupport = {}) {
61
+ return normalizeModelArray(modelIds.map((id) => ({
62
+ id,
63
+ formats: modelSupport[id] || []
64
+ })));
65
+ }
66
+
67
+ function summarizeEndpointMatrix(endpointMatrix) {
68
+ if (!Array.isArray(endpointMatrix)) return undefined;
69
+ return endpointMatrix.map((row) => ({
70
+ endpoint: row.endpoint,
71
+ supportedFormats: row.supportedFormats || [],
72
+ workingFormats: row.workingFormats || [],
73
+ modelsByFormat: row.modelsByFormat || {},
74
+ authByFormat: row.authByFormat || {}
75
+ }));
45
76
  }
46
77
 
47
78
  export function buildProviderFromSetupInput(input) {
48
79
  const providerId = input.providerId || input.id || input.name;
49
80
  const baseUrlByFormat = normalizeBaseUrlByFormatInput(input);
50
- const explicitModels = normalizeModelArray(parseModelListInput(input.models));
51
- const probeModels = normalizeModelArray(input.probe?.models || []);
81
+ const explicitModelIds = parseModelListInput(input.models);
82
+ const probeModelSupport = input.probe?.modelSupport && typeof input.probe.modelSupport === "object"
83
+ ? input.probe.modelSupport
84
+ : {};
85
+ const explicitModels = explicitModelIds.length > 0
86
+ ? buildModelsWithSupport(explicitModelIds, probeModelSupport)
87
+ : [];
88
+ const probeModels = input.probe?.models?.length
89
+ ? buildModelsWithSupport(input.probe.models, probeModelSupport)
90
+ : [];
52
91
  const mergedModels = explicitModels.length > 0 ? explicitModels : probeModels;
53
92
  const endpointFormats = baseUrlByFormat ? Object.keys(baseUrlByFormat) : [];
54
93
 
@@ -86,7 +125,10 @@ export function buildProviderFromSetupInput(input) {
86
125
  at: new Date().toISOString(),
87
126
  formats: input.probe.formats || [],
88
127
  workingFormats: input.probe.workingFormats || [],
89
- models: input.probe.models || []
128
+ models: input.probe.models || [],
129
+ modelSupport: input.probe.modelSupport || undefined,
130
+ endpointMatrix: summarizeEndpointMatrix(input.probe.endpointMatrix),
131
+ warnings: input.probe.warnings || undefined
90
132
  }
91
133
  : undefined
92
134
  }]
@@ -171,6 +171,148 @@ function extractModelIds(result) {
171
171
  return [...new Set(ids)];
172
172
  }
173
173
 
174
+ function dedupeStrings(values) {
175
+ return [...new Set((values || []).filter(Boolean).map((value) => String(value).trim()).filter(Boolean))];
176
+ }
177
+
178
+ function orderAuthVariants(authVariants, preferredAuth) {
179
+ if (!preferredAuth || !Array.isArray(authVariants) || authVariants.length <= 1) return authVariants;
180
+ const normalized = String(preferredAuth).trim().toLowerCase();
181
+ const preferred = authVariants.find((item) => item.type === normalized);
182
+ if (!preferred) return authVariants;
183
+ return [preferred, ...authVariants.filter((item) => item !== preferred)];
184
+ }
185
+
186
+ function getResultMessage(result) {
187
+ return String(getErrorMessage(result.json, result.text) || "").trim();
188
+ }
189
+
190
+ function truncateMessage(value, max = 220) {
191
+ const text = String(value || "").trim();
192
+ if (!text) return "";
193
+ if (text.length <= max) return text;
194
+ return `${text.slice(0, max - 3)}...`;
195
+ }
196
+
197
+ function isUnsupportedModelMessage(message) {
198
+ const text = String(message || "").toLowerCase();
199
+ if (!text) return false;
200
+ const patterns = [
201
+ /model .*not found/,
202
+ /unknown model/,
203
+ /unsupported model/,
204
+ /invalid model/,
205
+ /no such model/,
206
+ /model .*does not exist/,
207
+ /model .*not available/,
208
+ /unrecognized model/
209
+ ];
210
+ return patterns.some((pattern) => pattern.test(text));
211
+ }
212
+
213
+ function looksExpectedFormat(format, result) {
214
+ if (format === FORMATS.CLAUDE) return looksClaude(result);
215
+ return looksOpenAI(result);
216
+ }
217
+
218
+ function buildProbeRequest(format, modelId) {
219
+ if (format === FORMATS.CLAUDE) {
220
+ return {
221
+ model: modelId,
222
+ max_tokens: 1,
223
+ stream: false,
224
+ messages: [{ role: "user", content: "ping" }]
225
+ };
226
+ }
227
+
228
+ return {
229
+ model: modelId,
230
+ messages: [{ role: "user", content: "ping" }],
231
+ max_tokens: 1,
232
+ stream: false
233
+ };
234
+ }
235
+
236
+ function makeProbeHeaders(format, extraHeaders, authHeaders = {}) {
237
+ const headers = {
238
+ "Content-Type": "application/json",
239
+ ...extraHeaders,
240
+ ...authHeaders
241
+ };
242
+ if (format === FORMATS.CLAUDE) {
243
+ if (!headers["anthropic-version"] && !headers["Anthropic-Version"]) {
244
+ headers["anthropic-version"] = "2023-06-01";
245
+ }
246
+ }
247
+ return headers;
248
+ }
249
+
250
+ function modelLooksSupported(format, result) {
251
+ if (result.ok) return true;
252
+ if (!looksExpectedFormat(format, result)) return false;
253
+ if (!authLooksValid(result)) return false;
254
+
255
+ const message = getResultMessage(result);
256
+ if (isUnsupportedModelMessage(message)) return false;
257
+ return true;
258
+ }
259
+
260
+ async function probeModelForFormat({
261
+ baseUrl,
262
+ format,
263
+ apiKey,
264
+ modelId,
265
+ timeoutMs,
266
+ extraHeaders,
267
+ preferredAuthType
268
+ }) {
269
+ const url = resolveProviderUrl(makeProviderShell(baseUrl), format);
270
+ const authVariants = orderAuthVariants(makeAuthVariants(format, apiKey), preferredAuthType);
271
+
272
+ for (const variant of authVariants) {
273
+ const headers = makeProbeHeaders(format, extraHeaders, variant.headers);
274
+ const result = await safeFetchJson(url, {
275
+ method: "POST",
276
+ headers,
277
+ body: JSON.stringify(buildProbeRequest(format, modelId))
278
+ }, timeoutMs);
279
+
280
+ if (modelLooksSupported(format, result)) {
281
+ return {
282
+ supported: true,
283
+ authType: variant.type,
284
+ status: result.status,
285
+ message: result.ok ? "ok" : truncateMessage(getResultMessage(result)),
286
+ error: result.error || null
287
+ };
288
+ }
289
+
290
+ if (!authLooksValid(result)) {
291
+ continue;
292
+ }
293
+
294
+ // Endpoint/format looks valid and auth seems valid, model is likely not available here.
295
+ const msg = getResultMessage(result);
296
+ if (isUnsupportedModelMessage(msg)) {
297
+ return {
298
+ supported: false,
299
+ authType: variant.type,
300
+ status: result.status,
301
+ message: truncateMessage(msg || "Model is not supported on this endpoint."),
302
+ error: result.error || null
303
+ };
304
+ }
305
+ }
306
+
307
+ return {
308
+ supported: false,
309
+ authType: null,
310
+ status: 0,
311
+ message: "Could not validate model support for this endpoint/format.",
312
+ error: null
313
+ };
314
+ }
315
+
174
316
  async function probeOpenAI(baseUrl, apiKey, timeoutMs, extraHeaders = {}) {
175
317
  const authVariants = makeAuthVariants(FORMATS.OPENAI, apiKey);
176
318
  const modelsUrl = resolveModelsUrl(baseUrl, FORMATS.OPENAI);
@@ -341,3 +483,181 @@ export async function probeProvider(options) {
341
483
  }
342
484
  };
343
485
  }
486
+
487
+ function normalizeEndpointList(rawEndpoints, fallbackBaseUrl = "") {
488
+ const values = [];
489
+ if (Array.isArray(rawEndpoints)) {
490
+ values.push(...rawEndpoints);
491
+ } else if (typeof rawEndpoints === "string") {
492
+ values.push(...rawEndpoints.split(/[,\n]/g));
493
+ }
494
+ if (fallbackBaseUrl) values.push(fallbackBaseUrl);
495
+ return dedupeStrings(values);
496
+ }
497
+
498
+ function pickBestEndpointForFormat(endpointRows, format) {
499
+ const endpointPreferenceScore = (endpoint) => {
500
+ const clean = String(endpoint || "").trim().replace(/\/+$/, "");
501
+ const looksVersioned = /\/v\d+(?:\.\d+)?$/i.test(clean);
502
+ if (format === FORMATS.OPENAI) return looksVersioned ? 1 : 0;
503
+ if (format === FORMATS.CLAUDE) return looksVersioned ? 0 : 1;
504
+ return 0;
505
+ };
506
+
507
+ const candidates = endpointRows
508
+ .filter((row) => (row.workingFormats || []).includes(format))
509
+ .map((row) => ({
510
+ row,
511
+ score: (row.modelsByFormat?.[format] || []).length,
512
+ pref: endpointPreferenceScore(row.endpoint)
513
+ }))
514
+ .sort((a, b) => {
515
+ if (b.score !== a.score) return b.score - a.score;
516
+ if (b.pref !== a.pref) return b.pref - a.pref;
517
+ return 0;
518
+ });
519
+ return candidates[0]?.row || null;
520
+ }
521
+
522
+ export async function probeProviderEndpointMatrix(options) {
523
+ const apiKey = String(options?.apiKey || "").trim();
524
+ const timeoutMs = Number.isFinite(options?.timeoutMs) ? Number(options.timeoutMs) : DEFAULT_TIMEOUT_MS;
525
+ const extraHeaders = options?.headers && typeof options.headers === "object" && !Array.isArray(options.headers)
526
+ ? options.headers
527
+ : {};
528
+ const endpoints = normalizeEndpointList(options?.endpoints, options?.baseUrl);
529
+ const models = dedupeStrings(options?.models || []);
530
+
531
+ if (!apiKey) throw new Error("Provider apiKey is required for probing.");
532
+ if (endpoints.length === 0) throw new Error("At least one endpoint is required for probing.");
533
+ if (models.length === 0) throw new Error("At least one model is required for endpoint-model probing.");
534
+
535
+ const endpointRows = [];
536
+ const modelFormatsMap = {};
537
+ const warnings = [];
538
+
539
+ for (const endpoint of endpoints) {
540
+ const endpointProbe = await probeProvider({
541
+ baseUrl: endpoint,
542
+ apiKey,
543
+ timeoutMs,
544
+ headers: extraHeaders
545
+ });
546
+ const rowAuthByFormat = { ...(endpointProbe.authByFormat || {}) };
547
+ const initialWorkingFormats = endpointProbe.workingFormats || [];
548
+ const initialSupportedFormats = endpointProbe.formats || [];
549
+ const formatsToTest = dedupeStrings([
550
+ FORMATS.OPENAI,
551
+ FORMATS.CLAUDE,
552
+ ...initialWorkingFormats,
553
+ ...initialSupportedFormats
554
+ ]).filter((value) => value === FORMATS.OPENAI || value === FORMATS.CLAUDE);
555
+ const modelsByFormat = {};
556
+ const modelChecks = [];
557
+
558
+ if (formatsToTest.length === 0) {
559
+ warnings.push(`No supported format detected for endpoint ${endpoint}.`);
560
+ }
561
+
562
+ for (const format of formatsToTest) {
563
+ const workingModels = [];
564
+ modelsByFormat[format] = workingModels;
565
+ const preferredAuthType = endpointProbe.authByFormat?.[format]?.type;
566
+
567
+ for (const modelId of models) {
568
+ const check = await probeModelForFormat({
569
+ baseUrl: endpoint,
570
+ format,
571
+ apiKey,
572
+ modelId,
573
+ timeoutMs,
574
+ extraHeaders,
575
+ preferredAuthType
576
+ });
577
+ modelChecks.push({
578
+ endpoint,
579
+ format,
580
+ model: modelId,
581
+ supported: check.supported,
582
+ status: check.status,
583
+ authType: check.authType,
584
+ message: check.message,
585
+ error: check.error
586
+ });
587
+
588
+ if (!check.supported) continue;
589
+ workingModels.push(modelId);
590
+ if (!rowAuthByFormat[format] && check.authType) {
591
+ rowAuthByFormat[format] = { type: check.authType === "x-api-key" ? "x-api-key" : "bearer" };
592
+ }
593
+ if (!modelFormatsMap[modelId]) modelFormatsMap[modelId] = new Set();
594
+ modelFormatsMap[modelId].add(format);
595
+ }
596
+ }
597
+
598
+ const inferredWorkingFormats = dedupeStrings(formatsToTest.filter((format) => (modelsByFormat[format] || []).length > 0));
599
+ const inferredSupportedFormats = dedupeStrings([
600
+ ...initialSupportedFormats,
601
+ ...inferredWorkingFormats
602
+ ]);
603
+
604
+ endpointRows.push({
605
+ endpoint,
606
+ supportedFormats: inferredSupportedFormats,
607
+ workingFormats: inferredWorkingFormats,
608
+ preferredFormat: endpointProbe.preferredFormat,
609
+ authByFormat: rowAuthByFormat,
610
+ modelsByFormat,
611
+ modelChecks,
612
+ details: endpointProbe.details
613
+ });
614
+ }
615
+
616
+ const openaiEndpoint = pickBestEndpointForFormat(endpointRows, FORMATS.OPENAI);
617
+ const claudeEndpoint = pickBestEndpointForFormat(endpointRows, FORMATS.CLAUDE);
618
+
619
+ const baseUrlByFormat = {};
620
+ if (openaiEndpoint) baseUrlByFormat[FORMATS.OPENAI] = openaiEndpoint.endpoint;
621
+ if (claudeEndpoint) baseUrlByFormat[FORMATS.CLAUDE] = claudeEndpoint.endpoint;
622
+
623
+ const authByFormat = {};
624
+ if (openaiEndpoint?.authByFormat?.[FORMATS.OPENAI]) {
625
+ authByFormat[FORMATS.OPENAI] = openaiEndpoint.authByFormat[FORMATS.OPENAI];
626
+ }
627
+ if (claudeEndpoint?.authByFormat?.[FORMATS.CLAUDE]) {
628
+ authByFormat[FORMATS.CLAUDE] = claudeEndpoint.authByFormat[FORMATS.CLAUDE];
629
+ }
630
+
631
+ const workingFormats = Object.keys(baseUrlByFormat);
632
+ const formats = dedupeStrings(endpointRows.flatMap((row) => row.supportedFormats || []));
633
+ const modelSupport = Object.fromEntries(
634
+ Object.entries(modelFormatsMap).map(([model, formatsSet]) => [model, [...formatsSet]])
635
+ );
636
+ const supportedModels = dedupeStrings(Object.keys(modelSupport));
637
+ const preferredFormat =
638
+ (workingFormats.includes(FORMATS.CLAUDE) && FORMATS.CLAUDE) ||
639
+ (workingFormats.includes(FORMATS.OPENAI) && FORMATS.OPENAI) ||
640
+ null;
641
+
642
+ if (workingFormats.length === 0) {
643
+ warnings.push("No working endpoint format detected with provided API key.");
644
+ }
645
+ if (supportedModels.length === 0) {
646
+ warnings.push("No provided model was confirmed as working on the detected endpoints.");
647
+ }
648
+
649
+ return {
650
+ ok: workingFormats.length > 0 && supportedModels.length > 0,
651
+ endpoints,
652
+ formats,
653
+ workingFormats,
654
+ preferredFormat,
655
+ baseUrlByFormat,
656
+ authByFormat,
657
+ auth: preferredFormat ? authByFormat[preferredFormat] || null : null,
658
+ models: supportedModels,
659
+ modelSupport,
660
+ endpointMatrix: endpointRows,
661
+ warnings
662
+ };
663
+ }
@@ -7,6 +7,7 @@ import { FORMATS } from "../translator/index.js";
7
7
 
8
8
  export const CONFIG_VERSION = 1;
9
9
  export const PROVIDER_ID_PATTERN = /^[a-z][a-zA-Z0-9-]*$/;
10
+ export const DEFAULT_PROVIDER_USER_AGENT = "ai-router (+https://github.com/khanglvm/ai-router)";
10
11
 
11
12
  const DEFAULT_ANTHROPIC_VERSION = "2023-06-01";
12
13
  let runtimeEnvCache = null;
@@ -61,6 +62,8 @@ function normalizeModelEntry(model) {
61
62
  return {
62
63
  id,
63
64
  aliases: dedupeStrings(model.aliases || model.alias || []),
65
+ formats: dedupeStrings(model.formats || model.format || [])
66
+ .filter((value) => value === FORMATS.OPENAI || value === FORMATS.CLAUDE),
64
67
  enabled: model.enabled !== false,
65
68
  contextWindow: Number.isFinite(model.contextWindow) ? Number(model.contextWindow) : undefined,
66
69
  cost: model.cost,
@@ -263,13 +266,52 @@ function pickProviderAuth(provider, targetFormat) {
263
266
  return { type: "bearer" };
264
267
  }
265
268
 
269
/**
 * Case-insensitive membership test for a header name in a header map.
 *
 * @param {Object<string, string>|null|undefined} headers - Header name/value map (may be absent).
 * @param {string} name - Header name to look for, in any casing.
 * @returns {boolean} True when some key of `headers` equals `name` ignoring case.
 */
function hasHeaderName(headers, name) {
  const wanted = String(name).toLowerCase();
  for (const key of Object.keys(headers || {})) {
    if (key.toLowerCase() === wanted) return true;
  }
  return false;
}
273
+
274
/**
 * Sanitizes a user-supplied custom header map from provider config.
 *
 * Values of `undefined`/`null`/`false` act as removal markers and are dropped;
 * an empty string is likewise dropped. When the removed/emptied header is
 * `User-Agent` (any casing), that opt-out is recorded so the caller can skip
 * injecting the default User-Agent. All surviving values are stringified.
 *
 * @param {*} rawHeaders - Arbitrary value taken from provider config `headers`.
 * @returns {{headers: Object<string, string>, userAgentExplicitlyDisabled: boolean}}
 *   Cleaned header map plus the User-Agent opt-out flag.
 */
function normalizeCustomHeaders(rawHeaders) {
  const headers = {};
  let userAgentExplicitlyDisabled = false;

  // Anything that is not a plain object (arrays included) yields no headers.
  const isPlainObject =
    rawHeaders !== null &&
    typeof rawHeaders === "object" &&
    !Array.isArray(rawHeaders);
  if (!isPlainObject) {
    return { headers, userAgentExplicitlyDisabled };
  }

  for (const [name, value] of Object.entries(rawHeaders)) {
    const isUserAgent = name.toLowerCase() === "user-agent";

    // Explicit removal markers: null / undefined / false.
    if (value === undefined || value === null || value === false) {
      if (isUserAgent) userAgentExplicitlyDisabled = true;
      continue;
    }

    const text = String(value);
    if (text === "") {
      // Empty string also counts as an opt-out for User-Agent only.
      if (isUserAgent) userAgentExplicitlyDisabled = true;
      continue;
    }

    headers[name] = text;
  }

  return { headers, userAgentExplicitlyDisabled };
}
302
+
266
303
  export function buildProviderHeaders(provider, env = undefined, targetFormat = undefined) {
267
304
  const format = targetFormat || resolveProviderFormat(provider);
305
+ const { headers: customHeaders, userAgentExplicitlyDisabled } = normalizeCustomHeaders(provider?.headers);
268
306
  const headers = {
269
307
  "Content-Type": "application/json",
270
- ...(provider?.headers || {})
308
+ ...customHeaders
271
309
  };
272
310
 
311
+ if (!userAgentExplicitlyDisabled && !hasHeaderName(headers, "user-agent")) {
312
+ headers["User-Agent"] = DEFAULT_PROVIDER_USER_AGENT;
313
+ }
314
+
273
315
  const apiKey = resolveProviderApiKey(provider, env);
274
316
  const auth = pickProviderAuth(provider, format);
275
317
 
@@ -313,7 +355,32 @@ export function sanitizeConfigForDisplay(config) {
313
355
  }
314
356
 
315
357
  function buildTargetCandidate(provider, model, sourceFormat) {
316
- const targetFormat = resolveProviderFormat(provider, sourceFormat);
358
+ const providerFormats = dedupeStrings([...(provider?.formats || []), provider?.format])
359
+ .filter((value) => value === FORMATS.OPENAI || value === FORMATS.CLAUDE);
360
+ const modelFormats = dedupeStrings([...(model?.formats || []), model?.format])
361
+ .filter((value) => value === FORMATS.OPENAI || value === FORMATS.CLAUDE);
362
+ const supportedFormats = modelFormats.length > 0
363
+ ? providerFormats.filter((fmt) => modelFormats.includes(fmt))
364
+ : providerFormats;
365
+
366
+ let targetFormat = sourceFormat && supportedFormats.includes(sourceFormat)
367
+ ? sourceFormat
368
+ : undefined;
369
+
370
+ if (!targetFormat && supportedFormats.length > 0) {
371
+ if (sourceFormat === FORMATS.CLAUDE && supportedFormats.includes(FORMATS.CLAUDE)) {
372
+ targetFormat = FORMATS.CLAUDE;
373
+ } else if (sourceFormat === FORMATS.OPENAI && supportedFormats.includes(FORMATS.OPENAI)) {
374
+ targetFormat = FORMATS.OPENAI;
375
+ } else {
376
+ targetFormat = supportedFormats[0];
377
+ }
378
+ }
379
+
380
+ if (!targetFormat) {
381
+ targetFormat = resolveProviderFormat(provider, sourceFormat);
382
+ }
383
+
317
384
  return {
318
385
  providerId: provider.id,
319
386
  providerName: provider.name,
@@ -375,6 +442,20 @@ export function resolveRequestModel(config, requestedModel, sourceFormat = FORMA
375
442
  };
376
443
  }
377
444
 
445
+ const providerFormats = dedupeStrings([...(provider.formats || []), provider.format])
446
+ .filter((value) => value === FORMATS.OPENAI || value === FORMATS.CLAUDE);
447
+ const modelFormats = dedupeStrings([...(model.formats || []), model.format])
448
+ .filter((value) => value === FORMATS.OPENAI || value === FORMATS.CLAUDE);
449
+ if (modelFormats.length > 0 && !providerFormats.some((fmt) => modelFormats.includes(fmt))) {
450
+ return {
451
+ requestedModel: normalizedRequested,
452
+ resolvedModel: null,
453
+ primary: null,
454
+ fallbacks: [],
455
+ error: `Model '${modelName}' is configured for unsupported endpoint formats under provider '${providerId}'.`
456
+ };
457
+ }
458
+
378
459
  const primary = buildTargetCandidate(provider, model, sourceFormat);
379
460
  return {
380
461
  requestedModel: normalizedRequested,
@@ -401,12 +482,13 @@ export function listConfiguredModels(config, { endpointFormat } = {}) {
401
482
  owned_by: provider.id,
402
483
  provider_id: provider.id,
403
484
  provider_name: provider.name,
404
- formats: provider.formats || [],
485
+ formats: (model.formats && model.formats.length > 0) ? model.formats : (provider.formats || []),
405
486
  endpoint_format_supported: endpointFormat
406
- ? (provider.formats || []).includes(endpointFormat)
487
+ ? ((model.formats && model.formats.length > 0) ? model.formats.includes(endpointFormat) : (provider.formats || []).includes(endpointFormat))
407
488
  : undefined,
408
489
  context_window: model.contextWindow,
409
- cost: model.cost
490
+ cost: model.cost,
491
+ model_formats: model.formats || []
410
492
  });
411
493
  }
412
494
  }