omegon 0.6.3 → 0.6.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69) hide show
  1. package/README.md +12 -10
  2. package/bin/omegon.mjs +40 -0
  3. package/bin/pi.mjs +5 -26
  4. package/extensions/00-secrets/index.ts +146 -39
  5. package/extensions/01-auth/auth.ts +1 -1
  6. package/extensions/01-auth/index.ts +3 -3
  7. package/extensions/auto-compact.ts +1 -1
  8. package/extensions/bootstrap/deps.ts +42 -0
  9. package/extensions/bootstrap/index.ts +326 -110
  10. package/extensions/chronos/index.ts +1 -1
  11. package/extensions/cleave/dispatcher.ts +6 -6
  12. package/extensions/cleave/index.ts +6 -6
  13. package/extensions/cleave/planner.ts +1 -1
  14. package/extensions/cleave/worktree.ts +1 -1
  15. package/extensions/core-renderers.ts +24 -84
  16. package/extensions/dashboard/footer.ts +184 -40
  17. package/extensions/dashboard/git.ts +2 -2
  18. package/extensions/dashboard/index.ts +4 -4
  19. package/extensions/dashboard/overlay-data.ts +5 -5
  20. package/extensions/dashboard/overlay.ts +5 -5
  21. package/extensions/dashboard/render-utils.ts +1 -1
  22. package/extensions/dashboard/types.ts +15 -0
  23. package/extensions/defaults.ts +4 -12
  24. package/extensions/design-tree/dashboard-state.ts +6 -6
  25. package/extensions/design-tree/design-card.ts +3 -3
  26. package/extensions/design-tree/index.ts +64 -44
  27. package/extensions/design-tree/types.ts +4 -2
  28. package/extensions/distill.ts +1 -1
  29. package/extensions/effort/index.ts +137 -10
  30. package/extensions/lib/model-routing.ts +304 -32
  31. package/extensions/lib/operator-fallback.ts +1 -1
  32. package/extensions/lib/operator-profile.ts +1 -1
  33. package/extensions/lib/provider-env.ts +163 -0
  34. package/extensions/{sci-ui.ts → lib/sci-ui.ts} +119 -2
  35. package/extensions/{shared-state.ts → lib/shared-state.ts} +13 -9
  36. package/extensions/lib/slash-command-bridge.ts +1 -1
  37. package/extensions/{types.d.ts → lib/types.d.ts} +3 -3
  38. package/extensions/local-inference/index.ts +1 -1
  39. package/extensions/mcp-bridge/index.ts +1 -1
  40. package/extensions/model-budget.ts +10 -10
  41. package/extensions/offline-driver.ts +11 -4
  42. package/extensions/openspec/archive-gate.ts +1 -1
  43. package/extensions/openspec/branch-cleanup.ts +1 -1
  44. package/extensions/openspec/dashboard-state.ts +3 -3
  45. package/extensions/openspec/index.ts +5 -5
  46. package/extensions/project-memory/factstore.ts +5 -11
  47. package/extensions/project-memory/index.ts +48 -34
  48. package/extensions/project-memory/package.json +1 -1
  49. package/extensions/project-memory/sci-renderers.ts +1 -1
  50. package/extensions/render/index.ts +1 -1
  51. package/extensions/session-log.ts +1 -1
  52. package/extensions/spinner-verbs.ts +1 -1
  53. package/extensions/style.ts +1 -1
  54. package/extensions/terminal-title.ts +3 -3
  55. package/extensions/tool-profile/index.ts +1 -1
  56. package/extensions/vault/index.ts +1 -1
  57. package/extensions/version-check.ts +13 -9
  58. package/extensions/view/index.ts +4 -4
  59. package/extensions/web-search/index.ts +5 -2
  60. package/extensions/web-ui/index.ts +1 -1
  61. package/extensions/web-ui/state.ts +1 -1
  62. package/package.json +8 -7
  63. package/scripts/preinstall.sh +19 -3
  64. package/scripts/publish-pi-mono.sh +92 -0
  65. package/skills/pi-extensions/SKILL.md +2 -2
  66. package/skills/pi-tui/SKILL.md +17 -17
  67. package/skills/typescript/SKILL.md +1 -1
  68. package/themes/alpharius.json +7 -6
  69. /package/extensions/{debug.ts → lib/debug.ts} +0 -0
@@ -13,7 +13,12 @@ import { PREFERRED_ORDER } from "./local-models.ts";
13
13
  // ---------------------------------------------------------------------------
14
14
 
15
15
  export type ModelTier = "local" | "retribution" | "victory" | "gloriana";
16
- export type ProviderName = "openai" | "anthropic" | "local";
16
+ /**
17
+ * Well-known provider names for routing policy ordering and preference.
18
+ * Any string is accepted at runtime (unknown providers participate in
19
+ * capability-based matching) but these have explicit routing support.
20
+ */
21
+ export type ProviderName = "anthropic" | "openai" | "github-copilot" | "google" | "amazon-bedrock" | "azure-openai-responses" | "xai" | "groq" | "mistral" | "openrouter" | "local" | (string & {});
17
22
  export type ThinkingLevel = "off" | "minimal" | "low" | "medium" | "high";
18
23
  export type CapabilityRole = "archmagos" | "magos" | "adept" | "servitor" | "servoskull";
19
24
  export type CandidateSource = "upstream" | "local";
@@ -164,19 +169,101 @@ const ROLE_DISPLAY_LABELS: Record<CapabilityRole, string> = {
164
169
  };
165
170
 
166
171
  // ---------------------------------------------------------------------------
167
- // Anthropic/OpenAI defaults
172
+ // Universal model-to-tier classification (provider-transparent)
173
+ // ---------------------------------------------------------------------------
174
+ // These patterns match model IDs regardless of provider. A github-copilot
175
+ // model "claude-opus-4-6" matches the same rules as anthropic's.
176
+ // Order within each tier matters — earlier entries are preferred.
168
177
  // ---------------------------------------------------------------------------
169
178
 
179
+ interface TierRule {
180
+ exact?: string;
181
+ prefix?: string;
182
+ weight: CandidateWeight;
183
+ maxThinking: ThinkingLevel;
184
+ source: CandidateSource;
185
+ }
186
+
187
+ const TIER_RULES: Record<Exclude<ModelTier, "local">, TierRule[]> = {
188
+ gloriana: [
189
+ { prefix: "claude-opus", weight: "heavy", maxThinking: "high", source: "upstream" },
190
+ { exact: "gpt-5.4", weight: "heavy", maxThinking: "high", source: "upstream" },
191
+ { prefix: "gpt-5.4-", weight: "heavy", maxThinking: "high", source: "upstream" },
192
+ { prefix: "gemini-3-pro", weight: "heavy", maxThinking: "high", source: "upstream" },
193
+ { prefix: "gemini-3.1-pro", weight: "heavy", maxThinking: "high", source: "upstream" },
194
+ ],
195
+ victory: [
196
+ { prefix: "claude-sonnet", weight: "normal", maxThinking: "high", source: "upstream" },
197
+ { prefix: "gpt-5.3-codex", weight: "normal", maxThinking: "medium", source: "upstream" },
198
+ { exact: "gpt-5.3", weight: "normal", maxThinking: "medium", source: "upstream" },
199
+ { prefix: "gpt-5.2-codex", weight: "normal", maxThinking: "medium", source: "upstream" },
200
+ { exact: "gpt-5.2", weight: "normal", maxThinking: "medium", source: "upstream" },
201
+ { prefix: "gemini-3-flash", weight: "normal", maxThinking: "medium", source: "upstream" },
202
+ { prefix: "gemini-2.5-pro", weight: "normal", maxThinking: "medium", source: "upstream" },
203
+ { prefix: "grok-", weight: "normal", maxThinking: "medium", source: "upstream" },
204
+ ],
205
+ retribution: [
206
+ { prefix: "claude-haiku", weight: "light", maxThinking: "low", source: "upstream" },
207
+ { prefix: "gpt-5.1-codex", weight: "light", maxThinking: "low", source: "upstream" },
208
+ { exact: "gpt-5.1", weight: "light", maxThinking: "low", source: "upstream" },
209
+ { prefix: "gpt-5-mini", weight: "light", maxThinking: "low", source: "upstream" },
210
+ { prefix: "gpt-5-nano", weight: "light", maxThinking: "low", source: "upstream" },
211
+ { prefix: "gemini-2.0-flash", weight: "light", maxThinking: "low", source: "upstream" },
212
+ { prefix: "mistral-large", weight: "light", maxThinking: "low", source: "upstream" },
213
+ { prefix: "codestral", weight: "light", maxThinking: "low", source: "upstream" },
214
+ ],
215
+ };
216
+
217
+ // Legacy aliases — used only by matchAnthropicTier/matchOpenAITier for
218
+ // backward compat with operator profiles that reference these constants.
170
219
  const ANTHROPIC_TIER_PREFIXES: Record<Exclude<ModelTier, "local">, string[]> = {
171
220
  retribution: ["claude-haiku"],
172
221
  victory: ["claude-sonnet"],
173
222
  gloriana: ["claude-opus"],
174
223
  };
175
224
 
225
+ // Deprecated models that should never be selected even if the provider
226
+ // still lists them. Prevents routing to dead or EOL endpoints.
227
+ // Updated 2026-03-14: GPT-4.1, GPT-4.1-mini, o4-mini retired Feb 13 2026.
228
+ const DEPRECATED_MODELS = new Set([
229
+ "gpt-4o", "gpt-4o-mini",
230
+ "gpt-4-turbo", "gpt-4",
231
+ "gpt-3.5-turbo",
232
+ "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano",
233
+ "o4-mini",
234
+ "gpt-5", "gpt-5-instant", "gpt-5-thinking", // retired Feb 13 2026
235
+ "claude-3-haiku-20240307", "claude-3-sonnet-20240229", "claude-3-opus-20240229",
236
+ "claude-3-5-sonnet-20240620", // ancient snapshots
237
+ ]);
238
+
239
+ // o3 is a specialized reasoning model — not suitable as a general-purpose
240
+ // archmagos candidate. It belongs in a dedicated "reasoning" role if needed.
241
+
242
+ /**
243
+ * Filter deprecated models from a registry snapshot.
244
+ *
245
+ * Call this on `getAvailable()` output before passing to `resolveTier()`.
246
+ * Centralizes deprecation logic so callsites don't need to know the list.
247
+ */
248
+ export function filterDeprecated(models: RegistryModel[]): RegistryModel[] {
249
+ return models.filter((m) => !DEPRECATED_MODELS.has(m.id));
250
+ }
251
+
252
+ /**
253
+ * Get the viable model pool: auth'd + non-deprecated.
254
+ *
255
+ * Single entry point for all routing callsites. Uses `getAvailable()` (only
256
+ * models with configured auth) then strips deprecated models.
257
+ *
258
+ * Pass the result to `resolveTier()`, `getDefaultCapabilityProfile()`, etc.
259
+ */
260
+ export function getViableModels(registry: { getAvailable(): { id: string; provider: string }[] }): RegistryModel[] {
261
+ return filterDeprecated(registry.getAvailable() as unknown as RegistryModel[]);
262
+ }
176
263
  const OPENAI_TIER_MODELS: Record<Exclude<ModelTier, "local">, string[]> = {
177
- retribution: ["gpt-5.1-codex", "gpt-4o-mini", "gpt-4.1-mini"],
178
- victory: ["gpt-5.3-codex-spark", "gpt-4.1", "gpt-4o"],
179
- gloriana: ["gpt-5.4", "gpt-4.5", "o3"],
264
+ retribution: ["gpt-5.1-codex", "gpt-5.1"],
265
+ victory: ["gpt-5.3-codex-spark", "gpt-5.3", "gpt-5.2-codex", "gpt-5.2"],
266
+ gloriana: ["gpt-5.4"],
180
267
  };
181
268
 
182
269
  // ---------------------------------------------------------------------------
@@ -218,18 +305,20 @@ function matchAnthropicTier(models: RegistryModel[], tier: Exclude<ModelTier, "l
218
305
  function matchOpenAITier(models: RegistryModel[], tier: Exclude<ModelTier, "local">): RegistryModel | undefined {
219
306
  const exactIds = OPENAI_TIER_MODELS[tier];
220
307
  for (const modelId of exactIds) {
308
+ if (DEPRECATED_MODELS.has(modelId)) continue;
221
309
  const match = models.find((m) => m.provider === "openai" && m.id === modelId);
222
310
  if (match) return match;
223
311
  }
224
312
  const exactIdSet = new Set(exactIds);
225
313
  const prefixFallbacks: Record<string, string[]> = {
226
- retribution: ["gpt-4o-mini-", "gpt-4.1-mini-"],
227
- victory: ["gpt-4o-", "gpt-4.1-"],
228
- gloriana: ["gpt-4.5-", "o3-", "gpt-5."],
314
+ retribution: ["gpt-5.1-"],
315
+ victory: ["gpt-5.2-", "gpt-5.3-"],
316
+ gloriana: ["gpt-5.4-"],
229
317
  };
230
318
  for (const prefix of prefixFallbacks[tier] ?? []) {
231
319
  const found = models.find(
232
- (m) => m.provider === "openai" && m.id.startsWith(prefix) && !exactIdSet.has(m.id),
320
+ (m) => m.provider === "openai" && m.id.startsWith(prefix)
321
+ && !exactIdSet.has(m.id) && !DEPRECATED_MODELS.has(m.id),
233
322
  );
234
323
  if (found) return found;
235
324
  }
@@ -246,6 +335,60 @@ function matchLocalTier(models: RegistryModel[]): RegistryModel | undefined {
246
335
  return locals[0];
247
336
  }
248
337
 
338
+ // ---------------------------------------------------------------------------
339
+ // Provider-transparent tier matching
340
+ // ---------------------------------------------------------------------------
341
+
342
+ /**
343
+ * Classify a model into a tier using TIER_RULES.
344
+ * Returns the matching rule or undefined if no rule matches.
345
+ */
346
+ export function classifyModelTier(modelId: string): { tier: Exclude<ModelTier, "local">; rule: TierRule } | undefined {
347
+ for (const tier of ["gloriana", "victory", "retribution"] as const) {
348
+ for (const rule of TIER_RULES[tier]) {
349
+ if (rule.exact && modelId === rule.exact) return { tier, rule };
350
+ if (rule.prefix && modelId.startsWith(rule.prefix)) return { tier, rule };
351
+ }
352
+ }
353
+ return undefined;
354
+ }
355
+
356
+ /**
357
+ * Match ALL viable models to a tier, across every provider.
358
+ * Returns candidates sorted by TIER_RULES preference order (earlier rules = preferred).
359
+ * Within the same rule, models are sorted by version descending (newest first).
360
+ */
361
+ export function matchTierUniversal(
362
+ models: RegistryModel[],
363
+ tier: Exclude<ModelTier, "local">,
364
+ ): Array<{ model: RegistryModel; rule: TierRule }> {
365
+ const rules = TIER_RULES[tier];
366
+ const results: Array<{ model: RegistryModel; rule: TierRule; ruleIndex: number }> = [];
367
+
368
+ for (const model of models) {
369
+ if (model.provider === "local") continue; // local handled separately
370
+ if (DEPRECATED_MODELS.has(model.id)) continue;
371
+ for (let i = 0; i < rules.length; i++) {
372
+ const rule = rules[i];
373
+ const matches = rule.exact
374
+ ? model.id === rule.exact
375
+ : rule.prefix ? model.id.startsWith(rule.prefix) : false;
376
+ if (matches) {
377
+ results.push({ model, rule, ruleIndex: i });
378
+ break; // first matching rule wins for this model
379
+ }
380
+ }
381
+ }
382
+
383
+ // Sort: rule priority first, then newest version within same rule
384
+ results.sort((a, b) => {
385
+ if (a.ruleIndex !== b.ruleIndex) return a.ruleIndex - b.ruleIndex;
386
+ return compareModelVersionsDesc(a.model.id, b.model.id);
387
+ });
388
+
389
+ return results.map(({ model, rule }) => ({ model, rule }));
390
+ }
391
+
249
392
  function dedupeProviderOrder(order: ProviderName[], avoided: ProviderName[]): ProviderName[] {
250
393
  const seen = new Set<ProviderName>();
251
394
  const result: ProviderName[] = [];
@@ -541,35 +684,48 @@ export function withCandidateCooldown(
541
684
  }
542
685
 
543
686
  export function getDefaultCapabilityProfile(models: RegistryModel[] = []): CapabilityProfile {
544
- const archmagosCandidates: CapabilityCandidate[] = [];
545
- const magosCandidates: CapabilityCandidate[] = [];
546
- const adeptCandidates: CapabilityCandidate[] = [];
547
- const servitorCandidates: CapabilityCandidate[] = [];
548
- const servoskullCandidates: CapabilityCandidate[] = [];
549
-
550
- const anthropicOpus = matchAnthropicTier(models, "gloriana");
551
- const openaiOpus = matchOpenAITier(models, "gloriana");
552
- const anthropicSonnet = matchAnthropicTier(models, "victory");
553
- const openaiSonnet = matchOpenAITier(models, "victory");
554
- const anthropicHaiku = matchAnthropicTier(models, "retribution");
555
- const openaiHaiku = matchOpenAITier(models, "retribution");
556
687
  const local = matchLocalTier(models);
557
688
 
558
- if (anthropicOpus) archmagosCandidates.push({ id: anthropicOpus.id, provider: "anthropic", source: "upstream", weight: "heavy", maxThinking: "high" });
559
- if (openaiOpus) archmagosCandidates.push({ id: openaiOpus.id, provider: "openai", source: "upstream", weight: "heavy", maxThinking: "high" });
560
-
561
- if (anthropicSonnet) magosCandidates.push({ id: anthropicSonnet.id, provider: "anthropic", source: "upstream", weight: "normal", maxThinking: "high" });
562
- if (openaiSonnet) magosCandidates.push({ id: openaiSonnet.id, provider: "openai", source: "upstream", weight: "normal", maxThinking: "medium" });
689
+ // Build candidate lists from ALL available models using universal tier rules.
690
+ // Deduplicate by provider+id: a model may appear from multiple providers
691
+ // (e.g. claude-opus-4-6 via both 'anthropic' and 'github-copilot'),
692
+ // but we want each unique provider+id pair as a separate candidate so the
693
+ // policy engine can prefer one provider over another.
694
+ function buildCandidates(tier: Exclude<ModelTier, "local">): CapabilityCandidate[] {
695
+ const matches = matchTierUniversal(models, tier);
696
+ const seen = new Set<string>();
697
+ const candidates: CapabilityCandidate[] = [];
698
+ for (const { model, rule } of matches) {
699
+ const key = `${model.provider}/${model.id}`;
700
+ if (seen.has(key)) continue;
701
+ seen.add(key);
702
+ candidates.push({
703
+ id: model.id,
704
+ provider: model.provider as ProviderName,
705
+ source: rule.source,
706
+ weight: rule.weight,
707
+ maxThinking: rule.maxThinking,
708
+ });
709
+ }
710
+ return candidates;
711
+ }
563
712
 
564
- if (openaiHaiku) adeptCandidates.push({ id: openaiHaiku.id, provider: "openai", source: "upstream", weight: "light", maxThinking: "low" });
565
- if (anthropicHaiku) adeptCandidates.push({ id: anthropicHaiku.id, provider: "anthropic", source: "upstream", weight: "light", maxThinking: "low" });
713
+ const archmagosCandidates = buildCandidates("gloriana");
714
+ const magosCandidates = buildCandidates("victory");
715
+ const adeptCandidates = buildCandidates("retribution");
566
716
 
567
- if (anthropicHaiku) servitorCandidates.push({ id: anthropicHaiku.id, provider: "anthropic", source: "upstream", weight: "light", maxThinking: "low" });
568
- if (openaiHaiku) servitorCandidates.push({ id: openaiHaiku.id, provider: "openai", source: "upstream", weight: "light", maxThinking: "low" });
717
+ // Servitor: reuse adept candidates (cheapest cloud) + local
718
+ const servitorCandidates: CapabilityCandidate[] = [
719
+ ...adeptCandidates.map((c) => ({ ...c, maxThinking: "low" as ThinkingLevel })),
720
+ ];
569
721
  if (local) servitorCandidates.push({ id: local.id, provider: "local", source: "local", weight: inferWeightFromModel(local), maxThinking: "medium" });
570
722
 
723
+ // Servoskull: local first, then cheapest cloud
724
+ const servoskullCandidates: CapabilityCandidate[] = [];
571
725
  if (local) servoskullCandidates.push({ id: local.id, provider: "local", source: "local", weight: inferWeightFromModel(local), maxThinking: "off" });
572
- if (openaiHaiku) servoskullCandidates.push({ id: openaiHaiku.id, provider: "openai", source: "upstream", weight: "light", maxThinking: "off" });
726
+ servoskullCandidates.push(
727
+ ...adeptCandidates.map((c) => ({ ...c, maxThinking: "off" as ThinkingLevel })),
728
+ );
573
729
 
574
730
  return {
575
731
  roles: {
@@ -712,9 +868,125 @@ export function getRoleDisplayLabel(role: CapabilityRole): string {
712
868
 
713
869
  export function getDefaultPolicy(): ProviderRoutingPolicy {
714
870
  return {
715
- providerOrder: ["anthropic", "openai", "local"],
871
+ providerOrder: ["anthropic", "openai", "github-copilot", "google", "xai", "groq", "mistral", "amazon-bedrock", "azure-openai-responses", "openrouter", "local"],
716
872
  avoidProviders: [],
717
873
  cheapCloudPreferredOverLocal: false,
718
874
  requirePreflightForLargeRuns: true,
719
875
  };
720
876
  }
877
+
878
+ // ---------------------------------------------------------------------------
879
+ // Provider summary — startup diagnostics
880
+ // ---------------------------------------------------------------------------
881
+
882
+ export type TierStatus = "operational" | "degraded" | "unavailable";
883
+
884
+ export interface TierSummary {
885
+ tier: Exclude<ModelTier, "local">;
886
+ role: CapabilityRole;
887
+ status: TierStatus;
888
+ /** Best candidate for this tier, if any */
889
+ topCandidate?: { provider: string; modelId: string };
890
+ /** Total number of candidates across all providers */
891
+ candidateCount: number;
892
+ }
893
+
894
+ export interface ProviderSummary {
895
+ /** Providers with auth configured */
896
+ authProviders: string[];
897
+ /** Providers found in registry but no auth */
898
+ unauthProviders: string[];
899
+ /** Per-tier operational status */
900
+ tiers: TierSummary[];
901
+ /** Human-readable one-liner */
902
+ headline: string;
903
+ /** Degradation level (0=nothing, 1=local-only, 2=partial-cloud, 3=full) */
904
+ level: number;
905
+ }
906
+
907
+ /**
908
+ * Build a startup provider summary showing which tiers are operational,
909
+ * degraded, or unavailable based on current auth and model availability.
910
+ *
911
+ * @param allModels - ALL models from registry (getAll), used to identify unauth'd providers
912
+ * @param viableModels - Auth'd + non-deprecated models (getViableModels output)
913
+ * @param policy - Routing policy for provider ordering
914
+ */
915
+ export function buildProviderSummary(
916
+ allModels: RegistryModel[],
917
+ viableModels: RegistryModel[],
918
+ policy: ProviderRoutingPolicy = getDefaultPolicy(),
919
+ ): ProviderSummary {
920
+ // Identify auth'd vs unauth'd providers
921
+ const allProviders = new Set(allModels.map((m) => m.provider));
922
+ const viableProviders = new Set(viableModels.map((m) => m.provider));
923
+ const authProviders = [...viableProviders].filter((p) => p !== "local").sort();
924
+ const unauthProviders = [...allProviders].filter((p) => !viableProviders.has(p) && p !== "local").sort();
925
+
926
+ // Build per-tier status
927
+ const tierDefs: Array<{ tier: Exclude<ModelTier, "local">; role: CapabilityRole }> = [
928
+ { tier: "gloriana", role: "archmagos" },
929
+ { tier: "victory", role: "magos" },
930
+ { tier: "retribution", role: "adept" },
931
+ ];
932
+
933
+ const tiers: TierSummary[] = tierDefs.map(({ tier, role }) => {
934
+ const candidates = matchTierUniversal(viableModels, tier);
935
+ const profile = getDefaultCapabilityProfile(viableModels);
936
+ const resolution = resolveCapabilityRole(role, viableModels, policy, profile);
937
+
938
+ let status: TierStatus;
939
+ let topCandidate: TierSummary["topCandidate"];
940
+
941
+ if (resolution.ok && resolution.selected) {
942
+ status = "operational";
943
+ topCandidate = {
944
+ provider: resolution.selected.candidate.provider,
945
+ modelId: resolution.selected.candidate.id,
946
+ };
947
+ } else if (candidates.length > 0) {
948
+ // Candidates exist but blocked by policy
949
+ status = "degraded";
950
+ topCandidate = {
951
+ provider: candidates[0].model.provider,
952
+ modelId: candidates[0].model.id,
953
+ };
954
+ } else {
955
+ status = "unavailable";
956
+ }
957
+
958
+ return { tier, role, status, topCandidate, candidateCount: candidates.length };
959
+ });
960
+
961
+ // Determine overall level
962
+ const operational = tiers.filter((t) => t.status === "operational").length;
963
+ const hasLocal = viableModels.some((m) => m.provider === "local");
964
+ let level: number;
965
+ if (authProviders.length === 0 && !hasLocal) {
966
+ level = 0;
967
+ } else if (authProviders.length === 0 && hasLocal) {
968
+ level = 1;
969
+ } else if (operational < 3) {
970
+ level = 2;
971
+ } else {
972
+ level = 3;
973
+ }
974
+
975
+ // Build headline
976
+ let headline: string;
977
+ if (level === 0) {
978
+ headline = "No providers configured. Run /bootstrap to set up API keys.";
979
+ } else if (level === 1) {
980
+ headline = "Local inference only — no cloud providers configured.";
981
+ } else {
982
+ const opTiers = tiers.filter((t) => t.status === "operational").map((t) => ROLE_DISPLAY_LABELS[t.role]);
983
+ const unavail = tiers.filter((t) => t.status === "unavailable").map((t) => ROLE_DISPLAY_LABELS[t.role]);
984
+ if (unavail.length === 0) {
985
+ headline = `All tiers operational via ${authProviders.join(", ")}.`;
986
+ } else {
987
+ headline = `${opTiers.join(", ")} operational. ${unavail.join(", ")} unavailable.`;
988
+ }
989
+ }
990
+
991
+ return { authProviders, unauthProviders, tiers, headline, level };
992
+ }
@@ -1,4 +1,4 @@
1
- import type { Model } from "@cwilson613/pi-ai";
1
+ import type { Model } from "@styrene-lab/pi-ai";
2
2
  import {
3
3
  classifyUpstreamFailure,
4
4
  resolveCapabilityRole,
@@ -88,8 +88,8 @@ const DEFAULT_PROFILE: OperatorCapabilityProfile = {
88
88
  { id: "gpt-5.1-codex", provider: "openai", source: "upstream", weight: "light", maxThinking: "low" },
89
89
  ],
90
90
  servitor: [
91
- { id: "gpt-4o-mini", provider: "openai", source: "upstream", weight: "light", maxThinking: "minimal" },
92
91
  { id: "claude-haiku-3-5", provider: "anthropic", source: "upstream", weight: "light", maxThinking: "minimal" },
92
+ { id: "gpt-5.1-codex", provider: "openai", source: "upstream", weight: "light", maxThinking: "minimal" },
93
93
  ],
94
94
  servoskull: [
95
95
  { id: "qwen3:8b", provider: "local", source: "local", weight: "light", maxThinking: "off" },
@@ -0,0 +1,163 @@
1
+ import { existsSync } from "node:fs";
2
+ import { join } from "node:path";
3
+ import { homedir } from "node:os";
4
+
5
+ // @secret ANTHROPIC_API_KEY "Anthropic API key for Claude models"
6
+ // @secret ANTHROPIC_OAUTH_TOKEN "Anthropic OAuth token (takes precedence over API key)"
7
+ // @secret OPENAI_API_KEY "OpenAI API key for GPT models"
8
+ // @secret COPILOT_GITHUB_TOKEN "GitHub Copilot token (primary, set by Copilot extension)"
9
+ // @secret GH_TOKEN "GitHub CLI token (also used as Copilot fallback)"
10
+ // @secret GEMINI_API_KEY "Google Gemini API key"
11
+ // @secret XAI_API_KEY "xAI API key for Grok models"
12
+ // @secret GROQ_API_KEY "Groq API key for fast inference"
13
+ // @secret MISTRAL_API_KEY "Mistral API key for Mistral/Codestral models"
14
+ // @secret OPENROUTER_API_KEY "OpenRouter API key for multi-provider routing"
15
+ // @secret AZURE_OPENAI_API_KEY "Azure OpenAI API key"
16
+ // @secret CEREBRAS_API_KEY "Cerebras API key for fast inference"
17
+ // @secret HF_TOKEN "HuggingFace token for gated model access"
18
+
19
+ /**
20
+ * Mapping from pi model provider names to their env var API keys.
21
+ *
22
+ * Covers common providers from pi-ai's env-api-keys.js. Niche providers
23
+ * (vercel-ai-gateway, zai, minimax, opencode, kimi-coding) are omitted —
24
+ * add them here if they gain routing relevance.
25
+ *
26
+ * SYNC CHECK: compare against vendor/pi-mono/packages/ai/dist/env-api-keys.js
27
+ * when updating.
28
+ */
29
+ export interface ProviderEnvEntry {
30
+ /** Primary env var name (the one users should configure) */
31
+ envVar: string;
32
+ /** All env vars checked by pi (in priority order) */
33
+ allEnvVars: string[];
34
+ /** Human-readable description */
35
+ description: string;
36
+ }
37
+
38
+ export const PROVIDER_ENV_VARS: Record<string, ProviderEnvEntry> = {
39
+ anthropic: {
40
+ // envVar is ANTHROPIC_API_KEY (what users should configure via /secrets),
41
+ // but allEnvVars lists ANTHROPIC_OAUTH_TOKEN first because pi checks it
42
+ // with higher priority at runtime (OAuth login takes precedence over key).
43
+ envVar: "ANTHROPIC_API_KEY",
44
+ allEnvVars: ["ANTHROPIC_OAUTH_TOKEN", "ANTHROPIC_API_KEY"],
45
+ description: "Claude models (opus, sonnet, haiku)",
46
+ },
47
+ openai: {
48
+ envVar: "OPENAI_API_KEY",
49
+ allEnvVars: ["OPENAI_API_KEY"],
50
+ description: "GPT models",
51
+ },
52
+ "github-copilot": {
53
+ envVar: "COPILOT_GITHUB_TOKEN",
54
+ allEnvVars: ["COPILOT_GITHUB_TOKEN", "GH_TOKEN", "GITHUB_TOKEN"],
55
+ description: "GitHub Copilot (Claude, GPT, Gemini, Grok via OAuth)",
56
+ },
57
+ google: {
58
+ envVar: "GEMINI_API_KEY",
59
+ allEnvVars: ["GEMINI_API_KEY"],
60
+ description: "Google Gemini models",
61
+ },
62
+ "google-vertex": {
63
+ // ADC auth requires credentials file + project + location (all three).
64
+ // GOOGLE_CLOUD_API_KEY is the simple path; ADC is complex — see
65
+ // isProviderEnvConfigured() special case below.
66
+ envVar: "GOOGLE_CLOUD_API_KEY",
67
+ allEnvVars: ["GOOGLE_CLOUD_API_KEY"],
68
+ description: "Google Vertex AI (or gcloud ADC credentials)",
69
+ },
70
+ xai: {
71
+ envVar: "XAI_API_KEY",
72
+ allEnvVars: ["XAI_API_KEY"],
73
+ description: "xAI Grok models",
74
+ },
75
+ groq: {
76
+ envVar: "GROQ_API_KEY",
77
+ allEnvVars: ["GROQ_API_KEY"],
78
+ description: "Groq fast inference",
79
+ },
80
+ mistral: {
81
+ envVar: "MISTRAL_API_KEY",
82
+ allEnvVars: ["MISTRAL_API_KEY"],
83
+ description: "Mistral / Codestral",
84
+ },
85
+ openrouter: {
86
+ envVar: "OPENROUTER_API_KEY",
87
+ allEnvVars: ["OPENROUTER_API_KEY"],
88
+ description: "OpenRouter multi-provider gateway",
89
+ },
90
+ "azure-openai-responses": {
91
+ envVar: "AZURE_OPENAI_API_KEY",
92
+ allEnvVars: ["AZURE_OPENAI_API_KEY"],
93
+ description: "Azure OpenAI",
94
+ },
95
+ "amazon-bedrock": {
96
+ envVar: "AWS_PROFILE",
97
+ allEnvVars: ["AWS_PROFILE", "AWS_ACCESS_KEY_ID", "AWS_BEARER_TOKEN_BEDROCK", "AWS_WEB_IDENTITY_TOKEN_FILE"],
98
+ description: "AWS Bedrock (profile, IAM keys, bearer token, or IRSA)",
99
+ },
100
+ cerebras: {
101
+ envVar: "CEREBRAS_API_KEY",
102
+ allEnvVars: ["CEREBRAS_API_KEY"],
103
+ description: "Cerebras fast inference",
104
+ },
105
+ huggingface: {
106
+ envVar: "HF_TOKEN",
107
+ allEnvVars: ["HF_TOKEN"],
108
+ description: "HuggingFace gated model access",
109
+ },
110
+ };
111
+
112
+ /**
113
+ * Get remediation hint for an unconfigured provider.
114
+ * Returns actionable text with the most appropriate fix path.
115
+ */
116
+ export function getProviderRemediationHint(provider: string): string | undefined {
117
+ const entry = PROVIDER_ENV_VARS[provider];
118
+ if (!entry) return undefined;
119
+
120
+ // Providers with CLI/OAuth auth paths get special handling
121
+ switch (provider) {
122
+ case "github-copilot":
123
+ return "Run `/login github`, or set via `/secrets configure COPILOT_GITHUB_TOKEN`";
124
+ case "amazon-bedrock":
125
+ return "Run `aws sso login --profile <profile>` or `/secrets configure AWS_PROFILE`";
126
+ case "google-vertex":
127
+ return "Run `gcloud auth application-default login` or `/secrets configure GOOGLE_CLOUD_API_KEY`";
128
+ case "anthropic":
129
+ return "`/secrets configure ANTHROPIC_API_KEY` (or ANTHROPIC_OAUTH_TOKEN for OAuth)";
130
+ default:
131
+ return `\`/secrets configure ${entry.envVar}\``;
132
+ }
133
+ }
134
+
135
+ /**
136
+ * Check if a provider has any of its env vars set in process.env.
137
+ * Useful for quick auth detection without going through pi's registry.
138
+ */
139
+ export function isProviderEnvConfigured(provider: string): boolean {
140
+ const entry = PROVIDER_ENV_VARS[provider];
141
+ if (!entry) return false;
142
+
143
+ // google-vertex ADC requires credentials + project + location (conjunction).
144
+ // Credentials can come from GOOGLE_APPLICATION_CREDENTIALS env var OR from the
145
+ // default ADC path (~/.config/gcloud/application_default_credentials.json) written
146
+ // by `gcloud auth application-default login`. Matches pi-ai's hasVertexAdcCredentials().
147
+ if (provider === "google-vertex") {
148
+ if (process.env.GOOGLE_CLOUD_API_KEY) return true;
149
+ const hasAdcEnv = !!(process.env.GOOGLE_APPLICATION_CREDENTIALS);
150
+ let hasAdcFile = false;
151
+ if (!hasAdcEnv) {
152
+ try {
153
+ hasAdcFile = existsSync(join(homedir(), ".config", "gcloud", "application_default_credentials.json"));
154
+ } catch {}
155
+ }
156
+ const hasAdc = hasAdcEnv || hasAdcFile;
157
+ const hasProject = !!(process.env.GOOGLE_CLOUD_PROJECT || process.env.GCLOUD_PROJECT);
158
+ const hasLocation = !!(process.env.GOOGLE_CLOUD_LOCATION);
159
+ return hasAdc && hasProject && hasLocation;
160
+ }
161
+
162
+ return entry.allEnvVars.some(v => !!process.env[v]);
163
+ }