@oh-my-pi/pi-coding-agent 13.5.7 → 13.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. package/CHANGELOG.md +30 -1
  2. package/package.json +7 -7
  3. package/src/cli/args.ts +7 -0
  4. package/src/cli/stats-cli.ts +5 -0
  5. package/src/config/model-registry.ts +99 -9
  6. package/src/config/settings-schema.ts +22 -2
  7. package/src/extensibility/extensions/types.ts +2 -0
  8. package/src/internal-urls/docs-index.generated.ts +2 -2
  9. package/src/internal-urls/index.ts +2 -1
  10. package/src/internal-urls/mcp-protocol.ts +156 -0
  11. package/src/internal-urls/router.ts +1 -1
  12. package/src/internal-urls/types.ts +3 -3
  13. package/src/ipy/prelude.py +1 -0
  14. package/src/mcp/client.ts +235 -2
  15. package/src/mcp/index.ts +1 -1
  16. package/src/mcp/manager.ts +399 -5
  17. package/src/mcp/oauth-flow.ts +26 -1
  18. package/src/mcp/smithery-auth.ts +104 -0
  19. package/src/mcp/smithery-connect.ts +145 -0
  20. package/src/mcp/smithery-registry.ts +455 -0
  21. package/src/mcp/types.ts +140 -0
  22. package/src/modes/components/footer.ts +10 -4
  23. package/src/modes/components/settings-defs.ts +15 -1
  24. package/src/modes/components/status-line/git-utils.ts +42 -0
  25. package/src/modes/components/status-line/presets.ts +6 -6
  26. package/src/modes/components/status-line/segments.ts +27 -4
  27. package/src/modes/components/status-line/types.ts +2 -0
  28. package/src/modes/components/status-line-segment-editor.ts +1 -0
  29. package/src/modes/components/status-line.ts +109 -5
  30. package/src/modes/controllers/command-controller.ts +12 -2
  31. package/src/modes/controllers/extension-ui-controller.ts +12 -21
  32. package/src/modes/controllers/mcp-command-controller.ts +577 -14
  33. package/src/modes/controllers/selector-controller.ts +5 -0
  34. package/src/modes/theme/theme.ts +6 -0
  35. package/src/prompts/tools/hashline.md +4 -3
  36. package/src/sdk.ts +115 -3
  37. package/src/session/agent-session.ts +19 -4
  38. package/src/session/session-manager.ts +17 -5
  39. package/src/slash-commands/builtin-registry.ts +10 -0
  40. package/src/task/executor.ts +37 -3
  41. package/src/task/index.ts +37 -5
  42. package/src/task/isolation-backend.ts +72 -0
  43. package/src/task/render.ts +6 -1
  44. package/src/task/types.ts +1 -0
  45. package/src/task/worktree.ts +67 -5
  46. package/src/tools/index.ts +1 -1
  47. package/src/tools/path-utils.ts +2 -1
  48. package/src/tools/read.ts +3 -7
  49. package/src/utils/open.ts +1 -1
package/CHANGELOG.md CHANGED
@@ -2,6 +2,36 @@
2
2
 
3
3
  ## [Unreleased]
4
4
 
5
+ ## [13.6.0] - 2026-03-03
6
+ ### Added
7
+
8
+ - Added `mcp://` internal URL protocol for reading MCP server resources directly via the read tool (e.g., `read(path="mcp://resource-uri")`)
9
+ - Added LM Studio integration to the model registry and discovery flow.
10
+ - Added support for authenticating with LM Studio using the `/login lm-studio` command.
11
+ - Added `fuse-projfs` task isolation mode for Windows ProjFS-backed overlays.
12
+ - Added `/mcp registry search <keyword>` integration with Smithery, including interactive result selection, editable server naming before deploy, Smithery `configSchema` prompts, and immediate runtime reload so selected MCP tools are available without restarting
13
+ - Added OAuth failure fallback in `/mcp registry search` deploy flow to prompt for manual bearer tokens and validate them before saving configuration
14
+ - Added Smithery auth support for `/mcp registry search` with cached API key login (`/mcp registry login`, `/mcp registry logout`) and automatic login prompt/retry on auth or rate-limit responses
15
+
16
+ ### Changed
17
+
18
+ - Updated MCP resource update notifications to recommend using `read(path="mcp://<uri>")` instead of the deprecated `read_resource` tool
19
+ - Updated Anthropic Foundry environment variable documentation and CLI help text to the canonical names: `CLAUDE_CODE_USE_FOUNDRY`, `CLAUDE_CODE_CLIENT_CERT`, and `CLAUDE_CODE_CLIENT_KEY`
20
+ - Documented Foundry-specific Anthropic runtime configuration (`FOUNDRY_BASE_URL`, `ANTHROPIC_FOUNDRY_API_KEY`, `ANTHROPIC_CUSTOM_HEADERS`, `NODE_EXTRA_CA_CERTS`) in environment variable reference docs
21
+ - `fuse-overlay` task isolation now targets `fuse-overlayfs` on Unix hosts only; on Windows it falls back to `worktree` with a `<system-notification>` suggesting `fuse-projfs`.
22
+ - `fuse-projfs` now performs Windows ProjFS preflight checks and falls back to `worktree` when host or repository prerequisites are unavailable.
23
+ - Cross-repo patch capture now uses the platform null device (`NUL` on Windows, `/dev/null` elsewhere) for `git diff --no-index`.
24
+
25
+ ### Removed
26
+
27
+ - Removed `read_resource` tool; MCP resource reading is now integrated into the `read` tool via `mcp://` URLs
28
+
29
+ ### Fixed
30
+
31
+ - Fixed MCP resource subscription handling to prevent unsubscribing when notifications are re-enabled after being disabled
32
+ - Fixed LM Studio base URL validation to preserve invalid configured URLs instead of silently falling back to localhost
33
+ - Fixed URI template matching to correctly handle expressions that expand to empty strings
34
+
5
35
  ## [13.5.6] - 2026-03-01
6
36
  ### Changed
7
37
 
@@ -1051,7 +1081,6 @@
1051
1081
  - Improved error reporting in fetch tool to include HTTP status codes when URL fetching fails
1052
1082
  - Fixed fetch tool to preserve actual response metadata (finalUrl, contentType) instead of defaults when requests fail
1053
1083
 
1054
- ||||||| parent of a70a34c8b (fix(coding-agent/debug): Sanitized debug log rendering)
1055
1084
 
1056
1085
  ## [12.1.0] - 2026-02-13
1057
1086
 
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "type": "module",
3
3
  "name": "@oh-my-pi/pi-coding-agent",
4
- "version": "13.5.7",
4
+ "version": "13.6.0",
5
5
  "description": "Coding agent CLI with read, bash, edit, write tools and session management",
6
6
  "homepage": "https://github.com/can1357/oh-my-pi",
7
7
  "author": "Can Boluk",
@@ -41,12 +41,12 @@
41
41
  },
42
42
  "dependencies": {
43
43
  "@mozilla/readability": "^0.6",
44
- "@oh-my-pi/omp-stats": "13.5.7",
45
- "@oh-my-pi/pi-agent-core": "13.5.7",
46
- "@oh-my-pi/pi-ai": "13.5.7",
47
- "@oh-my-pi/pi-natives": "13.5.7",
48
- "@oh-my-pi/pi-tui": "13.5.7",
49
- "@oh-my-pi/pi-utils": "13.5.7",
44
+ "@oh-my-pi/omp-stats": "13.6.0",
45
+ "@oh-my-pi/pi-agent-core": "13.6.0",
46
+ "@oh-my-pi/pi-ai": "13.6.0",
47
+ "@oh-my-pi/pi-natives": "13.6.0",
48
+ "@oh-my-pi/pi-tui": "13.6.0",
49
+ "@oh-my-pi/pi-utils": "13.6.0",
50
50
  "@sinclair/typebox": "^0.34",
51
51
  "@xterm/headless": "^6.0",
52
52
  "ajv": "^8.18",
package/src/cli/args.ts CHANGED
@@ -191,6 +191,13 @@ export function getExtraHelpText(): string {
191
191
  ${chalk.dim("# Core Providers")}
192
192
  ANTHROPIC_API_KEY - Anthropic Claude models
193
193
  ANTHROPIC_OAUTH_TOKEN - Anthropic OAuth (takes precedence over API key)
194
+ CLAUDE_CODE_USE_FOUNDRY - Enable Anthropic Foundry mode (uses Foundry endpoint + mTLS)
195
+ FOUNDRY_BASE_URL - Anthropic Foundry base URL (e.g., https://<foundry-host>)
196
+ ANTHROPIC_FOUNDRY_API_KEY - Anthropic token used as Authorization: Bearer <token> in Foundry mode
197
+ ANTHROPIC_CUSTOM_HEADERS - Extra Foundry headers (e.g., "user-id: USERNAME")
198
+ CLAUDE_CODE_CLIENT_CERT - Client certificate (PEM path or inline PEM) for mTLS
199
+ CLAUDE_CODE_CLIENT_KEY - Client private key (PEM path or inline PEM) for mTLS
200
+ NODE_EXTRA_CA_CERTS - CA bundle path (or inline PEM) for server certificate validation
194
201
  OPENAI_API_KEY - OpenAI GPT models
195
202
  GEMINI_API_KEY - Google Gemini models
196
203
  GITHUB_TOKEN - GitHub Copilot (or GH_TOKEN, COPILOT_GITHUB_TOKEN)
@@ -59,6 +59,10 @@ function formatCost(n: number): string {
59
59
  return `$${n.toFixed(2)}`;
60
60
  }
61
61
 
62
+ function normalizePremiumRequests(n: number): number {
63
+ return Math.round((n + Number.EPSILON) * 100) / 100;
64
+ }
65
+
62
66
  // =============================================================================
63
67
  // Command Handler
64
68
  // =============================================================================
@@ -120,6 +124,7 @@ async function printStatsSummary(): Promise<void> {
120
124
  console.log(` Total Tokens: ${formatNumber(overall.totalInputTokens + overall.totalOutputTokens)}`);
121
125
  console.log(` Cache Rate: ${formatPercent(overall.cacheRate)}`);
122
126
  console.log(` Total Cost: ${formatCost(overall.totalCost)}`);
127
+ console.log(` Premium Requests: ${formatNumber(normalizePremiumRequests(overall.totalPremiumRequests ?? 0))}`);
123
128
  console.log(` Avg Duration: ${overall.avgDuration !== null ? formatDuration(overall.avgDuration) : "-"}`);
124
129
  console.log(` Avg TTFT: ${overall.avgTtft !== null ? formatDuration(overall.avgTtft) : "-"}`);
125
130
  if (overall.avgTokensPerSecond !== null) {
@@ -3,6 +3,7 @@ import {
3
3
  type AssistantMessageEventStream,
4
4
  type Context,
5
5
  createModelManager,
6
+ DEFAULT_LOCAL_TOKEN,
6
7
  getBundledModels,
7
8
  getBundledProviders,
8
9
  getGitHubCopilotBaseUrl,
@@ -99,6 +100,7 @@ const ModelDefinitionSchema = Type.Object({
99
100
  cacheWrite: Type.Number(),
100
101
  }),
101
102
  ),
103
+ premiumMultiplier: Type.Optional(Type.Number()),
102
104
  contextWindow: Type.Optional(Type.Number()),
103
105
  maxTokens: Type.Optional(Type.Number()),
104
106
  headers: Type.Optional(Type.Record(Type.String(), Type.String())),
@@ -119,6 +121,7 @@ const ModelOverrideSchema = Type.Object({
119
121
  cacheWrite: Type.Optional(Type.Number()),
120
122
  }),
121
123
  ),
124
+ premiumMultiplier: Type.Optional(Type.Number()),
122
125
  contextWindow: Type.Optional(Type.Number()),
123
126
  maxTokens: Type.Optional(Type.Number()),
124
127
  headers: Type.Optional(Type.Record(Type.String(), Type.String())),
@@ -129,7 +132,7 @@ const ModelOverrideSchema = Type.Object({
129
132
  type ModelOverride = Static<typeof ModelOverrideSchema>;
130
133
 
131
134
  const ProviderDiscoverySchema = Type.Object({
132
- type: Type.Union([Type.Literal("ollama")]),
135
+ type: Type.Union([Type.Literal("ollama"), Type.Literal("lm-studio")]),
133
136
  });
134
137
 
135
138
  const ProviderAuthSchema = Type.Union([Type.Literal("apiKey"), Type.Literal("none")]);
@@ -378,6 +381,7 @@ function applyModelOverride(model: Model<Api>, override: ModelOverride): Model<A
378
381
  if (override.contextWindow !== undefined) result.contextWindow = override.contextWindow;
379
382
  if (override.maxTokens !== undefined) result.maxTokens = override.maxTokens;
380
383
  if (override.contextPromotionTarget !== undefined) result.contextPromotionTarget = override.contextPromotionTarget;
384
+ if (override.premiumMultiplier !== undefined) result.premiumMultiplier = override.premiumMultiplier;
381
385
  if (override.cost) {
382
386
  result.cost = {
383
387
  input: override.cost.input ?? model.cost.input,
@@ -405,6 +409,7 @@ interface CustomModelDefinitionLike {
405
409
  headers?: Record<string, string>;
406
410
  compat?: Model<Api>["compat"];
407
411
  contextPromotionTarget?: string;
412
+ premiumMultiplier?: number;
408
413
  }
409
414
 
410
415
  interface CustomModelBuildOptions {
@@ -456,6 +461,7 @@ function buildCustomModel(
456
461
  headers: mergeCustomModelHeaders(providerHeaders, modelDef.headers, authHeader, providerApiKey),
457
462
  compat: modelDef.compat,
458
463
  contextPromotionTarget: modelDef.contextPromotionTarget,
464
+ premiumMultiplier: modelDef.premiumMultiplier,
459
465
  } as Model<Api>;
460
466
  }
461
467
 
@@ -589,14 +595,24 @@ export class ModelRegistry {
589
595
  }
590
596
 
591
597
  #addImplicitDiscoverableProviders(configuredProviders: Set<string>): void {
592
- if (configuredProviders.has("ollama")) return;
593
- this.#discoverableProviders.push({
594
- provider: "ollama",
595
- api: "openai-completions",
596
- baseUrl: Bun.env.OLLAMA_BASE_URL || "http://127.0.0.1:11434",
597
- discovery: { type: "ollama" },
598
- });
599
- this.#keylessProviders.add("ollama");
598
+ if (!configuredProviders.has("ollama")) {
599
+ this.#discoverableProviders.push({
600
+ provider: "ollama",
601
+ api: "openai-completions",
602
+ baseUrl: Bun.env.OLLAMA_BASE_URL || "http://127.0.0.1:11434",
603
+ discovery: { type: "ollama" },
604
+ });
605
+ this.#keylessProviders.add("ollama");
606
+ }
607
+ if (!configuredProviders.has("lm-studio")) {
608
+ this.#discoverableProviders.push({
609
+ provider: "lm-studio",
610
+ api: "openai-completions",
611
+ baseUrl: Bun.env.LM_STUDIO_BASE_URL || "http://127.0.0.1:1234/v1",
612
+ discovery: { type: "lm-studio" },
613
+ });
614
+ this.#keylessProviders.add("lm-studio");
615
+ }
600
616
  }
601
617
 
602
618
  #loadCustomModels(): CustomModelsResult {
@@ -719,6 +735,8 @@ export class ModelRegistry {
719
735
  switch (providerConfig.discovery.type) {
720
736
  case "ollama":
721
737
  return this.#discoverOllamaModels(providerConfig);
738
+ case "lm-studio":
739
+ return this.#discoverLmStudioModels(providerConfig);
722
740
  }
723
741
  }
724
742
 
@@ -872,6 +890,77 @@ export class ModelRegistry {
872
890
  }
873
891
  }
874
892
 
893
+ async #discoverLmStudioModels(providerConfig: DiscoveryProviderConfig): Promise<Model<Api>[]> {
894
+ const baseUrl = this.#normalizeLmStudioBaseUrl(providerConfig.baseUrl);
895
+ const modelsUrl = `${baseUrl}/models`;
896
+
897
+ const headers: Record<string, string> = { ...(providerConfig.headers ?? {}) };
898
+ const apiKey = await this.authStorage.getApiKey("lm-studio");
899
+ if (apiKey && apiKey !== DEFAULT_LOCAL_TOKEN && apiKey !== kNoAuth) {
900
+ headers.Authorization = `Bearer ${apiKey}`;
901
+ }
902
+
903
+ try {
904
+ const response = await fetch(modelsUrl, {
905
+ headers,
906
+ signal: AbortSignal.timeout(3000),
907
+ });
908
+ if (!response.ok) {
909
+ logger.warn("model discovery failed for provider", {
910
+ provider: providerConfig.provider,
911
+ status: response.status,
912
+ url: modelsUrl,
913
+ });
914
+ return [];
915
+ }
916
+ const payload = (await response.json()) as { data?: Array<{ id: string }> };
917
+ const models = payload.data ?? [];
918
+ const discovered: Model<Api>[] = [];
919
+ for (const item of models) {
920
+ const id = item.id;
921
+ if (!id) continue;
922
+ discovered.push({
923
+ id,
924
+ name: id,
925
+ api: providerConfig.api,
926
+ provider: providerConfig.provider,
927
+ baseUrl,
928
+ reasoning: false,
929
+ input: ["text"],
930
+ cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
931
+ contextWindow: 128000,
932
+ maxTokens: 8192,
933
+ headers,
934
+ compat: {
935
+ supportsStore: false,
936
+ supportsDeveloperRole: false,
937
+ supportsReasoningEffort: false,
938
+ },
939
+ });
940
+ }
941
+ return this.#applyProviderModelOverrides(providerConfig.provider, discovered);
942
+ } catch (error) {
943
+ logger.warn("model discovery failed for provider", {
944
+ provider: providerConfig.provider,
945
+ url: modelsUrl,
946
+ error: error instanceof Error ? error.message : String(error),
947
+ });
948
+ return [];
949
+ }
950
+ }
951
+
952
+ #normalizeLmStudioBaseUrl(baseUrl?: string): string {
953
+ const defaultBaseUrl = "http://127.0.0.1:1234/v1";
954
+ const raw = baseUrl || defaultBaseUrl;
955
+ try {
956
+ const parsed = new URL(raw);
957
+ const trimmedPath = parsed.pathname.replace(/\/+$/g, "");
958
+ parsed.pathname = trimmedPath.endsWith("/v1") ? trimmedPath || "/v1" : `${trimmedPath}/v1`;
959
+ return `${parsed.protocol}//${parsed.host}${parsed.pathname}`;
960
+ } catch {
961
+ return raw;
962
+ }
963
+ }
875
964
  #normalizeOllamaBaseUrl(baseUrl?: string): string {
876
965
  const raw = baseUrl || "http://127.0.0.1:11434";
877
966
  try {
@@ -1136,5 +1225,6 @@ export interface ProviderConfigInput {
1136
1225
  headers?: Record<string, string>;
1137
1226
  compat?: Model<Api>["compat"];
1138
1227
  contextPromotionTarget?: string;
1228
+ premiumMultiplier?: number;
1139
1229
  }>;
1140
1230
  }
@@ -63,6 +63,7 @@ export type StatusLineSegmentId =
63
63
  | "plan_mode"
64
64
  | "path"
65
65
  | "git"
66
+ | "pr"
66
67
  | "subagents"
67
68
  | "token_in"
68
69
  | "token_out"
@@ -569,12 +570,13 @@ export const SETTINGS_SCHEMA = {
569
570
  // ─────────────────────────────────────────────────────────────────────────
570
571
  "task.isolation.mode": {
571
572
  type: "enum",
572
- values: ["none", "worktree", "fuse-overlay"] as const,
573
+ values: ["none", "worktree", "fuse-overlay", "fuse-projfs"] as const,
573
574
  default: "none",
574
575
  ui: {
575
576
  tab: "tools",
576
577
  label: "Task isolation",
577
- description: "Isolation mode for subagents (none, git worktree, or fuse-overlay)",
578
+ description:
579
+ "Isolation mode for subagents (none, git worktree, fuse-overlayfs on Unix, or ProjFS on Windows via fuse-projfs; unsupported modes fall back to worktree)",
578
580
  submenu: true,
579
581
  },
580
582
  },
@@ -858,6 +860,24 @@ export const SETTINGS_SCHEMA = {
858
860
  default: true,
859
861
  ui: { tab: "tools", label: "MCP project config", description: "Load .mcp.json/mcp.json from project root" },
860
862
  },
863
+ "mcp.notifications": {
864
+ type: "boolean",
865
+ default: false,
866
+ ui: {
867
+ tab: "tools",
868
+ label: "MCP update injection",
869
+ description: "Inject MCP resource updates into the agent conversation",
870
+ },
871
+ },
872
+ "mcp.notificationDebounceMs": {
873
+ type: "number",
874
+ default: 500,
875
+ ui: {
876
+ tab: "tools",
877
+ label: "MCP notification debounce (ms)",
878
+ description: "Debounce window for MCP resource update notifications before injecting into conversation",
879
+ },
880
+ },
861
881
 
862
882
  // ─────────────────────────────────────────────────────────────────────────
863
883
  // LSP settings
@@ -1144,6 +1144,8 @@ export interface ProviderModelConfig {
1144
1144
  input: ("text" | "image")[];
1145
1145
  /** Cost per million tokens. */
1146
1146
  cost: { input: number; output: number; cacheRead: number; cacheWrite: number };
1147
+ /** Premium Copilot requests charged per user-initiated request. */
1148
+ premiumMultiplier?: number;
1147
1149
  /** Maximum context window size in tokens. */
1148
1150
  contextWindow: number;
1149
1151
  /** Maximum output tokens. */