@oh-my-pi/pi-coding-agent 14.5.9 → 14.5.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -14,7 +14,17 @@ import {
14
14
  type UsageReport,
15
15
  } from "@oh-my-pi/pi-ai";
16
16
  import type { Component, SlashCommand } from "@oh-my-pi/pi-tui";
17
- import { Container, Loader, Markdown, ProcessTerminal, Spacer, Text, TUI, visibleWidth } from "@oh-my-pi/pi-tui";
17
+ import {
18
+ Container,
19
+ clearRenderCache,
20
+ Loader,
21
+ Markdown,
22
+ ProcessTerminal,
23
+ Spacer,
24
+ Text,
25
+ TUI,
26
+ visibleWidth,
27
+ } from "@oh-my-pi/pi-tui";
18
28
  import { APP_NAME, getProjectDir, hsvToRgb, isEnoent, logger, postmortem, prompt } from "@oh-my-pi/pi-utils";
19
29
  import chalk from "chalk";
20
30
  import { KeybindingsManager } from "../config/keybindings";
@@ -442,6 +452,7 @@ export class InteractiveMode implements InteractiveModeContext {
442
452
 
443
453
  // Set up theme file watcher
444
454
  onThemeChange(() => {
455
+ clearRenderCache();
445
456
  this.ui.invalidate();
446
457
  this.updateEditorBorderColor();
447
458
  this.ui.requestRender();
@@ -867,6 +878,19 @@ export class InteractiveMode implements InteractiveModeContext {
867
878
  } else {
868
879
  await this.session.setModelTemporary(prev.model, prev.thinkingLevel);
869
880
  }
881
+ // If #applyPlanModeModel queued a deferred switch to the plan-role model
882
+ // (because the session was streaming on entry), drop it now: we are
883
+ // leaving plan mode, so flushing it on the next agent_end would land the
884
+ // session on the plan-role model after the user has exited plan mode
885
+ // (issue #816). Only clear when the pending target matches the plan-role
886
+ // model — leave any unrelated user-queued switch intact.
887
+ const pending = this.#pendingModelSwitch;
888
+ if (pending) {
889
+ const planResolution = this.session.resolveRoleModelWithThinking("plan");
890
+ if (planResolution.model && modelsAreEqual(pending.model, planResolution.model)) {
891
+ this.#pendingModelSwitch = undefined;
892
+ }
893
+ }
870
894
  }
871
895
  this.session.setPlanModeState(undefined);
872
896
  this.planModeEnabled = false;
@@ -1334,8 +1358,8 @@ export class InteractiveMode implements InteractiveModeContext {
1334
1358
  this.#uiHelpers.renderSessionContext(sessionContext, options);
1335
1359
  }
1336
1360
 
1337
- renderInitialMessages(): void {
1338
- this.#uiHelpers.renderInitialMessages();
1361
+ renderInitialMessages(prebuiltContext?: SessionContext): void {
1362
+ this.#uiHelpers.renderInitialMessages(prebuiltContext);
1339
1363
  }
1340
1364
 
1341
1365
  getUserMessageText(message: Message): string {
@@ -2328,8 +2328,14 @@ export function getSymbolTheme(): SymbolTheme {
2328
2328
  };
2329
2329
  }
2330
2330
 
2331
+ let _markdownTheme: MarkdownTheme | undefined;
2332
+ let _markdownThemeRef: Theme | undefined;
2333
+
2331
2334
  export function getMarkdownTheme(): MarkdownTheme {
2332
- return {
2335
+ if (_markdownTheme !== undefined && _markdownThemeRef === theme) {
2336
+ return _markdownTheme;
2337
+ }
2338
+ const markdownTheme: MarkdownTheme = {
2333
2339
  heading: (text: string) => theme.fg("mdHeading", text),
2334
2340
  link: (text: string) => theme.fg("mdLink", text),
2335
2341
  linkUrl: (text: string) => theme.fg("mdLinkUrl", text),
@@ -2355,6 +2361,9 @@ export function getMarkdownTheme(): MarkdownTheme {
2355
2361
  }
2356
2362
  },
2357
2363
  };
2364
+ _markdownTheme = markdownTheme;
2365
+ _markdownThemeRef = theme;
2366
+ return markdownTheme;
2358
2367
  }
2359
2368
 
2360
2369
  export function getSelectListTheme(): SelectListTheme {
@@ -159,7 +159,7 @@ export interface InteractiveModeContext {
159
159
  sessionContext: SessionContext,
160
160
  options?: { updateFooter?: boolean; populateHistory?: boolean },
161
161
  ): void;
162
- renderInitialMessages(): void;
162
+ renderInitialMessages(prebuiltContext?: SessionContext): void;
163
163
  getUserMessageText(message: Message): string;
164
164
  findLastAssistantMessage(): AssistantMessage | undefined;
165
165
  extractAssistantText(message: AssistantMessage): string;
@@ -414,7 +414,7 @@ export class UiHelpers {
414
414
  this.ctx.ui.requestRender();
415
415
  }
416
416
 
417
- renderInitialMessages(): void {
417
+ renderInitialMessages(prebuiltContext?: SessionContext): void {
418
418
  // This path is used to rebuild the visible chat transcript (e.g. after custom/debug UI).
419
419
  // Clear existing rendered chat first to avoid duplicating the full session in the container.
420
420
  this.ctx.chatContainer.clear();
@@ -422,8 +422,8 @@ export class UiHelpers {
422
422
  this.ctx.pendingBashComponents = [];
423
423
  this.ctx.pendingPythonComponents = [];
424
424
 
425
- // Get aligned messages and entries from session context
426
- const context = this.ctx.sessionManager.buildSessionContext();
425
+ // Reuse a pre-built context when available (e.g. from navigateTree) to avoid a second O(N) walk.
426
+ const context = prebuiltContext ?? this.ctx.sessionManager.buildSessionContext();
427
427
  this.ctx.renderSessionContext(context, {
428
428
  updateFooter: true,
429
429
  populateHistory: true,
@@ -610,9 +610,22 @@ export class UiHelpers {
610
610
  await this.ctx.session.prompt(message.text);
611
611
  }
612
612
 
613
- const promptPromise = this.ctx.session.prompt(firstPrompt.text).catch((error: unknown) => {
614
- restoreQueue(error);
615
- });
613
+ // Pass streamingBehavior so that if the session is still streaming when
614
+ // compaction-end fires (race window between isStreaming flipping false and
615
+ // the event landing here), prompt() routes the message into the steer/
616
+ // follow-up queue instead of throwing AgentBusyError. When the session is
617
+ // genuinely idle, streamingBehavior is ignored and a fresh prompt runs as
618
+ // before. This keeps the steer preview honest: if delivery has to be
619
+ // deferred, the message lands in the same queue every other consumer
620
+ // (Alt+Up dequeue, post-stream drain) already drains, instead of being
621
+ // stranded in compactionQueuedMessages with no drainer.
622
+ const promptPromise = this.ctx.session
623
+ .prompt(firstPrompt.text, {
624
+ streamingBehavior: firstPrompt.mode === "followUp" ? "followUp" : "steer",
625
+ })
626
+ .catch((error: unknown) => {
627
+ restoreQueue(error);
628
+ });
616
629
 
617
630
  for (const message of rest) {
618
631
  if (this.ctx.isKnownSlashCommand(message.text)) {
@@ -0,0 +1 @@
1
+ Resume work on the user's most recent intent. Re-read the kept recent messages above the summary to confirm what the user asked for last; if their latest request supersedes earlier plans recorded in the summary, follow the latest request. If there is nothing left to do, say so briefly instead of inventing further work.
@@ -4,9 +4,9 @@ GitHub CLI tool with a single op-based dispatch. Wraps `gh` for repository, issu
4
4
  Pick the operation via `op`. Each op uses a subset of the parameters:
5
5
  - `repo_view` — Read repository metadata. Optional `repo` (owner/repo) and `branch`. Falls back to the current checkout or default `gh` repo.
6
6
  - `issue_view` — Read an issue. Required `issue` (number or URL). Optional `repo`. Set `comments: false` to skip discussion.
7
- - `pr_view` — Read a pull request, including reviews and inline review comments. Optional `pr` (number, URL, or branch); omitting it targets the current branch's PR. Optional `repo`. Set `comments: false` for a lighter summary.
8
- - `pr_diff` — Read a pull request diff. Optional `pr`, `repo`. Set `nameOnly: true` for changed file names. Use `exclude` to drop generated paths from the diff.
9
- - `pr_checkout` — Check a pull request out into a dedicated git worktree. Optional `pr`, `repo`, `branch` (local), `worktree` (path), `force` (reset existing local branch).
7
+ - `pr_view` — Read one or more pull requests, including reviews and inline review comments. Optional `pr` (number, URL, branch, or array of any of those — pass an array to fetch multiple PRs in one call); omitting it targets the current branch's PR. Optional `repo`. Set `comments: false` for a lighter summary.
8
+ - `pr_diff` — Read one or more pull request diffs. Optional `pr` (single identifier or array for batch). Optional `repo`. Set `nameOnly: true` for changed file names. Use `exclude` to drop generated paths from the diff.
9
+ - `pr_checkout` — Check one or more pull requests out into dedicated git worktrees. Optional `pr` (number, URL, branch, or array of any of those — pass an array to batch-check-out multiple PRs in one call), `repo`, `force` (reset existing local branch).
10
10
  - `pr_push` — Push a checked-out PR branch back to its source branch. Requires the branch to have been checked out via `op: pr_checkout` (carries push metadata). Optional `branch`; defaults to the current checked-out git branch. Optional `forceWithLease`.
11
11
  - `search_issues` — Search issues using normal GitHub issue search syntax. Required `query`. Optional `repo`, `limit`.
12
12
  - `search_prs` — Search pull requests using normal GitHub PR search syntax. Required `query`. Optional `repo`, `limit`.
package/src/sdk.ts CHANGED
@@ -6,7 +6,7 @@ import {
6
6
  INTENT_FIELD,
7
7
  type ThinkingLevel,
8
8
  } from "@oh-my-pi/pi-agent-core";
9
- import type { Message, Model } from "@oh-my-pi/pi-ai";
9
+ import type { Message, Model, SimpleStreamOptions } from "@oh-my-pi/pi-ai";
10
10
  import {
11
11
  getOpenAICodexTransportDetails,
12
12
  prewarmOpenAICodexResponses,
@@ -793,7 +793,11 @@ export async function createAgentSession(options: CreateAgentSessionOptions = {}
793
793
  thinkingLevel = defaultRoleSpec.thinkingLevel;
794
794
  }
795
795
 
796
- // Fall back to settings default
796
+ // Prefer the selected model's configured defaultLevel, otherwise fall back
797
+ // to the global settings default.
798
+ if (thinkingLevel === undefined && model?.thinking?.defaultLevel !== undefined) {
799
+ thinkingLevel = model.thinking.defaultLevel;
800
+ }
797
801
  if (thinkingLevel === undefined) {
798
802
  thinkingLevel = settings.get("defaultThinkingLevel");
799
803
  }
@@ -1498,6 +1502,11 @@ export async function createAgentSession(options: CreateAgentSessionOptions = {}
1498
1502
  return await extensionRunner.emitBeforeProviderRequest(payload);
1499
1503
  }
1500
1504
  : undefined;
1505
+ const onResponse: SimpleStreamOptions["onResponse"] | undefined = extensionRunner
1506
+ ? async (response, model) => {
1507
+ await extensionRunner.emitAfterProviderResponse(response, model);
1508
+ }
1509
+ : undefined;
1501
1510
 
1502
1511
  const setToolUIContext = (uiContext: ExtensionUIContext, hasUI: boolean) => {
1503
1512
  toolContextStore.setUIContext(uiContext, hasUI);
@@ -1527,6 +1536,7 @@ export async function createAgentSession(options: CreateAgentSessionOptions = {}
1527
1536
  },
1528
1537
  convertToLlm: convertToLlmFinal,
1529
1538
  onPayload,
1539
+ onResponse,
1530
1540
  sessionId: providerSessionId,
1531
1541
  transformContext,
1532
1542
  steeringMode: settings.get("steeringMode") ?? "one-at-a-time",
@@ -1599,6 +1609,7 @@ export async function createAgentSession(options: CreateAgentSessionOptions = {}
1599
1609
  toolRegistry,
1600
1610
  transformContext,
1601
1611
  onPayload,
1612
+ onResponse,
1602
1613
  convertToLlm: convertToLlmFinal,
1603
1614
  rebuildSystemPrompt,
1604
1615
  mcpDiscoveryEnabled,