@oh-my-pi/pi-coding-agent 13.9.2 → 13.9.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/CHANGELOG.md +53 -0
  2. package/examples/sdk/02-custom-model.ts +2 -1
  3. package/package.json +7 -7
  4. package/src/cli/args.ts +6 -5
  5. package/src/cli/list-models.ts +2 -2
  6. package/src/commands/launch.ts +3 -3
  7. package/src/config/model-registry.ts +85 -39
  8. package/src/config/model-resolver.ts +47 -21
  9. package/src/config/settings-schema.ts +56 -2
  10. package/src/discovery/helpers.ts +2 -2
  11. package/src/extensibility/custom-tools/types.ts +2 -0
  12. package/src/extensibility/extensions/loader.ts +3 -2
  13. package/src/extensibility/extensions/types.ts +10 -7
  14. package/src/extensibility/hooks/types.ts +2 -0
  15. package/src/main.ts +5 -22
  16. package/src/memories/index.ts +7 -3
  17. package/src/modes/components/footer.ts +10 -8
  18. package/src/modes/components/model-selector.ts +33 -38
  19. package/src/modes/components/settings-defs.ts +31 -2
  20. package/src/modes/components/settings-selector.ts +16 -5
  21. package/src/modes/components/status-line/context-thresholds.ts +68 -0
  22. package/src/modes/components/status-line/segments.ts +11 -12
  23. package/src/modes/components/thinking-selector.ts +7 -7
  24. package/src/modes/components/tree-selector.ts +3 -2
  25. package/src/modes/controllers/command-controller.ts +11 -26
  26. package/src/modes/controllers/event-controller.ts +16 -3
  27. package/src/modes/controllers/input-controller.ts +4 -2
  28. package/src/modes/controllers/selector-controller.ts +5 -4
  29. package/src/modes/interactive-mode.ts +2 -2
  30. package/src/modes/rpc/rpc-client.ts +5 -10
  31. package/src/modes/rpc/rpc-types.ts +5 -5
  32. package/src/modes/theme/theme.ts +8 -3
  33. package/src/priority.json +1 -0
  34. package/src/prompts/system/auto-handoff-threshold-focus.md +1 -0
  35. package/src/prompts/system/system-prompt.md +18 -2
  36. package/src/prompts/tools/hashline.md +139 -83
  37. package/src/sdk.ts +22 -14
  38. package/src/session/agent-session.ts +259 -117
  39. package/src/session/agent-storage.ts +14 -14
  40. package/src/session/compaction/compaction.ts +500 -13
  41. package/src/session/messages.ts +12 -1
  42. package/src/session/session-manager.ts +77 -19
  43. package/src/slash-commands/builtin-registry.ts +48 -0
  44. package/src/task/agents.ts +3 -2
  45. package/src/task/executor.ts +2 -2
  46. package/src/task/types.ts +2 -1
  47. package/src/thinking.ts +87 -0
  48. package/src/tools/browser.ts +15 -6
  49. package/src/tools/fetch.ts +118 -100
  50. package/src/web/search/providers/exa.ts +74 -3
@@ -1,4 +1,4 @@
1
- import { getAvailableThinkingLevels } from "@oh-my-pi/pi-ai";
1
+ import { THINKING_EFFORTS } from "@oh-my-pi/pi-ai";
2
2
 
3
3
  /** Unified settings schema - single source of truth for all settings.
4
4
  * Unified settings schema - single source of truth for all settings.
@@ -192,7 +192,7 @@ export const SETTINGS_SCHEMA = {
192
192
  },
193
193
  defaultThinkingLevel: {
194
194
  type: "enum",
195
- values: getAvailableThinkingLevels(),
195
+ values: THINKING_EFFORTS,
196
196
  default: "high",
197
197
  ui: {
198
198
  tab: "agent",
@@ -345,9 +345,48 @@ export const SETTINGS_SCHEMA = {
345
345
  description: "Automatically compact context when it gets too large",
346
346
  },
347
347
  },
348
+ "compaction.strategy": {
349
+ type: "enum",
350
+ values: ["context-full", "handoff", "off"] as const,
351
+ default: "context-full",
352
+ ui: {
353
+ tab: "agent",
354
+ label: "Context-full strategy",
355
+ description: "Choose in-place context-full maintenance, auto-handoff, or disable auto maintenance (off)",
356
+ submenu: true,
357
+ },
358
+ },
359
+ "compaction.thresholdPercent": {
360
+ type: "number",
361
+ default: -1,
362
+ ui: {
363
+ tab: "agent",
364
+ label: "Context threshold",
365
+ description: "Percent threshold for context maintenance; set to Default to use legacy reserve-based behavior",
366
+ submenu: true,
367
+ },
368
+ },
369
+ "compaction.handoffSaveToDisk": {
370
+ type: "boolean",
371
+ default: false,
372
+ ui: {
373
+ tab: "agent",
374
+ label: "Save auto-handoff docs",
375
+ description: "Save generated handoff documents to markdown files for the auto-handoff flow",
376
+ },
377
+ },
348
378
  "compaction.reserveTokens": { type: "number", default: 16384 },
349
379
  "compaction.keepRecentTokens": { type: "number", default: 20000 },
350
380
  "compaction.autoContinue": { type: "boolean", default: true },
381
+ "compaction.remoteEnabled": {
382
+ type: "boolean",
383
+ default: true,
384
+ ui: {
385
+ tab: "agent",
386
+ label: "Remote compaction",
387
+ description: "Use remote compaction endpoints when available instead of local summarization",
388
+ },
389
+ },
351
390
  "compaction.remoteEndpoint": { type: "string", default: undefined },
352
391
 
353
392
  // ─────────────────────────────────────────────────────────────────────────
@@ -1186,6 +1225,17 @@ export const SETTINGS_SCHEMA = {
1186
1225
  submenu: true,
1187
1226
  },
1188
1227
  },
1228
+ serviceTier: {
1229
+ type: "enum",
1230
+ values: ["none", "auto", "default", "flex", "scale", "priority"] as const,
1231
+ default: "none",
1232
+ ui: {
1233
+ tab: "agent",
1234
+ label: "Service tier",
1235
+ description: "OpenAI processing priority (none = omit parameter)",
1236
+ submenu: true,
1237
+ },
1238
+ },
1189
1239
  } as const;
1190
1240
 
1191
1241
  // ═══════════════════════════════════════════════════════════════════════════
@@ -1265,9 +1315,13 @@ export type StatusLineSeparatorStyle = SettingValue<"statusLine.separator">;
1265
1315
 
1266
1316
  export interface CompactionSettings {
1267
1317
  enabled: boolean;
1318
+ strategy: "context-full" | "handoff" | "off";
1319
+ thresholdPercent: number;
1268
1320
  reserveTokens: number;
1269
1321
  keepRecentTokens: number;
1322
+ handoffSaveToDisk: boolean;
1270
1323
  autoContinue: boolean;
1324
+ remoteEnabled: boolean;
1271
1325
  remoteEndpoint: string | undefined;
1272
1326
  }
1273
1327
 
@@ -1,13 +1,13 @@
1
1
  import * as fs from "node:fs";
2
2
  import * as path from "node:path";
3
- import type { ThinkingLevel } from "@oh-my-pi/pi-ai";
4
- import { parseThinkingLevel } from "@oh-my-pi/pi-ai";
3
+ import type { ThinkingLevel } from "@oh-my-pi/pi-agent-core";
5
4
  import { FileType, glob } from "@oh-my-pi/pi-natives";
6
5
  import { CONFIG_DIR_NAME, tryParseJson } from "@oh-my-pi/pi-utils";
7
6
  import { readFile } from "../capability/fs";
8
7
  import { parseRuleConditionAndScope, type Rule, type RuleFrontmatter } from "../capability/rule";
9
8
  import type { Skill, SkillFrontmatter } from "../capability/skill";
10
9
  import type { LoadContext, LoadResult, SourceMeta } from "../capability/types";
10
+ import { parseThinkingLevel } from "../thinking";
11
11
  import { parseFrontmatter } from "../utils/frontmatter";
12
12
 
13
13
  /**
@@ -90,9 +90,11 @@ export type CustomToolSessionEvent =
90
90
  | {
91
91
  reason: "auto_compaction_start";
92
92
  trigger: "threshold" | "overflow";
93
+ action: "context-full" | "handoff";
93
94
  }
94
95
  | {
95
96
  reason: "auto_compaction_end";
97
+ action: "context-full" | "handoff";
96
98
  result: CompactionResult | undefined;
97
99
  aborted: boolean;
98
100
  willRetry: boolean;
@@ -4,7 +4,8 @@
4
4
  import type * as fs1 from "node:fs";
5
5
  import * as fs from "node:fs/promises";
6
6
  import * as path from "node:path";
7
- import type { ImageContent, Model, TextContent, ThinkingLevel } from "@oh-my-pi/pi-ai";
7
+ import type { ThinkingLevel } from "@oh-my-pi/pi-agent-core";
8
+ import type { ImageContent, Model, TextContent } from "@oh-my-pi/pi-ai";
8
9
  import * as piCodingAgent from "@oh-my-pi/pi-coding-agent";
9
10
  import type { KeyId } from "@oh-my-pi/pi-tui";
10
11
  import { hasFsCode, isEacces, isEnoent, logger } from "@oh-my-pi/pi-utils";
@@ -214,7 +215,7 @@ class ConcreteExtensionAPI implements ExtensionAPI, IExtensionRuntime {
214
215
  return this.runtime.setModel(model);
215
216
  }
216
217
 
217
- getThinkingLevel(): ThinkingLevel {
218
+ getThinkingLevel(): ThinkingLevel | undefined {
218
219
  return this.runtime.getThinkingLevel();
219
220
  }
220
221
 
@@ -7,7 +7,7 @@
7
7
  * - Register commands, keyboard shortcuts, and CLI flags
8
8
  * - Interact with the user via UI primitives
9
9
  */
10
- import type { AgentMessage, AgentToolResult, AgentToolUpdateCallback } from "@oh-my-pi/pi-agent-core";
10
+ import type { AgentMessage, AgentToolResult, AgentToolUpdateCallback, ThinkingLevel } from "@oh-my-pi/pi-agent-core";
11
11
  import type {
12
12
  Api,
13
13
  AssistantMessageEvent,
@@ -19,7 +19,6 @@ import type {
19
19
  OAuthLoginCallbacks,
20
20
  SimpleStreamOptions,
21
21
  TextContent,
22
- ThinkingLevel,
23
22
  ToolResultMessage,
24
23
  } from "@oh-my-pi/pi-ai";
25
24
  import type * as piCodingAgent from "@oh-my-pi/pi-coding-agent";
@@ -538,11 +537,13 @@ export interface ToolExecutionEndEvent {
538
537
  export interface AutoCompactionStartEvent {
539
538
  type: "auto_compaction_start";
540
539
  reason: "threshold" | "overflow";
540
+ action: "context-full" | "handoff";
541
541
  }
542
542
 
543
543
  /** Fired when auto-compaction ends */
544
544
  export interface AutoCompactionEndEvent {
545
545
  type: "auto_compaction_end";
546
+ action: "context-full" | "handoff";
546
547
  result: CompactionResult | undefined;
547
548
  aborted: boolean;
548
549
  willRetry: boolean;
@@ -1056,9 +1057,9 @@ export interface ExtensionAPI {
1056
1057
  setModel(model: Model): Promise<boolean>;
1057
1058
 
1058
1059
  /** Get current thinking level. */
1059
- getThinkingLevel(): ThinkingLevel;
1060
+ getThinkingLevel(): ThinkingLevel | undefined;
1060
1061
 
1061
- /** Set thinking level (clamped to model capabilities). */
1062
+ /** Set thinking level for the current session. */
1062
1063
  setThinkingLevel(level: ThinkingLevel): void;
1063
1064
 
1064
1065
  // =========================================================================
@@ -1084,11 +1085,11 @@ export interface ExtensionAPI {
1084
1085
  * id: "claude-sonnet-4@20250514",
1085
1086
  * name: "Claude Sonnet 4 (Vertex)",
1086
1087
  * reasoning: true,
1088
+ * thinking: { mode: "anthropic-adaptive", minLevel: "minimal", maxLevel: "high" },
1087
1089
  * input: ["text", "image"],
1088
1090
  * cost: { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
1089
1091
  * contextWindow: 200000,
1090
1092
  * maxTokens: 64000,
1091
- * }
1092
1093
  * ]
1093
1094
  * });
1094
1095
  *
@@ -1147,8 +1148,10 @@ export interface ProviderModelConfig {
1147
1148
  name: string;
1148
1149
  /** API type override for this model. */
1149
1150
  api?: Api;
1150
- /** Whether the model supports extended thinking. */
1151
+ /** Whether the model supports extended thinking at all. */
1151
1152
  reasoning: boolean;
1153
+ /** Optional canonical thinking capability metadata for per-model effort support. */
1154
+ thinking?: Model["thinking"];
1152
1155
  /** Supported input types. */
1153
1156
  input: ("text" | "image")[];
1154
1157
  /** Cost per million tokens. */
@@ -1216,7 +1219,7 @@ export type SetActiveToolsHandler = (toolNames: string[]) => Promise<void>;
1216
1219
 
1217
1220
  export type SetModelHandler = (model: Model) => Promise<boolean>;
1218
1221
 
1219
- export type GetThinkingLevelHandler = () => ThinkingLevel;
1222
+ export type GetThinkingLevelHandler = () => ThinkingLevel | undefined;
1220
1223
 
1221
1224
  export type SetThinkingLevelHandler = (level: ThinkingLevel, persist?: boolean) => void;
1222
1225
 
@@ -395,11 +395,13 @@ export interface TurnEndEvent {
395
395
  export interface AutoCompactionStartEvent {
396
396
  type: "auto_compaction_start";
397
397
  reason: "threshold" | "overflow";
398
+ action: "context-full" | "handoff";
398
399
  }
399
400
 
400
401
  /** Event data for auto_compaction_end event. */
401
402
  export interface AutoCompactionEndEvent {
402
403
  type: "auto_compaction_end";
404
+ action: "context-full" | "handoff";
403
405
  result: CompactionResult | undefined;
404
406
  aborted: boolean;
405
407
  willRetry: boolean;
package/src/main.ts CHANGED
@@ -10,7 +10,7 @@ import * as fs from "node:fs/promises";
10
10
  import * as os from "node:os";
11
11
  import * as path from "node:path";
12
12
  import { createInterface } from "node:readline/promises";
13
- import { type ImageContent, supportsXhigh } from "@oh-my-pi/pi-ai";
13
+ import type { ImageContent } from "@oh-my-pi/pi-ai";
14
14
  import { $env, getProjectDir, logger, postmortem, setProjectDir, VERSION } from "@oh-my-pi/pi-utils";
15
15
  import chalk from "chalk";
16
16
  import type { Args } from "./cli/args";
@@ -334,11 +334,10 @@ async function buildSessionOptions(
334
334
  scopedModels: ScopedModel[],
335
335
  sessionManager: SessionManager | undefined,
336
336
  modelRegistry: ModelRegistry,
337
- ): Promise<{ options: CreateAgentSessionOptions; cliThinkingFromModel: boolean }> {
337
+ ): Promise<{ options: CreateAgentSessionOptions }> {
338
338
  const options: CreateAgentSessionOptions = {
339
339
  cwd: parsed.cwd ?? getProjectDir(),
340
340
  };
341
- let cliThinkingFromModel = false;
342
341
 
343
342
  // Auto-discover SYSTEM.md if no CLI system prompt provided
344
343
  const systemPromptSource = parsed.systemPrompt ?? discoverSystemPromptFile();
@@ -380,7 +379,6 @@ async function buildSessionOptions(
380
379
  settings.overrideModelRoles({ default: `${resolved.model.provider}/${resolved.model.id}` });
381
380
  if (!parsed.thinking && resolved.thinkingLevel) {
382
381
  options.thinkingLevel = resolved.thinkingLevel;
383
- cliThinkingFromModel = true;
384
382
  }
385
383
  }
386
384
  } else if (scopedModels.length > 0 && !parsed.continue && !parsed.resume) {
@@ -483,7 +481,7 @@ async function buildSessionOptions(
483
481
  options.additionalExtensionPaths = [];
484
482
  }
485
483
 
486
- return { options, cliThinkingFromModel };
484
+ return { options };
487
485
  }
488
486
 
489
487
  export async function runRootCommand(parsed: Args, rawArgs: string[]): Promise<void> {
@@ -618,7 +616,7 @@ export async function runRootCommand(parsed: Args, rawArgs: string[]): Promise<v
618
616
  sessionManager = await SessionManager.open(selectedPath);
619
617
  }
620
618
 
621
- const { options: sessionOptions, cliThinkingFromModel } = await logger.timeAsync("buildSessionOptions", () =>
619
+ const { options: sessionOptions } = await logger.timeAsync("buildSessionOptions", () =>
622
620
  buildSessionOptions(parsedArgs, scopedModels, sessionManager, modelRegistry),
623
621
  );
624
622
  sessionOptions.authStorage = authStorage;
@@ -692,21 +690,6 @@ export async function runRootCommand(parsed: Args, rawArgs: string[]): Promise<v
692
690
  process.exit(1);
693
691
  }
694
692
 
695
- // Clamp thinking level to model capabilities for CLI-provided thinking levels.
696
- // This covers both --thinking <level> and --model <pattern>:<thinking>.
697
- const cliThinkingOverride = parsedArgs.thinking !== undefined || cliThinkingFromModel;
698
- if (session.model && cliThinkingOverride) {
699
- let effectiveThinking = session.thinkingLevel;
700
- if (!session.model.reasoning) {
701
- effectiveThinking = "off";
702
- } else if (effectiveThinking === "xhigh" && !supportsXhigh(session.model)) {
703
- effectiveThinking = "high";
704
- }
705
- if (effectiveThinking !== session.thinkingLevel) {
706
- session.setThinkingLevel(effectiveThinking);
707
- }
708
- }
709
-
710
693
  if (mode === "rpc") {
711
694
  await runRpcMode(session);
712
695
  } else if (isInteractive) {
@@ -717,7 +700,7 @@ export async function runRootCommand(parsed: Args, rawArgs: string[]): Promise<v
717
700
  if (scopedModelsForDisplay.length > 0) {
718
701
  const modelList = scopedModelsForDisplay
719
702
  .map(scopedModel => {
720
- const thinkingStr = scopedModel.thinkingLevel !== "off" ? `:${scopedModel.thinkingLevel}` : "";
703
+ const thinkingStr = !scopedModel.thinkingLevel ? `:${scopedModel.thinkingLevel}` : "";
721
704
  return `${scopedModel.model.id}${thinkingStr}`;
722
705
  })
723
706
  .join(", ");
@@ -3,7 +3,7 @@ import type * as fsNode from "node:fs";
3
3
  import * as fs from "node:fs/promises";
4
4
  import * as path from "node:path";
5
5
  import type { AgentMessage } from "@oh-my-pi/pi-agent-core";
6
- import { completeSimple, type Model } from "@oh-my-pi/pi-ai";
6
+ import { completeSimple, Effort, type Model } from "@oh-my-pi/pi-ai";
7
7
  import { getAgentDbPath, logger, parseJsonlLenient } from "@oh-my-pi/pi-utils";
8
8
  import type { ModelRegistry } from "../config/model-registry";
9
9
  import { parseModelString } from "../config/model-resolver";
@@ -583,7 +583,11 @@ async function runStage1Job(options: {
583
583
  systemPrompt: stageOneSystemTemplate,
584
584
  messages: [{ role: "user", content: [{ type: "text", text: inputPrompt }], timestamp: Date.now() }],
585
585
  },
586
- { apiKey, maxTokens: Math.max(1024, Math.min(4096, Math.floor(modelMaxTokens * 0.2))), reasoning: "low" },
586
+ {
587
+ apiKey,
588
+ maxTokens: Math.max(1024, Math.min(4096, Math.floor(modelMaxTokens * 0.2))),
589
+ reasoning: Effort.Low,
590
+ },
587
591
  );
588
592
 
589
593
  if (response.stopReason === "error") {
@@ -709,7 +713,7 @@ async function runConsolidationModel(options: { memoryRoot: string; model: Model
709
713
  {
710
714
  messages: [{ role: "user", content: [{ type: "text", text: input }], timestamp: Date.now() }],
711
715
  },
712
- { apiKey, maxTokens: 8192, reasoning: "medium" },
716
+ { apiKey, maxTokens: 8192, reasoning: Effort.Medium },
713
717
  );
714
718
  if (response.stopReason === "error") {
715
719
  throw new Error(response.errorMessage || "phase2 model error");
@@ -1,10 +1,12 @@
1
1
  import * as fs from "node:fs";
2
+ import { ThinkingLevel } from "@oh-my-pi/pi-agent-core";
2
3
  import { type Component, padding, truncateToWidth, visibleWidth } from "@oh-my-pi/pi-tui";
3
4
  import { formatNumber, getProjectDir } from "@oh-my-pi/pi-utils";
4
5
  import { theme } from "../../modes/theme/theme";
5
6
  import type { AgentSession } from "../../session/agent-session";
6
7
  import { shortenPath } from "../../tools/render-utils";
7
8
  import { findGitHeadPathAsync, sanitizeStatusText } from "../shared";
9
+ import { getContextUsageLevel, getContextUsageThemeColor } from "./status-line/context-thresholds";
8
10
 
9
11
  /**
10
12
  * Footer component that shows pwd, token stats, and context usage
@@ -197,10 +199,10 @@ export class FooterComponent implements Component {
197
199
  contextPercent === "?"
198
200
  ? `?/${formatNumber(contextWindow)}${autoIndicator}`
199
201
  : `${contextPercent}%/${formatNumber(contextWindow)}${autoIndicator}`;
200
- if (contextPercentValue > 90) {
201
- contextPercentStr = theme.fg("error", contextPercentDisplay);
202
- } else if (contextPercentValue > 70) {
203
- contextPercentStr = theme.fg("warning", contextPercentDisplay);
202
+ if (contextUsage?.percent !== null && contextUsage?.percent !== undefined) {
203
+ const color = getContextUsageThemeColor(getContextUsageLevel(contextPercentValue, contextWindow));
204
+ contextPercentStr =
205
+ color === "statusLineContext" ? contextPercentDisplay : theme.fg(color, contextPercentDisplay);
204
206
  } else {
205
207
  contextPercentStr = contextPercentDisplay;
206
208
  }
@@ -211,11 +213,11 @@ export class FooterComponent implements Component {
211
213
  // Add model name on the right side, plus thinking level if model supports it
212
214
  const modelName = state.model?.id || "no-model";
213
215
 
214
- // Add thinking level hint if model supports reasoning and thinking is enabled
216
+ // Add thinking level hint when the current model advertises supported efforts
215
217
  let rightSide = modelName;
216
- if (state.model?.reasoning) {
217
- const thinkingLevel = state.thinkingLevel || "off";
218
- if (thinkingLevel !== "off") {
218
+ if (state.model?.thinking) {
219
+ const thinkingLevel = state.thinkingLevel ?? ThinkingLevel.Off;
220
+ if (thinkingLevel !== ThinkingLevel.Off) {
219
221
  rightSide = `${modelName} • ${thinkingLevel}`;
220
222
  }
221
223
  }
@@ -1,16 +1,11 @@
1
- import {
2
- getAvailableThinkingLevels,
3
- getThinkingMetadata,
4
- type Model,
5
- modelsAreEqual,
6
- supportsXhigh,
7
- type ThinkingMode,
8
- } from "@oh-my-pi/pi-ai";
1
+ import { ThinkingLevel } from "@oh-my-pi/pi-agent-core";
2
+ import { getSupportedEfforts, type Model, modelsAreEqual } from "@oh-my-pi/pi-ai";
9
3
  import { Container, Input, matchesKey, Spacer, type Tab, TabBar, Text, type TUI, visibleWidth } from "@oh-my-pi/pi-tui";
10
4
  import { MODEL_ROLE_IDS, MODEL_ROLES, type ModelRegistry, type ModelRole } from "../../config/model-registry";
11
5
  import { resolveModelRoleValue } from "../../config/model-resolver";
12
6
  import type { Settings } from "../../config/settings";
13
7
  import { type ThemeColor, theme } from "../../modes/theme/theme";
8
+ import { getThinkingLevelMetadata } from "../../thinking";
14
9
  import { fuzzyFilter } from "../../utils/fuzzy";
15
10
  import { getTabBarTheme } from "../shared";
16
11
  import { DynamicBorder } from "./dynamic-border";
@@ -29,15 +24,15 @@ interface ModelItem {
29
24
 
30
25
  interface ScopedModelItem {
31
26
  model: Model;
32
- thinkingLevel: string;
27
+ thinkingLevel?: string;
33
28
  }
34
29
 
35
30
  interface RoleAssignment {
36
31
  model: Model;
37
- thinkingMode: ThinkingMode;
32
+ thinkingLevel: ThinkingLevel;
38
33
  }
39
34
 
40
- type RoleSelectCallback = (model: Model, role: ModelRole | null, thinkingMode?: ThinkingMode) => void;
35
+ type RoleSelectCallback = (model: Model, role: ModelRole | null, thinkingLevel?: ThinkingLevel) => void;
41
36
  type CancelCallback = () => void;
42
37
  interface MenuRoleAction {
43
38
  label: string;
@@ -97,7 +92,7 @@ export class ModelSelectorComponent extends Container {
97
92
  settings: Settings,
98
93
  modelRegistry: ModelRegistry,
99
94
  scopedModels: ReadonlyArray<ScopedModelItem>,
100
- onSelect: (model: Model, role: ModelRole | null, thinkingMode?: ThinkingMode) => void,
95
+ onSelect: (model: Model, role: ModelRole | null, thinkingLevel?: ThinkingLevel) => void,
101
96
  onCancel: () => void,
102
97
  options?: { temporaryOnly?: boolean; initialSearchInput?: string },
103
98
  ) {
@@ -192,7 +187,8 @@ export class ModelSelectorComponent extends Container {
192
187
  if (model) {
193
188
  this.#roles[role] = {
194
189
  model,
195
- thinkingMode: explicitThinkingLevel && thinkingLevel !== undefined ? thinkingLevel : "inherit",
190
+ thinkingLevel:
191
+ explicitThinkingLevel && thinkingLevel !== undefined ? thinkingLevel : ThinkingLevel.Inherit,
196
192
  };
197
193
  }
198
194
  }
@@ -409,7 +405,7 @@ export class ModelSelectorComponent extends Container {
409
405
  if (!tag || !assigned || !modelsAreEqual(assigned.model, item.model)) continue;
410
406
 
411
407
  const badge = makeInvertedBadge(tag, color ?? "success");
412
- const thinkingLabel = getThinkingMetadata(assigned.thinkingMode).label;
408
+ const thinkingLabel = getThinkingLevelMetadata(assigned.thinkingLevel).label;
413
409
  roleBadgeTokens.push(`${badge} ${theme.fg("dim", `(${thinkingLabel})`)}`);
414
410
  }
415
411
  const badgeText = roleBadgeTokens.length > 0 ? ` ${roleBadgeTokens.join(" ")}` : "";
@@ -456,19 +452,18 @@ export class ModelSelectorComponent extends Container {
456
452
  this.#listContainer.addChild(new Text(theme.fg("muted", ` Model Name: ${selected.model.name}`), 0, 0));
457
453
  }
458
454
  }
459
- #getThinkingModesForModel(model: Model): ReadonlyArray<ThinkingMode> {
460
- return ["inherit", ...getAvailableThinkingLevels(supportsXhigh(model))];
455
+ #getThinkingLevelsForModel(model: Model): ReadonlyArray<ThinkingLevel> {
456
+ return [ThinkingLevel.Inherit, ThinkingLevel.Off, ...getSupportedEfforts(model)];
461
457
  }
462
458
 
463
- #getCurrentRoleThinkingMode(role: ModelRole): ThinkingMode {
464
- return this.#roles[role]?.thinkingMode ?? "inherit";
459
+ #getCurrentRoleThinkingLevel(role: ModelRole): ThinkingLevel {
460
+ return this.#roles[role]?.thinkingLevel ?? ThinkingLevel.Inherit;
465
461
  }
466
462
 
467
463
  #getThinkingPreselectIndex(role: ModelRole, model: Model): number {
468
- const options = this.#getThinkingModesForModel(model);
469
- const currentMode = this.#getCurrentRoleThinkingMode(role);
470
- const preferredMode = currentMode === "xhigh" && !options.includes("xhigh") ? "high" : currentMode;
471
- const foundIndex = options.indexOf(preferredMode);
464
+ const options = this.#getThinkingLevelsForModel(model);
465
+ const currentLevel = this.#getCurrentRoleThinkingLevel(role);
466
+ const foundIndex = options.indexOf(currentLevel);
472
467
  return foundIndex >= 0 ? foundIndex : 0;
473
468
  }
474
469
 
@@ -496,11 +491,11 @@ export class ModelSelectorComponent extends Container {
496
491
  if (!selectedModel) return;
497
492
 
498
493
  const showingThinking = this.#menuStep === "thinking" && this.#menuSelectedRole !== null;
499
- const thinkingOptions = showingThinking ? this.#getThinkingModesForModel(selectedModel.model) : [];
494
+ const thinkingOptions = showingThinking ? this.#getThinkingLevelsForModel(selectedModel.model) : [];
500
495
  const optionLines = showingThinking
501
- ? thinkingOptions.map((thinkingMode, index) => {
496
+ ? thinkingOptions.map((thinkingLevel, index) => {
502
497
  const prefix = index === this.#menuSelectedIndex ? ` ${theme.nav.cursor} ` : " ";
503
- const label = getThinkingMetadata(thinkingMode).label;
498
+ const label = getThinkingLevelMetadata(thinkingLevel).label;
504
499
  return `${prefix}${label}`;
505
500
  })
506
501
  : MENU_ROLE_ACTIONS.map((action, index) => {
@@ -607,7 +602,7 @@ export class ModelSelectorComponent extends Container {
607
602
 
608
603
  const optionCount =
609
604
  this.#menuStep === "thinking" && this.#menuSelectedRole !== null
610
- ? this.#getThinkingModesForModel(selectedModel.model).length
605
+ ? this.#getThinkingLevelsForModel(selectedModel.model).length
611
606
  : MENU_ROLE_ACTIONS.length;
612
607
  if (optionCount === 0) return;
613
608
 
@@ -635,10 +630,10 @@ export class ModelSelectorComponent extends Container {
635
630
  }
636
631
 
637
632
  if (!this.#menuSelectedRole) return;
638
- const thinkingOptions = this.#getThinkingModesForModel(selectedModel.model);
639
- const thinkingMode = thinkingOptions[this.#menuSelectedIndex];
640
- if (!thinkingMode) return;
641
- this.#handleSelect(selectedModel.model, this.#menuSelectedRole, thinkingMode);
633
+ const thinkingOptions = this.#getThinkingLevelsForModel(selectedModel.model);
634
+ const thinkingLevel = thinkingOptions[this.#menuSelectedIndex];
635
+ if (!thinkingLevel) return;
636
+ this.#handleSelect(selectedModel.model, this.#menuSelectedRole, thinkingLevel);
642
637
  this.#closeMenu();
643
638
  return;
644
639
  }
@@ -657,28 +652,28 @@ export class ModelSelectorComponent extends Container {
657
652
  }
658
653
  }
659
654
 
660
- #formatRoleModelValue(model: Model, thinkingMode: ThinkingMode): string {
655
+ #formatRoleModelValue(model: Model, thinkingLevel: ThinkingLevel): string {
661
656
  const modelKey = `${model.provider}/${model.id}`;
662
- if (thinkingMode === "inherit") return modelKey;
663
- return `${modelKey}:${thinkingMode}`;
657
+ if (thinkingLevel === ThinkingLevel.Inherit) return modelKey;
658
+ return `${modelKey}:${thinkingLevel}`;
664
659
  }
665
- #handleSelect(model: Model, role: ModelRole | null, thinkingMode?: ThinkingMode): void {
660
+ #handleSelect(model: Model, role: ModelRole | null, thinkingLevel?: ThinkingLevel): void {
666
661
  // For temporary role, don't save to settings - just notify caller
667
662
  if (role === null) {
668
663
  this.#onSelectCallback(model, null);
669
664
  return;
670
665
  }
671
666
 
672
- const selectedThinkingMode = thinkingMode ?? this.#getCurrentRoleThinkingMode(role);
667
+ const selectedThinkingLevel = thinkingLevel ?? this.#getCurrentRoleThinkingLevel(role);
673
668
 
674
669
  // Save to settings
675
- this.#settings.setModelRole(role, this.#formatRoleModelValue(model, selectedThinkingMode));
670
+ this.#settings.setModelRole(role, this.#formatRoleModelValue(model, selectedThinkingLevel));
676
671
 
677
672
  // Update local state for UI
678
- this.#roles[role] = { model, thinkingMode: selectedThinkingMode };
673
+ this.#roles[role] = { model, thinkingLevel: selectedThinkingLevel };
679
674
 
680
675
  // Notify caller (for updating agent state if needed)
681
- this.#onSelectCallback(model, role, selectedThinkingMode);
676
+ this.#onSelectCallback(model, role, selectedThinkingLevel);
682
677
 
683
678
  // Update list to show new badges
684
679
  this.#updateList();
@@ -7,7 +7,7 @@
7
7
  * 2. That's it - it appears in the UI automatically
8
8
  */
9
9
 
10
- import { getAvailableThinkingLevels, getThinkingMetadata } from "@oh-my-pi/pi-ai";
10
+ import { THINKING_EFFORTS } from "@oh-my-pi/pi-ai";
11
11
  import { TERMINAL } from "@oh-my-pi/pi-tui";
12
12
  import {
13
13
  getDefault,
@@ -19,6 +19,7 @@ import {
19
19
  type SettingPath,
20
20
  type SettingTab,
21
21
  } from "../../config/settings-schema";
22
+ import { getThinkingLevelMetadata } from "../../thinking";
22
23
 
23
24
  // ═══════════════════════════════════════════════════════════════════════════
24
25
  // UI Definition Types
@@ -68,6 +69,26 @@ type OptionList = ReadonlyArray<{ value: string; label: string; description?: st
68
69
  type OptionProvider = (() => OptionList) | OptionList;
69
70
 
70
71
  const OPTION_PROVIDERS: Partial<Record<SettingPath, OptionProvider>> = {
72
+ // Context maintenance strategy
73
+ "compaction.strategy": [
74
+ { value: "context-full", label: "Context-full", description: "Summarize in-place and keep the current session" },
75
+ { value: "handoff", label: "Handoff", description: "Generate handoff and continue in a new session" },
76
+ {
77
+ value: "off",
78
+ label: "Off",
79
+ description: "Disable automatic context maintenance (same behavior as Auto-compact off)",
80
+ },
81
+ ],
82
+ // Context maintenance threshold
83
+ "compaction.thresholdPercent": [
84
+ { value: "default", label: "Default", description: "Legacy reserve-based threshold" },
85
+ { value: "70", label: "70%", description: "Very early maintenance" },
86
+ { value: "75", label: "75%", description: "Early maintenance" },
87
+ { value: "80", label: "80%", description: "Balanced" },
88
+ { value: "85", label: "85%", description: "Typical threshold" },
89
+ { value: "90", label: "90%", description: "Aggressive context usage" },
90
+ { value: "95", label: "95%", description: "Near context limit" },
91
+ ],
71
92
  // Retry max retries
72
93
  "retry.maxRetries": [
73
94
  { value: "1", label: "1 retry" },
@@ -231,7 +252,7 @@ const OPTION_PROVIDERS: Partial<Record<SettingPath, OptionProvider>> = {
231
252
  { value: "on", label: "On", description: "Force websockets for OpenAI Codex models" },
232
253
  ],
233
254
  // Default thinking level
234
- defaultThinkingLevel: [...getAvailableThinkingLevels().map(getThinkingMetadata)],
255
+ defaultThinkingLevel: [...THINKING_EFFORTS.map(getThinkingLevelMetadata)],
235
256
  // Temperature
236
257
  temperature: [
237
258
  { value: "-1", label: "Default", description: "Use provider default" },
@@ -277,6 +298,14 @@ const OPTION_PROVIDERS: Partial<Record<SettingPath, OptionProvider>> = {
277
298
  { value: "1.2", label: "1.2", description: "Balanced" },
278
299
  { value: "1.5", label: "1.5", description: "Strong penalty" },
279
300
  ],
301
+ serviceTier: [
302
+ { value: "none", label: "None", description: "Omit service_tier parameter" },
303
+ { value: "auto", label: "Auto", description: "Use provider default tier selection" },
304
+ { value: "default", label: "Default", description: "Standard priority processing" },
305
+ { value: "flex", label: "Flex", description: "Use flexible capacity tier when available" },
306
+ { value: "scale", label: "Scale", description: "Use Scale Tier credits when available" },
307
+ { value: "priority", label: "Priority", description: "Use Priority processing" },
308
+ ],
280
309
  // Symbol preset
281
310
  symbolPreset: [
282
311
  { value: "unicode", label: "Unicode", description: "Standard symbols (default)" },