@in-the-loop-labs/pair-review 3.3.4 → 3.3.6

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@in-the-loop-labs/pair-review",
-  "version": "3.3.4",
+  "version": "3.3.6",
   "description": "Your AI-powered code review partner - Close the feedback loop with AI coding agents",
   "main": "src/server.js",
   "bin": {
@@ -1,6 +1,6 @@
 {
   "name": "pair-review",
-  "version": "3.3.4",
+  "version": "3.3.6",
   "description": "pair-review app integration — Open PRs and local changes in the pair-review web UI, run server-side AI analysis, and address review feedback. Requires the pair-review MCP server.",
   "author": {
     "name": "in-the-loop-labs",
@@ -1,6 +1,6 @@
 {
   "name": "code-critic",
-  "version": "3.3.4",
+  "version": "3.3.6",
   "description": "AI-powered code review analysis — Run three-level AI analysis and implement-review-fix loops directly in your coding agent. Works standalone, no server required.",
   "author": {
     "name": "in-the-loop-labs",
@@ -157,7 +157,9 @@ class RepoSettingsPage {
     const newProvider = this.providers[newProviderId];
 
     if (oldProvider && newProvider) {
-      const currentModel = oldProvider.models.find(m => m.id === this.currentSettings.default_model);
+      // Match by id or alias so legacy model IDs still resolve their tier
+      // when the user switches providers.
+      const currentModel = this.findModelWithAliases(oldProvider, this.currentSettings.default_model);
       if (currentModel) {
         const matchingModel = newProvider.models.find(m => m.tier === currentModel.tier);
         const defaultModel = newProvider.models.find(m => m.default);
@@ -619,6 +621,22 @@ class RepoSettingsPage {
     this.selectedProvider = providerId;
   }
 
+  /**
+   * Look up a model by ID within a provider, matching both canonical `id` and
+   * `aliases`. Historical repo settings may still reference legacy model IDs
+   * (e.g. `gpt-5.4` before reasoning-effort variants were introduced); those
+   * must still resolve to the canonical model so the UI shows the correct
+   * selection instead of silently falling back to the provider default.
+   *
+   * @param {Object} provider - Provider object with a `models` array
+   * @param {string} modelId - Model ID to look up (may be an alias)
+   * @returns {Object|undefined} Matching model definition, or undefined if not found
+   */
+  findModelWithAliases(provider, modelId) {
+    if (!provider || !provider.models || !modelId) return undefined;
+    return provider.models.find(m => m.id === modelId || m.aliases?.includes(modelId));
+  }
+
   /**
    * Render model select dropdown for the currently selected provider
    */
@@ -654,9 +672,10 @@ class RepoSettingsPage {
       return;
     }
 
-    // Find the selected model, fall back to default or first
+    // Find the selected model, fall back to default or first. Match aliases
+    // so legacy model IDs still render the canonical card.
     const modelId = this.currentSettings.default_model;
-    const model = provider.models.find(m => m.id === modelId)
+    const model = this.findModelWithAliases(provider, modelId)
       || provider.models.find(m => m.default)
       || provider.models[0];
 
@@ -691,7 +710,9 @@ class RepoSettingsPage {
     if (!provider) {
       return { providerName: providerId || 'Unknown', modelName: modelId || 'Unknown' };
     }
-    const model = provider.models?.find(m => m.id === modelId);
+    // Match aliases so historical council/voice configs that stored a legacy
+    // model ID still show the canonical model's display name.
+    const model = this.findModelWithAliases(provider, modelId);
     return {
       providerName: provider.name,
       modelName: model ? model.name : (modelId || 'Unknown')
@@ -1166,11 +1187,20 @@ class RepoSettingsPage {
     this.selectProvider(providerId);
     this.renderProviderSelect();
 
-    // Validate saved model exists in current provider
+    // Validate saved model exists in current provider. Match aliases so legacy
+    // model IDs (e.g. `gpt-5.4` recorded before reasoning-effort variants) keep
+    // resolving to the canonical model; if matched via alias, canonicalize the
+    // stored ID so the dropdown selects the right option and the next save
+    // writes the canonical ID back.
     const provider = this.providers[this.selectedProvider];
     if (provider) {
-      const modelExists = provider.models.some(m => m.id === this.currentSettings.default_model);
-      if (!modelExists) {
+      const matchedModel = this.findModelWithAliases(provider, this.currentSettings.default_model);
+      if (matchedModel) {
+        if (matchedModel.id !== this.currentSettings.default_model) {
+          this.currentSettings.default_model = matchedModel.id;
+          this.originalSettings.default_model = matchedModel.id;
+        }
+      } else {
         const fallbackModel = provider.models.find(m => m.default) || provider.models[0];
         if (fallbackModel) {
           this.currentSettings.default_model = fallbackModel.id;
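For reviewers unfamiliar with the alias mechanic, here is a small behavior sketch of `findModelWithAliases`. The `page` instance and the provider literal are illustrative, not code from the package; the alias value mirrors the Codex model table later in this diff:

```js
// Illustrative only - `page` stands in for a RepoSettingsPage instance.
const provider = {
  models: [
    { id: 'gpt-5.4-high', aliases: ['gpt-5.4'], tier: 'thorough', name: 'GPT-5.4 High' },
    { id: 'gpt-5.4-mini', tier: 'balanced', name: 'GPT-5.4 Mini' }
  ]
};

page.findModelWithAliases(provider, 'gpt-5.4-high'); // canonical ID -> GPT-5.4 High
page.findModelWithAliases(provider, 'gpt-5.4');      // legacy alias -> same model object
page.findModelWithAliases(provider, 'gpt-4');        // unknown -> undefined
```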
@@ -24,17 +24,28 @@ const CLAUDE_MODELS = [
     id: 'opus-4.7-xhigh',
     cli_model: 'claude-opus-4-7',
     env: { CLAUDE_CODE_EFFORT_LEVEL: 'xhigh' },
-    name: 'Opus 4.7 xhigh',
+    name: 'Opus 4.7 XHigh',
     tier: 'thorough',
     tagline: 'Latest Gen',
     description: 'Opus 4.7 (latest) with extra-high effort',
     badge: 'Latest',
     badgeClass: 'badge-power'
   },
+  {
+    id: 'opus-4.7-high',
+    cli_model: 'claude-opus-4-7',
+    env: { CLAUDE_CODE_EFFORT_LEVEL: 'high' },
+    name: 'Opus 4.7 High',
+    tier: 'thorough',
+    tagline: 'Latest Gen',
+    description: 'Opus 4.7 (latest) with high effort',
+    badge: 'Latest',
+    badgeClass: 'badge-power'
+  },
   {
     id: 'opus',
     aliases: ['opus-4.6-high'],
-    cli_model: 'opus-4-6',
+    cli_model: 'claude-opus-4-6',
     env: { CLAUDE_CODE_EFFORT_LEVEL: 'high' },
     name: 'Opus 4.6 High',
     tier: 'thorough',
@@ -65,7 +76,7 @@ const CLAUDE_MODELS = [
   },
   {
     id: 'opus-4.6-low',
-    cli_model: 'opus-4-6',
+    cli_model: 'claude-opus-4-6',
     env: { CLAUDE_CODE_EFFORT_LEVEL: 'low' },
     name: 'Opus 4.6 Low',
     tier: 'balanced',
@@ -76,7 +87,7 @@ const CLAUDE_MODELS = [
   },
   {
     id: 'opus-4.6-medium',
-    cli_model: 'opus-4-6',
+    cli_model: 'claude-opus-4-6',
     env: { CLAUDE_CODE_EFFORT_LEVEL: 'medium' },
     name: 'Opus 4.6 Medium',
     tier: 'balanced',
@@ -87,7 +98,7 @@ const CLAUDE_MODELS = [
   },
   {
     id: 'opus-4.6-1m',
-    cli_model: 'opus-4-6[1m]',
+    cli_model: 'claude-opus-4-6[1m]',
     name: 'Opus 4.6 1M',
     tier: 'balanced',
     tagline: 'Extended Context',
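The Claude variants above share a single `cli_model` and differ only in their `env` map. A minimal sketch of how such an entry could drive a spawn call, assuming the provider overlays the variant's `env` on the inherited environment; the `spawn` invocation and flag here are illustrative, not the package's actual spawn code:

```js
const { spawn } = require('child_process');

// Table entry shape taken from the diff; the command name and '--model' flag
// are assumptions for illustration only.
const variant = {
  id: 'opus-4.7-high',
  cli_model: 'claude-opus-4-7',
  env: { CLAUDE_CODE_EFFORT_LEVEL: 'high' }
};

const child = spawn('claude', ['--model', variant.cli_model], {
  // Same underlying CLI model, different effort level via the env overlay.
  env: { ...process.env, ...variant.env }
});
```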
@@ -23,31 +23,65 @@ const BIN_DIR = path.join(__dirname, '..', '..', 'bin');
  * Based on OpenAI Codex Models guide (developers.openai.com/codex/models)
  * - gpt-5.4-nano: Cheapest model ($0.20/$1.25 per MTok), good for surface scans
  * - gpt-5.4-mini: Fast with 400k context ($0.75/$4.50 per MTok)
- * - gpt-5.4: Flagship model combining coding, reasoning, and agentic workflows
  * - gpt-5.3-codex: Industry-leading coding model for complex engineering tasks
+ * - gpt-5.4 / gpt-5.5: Exposed only via -high / -xhigh reasoning variants so the
+ *   selected effort level is always explicit.
+ *
+ * Reasoning-effort variants (-high / -xhigh) use `cli_model` to pass the base
+ * model ID to `codex exec -m` and add `-c model_reasoning_effort="..."` via
+ * extra_args so Codex picks up the effort level through its config override.
  *
  * Deprecated (April 2026): gpt-5.1-codex-mini, gpt-5.1-codex-max, gpt-5.1-codex
  */
 const CODEX_MODELS = [
   {
-    id: 'gpt-5.4-nano',
-    name: 'GPT-5.4 Nano',
-    tier: 'fast',
-    tagline: 'Cheapest',
-    description: 'Ultra-low-cost surface scans for style issues, obvious bugs, and lint-level feedback.',
-    badge: 'Cheapest',
-    badgeClass: 'badge-speed'
-  },
-  {
-    id: 'gpt-5.4-mini',
-    name: 'GPT-5.4 Mini',
-    tier: 'balanced',
-    tagline: 'Best Balance',
-    description: 'Fast reviews with 400k context—good balance of speed and capability for everyday PR review.',
+    id: 'gpt-5.4-high',
+    // Alias keeps results/councils saved under the previous bare `gpt-5.4`
+    // model ID resolving to the now-explicit high-effort variant.
+    aliases: ['gpt-5.4'],
+    cli_model: 'gpt-5.4',
+    extra_args: ['-c', 'model_reasoning_effort="high"'],
+    name: 'GPT-5.4 High',
+    tier: 'thorough',
+    tagline: 'Deep Review',
+    description: 'GPT-5.4 with high reasoning effort for complex multi-file reviews, architectural consistency, and subtle behavioral regressions.',
     badge: 'Recommended',
     badgeClass: 'badge-recommended',
     default: true
   },
+  {
+    id: 'gpt-5.4-xhigh',
+    cli_model: 'gpt-5.4',
+    extra_args: ['-c', 'model_reasoning_effort="xhigh"'],
+    name: 'GPT-5.4 XHigh',
+    tier: 'thorough',
+    tagline: 'Max Depth',
+    description: 'GPT-5.4 with extra-high reasoning effort for difficult reviews that need broad context, careful tradeoff analysis, and deeper issue validation.',
+    badge: 'Extra High',
+    badgeClass: 'badge-power'
+  },
+  {
+    id: 'gpt-5.5-high',
+    cli_model: 'gpt-5.5',
+    extra_args: ['-c', 'model_reasoning_effort="high"'],
+    name: 'GPT-5.5 High',
+    tier: 'thorough',
+    tagline: 'Latest Deep',
+    description: 'Latest-generation GPT model with high reasoning effort for demanding PR reviews, strong code understanding, and careful cross-file analysis.',
+    badge: 'High Effort',
+    badgeClass: 'badge-power'
+  },
+  {
+    id: 'gpt-5.5-xhigh',
+    cli_model: 'gpt-5.5',
+    extra_args: ['-c', 'model_reasoning_effort="xhigh"'],
+    name: 'GPT-5.5 XHigh',
+    tier: 'thorough',
+    tagline: 'Frontier Depth',
+    description: 'GPT-5.5 with extra-high reasoning effort for the hardest reviews: architecture, concurrency, security-sensitive changes, and large codebase context.',
+    badge: 'Max Reasoning',
+    badgeClass: 'badge-power'
+  },
   {
     id: 'gpt-5.3-codex',
     name: 'GPT-5.3 Codex',
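To make the `cli_model` + `extra_args` mechanics concrete, here is a sketch of the argv that the constructor (shown later in this diff) assembles for `gpt-5.4-high`; the sandbox and shell-policy flags are elided for brevity:

```js
// Pieces as they appear in this diff; sandbox/config args abbreviated.
const cliModel = 'gpt-5.4';                                 // from cli_model
const extraArgs = ['-c', 'model_reasoning_effort="high"'];  // from extra_args
const baseArgs = ['exec', '-m', cliModel, '--json' /* , ...sandbox/config args */];

// The stdin marker '-' stays last so the reasoning flag is parsed as a flag,
// not ignored after the positional input marker.
const args = [...baseArgs, ...extraArgs, '-'];
// => codex exec -m gpt-5.4 --json -c model_reasoning_effort="high" -
```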
@@ -58,13 +92,22 @@ const CODEX_MODELS = [
     badgeClass: 'badge-power'
   },
   {
-    id: 'gpt-5.4',
-    name: 'GPT-5.4',
-    tier: 'thorough',
-    tagline: 'Latest Gen',
-    description: 'Flagship model combining coding, reasoning, and agentic workflows for complex architectural reviews.',
-    badge: 'Most Thorough',
-    badgeClass: 'badge-power'
+    id: 'gpt-5.4-mini',
+    name: 'GPT-5.4 Mini',
+    tier: 'balanced',
+    tagline: 'Best Balance',
+    description: 'Fast reviews with 400k context—good balance of speed and capability for everyday PR review.',
+    badge: 'Fast',
+    badgeClass: 'badge-speed'
+  },
+  {
+    id: 'gpt-5.4-nano',
+    name: 'GPT-5.4 Nano',
+    tier: 'fast',
+    tagline: 'Cheapest',
+    description: 'Ultra-low-cost surface scans for style issues, obvious bugs, and lint-level feedback.',
+    badge: 'Cheapest',
+    badgeClass: 'badge-speed'
   }
 ];
 
@@ -78,7 +121,7 @@ class CodexProvider extends AIProvider {
    * @param {Object} configOverrides.env - Additional environment variables
    * @param {Object[]} configOverrides.models - Custom model definitions
    */
-  constructor(model = 'gpt-5.4-mini', configOverrides = {}) {
+  constructor(model = 'gpt-5.4-high', configOverrides = {}) {
     super(model);
 
     // Command precedence: ENV > config > default
@@ -127,27 +170,70 @@ class CodexProvider extends AIProvider {
     // same two-tier pattern as chat-providers.js: args replaces, extra_args appends.
     const defaultShellEnvArgs = ['-c', 'allow_login_shell=false', '-c', 'shell_environment_policy.include_only=["PATH","HOME","USER","GH_TOKEN","GITHUB_TOKEN"]'];
     const configArgs = configOverrides.args || defaultShellEnvArgs;
-    const baseArgs = ['exec', '-m', model, '--json', ...sandboxArgs, ...configArgs, '-'];
-    const providerArgs = configOverrides.extra_args || [];
-    const modelConfig = configOverrides.models?.find(m => m.id === model);
-    const modelArgs = modelConfig?.extra_args || [];
 
-    // Merge env: provider env + model env
-    this.extraEnv = {
-      ...(configOverrides.env || {}),
-      ...(modelConfig?.env || {})
-    };
+    // Resolve cli_model + extra_args + env from built-in model, provider config,
+    // and per-model config. This is what lets reasoning variants like
+    // gpt-5.4-high pass `-m gpt-5.4` plus `-c model_reasoning_effort="high"`.
+    const { cliModel, extraArgs, env } = this._resolveModelConfig(model);
+
+    // IMPORTANT: `-` (stdin marker) must come LAST, after any extra_args.
+    // Reasoning variants contribute `-c model_reasoning_effort="..."` via
+    // extraArgs; if '-' were placed inside baseArgs those flags would land
+    // after the positional stdin marker and be ignored. `buildArgsForModel`
+    // enforces the same invariant for the extraction path.
+    const baseArgs = ['exec', '-m', cliModel, '--json', ...sandboxArgs, ...configArgs];
+
+    this.extraEnv = env;
 
     if (this.useShell) {
       // In shell mode, build full command string with args
-      this.command = `${codexCmd} ${quoteShellArgs([...baseArgs, ...providerArgs, ...modelArgs]).join(' ')}`;
+      this.command = `${codexCmd} ${quoteShellArgs([...baseArgs, ...extraArgs, '-']).join(' ')}`;
       this.args = [];
     } else {
       this.command = codexCmd;
-      this.args = [...baseArgs, ...providerArgs, ...modelArgs];
+      this.args = [...baseArgs, ...extraArgs, '-'];
     }
   }
 
+  /**
+   * Resolve model configuration by looking up built-in and config override definitions.
+   * Produces the CLI model ID (for `-m`), merged extra_args, and merged env.
+   *
+   * Precedence for cli_model: config model > built-in model > modelId.
+   * `cli_model` lets reasoning-effort variants (e.g. `gpt-5.4-high`) pass the
+   * base model (`gpt-5.4`) to `codex exec -m` while adding reasoning overrides
+   * via extra_args.
+   *
+   * @param {string} modelId
+   * @returns {{ builtIn: Object|undefined, configModel: Object|undefined, cliModel: string, extraArgs: string[], env: Object }}
+   * @private
+   */
+  _resolveModelConfig(modelId) {
+    const configOverrides = this.configOverrides || {};
+
+    const builtIn = CODEX_MODELS.find(m => m.id === modelId || (m.aliases && m.aliases.includes(modelId)));
+    const configModel = configOverrides.models?.find(m => m.id === modelId);
+
+    const cliModel = configModel?.cli_model !== undefined
+      ? configModel.cli_model
+      : (builtIn?.cli_model !== undefined ? builtIn.cli_model : modelId);
+
+    // Three-way merge for extra_args: built-in model → provider config → per-model config
+    const builtInArgs = builtIn?.extra_args || [];
+    const providerArgs = configOverrides.extra_args || [];
+    const configModelArgs = configModel?.extra_args || [];
+    const extraArgs = [...builtInArgs, ...providerArgs, ...configModelArgs];
+
+    // Three-way merge for env: built-in model → provider config → per-model config
+    const env = {
+      ...(builtIn?.env || {}),
+      ...(configOverrides.env || {}),
+      ...(configModel?.env || {})
+    };
+
+    return { builtIn, configModel, cliModel, extraArgs, env };
+  }
+
   /**
    * Execute Codex CLI with a prompt
    * @param {string} prompt - The prompt to send to Codex
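A worked example of `_resolveModelConfig`'s precedence rules. The override values below are made up for illustration; only the merge order comes from the method above, and the sketch is not meant to be run verbatim:

```js
// Hypothetical overrides; only the precedence behavior is real.
const provider = new CodexProvider('gpt-5.4-high', {
  extra_args: ['-c', 'some_provider_override=true'],        // provider-level (assumed value)
  models: [{ id: 'gpt-5.4-high', env: { MY_FLAG: '1' } }]   // per-model override (assumed)
});

// cli_model: per-model config would win, then the built-in ('gpt-5.4'),
//            then the raw model ID as a last resort.
// extra_args: built-in effort flag, then provider-level, then per-model:
//   ['-c', 'model_reasoning_effort="high"', '-c', 'some_provider_override=true']
// env: built-in < provider < per-model, so MY_FLAG survives any collision.
```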
@@ -572,17 +658,16 @@ class CodexProvider extends AIProvider {
    * @returns {string[]} Complete args array for the CLI
    */
   buildArgsForModel(model) {
+    // Resolve cli_model + merged extra_args so reasoning-effort variants behave
+    // the same for extraction as they do for the main analysis call.
+    const { cliModel, extraArgs } = this._resolveModelConfig(model);
+
     // Base args for extraction (read-only sandbox, no shell access needed)
     // Note: '-' (stdin marker) must come LAST, after any extra_args
-    const baseArgs = ['exec', '-m', model, '--json', '--sandbox', 'read-only', '--full-auto'];
-    // Provider-level extra_args (from configOverrides)
-    const providerArgs = this.configOverrides?.extra_args || [];
-    // Model-specific extra_args (from the model config for the given model)
-    const modelConfig = this.configOverrides?.models?.find(m => m.id === model);
-    const modelArgs = modelConfig?.extra_args || [];
+    const baseArgs = ['exec', '-m', cliModel, '--json', '--sandbox', 'read-only', '--full-auto'];
 
     // Append stdin marker '-' at the end after all other args
-    return [...baseArgs, ...providerArgs, ...modelArgs, '-'];
+    return [...baseArgs, ...extraArgs, '-'];
   }
 
   /**
@@ -598,20 +683,25 @@ class CodexProvider extends AIProvider {
 
     // Build args consistently using the shared method, applying provider and model extra_args
     const args = this.buildArgsForModel(model);
+    // Surface merged env (built-in + provider + per-model) so the extraction
+    // spawn matches the contract used by other providers.
+    const { env } = this._resolveModelConfig(model);
 
     if (useShell) {
       return {
         command: `${codexCmd} ${quoteShellArgs(args).join(' ')}`,
         args: [],
         useShell: true,
-        promptViaStdin: true
+        promptViaStdin: true,
+        env
       };
     }
     return {
       command: codexCmd,
       args,
       useShell: false,
-      promptViaStdin: true
+      promptViaStdin: true,
+      env
     };
   }
 
@@ -700,7 +790,7 @@ class CodexProvider extends AIProvider {
   }
 
   static getDefaultModel() {
-    return 'gpt-5.4-mini';
+    return 'gpt-5.4-high';
   }
 
   static getInstallInstructions() {
package/src/ai/index.js CHANGED
@@ -21,7 +21,8 @@ const {
   inferModelDefaults,
   resolveDefaultModel,
   prettifyModelId,
-  createAliasedProviderClass
+  createAliasedProviderClass,
+  getTierForModel
 } = require('./provider');
 
 // Load the availability checking module
@@ -73,6 +74,7 @@ module.exports = {
   inferModelDefaults,
   resolveDefaultModel,
   prettifyModelId,
+  getTierForModel,
 
   // Provider factories
   createExecutableProviderClass,
@@ -25,7 +25,7 @@ const { AIProvider, registerProvider, quoteShellArgs } = require('./provider');
 const logger = require('../utils/logger');
 const { extractJSON } = require('../utils/json-extractor');
 const { CancellationError, isAnalysisCancelled } = require('../routes/shared');
-const { StreamParser, parsePiLine, createPiLineParser } = require('./stream-parser');
+const { createPiLineParser } = require('./stream-parser');
 
 // Directory containing bin scripts (git-diff-lines, etc.)
 const BIN_DIR = path.join(__dirname, '..', '..', 'bin');
@@ -42,6 +42,16 @@ const REVIEW_SKILL_PATH = path.join(__dirname, '..', '..', '.pi', 'skills', 'rev
 // in parallel for diverse multi-perspective code review
 const ROULETTE_SKILL_PATH = path.join(__dirname, '..', '..', '.pi', 'skills', 'review-roulette', 'SKILL.md');
 
+// Keep raw stream capture bounded so large JSONL sessions cannot exhaust V8's
+// maximum string size. Assistant text is still extracted incrementally from all
+// complete JSONL lines and used as the primary parse/fallback input.
+const MAX_PI_CAPTURED_STDOUT_CHARS = 5 * 1024 * 1024;
+const MAX_PI_CAPTURED_STDERR_CHARS = 1 * 1024 * 1024;
+const MAX_PI_LINE_CHARS = 2 * 1024 * 1024;
+const PI_STDERR_HEAD_CHARS = 128 * 1024;
+const PI_STDERR_TAIL_CHARS = MAX_PI_CAPTURED_STDERR_CHARS - PI_STDERR_HEAD_CHARS;
+const PI_TRUNCATED_LINE_MARKER = '...[line truncated]...';
+
 /**
  * Pi model definitions
  *
@@ -122,6 +132,324 @@ function extractAssistantText(content, seenTexts) {
   return text;
 }
 
+/**
+ * Determine whether a parsed JSON object looks like a Pi JSONL event envelope
+ * rather than a final review result payload.
+ *
+ * @param {Object} value - Parsed JSON object
+ * @returns {boolean} True when the object appears to be a Pi event
+ */
+function isPiEventEnvelope(value) {
+  if (!value || typeof value !== 'object' || Array.isArray(value)) {
+    return false;
+  }
+
+  if (typeof value.type !== 'string') {
+    return false;
+  }
+
+  if (
+    value.message ||
+    Array.isArray(value.messages) ||
+    value.assistantMessageEvent ||
+    value.toolName ||
+    value.toolCallId ||
+    Object.hasOwn(value, 'partialResult') ||
+    Object.hasOwn(value, 'result') ||
+    Object.hasOwn(value, 'version')
+  ) {
+    return true;
+  }
+
+  return /_(start|update|end)$/.test(value.type) || value.type === 'session';
+}
+
+/**
+ * Append a chunk to a captured stream buffer without exceeding the configured
+ * maximum. Returns the updated buffer and whether truncation occurred.
+ *
+ * @param {string} existing - Existing captured output
+ * @param {string} chunk - New output chunk
+ * @param {number} maxChars - Maximum number of chars to retain
+ * @returns {{value: string, truncated: boolean}}
+ */
+function appendWithLimit(existing, chunk, maxChars) {
+  if (!chunk || maxChars <= 0) {
+    return { value: existing, truncated: false };
+  }
+
+  const remaining = maxChars - existing.length;
+  if (remaining <= 0) {
+    return { value: existing, truncated: true };
+  }
+
+  if (chunk.length <= remaining) {
+    return { value: existing + chunk, truncated: false };
+  }
+
+  return {
+    value: existing + chunk.slice(0, remaining),
+    truncated: true
+  };
+}
+
+/**
+ * Append a chunk to a bounded head+tail buffer so error logs preserve both the
+ * start and end of noisy stderr output.
+ *
+ * @param {{head: string, tail: string, headFull: boolean, omittedChars: number}} buffer - Buffer state
+ * @param {string} chunk - New stderr chunk
+ * @param {number} maxHeadChars - Max chars to retain from the start
+ * @param {number} maxTailChars - Max chars to retain from the end
+ */
+function appendHeadTailBuffer(buffer, chunk, maxHeadChars, maxTailChars) {
+  if (!chunk) return;
+
+  if (!buffer.headFull) {
+    const remainingHead = maxHeadChars - buffer.head.length;
+    if (chunk.length <= remainingHead) {
+      buffer.head += chunk;
+      if (buffer.head.length >= maxHeadChars) {
+        buffer.headFull = true;
+      }
+      return;
+    }
+
+    const safeHead = Math.max(remainingHead, 0);
+    buffer.head += chunk.slice(0, safeHead);
+    buffer.headFull = true;
+
+    const overflow = chunk.slice(safeHead);
+    if (overflow.length > maxTailChars) {
+      buffer.omittedChars += overflow.length - maxTailChars;
+      buffer.tail = overflow.slice(-maxTailChars);
+    } else {
+      buffer.tail = overflow;
+    }
+    return;
+  }
+
+  const combinedTail = buffer.tail + chunk;
+  if (combinedTail.length > maxTailChars) {
+    buffer.omittedChars += combinedTail.length - maxTailChars;
+    buffer.tail = combinedTail.slice(-maxTailChars);
+  } else {
+    buffer.tail = combinedTail;
+  }
+}
+
+/**
+ * Render a bounded head+tail buffer as a string for logs and error messages.
+ *
+ * @param {{head: string, tail: string, headFull: boolean, omittedChars: number}} buffer - Buffer state
+ * @returns {string} Formatted stderr capture
+ */
+function formatHeadTailBuffer(buffer) {
+  if (buffer.omittedChars === 0) {
+    return `${buffer.head}${buffer.tail}`;
+  }
+
+  return `${buffer.head}\n...[${buffer.omittedChars} chars omitted]...\n${buffer.tail}`;
+}
+
+/**
+ * Extract final assistant text from a Pi JSONL event.
+ *
+ * @param {Object} event - Parsed Pi event object
+ * @param {Set<string>} seenTexts - Set tracking already-seen text blocks
+ * @returns {string} Extracted text from the event
+ */
+function extractPiEventText(event, seenTexts) {
+  let text = '';
+
+  if (event.type === 'message_end' && event.message?.role === 'assistant') {
+    text += extractAssistantText(event.message.content, seenTexts);
+  }
+
+  if (event.type === 'turn_end' && event.message?.role === 'assistant') {
+    text += extractAssistantText(event.message.content, seenTexts);
+  }
+
+  if (event.type === 'agent_end' && Array.isArray(event.messages)) {
+    for (const msg of event.messages) {
+      if (msg.role === 'assistant') {
+        text += extractAssistantText(msg.content, seenTexts);
+      }
+    }
+  }
+
+  return text;
+}
+
+/**
+ * Accumulate only raw lines that could plausibly help the direct JSON fallback.
+ * Pi JSONL event envelopes are intentionally excluded because they are noisy
+ * transport records, not the final review result.
+ *
+ * @param {string} line - One stdout line
+ * @param {{rawOutput: string, rawOutputTruncated: boolean}} state - Parse state
+ * @param {string} levelPrefix - Prefix used in logs
+ * @param {{status: 'parsed', value: Object} | {status: 'failed'}} [parseResult] - Optional parse result reuse
+ */
+function accumulatePiRawFallbackLine(line, state, levelPrefix, parseResult) {
+  if (!line?.trim()) return;
+
+  let parsed;
+  let parseFailed = false;
+
+  if (parseResult?.status === 'parsed') {
+    parsed = parseResult.value;
+  } else if (parseResult?.status === 'failed') {
+    parseFailed = true;
+  } else {
+    try {
+      parsed = JSON.parse(line);
+    } catch {
+      parseFailed = true;
+    }
+  }
+
+  if (!parseFailed && isPiEventEnvelope(parsed)) {
+    return;
+  }
+
+  const capture = appendWithLimit(state.rawOutput, `${line}\n`, MAX_PI_CAPTURED_STDOUT_CHARS);
+  state.rawOutput = capture.value;
+
+  if (capture.truncated && !state.rawOutputTruncated) {
+    state.rawOutputTruncated = true;
+    logger.warn(
+      `${levelPrefix} Pi CLI raw-output fallback exceeded ${MAX_PI_CAPTURED_STDOUT_CHARS} chars; retaining only the first ${MAX_PI_CAPTURED_STDOUT_CHARS} chars`
+    );
+  }
+}
+
+/**
+ * Parse a single Pi JSONL line into accumulated assistant text.
+ *
+ * @param {string} line - One JSONL line
+ * @param {{textContent: string, seenTexts: Set<string>, rawOutput: string, rawOutputTruncated: boolean}} state - Parse state
+ * @param {string} levelPrefix - Prefix used in logs
+ */
+function accumulatePiResponseLine(line, state, levelPrefix) {
+  if (!line?.trim()) return;
+
+  let parseResult;
+  try {
+    const event = JSON.parse(line);
+    state.textContent += extractPiEventText(event, state.seenTexts);
+    parseResult = { status: 'parsed', value: event };
+  } catch {
+    logger.debug(`${levelPrefix} Skipping malformed JSONL line: ${line.substring(0, 100)}`);
+    parseResult = { status: 'failed' };
+  }
+
+  accumulatePiRawFallbackLine(line, state, levelPrefix, parseResult);
+}
+
+/**
+ * Append stdout data to the pending JSONL line buffer while capping any single
+ * unterminated line to avoid retaining multi-megabyte tool payloads in memory.
+ *
+ * @param {{buffer: string, lineTruncated: boolean, warningLogged: boolean}} state - Pending line state
+ * @param {string} chunk - New stdout chunk
+ * @param {string} levelPrefix - Prefix used in logs
+ * @returns {string[]} Complete lines extracted from the chunk
+ */
+function appendPiChunkToLineBuffer(state, chunk, levelPrefix) {
+  if (!chunk) return [];
+
+  const lines = [];
+  let cursor = 0;
+
+  while (cursor < chunk.length) {
+    if (state.lineTruncated) {
+      const nextNewline = chunk.indexOf('\n', cursor);
+      if (nextNewline === -1) {
+        return lines;
+      }
+
+      lines.push(state.buffer);
+      state.buffer = '';
+      state.lineTruncated = false;
+      cursor = nextNewline + 1;
+      continue;
+    }
+
+    const nextNewline = chunk.indexOf('\n', cursor);
+    const segmentEnd = nextNewline === -1 ? chunk.length : nextNewline;
+    const segment = chunk.slice(cursor, segmentEnd);
+    const remainingCapacity = MAX_PI_LINE_CHARS - state.buffer.length;
+
+    if (segment.length <= remainingCapacity) {
+      state.buffer += segment;
+      if (nextNewline !== -1) {
+        lines.push(state.buffer);
+        state.buffer = '';
+      }
+      cursor = segmentEnd + (nextNewline === -1 ? 0 : 1);
+      continue;
+    }
+
+    const safeCapacity = Math.max(remainingCapacity, 0);
+    state.buffer += segment.slice(0, safeCapacity) + PI_TRUNCATED_LINE_MARKER;
+    state.lineTruncated = true;
+
+    if (!state.warningLogged) {
+      state.warningLogged = true;
+      logger.warn(
+        `${levelPrefix} Pi CLI emitted a JSONL event longer than ${MAX_PI_LINE_CHARS} chars; truncating the pending line buffer until the next newline`
+      );
+    }
+
+    if (nextNewline !== -1) {
+      lines.push(state.buffer);
+      state.buffer = '';
+      state.lineTruncated = false;
+      cursor = nextNewline + 1;
+      continue;
+    }
+
+    return lines;
+  }
+
+  return lines;
+}
+
+/**
+ * Finalize Pi response parsing from incrementally extracted assistant text and
+ * a bounded raw-output fallback buffer.
+ *
+ * @param {Object} input - Parse inputs
+ * @param {string} input.textContent - Assistant text extracted from JSONL events
+ * @param {string} input.rawOutput - Bounded raw stdout capture
+ * @param {boolean} [input.rawOutputTruncated=false] - Whether raw stdout was truncated
+ * @param {string|number} level - Analysis level for logging
+ * @param {string} levelPrefix - Prefix used in logs
+ * @returns {{success: boolean, data?: Object, error?: string, textContent?: string}}
+ */
+function finalizePiResponseParsing({ textContent, rawOutput, rawOutputTruncated = false }, level, levelPrefix) {
+  if (textContent) {
+    const extracted = extractJSON(textContent, level);
+    if (extracted.success) {
+      return extracted;
+    }
+
+    logger.warn(`${levelPrefix} Text content is not JSON, treating as raw text`);
+    return { success: false, error: 'Text content is not valid JSON', textContent };
+  }
+
+  if (rawOutputTruncated) {
+    logger.warn(`${levelPrefix} Pi CLI raw-output fallback was truncated before assistant text could be recovered`);
+    return {
+      success: false,
+      error: 'Pi CLI raw-output fallback was truncated before assistant text could be recovered'
+    };
+  }
+
+  return extractJSON(rawOutput, level);
+}
+
 class PiProvider extends AIProvider {
   /**
    * @param {string|null} [model='default'] - Model identifier or null/undefined for default mode
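The bounded-capture helpers are exported for tests at the bottom of this file, so their behavior can be sketched directly. The require path below is a placeholder, since the Pi provider's module path is not shown in this diff:

```js
// Placeholder path - the real module path is not shown in this diff.
const PiProvider = require('./pi-provider');
const { _appendHeadTailBuffer, _formatHeadTailBuffer } = PiProvider;

const buf = { head: '', tail: '', headFull: false, omittedChars: 0 };

// With a 5-char head and 5-char tail, a 16-char stream keeps both ends and
// counts the middle as omitted:
_appendHeadTailBuffer(buf, 'AAAAABBBBBBCCCCC', 5, 5);
console.log(_formatHeadTailBuffer(buf));
// -> AAAAA
//    ...[6 chars omitted]...
//    CCCCC
```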
@@ -298,12 +626,27 @@ class PiProvider extends AIProvider {
       logger.info(`${levelPrefix} Registered process ${pid} for analysis ${analysisId}`);
     }
 
-    let stdout = '';
-    let stderr = '';
+    const stderrCapture = {
+      head: '',
+      tail: '',
+      headFull: false,
+      omittedChars: 0
+    };
+    let stderrTruncated = false;
     let timeoutId = null;
     let settled = false; // Guard against multiple resolve/reject calls
-    let lineBuffer = ''; // Buffer for incomplete JSONL lines
     let lineCount = 0; // Count of JSONL lines received
+    const lineBufferState = {
+      buffer: '',
+      lineTruncated: false,
+      warningLogged: false
+    };
+    const responseState = {
+      textContent: '',
+      seenTexts: new Set(),
+      rawOutput: '',
+      rawOutputTruncated: false
+    };
 
     const settle = (fn, value) => {
       if (settled) return;
@@ -312,12 +655,21 @@ class PiProvider extends AIProvider {
       fn(value);
     };
 
-    // Set up side-channel stream parser for live progress events.
     // Use the buffered Pi line parser to accumulate text_delta fragments
     // before emitting, preventing the UI from being flooded with tiny updates.
-    const streamParser = onStreamEvent
-      ? new StreamParser(createPiLineParser(), onStreamEvent, { cwd })
-      : null;
+    const streamLineParser = onStreamEvent ? createPiLineParser() : null;
+    const emitStreamLine = (line) => {
+      if (!streamLineParser || !line?.trim()) return;
+
+      const event = streamLineParser(line, { cwd });
+      if (!event) return;
+
+      try {
+        onStreamEvent(event);
+      } catch (error) {
+        logger.warn(`${levelPrefix} Pi stream event callback error: ${error.message}`);
+      }
+    };
 
     // Set timeout
     if (timeout) {
@@ -331,30 +683,27 @@ class PiProvider extends AIProvider {
     // Stream and log JSONL lines as they arrive for debugging visibility
     pi.stdout.on('data', (data) => {
       const chunk = data.toString();
-      stdout += chunk;
-
-      // Feed side-channel stream parser for live progress events
-      if (streamParser) {
-        streamParser.feed(chunk);
-      }
-
-      lineBuffer += chunk;
-
-      // Process complete lines (JSONL - each line is a complete JSON object)
-      const lines = lineBuffer.split('\n');
-      // Keep the last incomplete line in the buffer
-      lineBuffer = lines.pop() || '';
+      const lines = appendPiChunkToLineBuffer(lineBufferState, chunk, levelPrefix);
 
       for (const line of lines) {
        if (!line.trim()) continue;
        lineCount++;
+        emitStreamLine(line);
        this.logStreamLine(line, lineCount, levelPrefix);
+        accumulatePiResponseLine(line, responseState, levelPrefix);
      }
     });
 
     // Collect stderr
     pi.stderr.on('data', (data) => {
-      stderr += data.toString();
+      const chunk = data.toString();
+      appendHeadTailBuffer(stderrCapture, chunk, PI_STDERR_HEAD_CHARS, PI_STDERR_TAIL_CHARS);
+      if (stderrCapture.omittedChars > 0 && !stderrTruncated) {
+        stderrTruncated = true;
+        logger.warn(
+          `${levelPrefix} Pi CLI stderr exceeded ${MAX_PI_CAPTURED_STDERR_CHARS} chars; retaining a head+tail excerpt (${stderrCapture.omittedChars} chars omitted so far)`
+        );
+      }
     });
 
     // Handle completion
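The stdout handler above now leans entirely on `appendPiChunkToLineBuffer`. Its contract across partial chunks, restated as a sketch (the helper is module-internal and not exported, so this is illustrative rather than runnable against the package):

```js
const state = { buffer: '', lineTruncated: false, warningLogged: false };

// A chunk ending mid-line returns only the complete line and holds the rest:
appendPiChunkToLineBuffer(state, '{"type":"session"}\n{"type":"mess', '[L1]');
// -> ['{"type":"session"}'];  state.buffer === '{"type":"mess'

// The next chunk completes the pending line:
appendPiChunkToLineBuffer(state, 'age_end"}\n', '[L1]');
// -> ['{"type":"message_end"}']

// A single line beyond MAX_PI_LINE_CHARS is capped in memory, suffixed with
// PI_TRUNCATED_LINE_MARKER, and the remainder is dropped until the newline.
```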
@@ -362,11 +711,6 @@ class PiProvider extends AIProvider {
       cleanupTmpFile();
       if (settled) return; // Already settled by timeout or error
 
-      // Flush any remaining stream parser buffer
-      if (streamParser) {
-        streamParser.flush();
-      }
-
       // Check for cancellation signals (SIGTERM=143, SIGKILL=137)
       const isCancellationCode = code === 143 || code === 137;
       if (isCancellationCode && analysisId && isAnalysisCancelled(analysisId)) {
@@ -383,6 +727,8 @@ class PiProvider extends AIProvider {
         return;
       }
 
+      const stderr = formatHeadTailBuffer(stderrCapture);
+
       // Always log stderr if present
       if (stderr.trim()) {
         if (code !== 0) {
@@ -399,15 +745,21 @@ class PiProvider extends AIProvider {
       }
 
       // Process any remaining buffered line
-      if (lineBuffer.trim()) {
+      if (lineBufferState.buffer.trim()) {
         lineCount++;
-        this.logStreamLine(lineBuffer, lineCount, levelPrefix);
+        emitStreamLine(lineBufferState.buffer);
+        this.logStreamLine(lineBufferState.buffer, lineCount, levelPrefix);
+        accumulatePiResponseLine(lineBufferState.buffer, responseState, levelPrefix);
       }
 
       logger.info(`${levelPrefix} Pi CLI completed - received ${lineCount} JSONL events`);
 
       // Parse the Pi JSONL response
-      const parsed = this.parsePiResponse(stdout, level, levelPrefix);
+      const parsed = finalizePiResponseParsing({
+        textContent: responseState.textContent,
+        rawOutput: responseState.rawOutput,
+        rawOutputTruncated: responseState.rawOutputTruncated
+      }, level, levelPrefix);
       if (parsed.success) {
         logger.success(`${levelPrefix} Successfully parsed JSON response`);
 
@@ -427,8 +779,8 @@ class PiProvider extends AIProvider {
         // Pass extracted text content to LLM fallback (not raw JSONL stdout).
         // The text content is the actual LLM response text extracted from JSONL
         // events and is much smaller and more relevant than the full JSONL stream.
-        const llmFallbackInput = parsed.textContent || stdout;
-        logger.info(`${levelPrefix} LLM fallback input length: ${llmFallbackInput.length} characters (${parsed.textContent ? 'text content' : 'raw stdout'})`);
+        const llmFallbackInput = parsed.textContent || responseState.rawOutput;
+        logger.info(`${levelPrefix} LLM fallback input length: ${llmFallbackInput.length} characters (${parsed.textContent ? 'text content' : 'raw fallback output'})`);
         logger.info(`${levelPrefix} Attempting LLM-based JSON extraction fallback...`);
 
         // Use async IIFE to handle the async LLM extraction
@@ -623,55 +975,22 @@ class PiProvider extends AIProvider {
     try {
       // Split by newlines and parse each JSON line
       const lines = stdout.trim().split('\n').filter(line => line.trim());
-      let textContent = '';
-      const seenTexts = new Set();
+      const responseState = {
+        textContent: '',
+        seenTexts: new Set(),
+        rawOutput: '',
+        rawOutputTruncated: false
+      };
 
       for (const line of lines) {
-        try {
-          const event = JSON.parse(line);
-
-          // Extract text from message_end events (complete assistant messages)
-          // These contain the full message with content blocks
-          if (event.type === 'message_end' && event.message?.role === 'assistant') {
-            textContent += extractAssistantText(event.message.content, seenTexts);
-          }
-
-          // Also collect text from turn_end events which include the message
-          // (dedup handled by the shared seenTexts Set)
-          if (event.type === 'turn_end' && event.message?.role === 'assistant') {
-            textContent += extractAssistantText(event.message.content, seenTexts);
-          }
-
-          // Fallback: agent_end events contain the full messages array
-          if (event.type === 'agent_end' && Array.isArray(event.messages)) {
-            for (const msg of event.messages) {
-              if (msg.role === 'assistant') {
-                textContent += extractAssistantText(msg.content, seenTexts);
-              }
-            }
-          }
-        } catch (lineError) {
-          // Skip malformed lines
-          logger.debug(`${levelPrefix} Skipping malformed JSONL line: ${line.substring(0, 100)}`);
-        }
+        accumulatePiResponseLine(line, responseState, levelPrefix);
       }
 
-      if (textContent) {
-        // Try to extract JSON from the accumulated text content
-        const extracted = extractJSON(textContent, level);
-        if (extracted.success) {
-          return extracted;
-        }
-
-        // If no JSON found, return with textContent so the caller can
-        // pass it (not raw JSONL stdout) to the LLM extraction fallback
-        logger.warn(`${levelPrefix} Text content is not JSON, treating as raw text`);
-        return { success: false, error: 'Text content is not valid JSON', textContent };
-      }
-
-      // No text content found, try extracting JSON directly from stdout
-      const extracted = extractJSON(stdout, level);
-      return extracted;
+      return finalizePiResponseParsing({
+        textContent: responseState.textContent,
+        rawOutput: responseState.rawOutput,
+        rawOutputTruncated: responseState.rawOutputTruncated
+      }, level, levelPrefix);
 
     } catch (parseError) {
       // stdout might not be valid JSONL at all, try extracting JSON from it
862
1181
  registerProvider('pi', PiProvider);
863
1182
 
864
1183
  module.exports = PiProvider;
1184
+ // Test-only exports. Underscore prefix signals internal helpers that should
1185
+ // not be consumed from production code paths.
865
1186
  module.exports._extractAssistantText = extractAssistantText;
1187
+ module.exports._isPiEventEnvelope = isPiEventEnvelope;
1188
+ module.exports._appendWithLimit = appendWithLimit;
1189
+ module.exports._appendHeadTailBuffer = appendHeadTailBuffer;
1190
+ module.exports._formatHeadTailBuffer = formatHeadTailBuffer;
1191
+ module.exports._finalizePiResponseParsing = finalizePiResponseParsing;
@@ -738,8 +738,11 @@ async function testProviderAvailability(providerId, timeout = 10000) {
 }
 
 /**
- * Get tier for a specific model from a provider
- * Queries the provider's model definitions (or config overrides) to find the tier
+ * Get tier for a specific model from a provider.
+ * Queries the provider's model definitions (or config overrides) to find the tier.
+ * Matches against both the canonical model `id` and any `aliases` so legacy
+ * model IDs (e.g. `gpt-5.4` before reasoning-effort variants were introduced)
+ * still resolve their tier for historical analysis runs.
  * @param {string} providerId - Provider ID (e.g., 'claude', 'gemini')
  * @param {string} modelId - Model ID (e.g., 'sonnet', 'gemini-2.5-pro')
  * @returns {string|null} Tier name or null if provider or model not found
@@ -754,7 +757,7 @@ function getTierForModel(providerId, modelId) {
   const overrides = providerConfigOverrides.get(providerId);
   const models = mergeModels(ProviderClass.getModels(), overrides?.models);
 
-  const model = models.find(m => m.id === modelId);
+  const model = models.find(m => m.id === modelId || m.aliases?.includes(modelId));
   return model?.tier || null;
 }
 
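Illustrative lookups for the alias-aware tier resolution (the Codex alias is the one introduced in this diff; return values assume the built-in tables above):

```js
getTierForModel('codex', 'gpt-5.4-high'); // 'thorough'  (canonical id)
getTierForModel('codex', 'gpt-5.4');      // 'thorough'  (matched via aliases)
getTierForModel('codex', 'gpt-9000');     // null        (unknown model)
```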
package/src/main.js CHANGED
@@ -116,7 +116,7 @@ OPTIONS:
   --model <name>     Override the AI model. Claude Code is the default provider.
                      Available models: opus, sonnet, haiku (Claude Code);
                      also: opus-4.5, opus-4.6-low, opus-4.6-medium, opus-4.6-1m,
-                     opus-4.7-xhigh
+                     opus-4.7-high, opus-4.7-xhigh
                      or use provider-specific models with Gemini/Codex
   --use-checkout     Use current directory instead of creating worktree
                      (automatic in GitHub Actions)