@in-the-loop-labs/pair-review 1.3.1 → 1.3.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -144,7 +144,7 @@ Configuration is stored in `~/.pair-review/config.json`:
144
144
  "port": 7247,
145
145
  "theme": "light",
146
146
  "default_provider": "claude",
147
- "default_model": "sonnet"
147
+ "default_model": "opus"
148
148
  }
149
149
  ```
150
150
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@in-the-loop-labs/pair-review",
3
- "version": "1.3.1",
3
+ "version": "1.3.3",
4
4
  "description": "Your AI-powered code review partner - Close the feedback loop with AI coding agents",
5
5
  "main": "src/server.js",
6
6
  "bin": {
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "pair-review",
3
- "version": "1.3.1",
3
+ "version": "1.3.3",
4
4
  "description": "pair-review app integration — Open PRs and local changes in the pair-review web UI, run server-side AI analysis, and address review feedback. Requires the pair-review MCP server.",
5
5
  "author": {
6
6
  "name": "in-the-loop-labs",
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "code-critic",
3
- "version": "1.3.1",
3
+ "version": "1.3.3",
4
4
  "description": "AI-powered code review analysis — Run three-level AI analysis and implement-review-fix loops directly in your coding agent. Works standalone, no server required.",
5
5
  "author": {
6
6
  "name": "in-the-loop-labs",
@@ -11,7 +11,7 @@ class AnalysisConfigModal {
11
11
  this.onCancel = null;
12
12
  this.escapeHandler = null;
13
13
  this.selectedProvider = 'claude';
14
- this.selectedModel = 'sonnet';
14
+ this.selectedModel = 'opus';
15
15
  this.selectedPresets = new Set();
16
16
  this.rememberModel = false;
17
17
  this.repoInstructions = '';
@@ -92,9 +92,9 @@ class AnalysisConfigModal {
92
92
  id: 'claude',
93
93
  name: 'Claude',
94
94
  models: [
95
- { id: 'sonnet', name: 'Sonnet', tier: 'balanced', default: true }
95
+ { id: 'opus', name: 'Opus 4.6 High', tier: 'thorough', default: true }
96
96
  ],
97
- defaultModel: 'sonnet'
97
+ defaultModel: 'opus'
98
98
  }
99
99
  };
100
100
  this.models = this.providers.claude.models;
@@ -363,7 +363,7 @@ class LocalManager {
363
363
  const providerStorageKey = `pair-review-provider:local-${reviewId}`;
364
364
  const rememberedModel = localStorage.getItem(modelStorageKey);
365
365
  const rememberedProvider = localStorage.getItem(providerStorageKey);
366
- const currentModel = rememberedModel || repoSettings?.default_model || 'sonnet';
366
+ const currentModel = rememberedModel || repoSettings?.default_model || 'opus';
367
367
  const currentProvider = rememberedProvider || repoSettings?.default_provider || 'claude';
368
368
 
369
369
  // Show config modal
@@ -1027,7 +1027,7 @@ class LocalManager {
1027
1027
  },
1028
1028
  body: JSON.stringify({
1029
1029
  provider: config.provider || 'claude',
1030
- model: config.model || 'sonnet',
1030
+ model: config.model || 'opus',
1031
1031
  tier: config.tier || 'balanced',
1032
1032
  customInstructions: config.customInstructions || null,
1033
1033
  skipLevel3: config.skipLevel3 || false
@@ -734,6 +734,10 @@ class AnalysisHistoryManager {
734
734
  'haiku': 'fast',
735
735
  'sonnet': 'balanced',
736
736
  'opus': 'thorough',
737
+ 'opus-4.5': 'balanced',
738
+ 'opus-4.6-low': 'balanced',
739
+ 'opus-4.6-medium': 'balanced',
740
+ 'opus-4.6-1m': 'balanced',
737
741
  // Gemini models
738
742
  'flash': 'fast',
739
743
  'pro': 'balanced',
package/public/js/pr.js CHANGED
@@ -3515,7 +3515,7 @@ class PRManager {
3515
3515
  const providerStorageKey = PRManager.getRepoStorageKey('pair-review-provider', owner, repo);
3516
3516
  const rememberedModel = localStorage.getItem(modelStorageKey);
3517
3517
  const rememberedProvider = localStorage.getItem(providerStorageKey);
3518
- const currentModel = rememberedModel || repoSettings?.default_model || 'sonnet';
3518
+ const currentModel = rememberedModel || repoSettings?.default_model || 'opus';
3519
3519
  const currentProvider = rememberedProvider || repoSettings?.default_provider || 'claude';
3520
3520
 
3521
3521
  // Show the config modal
@@ -3593,7 +3593,7 @@ class PRManager {
3593
3593
  },
3594
3594
  body: JSON.stringify({
3595
3595
  provider: config.provider || 'claude',
3596
- model: config.model || 'sonnet',
3596
+ model: config.model || 'opus',
3597
3597
  tier: config.tier || 'balanced',
3598
3598
  customInstructions: config.customInstructions || null,
3599
3599
  skipLevel3: config.skipLevel3 || false
@@ -208,23 +208,11 @@ class RepoSettingsPage {
208
208
 
209
209
  } catch (error) {
210
210
  console.error('Error loading providers:', error);
211
- // Last-resort degraded mode: hardcoded Claude fallback when the /api/providers
212
- // endpoint is unavailable. This allows basic functionality even if the backend
213
- // is partially broken. The canonical provider definitions live in
214
- // src/ai/claude-provider.js - this fallback should mirror those values.
215
- this.providers = {
216
- claude: {
217
- id: 'claude',
218
- name: 'Claude',
219
- models: [
220
- { id: 'haiku', name: 'Haiku', tier: 'fast', badge: 'Fastest', badgeClass: 'badge-speed', tagline: 'Lightning Fast', description: 'Quick analysis for simple changes' },
221
- { id: 'sonnet', name: 'Sonnet', tier: 'balanced', default: true, badge: 'Recommended', badgeClass: 'badge-recommended', tagline: 'Best Balance', description: 'Recommended for most reviews' },
222
- { id: 'opus', name: 'Opus', tier: 'thorough', badge: 'Most Thorough', badgeClass: 'badge-power', tagline: 'Most Capable', description: 'Deep analysis for complex code' }
223
- ],
224
- defaultModel: 'sonnet'
225
- }
226
- };
211
+ // No hardcoded fallback — rely on the /api/providers endpoint as the single source of truth.
212
+ // If the endpoint is unavailable, show an empty state rather than stale data.
213
+ this.providers = {};
227
214
  this.renderProviderButtons();
215
+ this.showToast('error', 'Failed to load AI providers. Please refresh the page.');
228
216
  }
229
217
  }
230
218
 
@@ -26,10 +26,10 @@ const { buildSparseCheckoutGuidance } = require('./prompts/sparse-checkout-guida
26
26
  class Analyzer {
27
27
  /**
28
28
  * @param {Object} database - Database instance
29
- * @param {string} model - Model to use (e.g., 'sonnet', 'gemini-2.5-pro')
29
+ * @param {string} model - Model to use (e.g., 'opus', 'gemini-2.5-pro')
30
30
  * @param {string} provider - Provider ID (e.g., 'claude', 'gemini'). Defaults to 'claude'.
31
31
  */
32
- constructor(database, model = 'sonnet', provider = 'claude') {
32
+ constructor(database, model = 'opus', provider = 'claude') {
33
33
  // Store model and provider for creating provider instances per level
34
34
  this.model = model;
35
35
  this.provider = provider;
@@ -5,7 +5,7 @@ const logger = require('../utils/logger');
5
5
  const { extractJSON } = require('../utils/json-extractor');
6
6
 
7
7
  class ClaudeCLI {
8
- constructor(model = 'sonnet') {
8
+ constructor(model = 'opus') {
9
9
  // Check for environment variable to override default command
10
10
  // Use PAIR_REVIEW_CLAUDE_CMD environment variable if set, otherwise default to 'claude'
11
11
  const claudeCmd = process.env.PAIR_REVIEW_CLAUDE_CMD || 'claude';
@@ -123,6 +123,11 @@ class ClaudeCLI {
123
123
  }
124
124
  });
125
125
 
126
+ // Handle stdin errors (e.g., EPIPE if process exits before write completes)
127
+ claude.stdin.on('error', (err) => {
128
+ logger.error(`${levelPrefix} stdin error: ${err.message}`);
129
+ });
130
+
126
131
  // Send the prompt to stdin with backpressure handling
127
132
  claude.stdin.write(prompt, (err) => {
128
133
  if (err) {
@@ -22,7 +22,7 @@ const BIN_DIR = path.join(__dirname, '..', '..', 'bin');
22
22
  const CLAUDE_MODELS = [
23
23
  {
24
24
  id: 'haiku',
25
- name: 'Haiku',
25
+ name: 'Haiku 4.5',
26
26
  tier: 'fast',
27
27
  tagline: 'Lightning Fast',
28
28
  description: 'Quick analysis for simple changes',
@@ -31,21 +31,65 @@ const CLAUDE_MODELS = [
31
31
  },
32
32
  {
33
33
  id: 'sonnet',
34
- name: 'Sonnet',
34
+ name: 'Sonnet 4.5',
35
35
  tier: 'balanced',
36
36
  tagline: 'Best Balance',
37
37
  description: 'Recommended for most reviews',
38
- badge: 'Recommended',
39
- badgeClass: 'badge-recommended',
40
- default: true
38
+ badge: 'Standard',
39
+ badgeClass: 'badge-recommended'
40
+ },
41
+ {
42
+ id: 'opus-4.5',
43
+ cli_model: 'claude-opus-4-5-20251101',
44
+ name: 'Opus 4.5',
45
+ tier: 'balanced',
46
+ tagline: 'Deep Thinker',
47
+ description: 'Extended thinking for complex analysis',
48
+ badge: 'Previous Gen',
49
+ badgeClass: 'badge-power'
50
+ },
51
+ {
52
+ id: 'opus-4.6-low',
53
+ cli_model: 'opus',
54
+ env: { CLAUDE_CODE_EFFORT_LEVEL: 'low' },
55
+ name: 'Opus 4.6 Low',
56
+ tier: 'balanced',
57
+ tagline: 'Fast Opus',
58
+ description: 'Opus 4.6 with low effort — quick and capable',
59
+ badge: 'Balanced',
60
+ badgeClass: 'badge-recommended'
61
+ },
62
+ {
63
+ id: 'opus-4.6-medium',
64
+ cli_model: 'opus',
65
+ env: { CLAUDE_CODE_EFFORT_LEVEL: 'medium' },
66
+ name: 'Opus 4.6 Medium',
67
+ tier: 'balanced',
68
+ tagline: 'Balanced Opus',
69
+ description: 'Opus 4.6 with medium effort — balanced depth',
70
+ badge: 'Thorough',
71
+ badgeClass: 'badge-power'
41
72
  },
42
73
  {
43
74
  id: 'opus',
44
- name: 'Opus',
75
+ aliases: ['opus-4.6-high'],
76
+ env: { CLAUDE_CODE_EFFORT_LEVEL: 'high' },
77
+ name: 'Opus 4.6 High',
45
78
  tier: 'thorough',
46
- tagline: 'Most Capable',
47
- description: 'Deep analysis for complex code',
79
+ tagline: 'Maximum Depth',
80
+ description: 'Opus 4.6 with high effort — deepest analysis',
48
81
  badge: 'Most Thorough',
82
+ badgeClass: 'badge-power',
83
+ default: true
84
+ },
85
+ {
86
+ id: 'opus-4.6-1m',
87
+ cli_model: 'opus[1m]',
88
+ name: 'Opus 4.6 1M',
89
+ tier: 'balanced',
90
+ tagline: 'Extended Context',
91
+ description: 'Opus 4.6 high effort with 1M token context window',
92
+ badge: 'More Context',
49
93
  badgeClass: 'badge-power'
50
94
  }
51
95
  ];
@@ -59,7 +103,7 @@ class ClaudeProvider extends AIProvider {
59
103
  * @param {Object} configOverrides.env - Additional environment variables
60
104
  * @param {Object[]} configOverrides.models - Custom model definitions
61
105
  */
62
- constructor(model = 'sonnet', configOverrides = {}) {
106
+ constructor(model = 'opus', configOverrides = {}) {
63
107
  super(model);
64
108
 
65
109
  // Command precedence: ENV > config > default
@@ -67,7 +111,7 @@ class ClaudeProvider extends AIProvider {
67
111
  const configCmd = configOverrides.command;
68
112
  const claudeCmd = envCmd || configCmd || 'claude';
69
113
 
70
- // Store for use in getExtractionConfig and testAvailability
114
+ // Store for use in getExtractionConfig, buildArgsForModel, and testAvailability
71
115
  this.claudeCmd = claudeCmd;
72
116
  this.configOverrides = configOverrides;
73
117
 
@@ -77,6 +121,9 @@ class ClaudeProvider extends AIProvider {
77
121
  // Check for budget limit environment variable
78
122
  const maxBudget = process.env.PAIR_REVIEW_MAX_BUDGET_USD;
79
123
 
124
+ // Resolve model config using shared helper
125
+ const { builtIn, configModel, cliModelArgs, extraArgs, env } = this._resolveModelConfig(model);
126
+
80
127
  // Build args: base args + provider extra_args + model extra_args
81
128
  // Use --output-format stream-json for JSONL streaming output (better debugging visibility)
82
129
  //
@@ -116,43 +163,98 @@ class ClaudeProvider extends AIProvider {
116
163
  ].join(',');
117
164
  permissionArgs = ['--allowedTools', allowedTools];
118
165
  }
119
- const baseArgs = ['-p', '--verbose', '--model', model, '--output-format', 'stream-json', ...permissionArgs];
166
+ const baseArgs = ['-p', '--verbose', ...cliModelArgs, '--output-format', 'stream-json', ...permissionArgs];
120
167
  if (maxBudget) {
121
168
  const budgetNum = parseFloat(maxBudget);
122
169
  if (isNaN(budgetNum) || budgetNum <= 0) {
123
- console.warn(`Warning: PAIR_REVIEW_MAX_BUDGET_USD="${maxBudget}" is not a valid positive number, ignoring`);
170
+ logger.warn(`Warning: PAIR_REVIEW_MAX_BUDGET_USD="${maxBudget}" is not a valid positive number, ignoring`);
124
171
  } else {
125
172
  baseArgs.push('--max-budget-usd', String(budgetNum));
126
173
  }
127
174
  }
128
- const providerArgs = configOverrides.extra_args || [];
129
- const modelConfig = configOverrides.models?.find(m => m.id === model);
130
- const modelArgs = modelConfig?.extra_args || [];
131
175
 
132
- // Merge env: provider env + model env
133
- this.extraEnv = {
134
- ...(configOverrides.env || {}),
135
- ...(modelConfig?.env || {})
136
- };
176
+ // Three-way merge for env: built-in model → provider config → per-model config
177
+ this.extraEnv = env;
137
178
 
138
179
  if (this.useShell) {
139
- // Quote the allowedTools value to prevent shell interpretation of special characters
140
- // (commas, parentheses in patterns like "Bash(git diff*)")
141
- const quotedBaseArgs = baseArgs.map((arg, i) => {
142
- // The allowedTools value follows the --allowedTools flag
143
- if (baseArgs[i - 1] === '--allowedTools') {
144
- return `'${arg}'`;
145
- }
146
- return arg;
147
- });
148
- this.command = `${claudeCmd} ${[...quotedBaseArgs, ...providerArgs, ...modelArgs].join(' ')}`;
180
+ const allArgs = [...baseArgs, ...extraArgs];
181
+ this.command = `${claudeCmd} ${this._quoteShellArgs(allArgs).join(' ')}`;
149
182
  this.args = [];
150
183
  } else {
151
184
  this.command = claudeCmd;
152
- this.args = [...baseArgs, ...providerArgs, ...modelArgs];
185
+ this.args = [...baseArgs, ...extraArgs];
153
186
  }
154
187
  }
155
188
 
189
+ /**
190
+ * Quote shell-sensitive arguments for safe shell execution.
191
+ * Any arg containing characters that could be interpreted by the shell
192
+ * (brackets, parentheses, commas, etc.) is wrapped in single quotes
193
+ * with internal single quotes escaped using the POSIX pattern.
194
+ *
195
+ * @param {string[]} args - Array of CLI arguments
196
+ * @returns {string[]} Args with shell-sensitive values quoted
197
+ * @private
198
+ */
199
+ _quoteShellArgs(args) {
200
+ return args.map((arg, i) => {
201
+ const prevArg = args[i - 1];
202
+ if (prevArg === '--allowedTools' || prevArg === '--model') {
203
+ if (/[][*?(){}$!&|;<>,\s']/.test(arg)) {
204
+ return `'${arg.replace(/'/g, "'\\''")}'`;
205
+ }
206
+ }
207
+ return arg;
208
+ });
209
+ }
210
+
211
+ /**
212
+ * Resolve model configuration by looking up built-in and config override definitions.
213
+ * Consolidates the CLAUDE_MODELS.find() and configOverrides.models.find() lookups
214
+ * used across the constructor, buildArgsForModel(), and getExtractionConfig().
215
+ *
216
+ * @param {string} modelId - The model identifier to resolve
217
+ * @returns {Object} Resolved configuration
218
+ * @returns {Object|undefined} .builtIn - Built-in model definition from CLAUDE_MODELS
219
+ * @returns {Object|undefined} .configModel - Config override model definition
220
+ * @returns {string[]} .cliModelArgs - Args array for --model (empty if suppressed)
221
+ * @returns {string[]} .extraArgs - Merged extra_args from built-in, provider, and config model
222
+ * @returns {Object} .env - Merged env from built-in, provider, and config model
223
+ * @private
224
+ */
225
+ _resolveModelConfig(modelId) {
226
+ const configOverrides = this.configOverrides || {};
227
+
228
+ // Resolve cli_model: config model > built-in model > id
229
+ // cli_model decouples the app-level model ID from the CLI --model argument.
230
+ // - undefined: fall through the resolution chain
231
+ // - string: use this exact value for --model
232
+ // - null: explicitly suppress --model (for tools that want the model set via env instead)
233
+ const builtIn = CLAUDE_MODELS.find(m => m.id === modelId || (m.aliases && m.aliases.includes(modelId)));
234
+ const configModel = configOverrides.models?.find(m => m.id === modelId);
235
+ const resolvedCliModel = configModel?.cli_model !== undefined
236
+ ? configModel.cli_model
237
+ : (builtIn?.cli_model !== undefined ? builtIn.cli_model : modelId);
238
+
239
+ // Conditionally include --model in base args (null = suppress, empty string passes through to surface CLI error)
240
+ const cliModelArgs = resolvedCliModel !== null ? ['--model', resolvedCliModel] : [];
241
+
242
+ // Three-way merge for extra_args: built-in model → provider config → per-model config
243
+ const builtInArgs = builtIn?.extra_args || [];
244
+ const providerArgs = configOverrides.extra_args || [];
245
+ const configModelArgs = configModel?.extra_args || [];
246
+ const extraArgs = [...builtInArgs, ...providerArgs, ...configModelArgs];
247
+
248
+ // Three-way merge for env: built-in model → provider config → per-model config
249
+ const env = {
250
+ ...(builtIn?.env || {}),
251
+ ...(configOverrides.env || {}),
252
+ ...(configModel?.env || {})
253
+ };
254
+
255
+ return { builtIn, configModel, cliModelArgs, extraArgs, env };
256
+ }
257
+
156
258
  /**
157
259
  * Execute Claude CLI with a prompt
158
260
  * @param {string} prompt - The prompt to send to Claude
@@ -294,24 +396,28 @@ class ClaudeProvider extends AIProvider {
294
396
  } else {
295
397
  // Regex extraction failed, try LLM-based extraction as fallback
296
398
  logger.warn(`${levelPrefix} Regex extraction failed: ${parsed.error}`);
297
- logger.info(`${levelPrefix} Raw response length: ${stdout.length} characters`);
399
+ // Pass extracted text content to LLM fallback (not raw JSONL stdout).
400
+ // The text content is the actual LLM response text extracted from JSONL
401
+ // events and is much smaller and more relevant than the full JSONL stream.
402
+ const llmFallbackInput = parsed.textContent || stdout;
403
+ logger.info(`${levelPrefix} LLM fallback input length: ${llmFallbackInput.length} characters (${parsed.textContent ? 'text content' : 'raw stdout'})`);
298
404
  logger.info(`${levelPrefix} Attempting LLM-based JSON extraction fallback...`);
299
405
 
300
406
  // Use async IIFE to handle the async LLM extraction
301
407
  (async () => {
302
408
  try {
303
- const llmExtracted = await this.extractJSONWithLLM(stdout, { level, analysisId, registerProcess });
409
+ const llmExtracted = await this.extractJSONWithLLM(llmFallbackInput, { level, analysisId, registerProcess });
304
410
  if (llmExtracted.success) {
305
411
  logger.success(`${levelPrefix} LLM extraction fallback succeeded`);
306
412
  settle(resolve, llmExtracted.data);
307
413
  } else {
308
414
  logger.warn(`${levelPrefix} LLM extraction fallback also failed: ${llmExtracted.error}`);
309
- logger.info(`${levelPrefix} Raw response preview: ${stdout.substring(0, 500)}...`);
310
- settle(resolve, { raw: stdout, parsed: false });
415
+ logger.info(`${levelPrefix} Raw response preview: ${llmFallbackInput.substring(0, 500)}...`);
416
+ settle(resolve, { raw: llmFallbackInput, parsed: false });
311
417
  }
312
418
  } catch (llmError) {
313
419
  logger.warn(`${levelPrefix} LLM extraction fallback error: ${llmError.message}`);
314
- settle(resolve, { raw: stdout, parsed: false });
420
+ settle(resolve, { raw: llmFallbackInput, parsed: false });
315
421
  }
316
422
  })();
317
423
  }
@@ -328,6 +434,11 @@ class ClaudeProvider extends AIProvider {
328
434
  }
329
435
  });
330
436
 
437
+ // Handle stdin errors (e.g., EPIPE if process exits before write completes)
438
+ claude.stdin.on('error', (err) => {
439
+ logger.error(`${levelPrefix} stdin error: ${err.message}`);
440
+ });
441
+
331
442
  // Send the prompt to stdin
332
443
  claude.stdin.write(prompt, (err) => {
333
444
  if (err) {
@@ -351,15 +462,12 @@ class ClaudeProvider extends AIProvider {
351
462
  * @returns {string[]} Complete args array for the CLI
352
463
  */
353
464
  buildArgsForModel(model) {
465
+ const { cliModelArgs, extraArgs } = this._resolveModelConfig(model);
466
+
354
467
  // Base args for extraction (simple prompt mode, no tools needed)
355
- const baseArgs = ['-p', '--model', model];
356
- // Provider-level extra_args (from configOverrides)
357
- const providerArgs = this.configOverrides?.extra_args || [];
358
- // Model-specific extra_args (from the model config for the given model)
359
- const modelConfig = this.configOverrides?.models?.find(m => m.id === model);
360
- const modelArgs = modelConfig?.extra_args || [];
361
-
362
- return [...baseArgs, ...providerArgs, ...modelArgs];
468
+ const baseArgs = ['-p', ...cliModelArgs];
469
+
470
+ return [...baseArgs, ...extraArgs];
363
471
  }
364
472
 
365
473
  /**
@@ -373,22 +481,26 @@ class ClaudeProvider extends AIProvider {
373
481
  const claudeCmd = this.claudeCmd;
374
482
  const useShell = this.useShell;
375
483
 
376
- // Build args consistently using the shared method, applying provider and model extra_args
377
- const args = this.buildArgsForModel(model);
484
+ // Single call to _resolveModelConfig for both args and env
485
+ const { cliModelArgs, extraArgs, env } = this._resolveModelConfig(model);
486
+ const args = ['-p', ...cliModelArgs, ...extraArgs];
378
487
 
379
488
  if (useShell) {
489
+ const quotedArgs = this._quoteShellArgs(args);
380
490
  return {
381
- command: `${claudeCmd} ${args.join(' ')}`,
491
+ command: `${claudeCmd} ${quotedArgs.join(' ')}`,
382
492
  args: [],
383
493
  useShell: true,
384
- promptViaStdin: true
494
+ promptViaStdin: true,
495
+ env
385
496
  };
386
497
  }
387
498
  return {
388
499
  command: claudeCmd,
389
500
  args,
390
501
  useShell: false,
391
- promptViaStdin: true
502
+ promptViaStdin: true,
503
+ env
392
504
  };
393
505
  }
394
506
 
@@ -623,9 +735,10 @@ class ClaudeProvider extends AIProvider {
623
735
  return extracted;
624
736
  }
625
737
 
626
- // If no JSON found, return the raw text
738
+ // If no JSON found, return with textContent so the caller can
739
+ // pass it (not raw JSONL stdout) to the LLM extraction fallback
627
740
  logger.warn(`${levelPrefix} Text content is not JSON, treating as raw text`);
628
- return { success: false, error: 'Text content is not valid JSON' };
741
+ return { success: false, error: 'Text content is not valid JSON', textContent };
629
742
  }
630
743
 
631
744
  // No text content found - don't fall back to raw stdout extraction
@@ -729,7 +842,7 @@ class ClaudeProvider extends AIProvider {
729
842
  }
730
843
 
731
844
  static getDefaultModel() {
732
- return 'sonnet';
845
+ return 'opus';
733
846
  }
734
847
 
735
848
  static getInstallInstructions() {
@@ -271,24 +271,25 @@ class CodexProvider extends AIProvider {
271
271
  } else {
272
272
  // Regex extraction failed, try LLM-based extraction as fallback
273
273
  logger.warn(`${levelPrefix} Regex extraction failed: ${parsed.error}`);
274
- logger.info(`${levelPrefix} Raw response length: ${stdout.length} characters`);
274
+ const llmFallbackInput = parsed.textContent || stdout;
275
+ logger.info(`${levelPrefix} LLM fallback input length: ${llmFallbackInput.length} characters (${parsed.textContent ? 'text content' : 'raw stdout'})`);
275
276
  logger.info(`${levelPrefix} Attempting LLM-based JSON extraction fallback...`);
276
277
 
277
278
  // Use async IIFE to handle the async LLM extraction
278
279
  (async () => {
279
280
  try {
280
- const llmExtracted = await this.extractJSONWithLLM(stdout, { level, analysisId, registerProcess });
281
+ const llmExtracted = await this.extractJSONWithLLM(llmFallbackInput, { level, analysisId, registerProcess });
281
282
  if (llmExtracted.success) {
282
283
  logger.success(`${levelPrefix} LLM extraction fallback succeeded`);
283
284
  settle(resolve, llmExtracted.data);
284
285
  } else {
285
286
  logger.warn(`${levelPrefix} LLM extraction fallback also failed: ${llmExtracted.error}`);
286
- logger.info(`${levelPrefix} Raw response preview: ${stdout.substring(0, 500)}...`);
287
- settle(resolve, { raw: stdout, parsed: false });
287
+ logger.info(`${levelPrefix} Raw response preview: ${llmFallbackInput.substring(0, 500)}...`);
288
+ settle(resolve, { raw: llmFallbackInput, parsed: false });
288
289
  }
289
290
  } catch (llmError) {
290
291
  logger.warn(`${levelPrefix} LLM extraction fallback error: ${llmError.message}`);
291
- settle(resolve, { raw: stdout, parsed: false });
292
+ settle(resolve, { raw: llmFallbackInput, parsed: false });
292
293
  }
293
294
  })();
294
295
  }
@@ -305,6 +306,11 @@ class CodexProvider extends AIProvider {
305
306
  }
306
307
  });
307
308
 
309
+ // Handle stdin errors (e.g., EPIPE if process exits before write completes)
310
+ codex.stdin.on('error', (err) => {
311
+ logger.error(`${levelPrefix} stdin error: ${err.message}`);
312
+ });
313
+
308
314
  // Send the prompt to stdin
309
315
  codex.stdin.write(prompt, (err) => {
310
316
  if (err) {
@@ -368,9 +374,10 @@ class CodexProvider extends AIProvider {
368
374
  return extracted;
369
375
  }
370
376
 
371
- // If no JSON found, return the raw message
377
+ // If no JSON found, return with textContent so the caller can
378
+ // pass it (not raw JSONL stdout) to the LLM extraction fallback
372
379
  logger.warn(`${levelPrefix} Agent message is not JSON, treating as raw text`);
373
- return { success: false, error: 'Agent message is not valid JSON' };
380
+ return { success: false, error: 'Agent message is not valid JSON', textContent: agentMessageText };
374
381
  }
375
382
 
376
383
  // No agent message found, try extracting JSON directly from stdout
@@ -314,7 +314,8 @@ class CursorAgentProvider extends AIProvider {
314
314
  } else {
315
315
  // Regex extraction failed, try LLM-based extraction as fallback
316
316
  logger.warn(`${levelPrefix} Regex extraction failed: ${parsed.error}`);
317
- logger.info(`${levelPrefix} Raw response length: ${stdout.length} characters`);
317
+ const llmFallbackInput = parsed.textContent || stdout;
318
+ logger.info(`${levelPrefix} LLM fallback input length: ${llmFallbackInput.length} characters (${parsed.textContent ? 'text content' : 'raw stdout'})`);
318
319
  logger.info(`${levelPrefix} Attempting LLM-based JSON extraction fallback...`);
319
320
 
320
321
  // Use async IIFE to handle the async LLM extraction
@@ -324,18 +325,18 @@ class CursorAgentProvider extends AIProvider {
324
325
  // orphan processes if timeout fired between close-handler entry
325
326
  // and reaching this point.
326
327
  if (settled) return;
327
- const llmExtracted = await this.extractJSONWithLLM(stdout, { level, analysisId, registerProcess });
328
+ const llmExtracted = await this.extractJSONWithLLM(llmFallbackInput, { level, analysisId, registerProcess });
328
329
  if (llmExtracted.success) {
329
330
  logger.success(`${levelPrefix} LLM extraction fallback succeeded`);
330
331
  settle(resolve, llmExtracted.data);
331
332
  } else {
332
333
  logger.warn(`${levelPrefix} LLM extraction fallback also failed: ${llmExtracted.error}`);
333
- logger.info(`${levelPrefix} Raw response preview: ${stdout.substring(0, 500)}...`);
334
- settle(resolve, { raw: stdout, parsed: false });
334
+ logger.info(`${levelPrefix} Raw response preview: ${llmFallbackInput.substring(0, 500)}...`);
335
+ settle(resolve, { raw: llmFallbackInput, parsed: false });
335
336
  }
336
337
  } catch (llmError) {
337
338
  logger.warn(`${levelPrefix} LLM extraction fallback error: ${llmError.message}`);
338
- settle(resolve, { raw: stdout, parsed: false });
339
+ settle(resolve, { raw: llmFallbackInput, parsed: false });
339
340
  }
340
341
  })();
341
342
  }
@@ -352,6 +353,11 @@ class CursorAgentProvider extends AIProvider {
352
353
  }
353
354
  });
354
355
 
356
+ // Handle stdin errors (e.g., EPIPE if process exits before write completes)
357
+ agent.stdin.on('error', (err) => {
358
+ logger.error(`${levelPrefix} stdin error: ${err.message}`);
359
+ });
360
+
355
361
  // Send the prompt to stdin
356
362
  agent.stdin.write(prompt, (err) => {
357
363
  if (err) {
@@ -461,7 +467,9 @@ class CursorAgentProvider extends AIProvider {
461
467
  return extracted;
462
468
  }
463
469
 
464
- return { success: false, error: 'No valid JSON found in assistant or result text' };
470
+ // Include textContent so the caller can pass it to LLM extraction fallback
471
+ const textContent = assistantText || resultText || null;
472
+ return { success: false, error: 'No valid JSON found in assistant or result text', textContent };
465
473
 
466
474
  } catch (parseError) {
467
475
  // stdout might not be valid JSONL at all, try extracting JSON from it
@@ -320,24 +320,25 @@ class GeminiProvider extends AIProvider {
320
320
  } else {
321
321
  // Regex extraction failed, try LLM-based extraction as fallback
322
322
  logger.warn(`${levelPrefix} Regex extraction failed: ${parsed.error}`);
323
- logger.info(`${levelPrefix} Raw response length: ${stdout.length} characters`);
323
+ const llmFallbackInput = parsed.textContent || stdout;
324
+ logger.info(`${levelPrefix} LLM fallback input length: ${llmFallbackInput.length} characters (${parsed.textContent ? 'text content' : 'raw stdout'})`);
324
325
  logger.info(`${levelPrefix} Attempting LLM-based JSON extraction fallback...`);
325
326
 
326
327
  // Use async IIFE to handle the async LLM extraction
327
328
  (async () => {
328
329
  try {
329
- const llmExtracted = await this.extractJSONWithLLM(stdout, { level, analysisId, registerProcess });
330
+ const llmExtracted = await this.extractJSONWithLLM(llmFallbackInput, { level, analysisId, registerProcess });
330
331
  if (llmExtracted.success) {
331
332
  logger.success(`${levelPrefix} LLM extraction fallback succeeded`);
332
333
  settle(resolve, llmExtracted.data);
333
334
  } else {
334
335
  logger.warn(`${levelPrefix} LLM extraction fallback also failed: ${llmExtracted.error}`);
335
- logger.info(`${levelPrefix} Raw response preview: ${stdout.substring(0, 500)}...`);
336
- settle(resolve, { raw: stdout, parsed: false });
336
+ logger.info(`${levelPrefix} Raw response preview: ${llmFallbackInput.substring(0, 500)}...`);
337
+ settle(resolve, { raw: llmFallbackInput, parsed: false });
337
338
  }
338
339
  } catch (llmError) {
339
340
  logger.warn(`${levelPrefix} LLM extraction fallback error: ${llmError.message}`);
340
- settle(resolve, { raw: stdout, parsed: false });
341
+ settle(resolve, { raw: llmFallbackInput, parsed: false });
341
342
  }
342
343
  })();
343
344
  }
@@ -354,6 +355,11 @@ class GeminiProvider extends AIProvider {
354
355
  }
355
356
  });
356
357
 
358
+ // Handle stdin errors (e.g., EPIPE if process exits before write completes)
359
+ gemini.stdin.on('error', (err) => {
360
+ logger.error(`${levelPrefix} stdin error: ${err.message}`);
361
+ });
362
+
357
363
  // Send the prompt to stdin
358
364
  gemini.stdin.write(prompt, (err) => {
359
365
  if (err) {
@@ -419,9 +425,10 @@ class GeminiProvider extends AIProvider {
419
425
  return extracted;
420
426
  }
421
427
 
422
- // If no JSON found, return the raw message
428
+ // If no JSON found, return with textContent so the caller can
429
+ // pass it (not raw JSONL stdout) to the LLM extraction fallback
423
430
  logger.warn(`${levelPrefix} Assistant message is not JSON, treating as raw text`);
424
- return { success: false, error: 'Assistant message is not valid JSON' };
431
+ return { success: false, error: 'Assistant message is not valid JSON', textContent: assistantText };
425
432
  }
426
433
 
427
434
  // No assistant message found, try extracting JSON directly from stdout
@@ -255,24 +255,25 @@ class OpenCodeProvider extends AIProvider {
255
255
  } else {
256
256
  // Regex extraction failed, try LLM-based extraction as fallback
257
257
  logger.warn(`${levelPrefix} Regex extraction failed: ${parsed.error}`);
258
- logger.info(`${levelPrefix} Raw response length: ${stdout.length} characters`);
258
+ const llmFallbackInput = parsed.textContent || stdout;
259
+ logger.info(`${levelPrefix} LLM fallback input length: ${llmFallbackInput.length} characters (${parsed.textContent ? 'text content' : 'raw stdout'})`);
259
260
  logger.info(`${levelPrefix} Attempting LLM-based JSON extraction fallback...`);
260
261
 
261
262
  // Use async IIFE to handle the async LLM extraction
262
263
  (async () => {
263
264
  try {
264
- const llmExtracted = await this.extractJSONWithLLM(stdout, { level, analysisId, registerProcess });
265
+ const llmExtracted = await this.extractJSONWithLLM(llmFallbackInput, { level, analysisId, registerProcess });
265
266
  if (llmExtracted.success) {
266
267
  logger.success(`${levelPrefix} LLM extraction fallback succeeded`);
267
268
  settle(resolve, llmExtracted.data);
268
269
  } else {
269
270
  logger.warn(`${levelPrefix} LLM extraction fallback also failed: ${llmExtracted.error}`);
270
- logger.info(`${levelPrefix} Raw response preview: ${stdout.substring(0, 500)}...`);
271
- settle(resolve, { raw: stdout, parsed: false });
271
+ logger.info(`${levelPrefix} Raw response preview: ${llmFallbackInput.substring(0, 500)}...`);
272
+ settle(resolve, { raw: llmFallbackInput, parsed: false });
272
273
  }
273
274
  } catch (llmError) {
274
275
  logger.warn(`${levelPrefix} LLM extraction fallback error: ${llmError.message}`);
275
- settle(resolve, { raw: stdout, parsed: false });
276
+ settle(resolve, { raw: llmFallbackInput, parsed: false });
276
277
  }
277
278
  })();
278
279
  }
@@ -289,6 +290,11 @@ class OpenCodeProvider extends AIProvider {
289
290
  }
290
291
  });
291
292
 
293
+ // Handle stdin errors (e.g., EPIPE if process exits before write completes)
294
+ opencode.stdin.on('error', (err) => {
295
+ logger.error(`${levelPrefix} stdin error: ${err.message}`);
296
+ });
297
+
292
298
  // Send the prompt to stdin (OpenCode reads from stdin when no positional args)
293
299
  // Note on error handling: When stdin.write fails, we kill the process which
294
300
  // triggers the 'close' event handler. The `settled` guard (line 142) prevents
@@ -490,9 +496,10 @@ class OpenCodeProvider extends AIProvider {
490
496
  return extracted;
491
497
  }
492
498
 
493
- // If no JSON found, return the raw text
499
+ // If no JSON found, return with textContent so the caller can
500
+ // pass it (not raw JSONL stdout) to the LLM extraction fallback
494
501
  logger.warn(`${levelPrefix} Text content is not JSON, treating as raw text`);
495
- return { success: false, error: 'Text content is not valid JSON' };
502
+ return { success: false, error: 'Text content is not valid JSON', textContent };
496
503
  }
497
504
 
498
505
  // No text content found, try extracting JSON directly from stdout
@@ -192,7 +192,7 @@ class AIProvider {
192
192
  };
193
193
  }
194
194
 
195
- const { command, args, useShell, promptViaStdin } = config;
195
+ const { command, args, useShell, promptViaStdin, env: configEnv } = config;
196
196
  const prompt = `Extract the JSON object from the following text. Return ONLY the valid JSON, nothing else. Do not include any explanation, markdown formatting, or code blocks - just the raw JSON.
197
197
 
198
198
  === BEGIN INPUT TEXT ===
@@ -209,6 +209,7 @@ ${rawResponse}
209
209
  cwd: process.cwd(),
210
210
  env: {
211
211
  ...process.env,
212
+ ...(configEnv || {}),
212
213
  PATH: `${BIN_DIR}:${process.env.PATH}`
213
214
  },
214
215
  shell: useShell
@@ -279,6 +280,11 @@ ${rawResponse}
279
280
 
280
281
  // Send prompt via stdin if configured
281
282
  if (promptViaStdin) {
283
+ // Handle stdin errors (e.g., EPIPE if process exits before write completes)
284
+ proc.stdin.on('error', (err) => {
285
+ logger.warn(`${levelPrefix} extraction stdin error: ${err.message}`);
286
+ });
287
+
282
288
  proc.stdin.write(prompt, (err) => {
283
289
  if (err) {
284
290
  logger.warn(`${levelPrefix} Failed to write extraction prompt: ${err}`);
package/src/config.js CHANGED
@@ -14,7 +14,7 @@ const DEFAULT_CONFIG = {
14
14
  port: 7247,
15
15
  theme: "light",
16
16
  default_provider: "claude", // AI provider: 'claude', 'gemini', 'codex', 'copilot', 'opencode'
17
- default_model: "sonnet", // Model within the provider (e.g., 'sonnet' for Claude, 'gemini-2.5-pro' for Gemini)
17
+ default_model: "opus", // Model within the provider (e.g., 'opus' for Claude, 'gemini-2.5-pro' for Gemini)
18
18
  worktree_retention_days: 7,
19
19
  dev_mode: false, // When true, disables static file caching for development
20
20
  debug_stream: false, // When true, logs AI provider streaming events (equivalent to --debug-stream CLI flag)
package/src/main.js CHANGED
@@ -110,6 +110,7 @@ OPTIONS:
110
110
  The web UI also starts for the human reviewer.
111
111
  --model <name> Override the AI model. Claude Code is the default provider.
112
112
  Available models: opus, sonnet, haiku (Claude Code);
113
+ also: opus-4.5, opus-4.6-low, opus-4.6-medium, opus-4.6-1m
113
114
  or use provider-specific models with Gemini/Codex
114
115
  --use-checkout Use current directory instead of creating worktree
115
116
  (automatic in GitHub Actions)
@@ -129,7 +130,7 @@ ENVIRONMENT VARIABLES:
129
130
  PAIR_REVIEW_CLAUDE_CMD Custom command to invoke Claude CLI (default: claude)
130
131
  PAIR_REVIEW_GEMINI_CMD Custom command to invoke Gemini CLI (default: gemini)
131
132
  PAIR_REVIEW_CODEX_CMD Custom command to invoke Codex CLI (default: codex)
132
- PAIR_REVIEW_MODEL Override the AI model (same as --model flag)
133
+ PAIR_REVIEW_MODEL Override the AI model (same as --model flag, default: opus)
133
134
 
134
135
  CONFIGURATION:
135
136
  Config file: ~/.pair-review/config.json
@@ -852,7 +853,7 @@ async function performHeadlessReview(args, config, db, flags, options) {
852
853
 
853
854
  // Run AI analysis
854
855
  console.log('Running AI analysis (all 3 levels)...');
855
- const model = flags.model || process.env.PAIR_REVIEW_MODEL || 'sonnet';
856
+ const model = flags.model || process.env.PAIR_REVIEW_MODEL || 'opus';
856
857
  const analyzer = new Analyzer(db, model);
857
858
 
858
859
  let analysisSummary = null;
package/src/routes/mcp.js CHANGED
@@ -527,7 +527,7 @@ function createMCPServer(db, options = {}) {
527
527
  // Resolve provider and model
528
528
  const repoSettings = repository ? await repoSettingsRepo.getRepoSettings(repository) : null;
529
529
  const provider = process.env.PAIR_REVIEW_PROVIDER || repoSettings?.default_provider || config.default_provider || config.provider || 'claude';
530
- const model = process.env.PAIR_REVIEW_MODEL || repoSettings?.default_model || config.default_model || config.model || 'sonnet';
530
+ const model = process.env.PAIR_REVIEW_MODEL || repoSettings?.default_model || config.default_model || config.model || 'opus';
531
531
 
532
532
  // Create unified run/analysis ID and DB record immediately
533
533
  const runId = uuidv4();
@@ -676,7 +676,7 @@ function createMCPServer(db, options = {}) {
676
676
  // Resolve provider and model
677
677
  const repoSettings = await repoSettingsRepo.getRepoSettings(repository);
678
678
  const provider = process.env.PAIR_REVIEW_PROVIDER || repoSettings?.default_provider || config.default_provider || config.provider || 'claude';
679
- const model = process.env.PAIR_REVIEW_MODEL || repoSettings?.default_model || config.default_model || config.model || 'sonnet';
679
+ const model = process.env.PAIR_REVIEW_MODEL || repoSettings?.default_model || config.default_model || config.model || 'opus';
680
680
 
681
681
  // Create unified run/analysis ID and DB record immediately
682
682
  const runId = uuidv4();
@@ -70,7 +70,7 @@ function getLocalReviewKey(reviewId) {
70
70
 
71
71
  /**
72
72
  * Get the model to use for AI analysis
73
- * Priority: CLI flag (PAIR_REVIEW_MODEL env var) > config.default_model > 'sonnet' default
73
+ * Priority: CLI flag (PAIR_REVIEW_MODEL env var) > config.default_model > 'opus' default
74
74
  * @param {Object} req - Express request object
75
75
  * @returns {string} Model name to use
76
76
  */
@@ -93,7 +93,7 @@ function getModel(req) {
93
93
  }
94
94
 
95
95
  // Default fallback
96
- return 'sonnet';
96
+ return 'opus';
97
97
  }
98
98
 
99
99
  /**
@@ -2,9 +2,18 @@
2
2
  const logger = require('./logger');
3
3
 
4
4
  /**
5
- * Extract JSON from text responses using multiple strategies
6
- * This is a shared utility to ensure consistent JSON extraction across the application
7
- * @param {string} response - Raw response text
5
+ * Extract JSON from text responses using multiple strategies.
6
+ * This is a shared utility to ensure consistent JSON extraction across the application.
7
+ *
8
+ * Strategies are tried in order:
9
+ * 1. Markdown code blocks (```json ... ```)
10
+ * 2. Direct JSON.parse of the trimmed response
11
+ * 3. First { to last } substring
12
+ * 4. Known JSON key anchors (e.g. {"level", {"suggestions")
13
+ * 5. Forward scan: try JSON.parse from every top-level { in the text
14
+ * 6. Bracket-matched substring from the first {
15
+ *
16
+ * @param {string} response - Raw response text (may include preamble/postamble prose)
8
17
  * @param {string|number} level - Level identifier for logging (e.g., 1, 2, 3, 'orchestration', 'unknown')
9
18
  * @returns {Object} Extraction result with success flag and data/error
10
19
  */
@@ -35,51 +44,132 @@ function extractJSON(response, level = 'unknown') {
35
44
  }
36
45
  throw new Error('No JSON code block found');
37
46
  },
38
-
39
- // Strategy 2: Look for JSON between first { and last }
47
+
48
+ // Strategy 2: Try the entire response as JSON (fast path for clean responses)
49
+ () => {
50
+ return JSON.parse(response.trim());
51
+ },
52
+
53
+ // Strategy 3: Look for JSON between first { and last }
54
+ // Works when the response is just JSON or has minimal wrapping
40
55
  () => {
41
56
  const firstBrace = response.indexOf('{');
42
57
  const lastBrace = response.lastIndexOf('}');
43
- if (firstBrace !== -1 && lastBrace !== -1 && lastBrace >= firstBrace) {
58
+ if (firstBrace !== -1 && lastBrace !== -1 && lastBrace > firstBrace) {
44
59
  return JSON.parse(response.substring(firstBrace, lastBrace + 1));
45
60
  }
46
61
  throw new Error('No valid JSON braces found');
47
62
  },
48
-
49
- // Strategy 3: Try to find JSON-like structure with bracket matching
63
+
64
+ // Strategy 4: Anchor-based extraction look for known JSON key patterns
65
+ // that mark the start of our expected response structures.
66
+ // This handles the common case where preamble text contains { characters
67
+ // (e.g. LLM discussing code: "the function handleEvent(event) { ... }")
68
+ // which would cause Strategy 3 to grab the wrong first brace.
50
69
  () => {
51
- const jsonMatch = response.match(/\{[\s\S]*\}/);
52
- if (jsonMatch) {
53
- // Try to find the complete JSON by matching brackets
54
- const jsonStr = jsonMatch[0];
55
- let braceCount = 0;
56
- let endIndex = -1;
57
- const maxIterations = Math.min(jsonStr.length, 100000); // Prevent infinite loops
58
-
59
- for (let i = 0; i < maxIterations; i++) {
60
- if (jsonStr[i] === '{') braceCount++;
61
- else if (jsonStr[i] === '}') {
62
- braceCount--;
63
- if (braceCount === 0) {
64
- endIndex = i;
65
- break;
70
+ // Look for patterns that start our expected JSON structures
71
+ const anchors = [
72
+ /\{"level"\s*:/,
73
+ /\{"suggestions"\s*:/,
74
+ /\{"fileLevelSuggestions"\s*:/,
75
+ /\{"summary"\s*:/,
76
+ /\{"overview"\s*:/,
77
+ ];
78
+
79
+ for (const anchor of anchors) {
80
+ const match = response.match(anchor);
81
+ if (match) {
82
+ const startIdx = match.index;
83
+ // Find the matching closing brace from the end
84
+ const lastBrace = response.lastIndexOf('}');
85
+ if (lastBrace > startIdx) {
86
+ const candidate = response.substring(startIdx, lastBrace + 1);
87
+ return JSON.parse(candidate);
88
+ }
89
+ }
90
+ }
91
+ throw new Error('No known JSON anchor found');
92
+ },
93
+
94
+ // Strategy 5: Forward scan — try JSON.parse starting from each { in the text.
95
+ // Handles arbitrary preamble text with braces by trying every { as a potential
96
+ // JSON start. Stops at the first successful parse.
97
+ () => {
98
+ let searchFrom = 0;
99
+ // Limit attempts to avoid excessive parsing on very large non-JSON text
100
+ const maxAttempts = 20;
101
+ let attempts = 0;
102
+ const lastBrace = response.lastIndexOf('}');
103
+
104
+ while (searchFrom < response.length && attempts < maxAttempts) {
105
+ const braceIdx = response.indexOf('{', searchFrom);
106
+ if (braceIdx === -1) break;
107
+
108
+ attempts++;
109
+ try {
110
+ // Try parsing from this brace to the end of the response.
111
+ // JSON.parse is lenient about trailing content only if we trim to the
112
+ // right boundary, so use lastIndexOf('}') from the end.
113
+ if (lastBrace > braceIdx) {
114
+ const candidate = response.substring(braceIdx, lastBrace + 1);
115
+ const parsed = JSON.parse(candidate);
116
+ if (parsed && typeof parsed === 'object') {
117
+ return parsed;
66
118
  }
67
119
  }
120
+ } catch {
121
+ // This { wasn't the start of valid JSON, try the next one
68
122
  }
69
-
70
- if (endIndex > -1) {
71
- return JSON.parse(jsonStr.substring(0, endIndex + 1));
123
+ searchFrom = braceIdx + 1;
124
+ }
125
+ throw new Error('Forward scan found no valid JSON');
126
+ },
127
+
128
+ // Strategy 6: Bracket-matched substring from the first {.
129
+ // Counts balanced braces (ignoring those inside JSON strings) to find
130
+ // the end of the first top-level object. No iteration cap — the loop
131
+ // runs for the full length of the matched region.
132
+ () => {
133
+ const firstBrace = response.indexOf('{');
134
+ if (firstBrace === -1) throw new Error('No opening brace found');
135
+
136
+ let braceCount = 0;
137
+ let inString = false;
138
+ let escaped = false;
139
+
140
+ for (let i = firstBrace; i < response.length; i++) {
141
+ const ch = response[i];
142
+
143
+ if (escaped) {
144
+ escaped = false;
145
+ continue;
146
+ }
147
+
148
+ if (ch === '\\' && inString) {
149
+ escaped = true;
150
+ continue;
151
+ }
152
+
153
+ if (ch === '"') {
154
+ inString = !inString;
155
+ continue;
156
+ }
157
+
158
+ if (inString) continue;
159
+
160
+ if (ch === '{') braceCount++;
161
+ else if (ch === '}') {
162
+ braceCount--;
163
+ if (braceCount === 0) {
164
+ return JSON.parse(response.substring(firstBrace, i + 1));
165
+ }
72
166
  }
73
167
  }
74
168
  throw new Error('No balanced JSON structure found');
75
169
  },
76
-
77
- // Strategy 4: Try the entire response as JSON (for simple cases)
78
- () => {
79
- return JSON.parse(response.trim());
80
- }
81
170
  ];
82
171
 
172
+ const strategyErrors = [];
83
173
  for (let i = 0; i < strategies.length; i++) {
84
174
  try {
85
175
  const data = strategies[i]();
@@ -88,17 +178,17 @@ function extractJSON(response, level = 'unknown') {
88
178
  return { success: true, data };
89
179
  }
90
180
  } catch (error) {
91
- // Continue to next strategy
92
- if (i === strategies.length - 1) {
93
- // Last strategy failed, log the error
94
- logger.warn(`${levelPrefix} All JSON extraction strategies failed`);
95
- logger.warn(`${levelPrefix} Response preview: ${response.substring(0, 200)}...`);
96
- }
181
+ strategyErrors.push(`S${i + 1}: ${error.message}`);
97
182
  }
98
183
  }
99
184
 
100
- return {
101
- success: false,
185
+ // All strategies failed — log details for debugging
186
+ logger.warn(`${levelPrefix} All JSON extraction strategies failed`);
187
+ logger.warn(`${levelPrefix} Strategy errors: ${strategyErrors.join('; ')}`);
188
+ logger.warn(`${levelPrefix} Response length: ${response.length} chars, preview: ${response.substring(0, 200)}...`);
189
+
190
+ return {
191
+ success: false,
102
192
  error: 'Failed to extract JSON from response',
103
193
  response: response.substring(0, 500) // Include preview for debugging
104
194
  };