polydev-ai 1.6.1 → 1.8.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/lib/cliManager.js CHANGED
@@ -378,7 +378,7 @@ This is a known issue with @google/gemini-cli@0.3.4 and older Node.js versions.`
  }
  }

- async sendCliPrompt(providerId, prompt, mode = 'args', timeoutMs = null) {
+ async sendCliPrompt(providerId, prompt, mode = 'args', timeoutMs = null, model = null) {
  // Set provider-specific default timeouts (180s for all by default, complex prompts take time)
  if (timeoutMs === null) {
  timeoutMs = 180000; // 180 seconds default for all providers
@@ -424,6 +424,13 @@ This is a known issue with @google/gemini-cli@0.3.4 and older Node.js versions.`
  };
  }

+ // Log model being used
+ if (model) {
+ console.log(`[Polydev CLI] Using model for ${providerId}: ${model}`);
+ } else {
+ console.log(`[Polydev CLI] No model specified for ${providerId}, using CLI default`);
+ }
+
  const promptVariants = [
  provider.subcommands?.test_prompt ? [...provider.subcommands.test_prompt] : []
  ];
@@ -437,7 +444,7 @@ This is a known issue with @google/gemini-cli@0.3.4 and older Node.js versions.`
  if (providerId === 'codex_cli') {
  const execArgs = promptVariants.find(args => args.includes('exec')) || promptVariants[0];
  try {
- const content = await this.executeCodexExec(provider.command, execArgs, prompt, timeoutMs);
+ const content = await this.executeCodexExec(provider.command, execArgs, prompt, timeoutMs, model);
  return {
  success: true,
  content,
@@ -445,6 +452,7 @@ This is a known issue with @google/gemini-cli@0.3.4 and older Node.js versions.`
  latency_ms: Date.now() - startTime,
  provider: providerId,
  mode: 'args',
+ model_used: model || 'cli_default',
  timestamp: new Date()
  };
  } catch (error) {
@@ -462,7 +470,25 @@ This is a known issue with @google/gemini-cli@0.3.4 and older Node.js versions.`
  let lastErrorMessage = null;

  for (const promptArgs of promptVariants) {
- const args = Array.isArray(promptArgs) ? [...promptArgs, prompt] : [prompt];
+ // Build args with model flag if specified
+ let args = Array.isArray(promptArgs) ? [...promptArgs] : [];
+
+ // Add model flag based on CLI type
+ if (model) {
+ if (providerId === 'claude_code') {
+ // Claude Code uses --model flag
+ args = ['--model', model, ...args, prompt];
+ } else if (providerId === 'gemini_cli') {
+ // Gemini CLI uses -m flag
+ args = ['-m', model, ...args, prompt];
+ } else {
+ // Default: just append prompt
+ args = [...args, prompt];
+ }
+ } else {
+ args = [...args, prompt];
+ }
+
  try {
  const result = await this.executeCliCommand(
  provider.command,
@@ -481,6 +507,7 @@ This is a known issue with @google/gemini-cli@0.3.4 and older Node.js versions.`
  latency_ms: Date.now() - startTime,
  provider: providerId,
  mode: 'args',
+ model_used: model || 'cli_default',
  timestamp: new Date()
  };
  }
@@ -488,6 +515,37 @@ This is a known issue with @google/gemini-cli@0.3.4 and older Node.js versions.`
  lastErrorMessage = result.error;
  } catch (error) {
  lastErrorMessage = error instanceof Error ? error.message : String(error);
+
+ // If model was specified and command failed, retry without model (graceful fallback)
+ if (model && lastErrorMessage.includes('model')) {
+ console.log(`[Polydev CLI] Model ${model} may be invalid for ${providerId}, retrying without model flag`);
+ try {
+ const fallbackArgs = Array.isArray(promptArgs) ? [...promptArgs, prompt] : [prompt];
+ const fallbackResult = await this.executeCliCommand(
+ provider.command,
+ fallbackArgs,
+ 'args',
+ timeoutMs,
+ undefined
+ );
+
+ if (!fallbackResult.error) {
+ const content = this.cleanCliResponse(fallbackResult.stdout || '');
+ return {
+ success: true,
+ content,
+ tokens_used: this.estimateTokens(prompt + content),
+ latency_ms: Date.now() - startTime,
+ provider: providerId,
+ mode: 'args',
+ model_used: 'cli_default_fallback',
+ timestamp: new Date()
+ };
+ }
+ } catch (fallbackError) {
+ // Fallback also failed, continue with original error
+ }
+ }
  }
  }

@@ -609,7 +667,7 @@ This is a known issue with @google/gemini-cli@0.3.4 and older Node.js versions.`
  });
  }

- async executeCodexExec(executable, commandArgs, prompt, timeoutMs) {
+ async executeCodexExec(executable, commandArgs, prompt, timeoutMs, model = null) {
  if (!executable) {
  throw new Error('Missing Codex executable');
  }
@@ -619,15 +677,26 @@ This is a known issue with @google/gemini-cli@0.3.4 and older Node.js versions.`
  }

  const workingDir = process.cwd();
- const args = [
- ...commandArgs,
+
+ // Build args with optional model flag
+ // Codex CLI uses -m or --model flag
+ let args = [...commandArgs];
+
+ // Add model flag if specified
+ if (model) {
+ args.push('-m', model);
+ console.log(`[CLI Debug] Codex using model: ${model}`);
+ }
+
+ // Add standard flags and prompt
+ args.push(
  '--sandbox',
  'workspace-write',
  '--skip-git-repo-check',
  '--cd',
  workingDir,
  prompt
- ];
+ );

  return new Promise((resolve, reject) => {
  const baseTmp = process.env.POLYDEV_CLI_TMPDIR || process.env.TMPDIR || os.tmpdir();
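For readers of the cliManager.js hunks above, a minimal usage sketch of the widened `sendCliPrompt` signature. The `cliManager` instance name, the prompt text, and the model string are illustrative; only the signature, the provider IDs, the flag mapping, and the `model_used` field come from the diff.

```js
// Hypothetical call site; assumes an already-constructed instance of the CLI manager class above.
const result = await cliManager.sendCliPrompt(
  'claude_code',                 // providerId
  'Summarize the failing test',  // prompt (example text)
  'args',                        // mode
  null,                          // timeoutMs -> falls back to the 180s default
  'claude-sonnet-4-20250514'     // model -> forwarded to the CLI as `--model <model>` for claude_code
);
// result.model_used is the model string when one was passed, 'cli_default' when none was,
// and 'cli_default_fallback' when the model flag was rejected and the retry-without-model path ran.
```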
@@ -116,6 +116,11 @@ class StdioMCPWrapper {

  // Smart refresh scheduler (will be started after initialization)
  this.refreshScheduler = null;
+
+ // Cache for user model preferences (provider -> model)
+ this.userModelPreferences = null;
+ this.modelPreferencesCacheTime = null;
+ this.MODEL_PREFERENCES_CACHE_TTL = 5 * 60 * 1000; // 5 minutes cache
  }

  loadManifest() {
@@ -452,12 +457,26 @@ class StdioMCPWrapper {
  // Use reasonable timeout for CLI responses (180 seconds for complex prompts)
  const gracefulTimeout = Math.min(timeout_ms, 180000);

+ // Fetch user's model preferences (cached, non-blocking on failure)
+ let modelPreferences = {};
+ try {
+ modelPreferences = await this.fetchUserModelPreferences();
+ } catch (prefError) {
+ console.error('[Stdio Wrapper] Model preferences fetch failed (will use CLI defaults):', prefError.message);
+ }
+
  let localResults = [];

  if (provider_id) {
  // Specific provider requested - use only that one
  console.error(`[Stdio Wrapper] Using specific provider: ${provider_id}`);
- const result = await this.cliManager.sendCliPrompt(provider_id, prompt, mode, gracefulTimeout);
+ const model = modelPreferences[provider_id] || null;
+ if (model) {
+ console.error(`[Stdio Wrapper] Using user's preferred model for ${provider_id}: ${model}`);
+ } else {
+ console.error(`[Stdio Wrapper] No model preference for ${provider_id}, using CLI default`);
+ }
+ const result = await this.cliManager.sendCliPrompt(provider_id, prompt, mode, gracefulTimeout, model);
  localResults = [{ provider_id, ...result }];
  } else {
  // No specific provider - use ALL available local CLIs
@@ -473,7 +492,11 @@ class StdioMCPWrapper {
  // Run all CLI prompts concurrently
  const cliPromises = availableProviders.map(async (providerId) => {
  try {
- const result = await this.cliManager.sendCliPrompt(providerId, prompt, mode, gracefulTimeout);
+ const model = modelPreferences[providerId] || null;
+ if (model) {
+ console.error(`[Stdio Wrapper] Using user's preferred model for ${providerId}: ${model}`);
+ }
+ const result = await this.cliManager.sendCliPrompt(providerId, prompt, mode, gracefulTimeout, model);
  return { provider_id: providerId, ...result };
  } catch (error) {
  console.error(`[Stdio Wrapper] CLI ${providerId} failed:`, error.message);
@@ -490,6 +513,11 @@ class StdioMCPWrapper {
  }
  }

+ // Report CLI results to server for dashboard storage (non-blocking)
+ this.reportCliResultsToServer(prompt, localResults, args).catch(err => {
+ console.error('[Stdio Wrapper] CLI results reporting failed (non-critical):', err.message);
+ });
+
  // Get remote perspectives (only for models not covered by local CLIs)
  const perspectivesResult = await this.callPerspectivesForCli(args, localResults);

@@ -542,11 +570,124 @@ class StdioMCPWrapper {
  }
  }

+ /**
+ * Report CLI results to server for dashboard storage
+ * This stores CLI results in Supabase so they appear in the dashboard
+ */
+ async reportCliResultsToServer(prompt, localResults, args = {}) {
+ // Only report if we have successful CLI results
+ const successfulResults = localResults.filter(r => r.success);
+ if (successfulResults.length === 0) {
+ console.error('[Stdio Wrapper] No successful CLI results to report');
+ return;
+ }
+
+ if (!this.userToken) {
+ console.error('[Stdio Wrapper] No user token available for CLI results reporting');
+ return;
+ }
+
+ try {
+ const cliResults = localResults.map(result => ({
+ provider_id: result.provider_id,
+ model: result.model || this.getDefaultModelForCli(result.provider_id),
+ content: result.content || '',
+ tokens_used: result.tokens_used || 0,
+ latency_ms: result.latency_ms || 0,
+ success: result.success || false,
+ error: result.error || null
+ }));
+
+ const reportPayload = {
+ prompt: prompt,
+ cli_results: cliResults,
+ temperature: args.temperature || 0.7,
+ max_tokens: args.max_tokens || 20000
+ };
+
+ const response = await fetch(`${this.serverUrl.replace('/mcp', '')}/api/mcp/report-cli-results`, {
+ method: 'POST',
+ headers: {
+ 'Authorization': `Bearer ${this.userToken}`,
+ 'Content-Type': 'application/json',
+ 'User-Agent': 'polydev-stdio-wrapper/1.0.0'
+ },
+ body: JSON.stringify(reportPayload)
+ });
+
+ if (!response.ok) {
+ const errorText = await response.text();
+ console.error('[Stdio Wrapper] Failed to report CLI results:', response.status, errorText);
+ return;
+ }
+
+ const result = await response.json();
+ console.error(`[Stdio Wrapper] CLI results reported to dashboard: ${result.stored} results stored`);
+
+ } catch (error) {
+ // Non-critical - log and continue
+ console.error('[Stdio Wrapper] Error reporting CLI results (non-critical):', error.message);
+ }
+ }
+
+ /**
+ * Get default model name for a CLI tool (used when model not specified in result)
+ */
+ getDefaultModelForCli(providerId) {
+ const defaults = {
+ 'claude_code': 'claude-sonnet-4-20250514',
+ 'codex_cli': 'gpt-4.1',
+ 'gemini_cli': 'gemini-2.5-pro'
+ };
+ return defaults[providerId] || providerId;
+ }
+
  /**
  * Call remote perspectives for CLI prompts
+ * Only calls remote APIs for providers NOT covered by successful local CLIs
  */
  async callPerspectivesForCli(args, localResults) {
- console.error(`[Stdio Wrapper] Calling remote perspectives for CLI prompt`);
+ // Determine which providers succeeded locally
+ const successfulLocalProviders = localResults
+ .filter(r => r.success)
+ .map(r => r.provider_id);
+
+ // Map CLI provider IDs to API provider names for exclusion
+ const cliToApiProvider = {
+ 'claude_code': 'anthropic',
+ 'codex_cli': 'openai',
+ 'gemini_cli': 'google'
+ };
+
+ const excludeProviders = successfulLocalProviders
+ .map(cli => cliToApiProvider[cli])
+ .filter(Boolean);
+
+ // If all major providers are covered locally, skip remote call entirely
+ if (excludeProviders.length >= 3 ||
+ (excludeProviders.includes('anthropic') && excludeProviders.includes('openai') && excludeProviders.includes('google'))) {
+ console.error(`[Stdio Wrapper] All providers covered by local CLIs, skipping remote perspectives`);
+ return {
+ success: true,
+ content: '',
+ skipped: true,
+ reason: 'All providers covered by local CLIs',
+ timestamp: new Date().toISOString()
+ };
+ }
+
+ console.error(`[Stdio Wrapper] Calling remote perspectives (excluding: ${excludeProviders.join(', ') || 'none'})`);
+
+ // Format CLI responses for logging on the server
+ const cliResponses = localResults.map(result => ({
+ provider_id: result.provider_id,
+ model: result.model || this.getDefaultModelForCli(result.provider_id),
+ content: result.content || '',
+ tokens_used: result.tokens_used || 0,
+ latency_ms: result.latency_ms || 0,
+ success: result.success || false,
+ error: result.error || null
+ }));

  try {
  const perspectivesRequest = {
@@ -557,11 +698,13 @@ class StdioMCPWrapper {
  arguments: {
  prompt: args.prompt,
  user_token: this.userToken,
- // Let the remote server use user's configured preferences for models
- // Don't specify models to use dashboard defaults
+ // Exclude providers that succeeded locally
+ exclude_providers: excludeProviders,
+ // Pass CLI responses for dashboard logging
+ cli_responses: cliResponses,
  project_memory: 'none',
  temperature: 0.7,
- max_tokens: 2000
+ max_tokens: 20000
  }
  },
  id: `perspectives-${Date.now()}`
@@ -1020,6 +1163,87 @@ class StdioMCPWrapper {
  }
  }

+ /**
+ * Fetch user's model preferences from API keys
+ * Returns a map of CLI provider -> default_model
+ */
+ async fetchUserModelPreferences() {
+ // Check cache first
+ if (this.userModelPreferences && this.modelPreferencesCacheTime) {
+ const cacheAge = Date.now() - this.modelPreferencesCacheTime;
+ if (cacheAge < this.MODEL_PREFERENCES_CACHE_TTL) {
+ console.error('[Stdio Wrapper] Using cached model preferences');
+ return this.userModelPreferences;
+ }
+ }
+
+ console.error('[Stdio Wrapper] Fetching user model preferences from API...');
+
+ try {
+ // Call the dedicated model-preferences endpoint
+ const response = await fetch('https://www.polydev.ai/api/model-preferences', {
+ method: 'GET',
+ headers: {
+ 'Authorization': `Bearer ${this.userToken}`,
+ 'User-Agent': 'polydev-stdio-wrapper/1.0.0'
+ }
+ });
+
+ if (!response.ok) {
+ console.error('[Stdio Wrapper] Failed to fetch model preferences:', response.status);
+ return this.userModelPreferences || {};
+ }
+
+ const result = await response.json();
+
+ if (result.success && result.modelPreferences) {
+ // Cache the preferences
+ this.userModelPreferences = result.modelPreferences;
+ this.modelPreferencesCacheTime = Date.now();
+
+ console.error('[Stdio Wrapper] Model preferences loaded:', JSON.stringify(result.modelPreferences));
+ return result.modelPreferences;
+ } else {
+ console.error('[Stdio Wrapper] No model preferences in response');
+ return this.userModelPreferences || {};
+ }
+
+ } catch (error) {
+ console.error('[Stdio Wrapper] Error fetching model preferences:', error.message);
+ return this.userModelPreferences || {};
+ }
+ }
+
+ /**
+ * Map provider name to CLI provider ID
+ */
+ mapProviderToCli(provider) {
+ const providerLower = (provider || '').toLowerCase().trim();
+
+ // Map provider names to CLI tool IDs
+ const providerMap = {
+ 'anthropic': 'claude_code',
+ 'anthropic-ai': 'claude_code',
+ 'claude': 'claude_code',
+ 'openai': 'codex_cli',
+ 'open-ai': 'codex_cli',
+ 'gpt': 'codex_cli',
+ 'google': 'gemini_cli',
+ 'google-ai': 'gemini_cli',
+ 'gemini': 'gemini_cli'
+ };
+
+ return providerMap[providerLower] || null;
+ }
+
+ /**
+ * Get model for a specific CLI provider
+ */
+ async getModelForProvider(providerId) {
+ const preferences = await this.fetchUserModelPreferences();
+ return preferences[providerId] || null;
+ }
+
  async start() {
  console.log('Starting Polydev Stdio MCP Wrapper...');

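For context on the preference plumbing added above, a sketch of the response shape `fetchUserModelPreferences()` expects from `GET https://www.polydev.ai/api/model-preferences`. The `success` and `modelPreferences` fields and the CLI provider keys are the ones the code above reads; the concrete model strings are examples taken from `getDefaultModelForCli`, not guaranteed values.

```js
// Hypothetical example payload; keys map CLI provider IDs to the user's preferred model.
const exampleResponse = {
  success: true,
  modelPreferences: {
    claude_code: 'claude-sonnet-4-20250514',
    codex_cli: 'gpt-4.1',
    gemini_cli: 'gemini-2.5-pro'
  }
};
// Each entry is cached for 5 minutes and passed as the fifth argument to
// cliManager.sendCliPrompt(); a missing key falls back to the CLI's own default model.
```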
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "polydev-ai",
- "version": "1.6.1",
+ "version": "1.8.1",
  "description": "Agentic workflow assistant with CLI integration - get diverse perspectives from multiple LLMs when stuck or need enhanced reasoning",
  "keywords": [
  "mcp",