polydev-ai 1.7.0 → 1.8.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -513,6 +513,11 @@ class StdioMCPWrapper {
       }
     }
 
+    // Report CLI results to server for dashboard storage (non-blocking)
+    this.reportCliResultsToServer(prompt, localResults, args).catch(err => {
+      console.error('[Stdio Wrapper] CLI results reporting failed (non-critical):', err.message);
+    });
+
     // Get remote perspectives (only for models not covered by local CLIs)
     const perspectivesResult = await this.callPerspectivesForCli(args, localResults);
 
@@ -565,11 +570,124 @@ class StdioMCPWrapper {
     }
   }
 
+  /**
+   * Report CLI results to server for dashboard storage
+   * This stores CLI results in Supabase so they appear in the dashboard
+   */
+  async reportCliResultsToServer(prompt, localResults, args = {}) {
+    // Only report if we have successful CLI results
+    const successfulResults = localResults.filter(r => r.success);
+    if (successfulResults.length === 0) {
+      console.error('[Stdio Wrapper] No successful CLI results to report');
+      return;
+    }
+
+    if (!this.userToken) {
+      console.error('[Stdio Wrapper] No user token available for CLI results reporting');
+      return;
+    }
+
+    try {
+      const cliResults = localResults.map(result => ({
+        provider_id: result.provider_id,
+        model: result.model || this.getDefaultModelForCli(result.provider_id),
+        content: result.content || '',
+        tokens_used: result.tokens_used || 0,
+        latency_ms: result.latency_ms || 0,
+        success: result.success || false,
+        error: result.error || null
+      }));
+
+      const reportPayload = {
+        prompt: prompt,
+        cli_results: cliResults,
+        temperature: args.temperature || 0.7,
+        max_tokens: args.max_tokens || 20000
+      };
+
+      const response = await fetch(`${this.serverUrl.replace('/mcp', '')}/api/mcp/report-cli-results`, {
+        method: 'POST',
+        headers: {
+          'Authorization': `Bearer ${this.userToken}`,
+          'Content-Type': 'application/json',
+          'User-Agent': 'polydev-stdio-wrapper/1.0.0'
+        },
+        body: JSON.stringify(reportPayload)
+      });
+
+      if (!response.ok) {
+        const errorText = await response.text();
+        console.error('[Stdio Wrapper] Failed to report CLI results:', response.status, errorText);
+        return;
+      }
+
+      const result = await response.json();
+      console.error(`[Stdio Wrapper] CLI results reported to dashboard: ${result.stored} results stored`);
+
+    } catch (error) {
+      // Non-critical - log and continue
+      console.error('[Stdio Wrapper] Error reporting CLI results (non-critical):', error.message);
+    }
+  }
+
+  /**
+   * Get default model name for a CLI tool (used when model not specified in result)
+   */
+  getDefaultModelForCli(providerId) {
+    const defaults = {
+      'claude_code': 'claude-sonnet-4-20250514',
+      'codex_cli': 'gpt-4.1',
+      'gemini_cli': 'gemini-2.5-pro'
+    };
+    return defaults[providerId] || providerId;
+  }
+
   /**
    * Call remote perspectives for CLI prompts
+   * Only calls remote APIs for providers NOT covered by successful local CLIs
    */
   async callPerspectivesForCli(args, localResults) {
-    console.error(`[Stdio Wrapper] Calling remote perspectives for CLI prompt`);
+    // Determine which providers succeeded locally
+    const successfulLocalProviders = localResults
+      .filter(r => r.success)
+      .map(r => r.provider_id);
+
+    // Map CLI provider IDs to API provider names for exclusion
+    const cliToApiProvider = {
+      'claude_code': 'anthropic',
+      'codex_cli': 'openai',
+      'gemini_cli': 'google'
+    };
+
+    const excludeProviders = successfulLocalProviders
+      .map(cli => cliToApiProvider[cli])
+      .filter(Boolean);
+
+    // If all major providers are covered locally, skip remote call entirely
+    if (excludeProviders.length >= 3 ||
+        (excludeProviders.includes('anthropic') && excludeProviders.includes('openai') && excludeProviders.includes('google'))) {
+      console.error(`[Stdio Wrapper] All providers covered by local CLIs, skipping remote perspectives`);
+      return {
+        success: true,
+        content: '',
+        skipped: true,
+        reason: 'All providers covered by local CLIs',
+        timestamp: new Date().toISOString()
+      };
+    }
+
+    console.error(`[Stdio Wrapper] Calling remote perspectives (excluding: ${excludeProviders.join(', ') || 'none'})`);
+
+    // Format CLI responses for logging on the server
+    const cliResponses = localResults.map(result => ({
+      provider_id: result.provider_id,
+      model: result.model || this.getDefaultModelForCli(result.provider_id),
+      content: result.content || '',
+      tokens_used: result.tokens_used || 0,
+      latency_ms: result.latency_ms || 0,
+      success: result.success || false,
+      error: result.error || null
+    }));
 
     try {
       const perspectivesRequest = {
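
For illustration, a minimal sketch of how the exclusion logic added above behaves; the localResults entries below are hypothetical and show only the fields the method actually reads:

    // Hypothetical local results: Claude Code and Gemini CLI succeeded, Codex CLI failed
    const localResults = [
      { provider_id: 'claude_code', success: true,  content: '...' },
      { provider_id: 'codex_cli',   success: false, error: 'CLI not installed' },
      { provider_id: 'gemini_cli',  success: true,  content: '...' }
    ];
    // successfulLocalProviders -> ['claude_code', 'gemini_cli']
    // excludeProviders         -> ['anthropic', 'google']
    // Remote perspectives are still requested, but only for providers not covered locally
    // (here, the OpenAI-backed models). If all three succeed locally, the remote call is skipped.
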
@@ -580,11 +698,13 @@ class StdioMCPWrapper {
           arguments: {
             prompt: args.prompt,
             user_token: this.userToken,
-            // Let the remote server use user's configured preferences for models
-            // Don't specify models to use dashboard defaults
+            // Exclude providers that succeeded locally
+            exclude_providers: excludeProviders,
+            // Pass CLI responses for dashboard logging
+            cli_responses: cliResponses,
             project_memory: 'none',
             temperature: 0.7,
-            max_tokens: 2000
+            max_tokens: 20000
           }
         },
         id: `perspectives-${Date.now()}`
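
Taken together, the wrapper now POSTs a payload shaped roughly as follows to the server's /api/mcp/report-cli-results route. The field names come from the diff above; the concrete values are illustrative, and the assumption that the JSON response carries a stored count is inferred from the result.stored log line:

    // Hypothetical request body sent by reportCliResultsToServer (values illustrative)
    // POST <serverUrl without '/mcp'>/api/mcp/report-cli-results, with Bearer auth
    const exampleReportPayload = {
      prompt: 'Why does my build fail?',        // the original CLI prompt
      cli_results: [{
        provider_id: 'claude_code',
        model: 'claude-sonnet-4-20250514',
        content: '...',                          // full CLI answer text
        tokens_used: 1234,
        latency_ms: 5678,
        success: true,
        error: null
      }],
      temperature: 0.7,
      max_tokens: 20000
    };
    // The server's JSON response is assumed to include a `stored` count,
    // based on the `${result.stored} results stored` log line above.
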
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "polydev-ai",
-  "version": "1.7.0",
+  "version": "1.8.1",
   "description": "Agentic workflow assistant with CLI integration - get diverse perspectives from multiple LLMs when stuck or need enhanced reasoning",
   "keywords": [
     "mcp",