@probelabs/probe 0.6.0-rc268 → 0.6.0-rc270

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1879,7 +1879,15 @@ export class ProbeAgent {
1879
1879
  if (this.mcpBridge && !options._disableTools) {
1880
1880
  const mcpTools = this.mcpBridge.getVercelTools(this._filterMcpTools(this.mcpBridge.getToolNames()));
1881
1881
  for (const [name, mcpTool] of Object.entries(mcpTools)) {
1882
- nativeTools[name] = mcpTool;
1882
+ // MCP tools have raw JSON Schema inputSchema that must be wrapped with jsonSchema()
1883
+ // for the Vercel AI SDK. Without wrapping, asSchema() misidentifies them as Zod schemas.
1884
+ const mcpSchema = mcpTool.inputSchema || mcpTool.parameters;
1885
+ const wrappedSchema = mcpSchema && mcpSchema._def ? mcpSchema : jsonSchema(mcpSchema || { type: 'object', properties: {} });
1886
+ nativeTools[name] = tool({
1887
+ description: mcpTool.description || `MCP tool: ${name}`,
1888
+ inputSchema: wrappedSchema,
1889
+ execute: mcpTool.execute,
1890
+ });
1883
1891
  }
1884
1892
  }
1885
1893
 
@@ -3574,23 +3582,24 @@ Follow these instructions carefully:
3574
3582
  // Continue even if storage fails
3575
3583
  }
3576
3584
 
3577
- // Completion prompt handling - run a follow-up prompt after attempt_completion for validation/review
3578
- // This runs BEFORE mermaid validation and JSON schema validation
3579
- // Skip if we're already in a completion prompt follow-up call or if no completion prompt is configured
3585
+ // Completion prompt handling - inject one more user message into the existing conversation
3586
+ // This continues the SAME agentic session (same tools, same TaskManager, same history)
3587
+ // rather than spawning a recursive this.answer() call which would reset state
3580
3588
  if (completionAttempted && this.completionPrompt && !options._completionPromptProcessed) {
3581
3589
  if (this.debug) {
3582
- console.log('[DEBUG] Running completion prompt for post-completion validation/review...');
3590
+ console.log('[DEBUG] Running completion prompt as continuation of current session...');
3583
3591
  }
3584
3592
 
3585
3593
  try {
3586
- // Record completion prompt start in telemetry
3594
+ const originalResult = finalResult;
3595
+
3587
3596
  if (this.tracer) {
3588
3597
  this.tracer.recordEvent('completion_prompt.started', {
3589
3598
  'completion_prompt.original_result_length': finalResult?.length || 0
3590
3599
  });
3591
3600
  }
3592
3601
 
3593
- // Create the completion prompt with the current result as context
3602
+ // Append completion prompt as a user message to the existing conversation
3594
3603
  const completionPromptMessage = `${this.completionPrompt}
3595
3604
 
3596
3605
  Here is the result to review:
@@ -3598,34 +3607,82 @@ Here is the result to review:
3598
3607
  ${finalResult}
3599
3608
  </result>
3600
3609
 
3601
- After reviewing, provide your final answer using attempt_completion.`;
3610
+ Double-check your response based on the criteria above. If everything looks good, respond with your previous answer exactly as-is using attempt_completion. If something needs to be fixed or is missing, do it now, then respond with the COMPLETE updated answer (everything you did in total, not just the fix) using attempt_completion.`;
3602
3611
 
3603
- // Make a follow-up call with the completion prompt
3604
- // Pass _completionPromptProcessed to prevent infinite loops
3605
- // Save output buffers — the recursive answer() must not destroy DSL output() content
3606
- const savedOutputItems = this._outputBuffer ? [...this._outputBuffer.items] : [];
3607
- const savedExtractedBlocks = this._extractedRawBlocks ? [...this._extractedRawBlocks] : [];
3608
- const completionResult = await this.answer(completionPromptMessage, [], {
3609
- ...options,
3610
- _completionPromptProcessed: true
3611
- });
3612
- // Restore output buffers so the parent call can append them to the final result
3613
- if (this._outputBuffer) {
3614
- this._outputBuffer.items = savedOutputItems;
3612
+ currentMessages.push({ role: 'user', content: completionPromptMessage });
3613
+
3614
+ // Reset completion tracking for the follow-up turn
3615
+ completionResult = null;
3616
+ completionAttempted = false;
3617
+
3618
+ // Run one more streamText pass with the same tools and conversation context
3619
+ // Give a small number of extra iterations for the follow-up
3620
+ const completionMaxIterations = 5;
3621
+ const completionStreamOptions = {
3622
+ model: this.provider ? this.provider(this.model) : this.model,
3623
+ messages: this.prepareMessagesWithImages(currentMessages),
3624
+ tools,
3625
+ stopWhen: stepCountIs(completionMaxIterations),
3626
+ maxTokens: maxResponseTokens,
3627
+ temperature: 0.3,
3628
+ onStepFinish: ({ toolResults, text, finishReason, usage }) => {
3629
+ if (usage) {
3630
+ this.tokenCounter.recordUsage(usage);
3631
+ }
3632
+ if (options.onStream && text) {
3633
+ options.onStream(text);
3634
+ }
3635
+ if (this.debug) {
3636
+ console.log(`[DEBUG] Completion prompt step finished (reason: ${finishReason}, tools: ${toolResults?.length || 0})`);
3637
+ }
3638
+ }
3639
+ };
3640
+
3641
+ const providerOpts = this._buildThinkingProviderOptions(maxResponseTokens);
3642
+ if (providerOpts) {
3643
+ completionStreamOptions.providerOptions = providerOpts;
3644
+ }
3645
+
3646
+ const cpResult = await this.streamTextWithRetryAndFallback(completionStreamOptions);
3647
+ const cpFinalText = await cpResult.text;
3648
+ const cpUsage = await cpResult.usage;
3649
+ if (cpUsage) {
3650
+ this.tokenCounter.recordUsage(cpUsage, cpResult.experimental_providerMetadata);
3651
+ }
3652
+
3653
+ // Append follow-up messages to conversation history
3654
+ const cpMessages = await cpResult.response?.messages;
3655
+ if (cpMessages) {
3656
+ for (const msg of cpMessages) {
3657
+ currentMessages.push(msg);
3658
+ }
3615
3659
  }
3616
- this._extractedRawBlocks = savedExtractedBlocks;
3617
3660
 
3618
- // Update finalResult with the result from the completion prompt
3619
- finalResult = completionResult;
3661
+ // Use new completion result if the agent called attempt_completion again,
3662
+ // otherwise keep the original result (the follow-up may have just done side-effects)
3663
+ if (completionResult) {
3664
+ finalResult = completionResult;
3665
+ completionAttempted = true;
3666
+ } else if (cpFinalText && cpFinalText.trim().length > 0) {
3667
+ finalResult = cpFinalText;
3668
+ completionAttempted = true;
3669
+ } else {
3670
+ // Follow-up produced nothing useful — keep the original
3671
+ finalResult = originalResult;
3672
+ completionAttempted = true;
3673
+ if (this.debug) {
3674
+ console.log('[DEBUG] Completion prompt returned empty result, keeping original.');
3675
+ }
3676
+ }
3620
3677
 
3621
3678
  if (this.debug) {
3622
- console.log(`[DEBUG] Completion prompt finished. New result length: ${finalResult?.length || 0}`);
3679
+ console.log(`[DEBUG] Completion prompt finished. Final result length: ${finalResult?.length || 0}`);
3623
3680
  }
3624
3681
 
3625
- // Record completion prompt completion in telemetry
3626
3682
  if (this.tracer) {
3627
3683
  this.tracer.recordEvent('completion_prompt.completed', {
3628
- 'completion_prompt.final_result_length': finalResult?.length || 0
3684
+ 'completion_prompt.final_result_length': finalResult?.length || 0,
3685
+ 'completion_prompt.used_original': finalResult === originalResult
3629
3686
  });
3630
3687
  }
3631
3688
  } catch (error) {
@@ -83190,7 +83190,13 @@ var init_ProbeAgent = __esm({
83190
83190
  if (this.mcpBridge && !options._disableTools) {
83191
83191
  const mcpTools = this.mcpBridge.getVercelTools(this._filterMcpTools(this.mcpBridge.getToolNames()));
83192
83192
  for (const [name, mcpTool] of Object.entries(mcpTools)) {
83193
- nativeTools[name] = mcpTool;
83193
+ const mcpSchema = mcpTool.inputSchema || mcpTool.parameters;
83194
+ const wrappedSchema = mcpSchema && mcpSchema._def ? mcpSchema : jsonSchema(mcpSchema || { type: "object", properties: {} });
83195
+ nativeTools[name] = tool5({
83196
+ description: mcpTool.description || `MCP tool: ${name}`,
83197
+ inputSchema: wrappedSchema,
83198
+ execute: mcpTool.execute
83199
+ });
83194
83200
  }
83195
83201
  }
83196
83202
  if (this.apiType === "google" && this._geminiToolsEnabled && !options._disableTools) {
@@ -84548,9 +84554,10 @@ You are working with a workspace. Available paths: ${workspaceDesc}
84548
84554
  }
84549
84555
  if (completionAttempted && this.completionPrompt && !options._completionPromptProcessed) {
84550
84556
  if (this.debug) {
84551
- console.log("[DEBUG] Running completion prompt for post-completion validation/review...");
84557
+ console.log("[DEBUG] Running completion prompt as continuation of current session...");
84552
84558
  }
84553
84559
  try {
84560
+ const originalResult = finalResult;
84554
84561
  if (this.tracer) {
84555
84562
  this.tracer.recordEvent("completion_prompt.started", {
84556
84563
  "completion_prompt.original_result_length": finalResult?.length || 0
@@ -84563,24 +84570,66 @@ Here is the result to review:
84563
84570
  ${finalResult}
84564
84571
  </result>
84565
84572
 
84566
- After reviewing, provide your final answer using attempt_completion.`;
84567
- const savedOutputItems = this._outputBuffer ? [...this._outputBuffer.items] : [];
84568
- const savedExtractedBlocks = this._extractedRawBlocks ? [...this._extractedRawBlocks] : [];
84569
- const completionResult2 = await this.answer(completionPromptMessage, [], {
84570
- ...options,
84571
- _completionPromptProcessed: true
84572
- });
84573
- if (this._outputBuffer) {
84574
- this._outputBuffer.items = savedOutputItems;
84573
+ Double-check your response based on the criteria above. If everything looks good, respond with your previous answer exactly as-is using attempt_completion. If something needs to be fixed or is missing, do it now, then respond with the COMPLETE updated answer (everything you did in total, not just the fix) using attempt_completion.`;
84574
+ currentMessages.push({ role: "user", content: completionPromptMessage });
84575
+ completionResult = null;
84576
+ completionAttempted = false;
84577
+ const completionMaxIterations = 5;
84578
+ const completionStreamOptions = {
84579
+ model: this.provider ? this.provider(this.model) : this.model,
84580
+ messages: this.prepareMessagesWithImages(currentMessages),
84581
+ tools: tools2,
84582
+ stopWhen: stepCountIs(completionMaxIterations),
84583
+ maxTokens: maxResponseTokens,
84584
+ temperature: 0.3,
84585
+ onStepFinish: ({ toolResults, text, finishReason, usage }) => {
84586
+ if (usage) {
84587
+ this.tokenCounter.recordUsage(usage);
84588
+ }
84589
+ if (options.onStream && text) {
84590
+ options.onStream(text);
84591
+ }
84592
+ if (this.debug) {
84593
+ console.log(`[DEBUG] Completion prompt step finished (reason: ${finishReason}, tools: ${toolResults?.length || 0})`);
84594
+ }
84595
+ }
84596
+ };
84597
+ const providerOpts = this._buildThinkingProviderOptions(maxResponseTokens);
84598
+ if (providerOpts) {
84599
+ completionStreamOptions.providerOptions = providerOpts;
84600
+ }
84601
+ const cpResult = await this.streamTextWithRetryAndFallback(completionStreamOptions);
84602
+ const cpFinalText = await cpResult.text;
84603
+ const cpUsage = await cpResult.usage;
84604
+ if (cpUsage) {
84605
+ this.tokenCounter.recordUsage(cpUsage, cpResult.experimental_providerMetadata);
84606
+ }
84607
+ const cpMessages = await cpResult.response?.messages;
84608
+ if (cpMessages) {
84609
+ for (const msg of cpMessages) {
84610
+ currentMessages.push(msg);
84611
+ }
84612
+ }
84613
+ if (completionResult) {
84614
+ finalResult = completionResult;
84615
+ completionAttempted = true;
84616
+ } else if (cpFinalText && cpFinalText.trim().length > 0) {
84617
+ finalResult = cpFinalText;
84618
+ completionAttempted = true;
84619
+ } else {
84620
+ finalResult = originalResult;
84621
+ completionAttempted = true;
84622
+ if (this.debug) {
84623
+ console.log("[DEBUG] Completion prompt returned empty result, keeping original.");
84624
+ }
84575
84625
  }
84576
- this._extractedRawBlocks = savedExtractedBlocks;
84577
- finalResult = completionResult2;
84578
84626
  if (this.debug) {
84579
- console.log(`[DEBUG] Completion prompt finished. New result length: ${finalResult?.length || 0}`);
84627
+ console.log(`[DEBUG] Completion prompt finished. Final result length: ${finalResult?.length || 0}`);
84580
84628
  }
84581
84629
  if (this.tracer) {
84582
84630
  this.tracer.recordEvent("completion_prompt.completed", {
84583
- "completion_prompt.final_result_length": finalResult?.length || 0
84631
+ "completion_prompt.final_result_length": finalResult?.length || 0,
84632
+ "completion_prompt.used_original": finalResult === originalResult
84584
84633
  });
84585
84634
  }
84586
84635
  } catch (error) {
@@ -110138,7 +110138,13 @@ var init_ProbeAgent = __esm({
110138
110138
  if (this.mcpBridge && !options._disableTools) {
110139
110139
  const mcpTools = this.mcpBridge.getVercelTools(this._filterMcpTools(this.mcpBridge.getToolNames()));
110140
110140
  for (const [name14, mcpTool] of Object.entries(mcpTools)) {
110141
- nativeTools[name14] = mcpTool;
110141
+ const mcpSchema = mcpTool.inputSchema || mcpTool.parameters;
110142
+ const wrappedSchema = mcpSchema && mcpSchema._def ? mcpSchema : (0, import_ai6.jsonSchema)(mcpSchema || { type: "object", properties: {} });
110143
+ nativeTools[name14] = (0, import_ai6.tool)({
110144
+ description: mcpTool.description || `MCP tool: ${name14}`,
110145
+ inputSchema: wrappedSchema,
110146
+ execute: mcpTool.execute
110147
+ });
110142
110148
  }
110143
110149
  }
110144
110150
  if (this.apiType === "google" && this._geminiToolsEnabled && !options._disableTools) {
@@ -111496,9 +111502,10 @@ You are working with a workspace. Available paths: ${workspaceDesc}
111496
111502
  }
111497
111503
  if (completionAttempted && this.completionPrompt && !options._completionPromptProcessed) {
111498
111504
  if (this.debug) {
111499
- console.log("[DEBUG] Running completion prompt for post-completion validation/review...");
111505
+ console.log("[DEBUG] Running completion prompt as continuation of current session...");
111500
111506
  }
111501
111507
  try {
111508
+ const originalResult = finalResult;
111502
111509
  if (this.tracer) {
111503
111510
  this.tracer.recordEvent("completion_prompt.started", {
111504
111511
  "completion_prompt.original_result_length": finalResult?.length || 0
@@ -111511,24 +111518,66 @@ Here is the result to review:
111511
111518
  ${finalResult}
111512
111519
  </result>
111513
111520
 
111514
- After reviewing, provide your final answer using attempt_completion.`;
111515
- const savedOutputItems = this._outputBuffer ? [...this._outputBuffer.items] : [];
111516
- const savedExtractedBlocks = this._extractedRawBlocks ? [...this._extractedRawBlocks] : [];
111517
- const completionResult2 = await this.answer(completionPromptMessage, [], {
111518
- ...options,
111519
- _completionPromptProcessed: true
111520
- });
111521
- if (this._outputBuffer) {
111522
- this._outputBuffer.items = savedOutputItems;
111521
+ Double-check your response based on the criteria above. If everything looks good, respond with your previous answer exactly as-is using attempt_completion. If something needs to be fixed or is missing, do it now, then respond with the COMPLETE updated answer (everything you did in total, not just the fix) using attempt_completion.`;
111522
+ currentMessages.push({ role: "user", content: completionPromptMessage });
111523
+ completionResult = null;
111524
+ completionAttempted = false;
111525
+ const completionMaxIterations = 5;
111526
+ const completionStreamOptions = {
111527
+ model: this.provider ? this.provider(this.model) : this.model,
111528
+ messages: this.prepareMessagesWithImages(currentMessages),
111529
+ tools: tools2,
111530
+ stopWhen: (0, import_ai6.stepCountIs)(completionMaxIterations),
111531
+ maxTokens: maxResponseTokens,
111532
+ temperature: 0.3,
111533
+ onStepFinish: ({ toolResults, text, finishReason, usage }) => {
111534
+ if (usage) {
111535
+ this.tokenCounter.recordUsage(usage);
111536
+ }
111537
+ if (options.onStream && text) {
111538
+ options.onStream(text);
111539
+ }
111540
+ if (this.debug) {
111541
+ console.log(`[DEBUG] Completion prompt step finished (reason: ${finishReason}, tools: ${toolResults?.length || 0})`);
111542
+ }
111543
+ }
111544
+ };
111545
+ const providerOpts = this._buildThinkingProviderOptions(maxResponseTokens);
111546
+ if (providerOpts) {
111547
+ completionStreamOptions.providerOptions = providerOpts;
111548
+ }
111549
+ const cpResult = await this.streamTextWithRetryAndFallback(completionStreamOptions);
111550
+ const cpFinalText = await cpResult.text;
111551
+ const cpUsage = await cpResult.usage;
111552
+ if (cpUsage) {
111553
+ this.tokenCounter.recordUsage(cpUsage, cpResult.experimental_providerMetadata);
111554
+ }
111555
+ const cpMessages = await cpResult.response?.messages;
111556
+ if (cpMessages) {
111557
+ for (const msg of cpMessages) {
111558
+ currentMessages.push(msg);
111559
+ }
111560
+ }
111561
+ if (completionResult) {
111562
+ finalResult = completionResult;
111563
+ completionAttempted = true;
111564
+ } else if (cpFinalText && cpFinalText.trim().length > 0) {
111565
+ finalResult = cpFinalText;
111566
+ completionAttempted = true;
111567
+ } else {
111568
+ finalResult = originalResult;
111569
+ completionAttempted = true;
111570
+ if (this.debug) {
111571
+ console.log("[DEBUG] Completion prompt returned empty result, keeping original.");
111572
+ }
111523
111573
  }
111524
- this._extractedRawBlocks = savedExtractedBlocks;
111525
- finalResult = completionResult2;
111526
111574
  if (this.debug) {
111527
- console.log(`[DEBUG] Completion prompt finished. New result length: ${finalResult?.length || 0}`);
111575
+ console.log(`[DEBUG] Completion prompt finished. Final result length: ${finalResult?.length || 0}`);
111528
111576
  }
111529
111577
  if (this.tracer) {
111530
111578
  this.tracer.recordEvent("completion_prompt.completed", {
111531
- "completion_prompt.final_result_length": finalResult?.length || 0
111579
+ "completion_prompt.final_result_length": finalResult?.length || 0,
111580
+ "completion_prompt.used_original": finalResult === originalResult
111532
111581
  });
111533
111582
  }
111534
111583
  } catch (error2) {
package/cjs/index.cjs CHANGED
@@ -107433,7 +107433,13 @@ var init_ProbeAgent = __esm({
107433
107433
  if (this.mcpBridge && !options._disableTools) {
107434
107434
  const mcpTools = this.mcpBridge.getVercelTools(this._filterMcpTools(this.mcpBridge.getToolNames()));
107435
107435
  for (const [name14, mcpTool] of Object.entries(mcpTools)) {
107436
- nativeTools[name14] = mcpTool;
107436
+ const mcpSchema = mcpTool.inputSchema || mcpTool.parameters;
107437
+ const wrappedSchema = mcpSchema && mcpSchema._def ? mcpSchema : (0, import_ai4.jsonSchema)(mcpSchema || { type: "object", properties: {} });
107438
+ nativeTools[name14] = (0, import_ai4.tool)({
107439
+ description: mcpTool.description || `MCP tool: ${name14}`,
107440
+ inputSchema: wrappedSchema,
107441
+ execute: mcpTool.execute
107442
+ });
107437
107443
  }
107438
107444
  }
107439
107445
  if (this.apiType === "google" && this._geminiToolsEnabled && !options._disableTools) {
@@ -108791,9 +108797,10 @@ You are working with a workspace. Available paths: ${workspaceDesc}
108791
108797
  }
108792
108798
  if (completionAttempted && this.completionPrompt && !options._completionPromptProcessed) {
108793
108799
  if (this.debug) {
108794
- console.log("[DEBUG] Running completion prompt for post-completion validation/review...");
108800
+ console.log("[DEBUG] Running completion prompt as continuation of current session...");
108795
108801
  }
108796
108802
  try {
108803
+ const originalResult = finalResult;
108797
108804
  if (this.tracer) {
108798
108805
  this.tracer.recordEvent("completion_prompt.started", {
108799
108806
  "completion_prompt.original_result_length": finalResult?.length || 0
@@ -108806,24 +108813,66 @@ Here is the result to review:
108806
108813
  ${finalResult}
108807
108814
  </result>
108808
108815
 
108809
- After reviewing, provide your final answer using attempt_completion.`;
108810
- const savedOutputItems = this._outputBuffer ? [...this._outputBuffer.items] : [];
108811
- const savedExtractedBlocks = this._extractedRawBlocks ? [...this._extractedRawBlocks] : [];
108812
- const completionResult2 = await this.answer(completionPromptMessage, [], {
108813
- ...options,
108814
- _completionPromptProcessed: true
108815
- });
108816
- if (this._outputBuffer) {
108817
- this._outputBuffer.items = savedOutputItems;
108816
+ Double-check your response based on the criteria above. If everything looks good, respond with your previous answer exactly as-is using attempt_completion. If something needs to be fixed or is missing, do it now, then respond with the COMPLETE updated answer (everything you did in total, not just the fix) using attempt_completion.`;
108817
+ currentMessages.push({ role: "user", content: completionPromptMessage });
108818
+ completionResult = null;
108819
+ completionAttempted = false;
108820
+ const completionMaxIterations = 5;
108821
+ const completionStreamOptions = {
108822
+ model: this.provider ? this.provider(this.model) : this.model,
108823
+ messages: this.prepareMessagesWithImages(currentMessages),
108824
+ tools: tools2,
108825
+ stopWhen: (0, import_ai4.stepCountIs)(completionMaxIterations),
108826
+ maxTokens: maxResponseTokens,
108827
+ temperature: 0.3,
108828
+ onStepFinish: ({ toolResults, text, finishReason, usage }) => {
108829
+ if (usage) {
108830
+ this.tokenCounter.recordUsage(usage);
108831
+ }
108832
+ if (options.onStream && text) {
108833
+ options.onStream(text);
108834
+ }
108835
+ if (this.debug) {
108836
+ console.log(`[DEBUG] Completion prompt step finished (reason: ${finishReason}, tools: ${toolResults?.length || 0})`);
108837
+ }
108838
+ }
108839
+ };
108840
+ const providerOpts = this._buildThinkingProviderOptions(maxResponseTokens);
108841
+ if (providerOpts) {
108842
+ completionStreamOptions.providerOptions = providerOpts;
108843
+ }
108844
+ const cpResult = await this.streamTextWithRetryAndFallback(completionStreamOptions);
108845
+ const cpFinalText = await cpResult.text;
108846
+ const cpUsage = await cpResult.usage;
108847
+ if (cpUsage) {
108848
+ this.tokenCounter.recordUsage(cpUsage, cpResult.experimental_providerMetadata);
108849
+ }
108850
+ const cpMessages = await cpResult.response?.messages;
108851
+ if (cpMessages) {
108852
+ for (const msg of cpMessages) {
108853
+ currentMessages.push(msg);
108854
+ }
108855
+ }
108856
+ if (completionResult) {
108857
+ finalResult = completionResult;
108858
+ completionAttempted = true;
108859
+ } else if (cpFinalText && cpFinalText.trim().length > 0) {
108860
+ finalResult = cpFinalText;
108861
+ completionAttempted = true;
108862
+ } else {
108863
+ finalResult = originalResult;
108864
+ completionAttempted = true;
108865
+ if (this.debug) {
108866
+ console.log("[DEBUG] Completion prompt returned empty result, keeping original.");
108867
+ }
108818
108868
  }
108819
- this._extractedRawBlocks = savedExtractedBlocks;
108820
- finalResult = completionResult2;
108821
108869
  if (this.debug) {
108822
- console.log(`[DEBUG] Completion prompt finished. New result length: ${finalResult?.length || 0}`);
108870
+ console.log(`[DEBUG] Completion prompt finished. Final result length: ${finalResult?.length || 0}`);
108823
108871
  }
108824
108872
  if (this.tracer) {
108825
108873
  this.tracer.recordEvent("completion_prompt.completed", {
108826
- "completion_prompt.final_result_length": finalResult?.length || 0
108874
+ "completion_prompt.final_result_length": finalResult?.length || 0,
108875
+ "completion_prompt.used_original": finalResult === originalResult
108827
108876
  });
108828
108877
  }
108829
108878
  } catch (error2) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@probelabs/probe",
3
- "version": "0.6.0-rc268",
3
+ "version": "0.6.0-rc270",
4
4
  "description": "Node.js wrapper for the probe code search tool",
5
5
  "main": "src/index.js",
6
6
  "module": "src/index.js",
@@ -1879,7 +1879,15 @@ export class ProbeAgent {
1879
1879
  if (this.mcpBridge && !options._disableTools) {
1880
1880
  const mcpTools = this.mcpBridge.getVercelTools(this._filterMcpTools(this.mcpBridge.getToolNames()));
1881
1881
  for (const [name, mcpTool] of Object.entries(mcpTools)) {
1882
- nativeTools[name] = mcpTool;
1882
+ // MCP tools have raw JSON Schema inputSchema that must be wrapped with jsonSchema()
1883
+ // for the Vercel AI SDK. Without wrapping, asSchema() misidentifies them as Zod schemas.
1884
+ const mcpSchema = mcpTool.inputSchema || mcpTool.parameters;
1885
+ const wrappedSchema = mcpSchema && mcpSchema._def ? mcpSchema : jsonSchema(mcpSchema || { type: 'object', properties: {} });
1886
+ nativeTools[name] = tool({
1887
+ description: mcpTool.description || `MCP tool: ${name}`,
1888
+ inputSchema: wrappedSchema,
1889
+ execute: mcpTool.execute,
1890
+ });
1883
1891
  }
1884
1892
  }
1885
1893
 
@@ -3574,23 +3582,24 @@ Follow these instructions carefully:
3574
3582
  // Continue even if storage fails
3575
3583
  }
3576
3584
 
3577
- // Completion prompt handling - run a follow-up prompt after attempt_completion for validation/review
3578
- // This runs BEFORE mermaid validation and JSON schema validation
3579
- // Skip if we're already in a completion prompt follow-up call or if no completion prompt is configured
3585
+ // Completion prompt handling - inject one more user message into the existing conversation
3586
+ // This continues the SAME agentic session (same tools, same TaskManager, same history)
3587
+ // rather than spawning a recursive this.answer() call which would reset state
3580
3588
  if (completionAttempted && this.completionPrompt && !options._completionPromptProcessed) {
3581
3589
  if (this.debug) {
3582
- console.log('[DEBUG] Running completion prompt for post-completion validation/review...');
3590
+ console.log('[DEBUG] Running completion prompt as continuation of current session...');
3583
3591
  }
3584
3592
 
3585
3593
  try {
3586
- // Record completion prompt start in telemetry
3594
+ const originalResult = finalResult;
3595
+
3587
3596
  if (this.tracer) {
3588
3597
  this.tracer.recordEvent('completion_prompt.started', {
3589
3598
  'completion_prompt.original_result_length': finalResult?.length || 0
3590
3599
  });
3591
3600
  }
3592
3601
 
3593
- // Create the completion prompt with the current result as context
3602
+ // Append completion prompt as a user message to the existing conversation
3594
3603
  const completionPromptMessage = `${this.completionPrompt}
3595
3604
 
3596
3605
  Here is the result to review:
@@ -3598,34 +3607,82 @@ Here is the result to review:
3598
3607
  ${finalResult}
3599
3608
  </result>
3600
3609
 
3601
- After reviewing, provide your final answer using attempt_completion.`;
3610
+ Double-check your response based on the criteria above. If everything looks good, respond with your previous answer exactly as-is using attempt_completion. If something needs to be fixed or is missing, do it now, then respond with the COMPLETE updated answer (everything you did in total, not just the fix) using attempt_completion.`;
3602
3611
 
3603
- // Make a follow-up call with the completion prompt
3604
- // Pass _completionPromptProcessed to prevent infinite loops
3605
- // Save output buffers the recursive answer() must not destroy DSL output() content
3606
- const savedOutputItems = this._outputBuffer ? [...this._outputBuffer.items] : [];
3607
- const savedExtractedBlocks = this._extractedRawBlocks ? [...this._extractedRawBlocks] : [];
3608
- const completionResult = await this.answer(completionPromptMessage, [], {
3609
- ...options,
3610
- _completionPromptProcessed: true
3611
- });
3612
- // Restore output buffers so the parent call can append them to the final result
3613
- if (this._outputBuffer) {
3614
- this._outputBuffer.items = savedOutputItems;
3612
+ currentMessages.push({ role: 'user', content: completionPromptMessage });
3613
+
3614
+ // Reset completion tracking for the follow-up turn
3615
+ completionResult = null;
3616
+ completionAttempted = false;
3617
+
3618
+ // Run one more streamText pass with the same tools and conversation context
3619
+ // Give a small number of extra iterations for the follow-up
3620
+ const completionMaxIterations = 5;
3621
+ const completionStreamOptions = {
3622
+ model: this.provider ? this.provider(this.model) : this.model,
3623
+ messages: this.prepareMessagesWithImages(currentMessages),
3624
+ tools,
3625
+ stopWhen: stepCountIs(completionMaxIterations),
3626
+ maxTokens: maxResponseTokens,
3627
+ temperature: 0.3,
3628
+ onStepFinish: ({ toolResults, text, finishReason, usage }) => {
3629
+ if (usage) {
3630
+ this.tokenCounter.recordUsage(usage);
3631
+ }
3632
+ if (options.onStream && text) {
3633
+ options.onStream(text);
3634
+ }
3635
+ if (this.debug) {
3636
+ console.log(`[DEBUG] Completion prompt step finished (reason: ${finishReason}, tools: ${toolResults?.length || 0})`);
3637
+ }
3638
+ }
3639
+ };
3640
+
3641
+ const providerOpts = this._buildThinkingProviderOptions(maxResponseTokens);
3642
+ if (providerOpts) {
3643
+ completionStreamOptions.providerOptions = providerOpts;
3644
+ }
3645
+
3646
+ const cpResult = await this.streamTextWithRetryAndFallback(completionStreamOptions);
3647
+ const cpFinalText = await cpResult.text;
3648
+ const cpUsage = await cpResult.usage;
3649
+ if (cpUsage) {
3650
+ this.tokenCounter.recordUsage(cpUsage, cpResult.experimental_providerMetadata);
3651
+ }
3652
+
3653
+ // Append follow-up messages to conversation history
3654
+ const cpMessages = await cpResult.response?.messages;
3655
+ if (cpMessages) {
3656
+ for (const msg of cpMessages) {
3657
+ currentMessages.push(msg);
3658
+ }
3615
3659
  }
3616
- this._extractedRawBlocks = savedExtractedBlocks;
3617
3660
 
3618
- // Update finalResult with the result from the completion prompt
3619
- finalResult = completionResult;
3661
+ // Use new completion result if the agent called attempt_completion again,
3662
+ // otherwise keep the original result (the follow-up may have just done side-effects)
3663
+ if (completionResult) {
3664
+ finalResult = completionResult;
3665
+ completionAttempted = true;
3666
+ } else if (cpFinalText && cpFinalText.trim().length > 0) {
3667
+ finalResult = cpFinalText;
3668
+ completionAttempted = true;
3669
+ } else {
3670
+ // Follow-up produced nothing useful — keep the original
3671
+ finalResult = originalResult;
3672
+ completionAttempted = true;
3673
+ if (this.debug) {
3674
+ console.log('[DEBUG] Completion prompt returned empty result, keeping original.');
3675
+ }
3676
+ }
3620
3677
 
3621
3678
  if (this.debug) {
3622
- console.log(`[DEBUG] Completion prompt finished. New result length: ${finalResult?.length || 0}`);
3679
+ console.log(`[DEBUG] Completion prompt finished. Final result length: ${finalResult?.length || 0}`);
3623
3680
  }
3624
3681
 
3625
- // Record completion prompt completion in telemetry
3626
3682
  if (this.tracer) {
3627
3683
  this.tracer.recordEvent('completion_prompt.completed', {
3628
- 'completion_prompt.final_result_length': finalResult?.length || 0
3684
+ 'completion_prompt.final_result_length': finalResult?.length || 0,
3685
+ 'completion_prompt.used_original': finalResult === originalResult
3629
3686
  });
3630
3687
  }
3631
3688
  } catch (error) {