@mastra/agent-builder 0.0.0-roamin-openaivoice-speak-options-passing-20250926163614 → 0.0.0-safe-stringify-telemetry-20251205024938

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -1,4 +1,4 @@
- import { Agent } from '@mastra/core/agent';
+ import { Agent, tryGenerateWithJsonFallback, tryStreamWithJsonFallback } from '@mastra/core/agent';
  import { Memory } from '@mastra/memory';
  import { TokenLimiter } from '@mastra/memory/processors';
  import { exec as exec$1, execFile as execFile$1, spawn as spawn$1 } from 'child_process';
@@ -10,7 +10,7 @@ import { z } from 'zod';
  import { existsSync, readFileSync } from 'fs';
  import { createRequire } from 'module';
  import { promisify } from 'util';
- import { openai as openai$1 } from '@ai-sdk/openai-v5';
+ import { ModelRouterLanguageModel } from '@mastra/core/llm';
  import { MemoryProcessor } from '@mastra/core/memory';
  import { tmpdir } from 'os';
  import { openai } from '@ai-sdk/openai';
@@ -658,31 +658,9 @@ var createModelInstance = async (provider, modelId, version = "v2") => {
  const { google } = await import('@ai-sdk/google');
  return google(modelId);
  }
- },
- v2: {
- openai: async () => {
- const { openai: openai2 } = await import('@ai-sdk/openai-v5');
- return openai2(modelId);
- },
- anthropic: async () => {
- const { anthropic } = await import('@ai-sdk/anthropic-v5');
- return anthropic(modelId);
- },
- groq: async () => {
- const { groq } = await import('@ai-sdk/groq-v5');
- return groq(modelId);
- },
- xai: async () => {
- const { xai } = await import('@ai-sdk/xai-v5');
- return xai(modelId);
- },
- google: async () => {
- const { google } = await import('@ai-sdk/google-v5');
- return google(modelId);
- }
  }
  };
- const providerFn = providerMap[version][provider];
+ const providerFn = version === `v1` ? providerMap[version][provider] : () => new ModelRouterLanguageModel(`${provider}/${modelId}`);
  if (!providerFn) {
  console.error(`Unsupported provider: ${provider}`);
  return null;
@@ -697,7 +675,7 @@ var createModelInstance = async (provider, modelId, version = "v2") => {
  };
  var resolveModel = async ({
  runtimeContext,
- defaultModel = openai$1("gpt-4.1"),
+ defaultModel = "openai/gpt-4.1",
  projectPath
  }) => {
  const modelFromContext = runtimeContext.get("model");
@@ -721,7 +699,7 @@ var resolveModel = async ({
  }
  }
  console.info("Using default model");
- return defaultModel;
+ return typeof defaultModel === `string` ? new ModelRouterLanguageModel(defaultModel) : defaultModel;
  };

  // src/defaults.ts
@@ -3234,7 +3212,7 @@ ${config.instructions}` : "";
  * Enhanced generate method with AgentBuilder-specific configuration
  * Overrides the base Agent generate method to provide additional project context
  */
- generate = async (messages, generateOptions = {}) => {
+ generateLegacy = async (messages, generateOptions = {}) => {
  const { maxSteps, ...baseOptions } = generateOptions;
  const originalInstructions = await this.getInstructions({ runtimeContext: generateOptions?.runtimeContext });
  const additionalInstructions = baseOptions.instructions;
@@ -3257,13 +3235,13 @@ ${additionalInstructions}`;
  this.logger.debug(`[AgentBuilder:${this.name}] Starting generation with enhanced context`, {
  projectPath: this.builderConfig.projectPath
  });
- return super.generate(messages, enhancedOptions);
+ return super.generateLegacy(messages, enhancedOptions);
  };
  /**
  * Enhanced stream method with AgentBuilder-specific configuration
  * Overrides the base Agent stream method to provide additional project context
  */
- stream = async (messages, streamOptions = {}) => {
+ streamLegacy = async (messages, streamOptions = {}) => {
  const { maxSteps, ...baseOptions } = streamOptions;
  const originalInstructions = await this.getInstructions({ runtimeContext: streamOptions?.runtimeContext });
  const additionalInstructions = baseOptions.instructions;
@@ -3286,13 +3264,13 @@ ${additionalInstructions}`;
  this.logger.debug(`[AgentBuilder:${this.name}] Starting streaming with enhanced context`, {
  projectPath: this.builderConfig.projectPath
  });
- return super.stream(messages, enhancedOptions);
+ return super.streamLegacy(messages, enhancedOptions);
  };
  /**
  * Enhanced stream method with AgentBuilder-specific configuration
  * Overrides the base Agent stream method to provide additional project context
  */
- async streamVNext(messages, streamOptions) {
+ async stream(messages, streamOptions) {
  const { ...baseOptions } = streamOptions || {};
  const originalInstructions = await this.getInstructions({ runtimeContext: streamOptions?.runtimeContext });
  const additionalInstructions = baseOptions.instructions;
@@ -3314,9 +3292,9 @@ ${additionalInstructions}`;
  this.logger.debug(`[AgentBuilder:${this.name}] Starting streaming with enhanced context`, {
  projectPath: this.builderConfig.projectPath
  });
- return super.streamVNext(messages, enhancedOptions);
+ return super.stream(messages, enhancedOptions);
  }
- async generateVNext(messages, options) {
+ async generate(messages, options) {
  const { ...baseOptions } = options || {};
  const originalInstructions = await this.getInstructions({ runtimeContext: options?.runtimeContext });
  const additionalInstructions = baseOptions.instructions;
@@ -3338,7 +3316,7 @@ ${additionalInstructions}`;
  this.logger.debug(`[AgentBuilder:${this.name}] Starting streaming with enhanced context`, {
  projectPath: this.builderConfig.projectPath
  });
- return super.generateVNext(messages, enhancedOptions);
+ return super.generate(messages, enhancedOptions);
  }
  };
  var cloneTemplateStep = createStep({
@@ -3491,10 +3469,12 @@ Return the actual exported names of the units, as well as the file names.`,
  networks: z.array(z.object({ name: z.string(), file: z.string() })).optional(),
  other: z.array(z.object({ name: z.string(), file: z.string() })).optional()
  });
- const result = isV2 ? await agent.generateVNext(prompt, {
- output,
+ const result = isV2 ? await tryGenerateWithJsonFallback(agent, prompt, {
+ structuredOutput: {
+ schema: output
+ },
  maxSteps: 100
- }) : await agent.generate(prompt, {
+ }) : await agent.generateLegacy(prompt, {
  experimental_output: output,
  maxSteps: 100
  });
@@ -4240,7 +4220,7 @@ For each task:
  Start by listing your tasks and work through them systematically!
  `;
  const isV2 = model.specificationVersion === "v2";
- const result = isV2 ? await agentBuilder.streamVNext(prompt) : await agentBuilder.stream(prompt);
+ const result = isV2 ? await agentBuilder.stream(prompt) : await agentBuilder.streamLegacy(prompt);
  const actualResolutions = [];
  for await (const chunk of result.fullStream) {
  if (chunk.type === "step-finish" || chunk.type === "step-start") {
@@ -4482,9 +4462,11 @@ Start by running validateCode with all validation types to get a complete pictur
  Previous iterations may have fixed some issues, so start by re-running validateCode to see the current state, then fix any remaining issues.`;
  const isV2 = model.specificationVersion === "v2";
  const output = z.object({ success: z.boolean() });
- const result = isV2 ? await validationAgent.streamVNext(iterationPrompt, {
- output
- }) : await validationAgent.stream(iterationPrompt, {
+ const result = isV2 ? await tryStreamWithJsonFallback(validationAgent, iterationPrompt, {
+ structuredOutput: {
+ schema: output
+ }
+ }) : await validationAgent.streamLegacy(iterationPrompt, {
  experimental_output: output
  });
  let iterationErrors = 0;
@@ -5111,7 +5093,7 @@ var planningIterationStep = createStep({
  projectStructure,
  research
  });
- const result = await planningAgent.generateVNext(planningPrompt, {
+ const result = await planningAgent.generate(planningPrompt, {
  output: PlanningAgentOutputSchema
  // maxSteps: 15,
  });
@@ -5821,7 +5803,7 @@ var workflowResearchStep = createStep({
  dependencies: inputData.dependencies,
  hasWorkflowsDir: inputData.structure.hasWorkflowsDir
  });
- const result = await researchAgent.generateVNext(researchPrompt, {
+ const result = await researchAgent.generate(researchPrompt, {
  output: WorkflowResearchResultSchema
  // stopWhen: stepCountIs(10),
  });
@@ -5973,7 +5955,7 @@ ${additionalInstructions}`;
  })}

  ${workflowBuilderPrompts.validation.instructions}`;
- const stream = await executionAgent.streamVNext(iterationPrompt, {
+ const stream = await executionAgent.stream(iterationPrompt, {
  structuredOutput: {
  schema: TaskExecutionIterationInputSchema(tasks.length),
  model