@mastra/agent-builder 0.0.0-scorer-agentnames-conditional-20250926065249 → 0.0.0-sidebar-window-undefined-fix-20251029233656

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -1,4 +1,4 @@
- import { Agent } from '@mastra/core/agent';
+ import { Agent, tryGenerateWithJsonFallback, tryStreamWithJsonFallback } from '@mastra/core/agent';
  import { Memory } from '@mastra/memory';
  import { TokenLimiter } from '@mastra/memory/processors';
  import { exec as exec$1, execFile as execFile$1, spawn as spawn$1 } from 'child_process';
@@ -10,7 +10,7 @@ import { z } from 'zod';
  import { existsSync, readFileSync } from 'fs';
  import { createRequire } from 'module';
  import { promisify } from 'util';
- import { openai as openai$1 } from '@ai-sdk/openai-v5';
+ import { ModelRouterLanguageModel } from '@mastra/core/llm';
  import { MemoryProcessor } from '@mastra/core/memory';
  import { tmpdir } from 'os';
  import { openai } from '@ai-sdk/openai';
@@ -658,31 +658,9 @@ var createModelInstance = async (provider, modelId, version = "v2") => {
  const { google } = await import('@ai-sdk/google');
  return google(modelId);
  }
- },
- v2: {
- openai: async () => {
- const { openai: openai2 } = await import('@ai-sdk/openai-v5');
- return openai2(modelId);
- },
- anthropic: async () => {
- const { anthropic } = await import('@ai-sdk/anthropic-v5');
- return anthropic(modelId);
- },
- groq: async () => {
- const { groq } = await import('@ai-sdk/groq-v5');
- return groq(modelId);
- },
- xai: async () => {
- const { xai } = await import('@ai-sdk/xai-v5');
- return xai(modelId);
- },
- google: async () => {
- const { google } = await import('@ai-sdk/google-v5');
- return google(modelId);
- }
  }
  };
- const providerFn = providerMap[version][provider];
+ const providerFn = version === `v1` ? providerMap[version][provider] : () => new ModelRouterLanguageModel(`${provider}/${modelId}`);
  if (!providerFn) {
  console.error(`Unsupported provider: ${provider}`);
  return null;
@@ -697,7 +675,7 @@ var createModelInstance = async (provider, modelId, version = "v2") => {
  };
  var resolveModel = async ({
  runtimeContext,
- defaultModel = openai$1("gpt-4.1"),
+ defaultModel = "openai/gpt-4.1",
  projectPath
  }) => {
  const modelFromContext = runtimeContext.get("model");
@@ -721,7 +699,7 @@ var resolveModel = async ({
  }
  }
  console.info("Using default model");
- return defaultModel;
+ return typeof defaultModel === `string` ? new ModelRouterLanguageModel(defaultModel) : defaultModel;
  };

  // src/defaults.ts
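Note on the model-resolution hunks above: the v2 branch no longer dynamically imports the per-provider @ai-sdk/*-v5 packages and instead routes a "provider/modelId" string through ModelRouterLanguageModel from @mastra/core/llm. A minimal sketch of the equivalence implied by this change (illustrative only; resolveModel itself is internal to the bundle):

  // Sketch based on the diff above, not on separately documented API.
  import { ModelRouterLanguageModel } from '@mastra/core/llm';

  // defaultModel may now be a plain string such as "openai/gpt-4.1";
  // resolveModel wraps it before returning, exactly as the hunk shows:
  const defaultModel = 'openai/gpt-4.1';
  const model = typeof defaultModel === 'string'
    ? new ModelRouterLanguageModel(defaultModel) // v2 path
    : defaultModel;                              // already a model instance, returned as-is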
@@ -890,7 +868,7 @@ You have access to an enhanced set of tools based on production coding agent pat
  ### Task Management
  - **taskManager**: Create and track multi-step coding tasks with states (pending, in_progress, completed, blocked). Use this for complex projects that require systematic progress tracking.

- ### Code Discovery & Analysis
+ ### Code Discovery & Analysis
  - **codeAnalyzer**: Analyze codebase structure, discover definitions (functions, classes, interfaces), map dependencies, and understand architectural patterns.
  - **smartSearch**: Intelligent search with context awareness, pattern matching, and relevance scoring.

@@ -1100,7 +1078,7 @@ export const mastra = new Mastra({
  workflows: { weatherWorkflow },
  agents: { weatherAgent },
  storage: new LibSQLStore({
- // stores telemetry, evals, ... into memory storage, if it needs to persist, change to file:../mastra.db
+ // stores observability, evals, ... into memory storage, if it needs to persist, change to file:../mastra.db
  url: ":memory:",
  }),
  logger: new PinoLogger({
@@ -3234,7 +3212,7 @@ ${config.instructions}` : "";
  * Enhanced generate method with AgentBuilder-specific configuration
  * Overrides the base Agent generate method to provide additional project context
  */
- generate = async (messages, generateOptions = {}) => {
+ generateLegacy = async (messages, generateOptions = {}) => {
  const { maxSteps, ...baseOptions } = generateOptions;
  const originalInstructions = await this.getInstructions({ runtimeContext: generateOptions?.runtimeContext });
  const additionalInstructions = baseOptions.instructions;
@@ -3257,13 +3235,13 @@ ${additionalInstructions}`;
  this.logger.debug(`[AgentBuilder:${this.name}] Starting generation with enhanced context`, {
  projectPath: this.builderConfig.projectPath
  });
- return super.generate(messages, enhancedOptions);
+ return super.generateLegacy(messages, enhancedOptions);
  };
  /**
  * Enhanced stream method with AgentBuilder-specific configuration
  * Overrides the base Agent stream method to provide additional project context
  */
- stream = async (messages, streamOptions = {}) => {
+ streamLegacy = async (messages, streamOptions = {}) => {
  const { maxSteps, ...baseOptions } = streamOptions;
  const originalInstructions = await this.getInstructions({ runtimeContext: streamOptions?.runtimeContext });
  const additionalInstructions = baseOptions.instructions;
@@ -3286,13 +3264,13 @@ ${additionalInstructions}`;
  this.logger.debug(`[AgentBuilder:${this.name}] Starting streaming with enhanced context`, {
  projectPath: this.builderConfig.projectPath
  });
- return super.stream(messages, enhancedOptions);
+ return super.streamLegacy(messages, enhancedOptions);
  };
  /**
  * Enhanced stream method with AgentBuilder-specific configuration
  * Overrides the base Agent stream method to provide additional project context
  */
- async streamVNext(messages, streamOptions) {
+ async stream(messages, streamOptions) {
  const { ...baseOptions } = streamOptions || {};
  const originalInstructions = await this.getInstructions({ runtimeContext: streamOptions?.runtimeContext });
  const additionalInstructions = baseOptions.instructions;
@@ -3314,9 +3292,9 @@ ${additionalInstructions}`;
  this.logger.debug(`[AgentBuilder:${this.name}] Starting streaming with enhanced context`, {
  projectPath: this.builderConfig.projectPath
  });
- return super.streamVNext(messages, enhancedOptions);
+ return super.stream(messages, enhancedOptions);
  }
- async generateVNext(messages, options) {
+ async generate(messages, options) {
  const { ...baseOptions } = options || {};
  const originalInstructions = await this.getInstructions({ runtimeContext: options?.runtimeContext });
  const additionalInstructions = baseOptions.instructions;
@@ -3338,7 +3316,7 @@ ${additionalInstructions}`;
  this.logger.debug(`[AgentBuilder:${this.name}] Starting streaming with enhanced context`, {
  projectPath: this.builderConfig.projectPath
  });
- return super.generateVNext(messages, enhancedOptions);
+ return super.generate(messages, enhancedOptions);
  }
  };
  var cloneTemplateStep = createStep({
@@ -3491,10 +3469,12 @@ Return the actual exported names of the units, as well as the file names.`,
  networks: z.array(z.object({ name: z.string(), file: z.string() })).optional(),
  other: z.array(z.object({ name: z.string(), file: z.string() })).optional()
  });
- const result = isV2 ? await agent.generateVNext(prompt, {
- output,
+ const result = isV2 ? await tryGenerateWithJsonFallback(agent, prompt, {
+ structuredOutput: {
+ schema: output
+ },
  maxSteps: 100
- }) : await agent.generate(prompt, {
+ }) : await agent.generateLegacy(prompt, {
  experimental_output: output,
  maxSteps: 100
  });
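The structured-output hunks in this file all follow the same shape: on v2 models the old generateVNext/streamVNext calls with an output schema become tryGenerateWithJsonFallback / tryStreamWithJsonFallback (imported from '@mastra/core/agent' at the top of this diff) with a structuredOutput: { schema } option, while v1 models keep the legacy experimental_output path. A condensed sketch of that convention, with agent, prompt, and isV2 standing in for values computed elsewhere in the file:

  // Placeholder names; only the option shapes are taken from the diff.
  import { tryGenerateWithJsonFallback } from '@mastra/core/agent';
  import { z } from 'zod';

  const output = z.object({ agents: z.array(z.object({ name: z.string(), file: z.string() })).optional() });

  const result = isV2
    ? await tryGenerateWithJsonFallback(agent, prompt, {
        structuredOutput: { schema: output }, // replaces the old `output` option
        maxSteps: 100
      })
    : await agent.generateLegacy(prompt, { experimental_output: output, maxSteps: 100 });

  const parsed = await result.object; // resolved the same way the later hunks use it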
@@ -4240,7 +4220,7 @@ For each task:
  Start by listing your tasks and work through them systematically!
  `;
  const isV2 = model.specificationVersion === "v2";
- const result = isV2 ? await agentBuilder.streamVNext(prompt) : await agentBuilder.stream(prompt);
+ const result = isV2 ? await agentBuilder.stream(prompt) : await agentBuilder.streamLegacy(prompt);
  const actualResolutions = [];
  for await (const chunk of result.fullStream) {
  if (chunk.type === "step-finish" || chunk.type === "step-start") {
@@ -4482,9 +4462,11 @@ Start by running validateCode with all validation types to get a complete pictur
  Previous iterations may have fixed some issues, so start by re-running validateCode to see the current state, then fix any remaining issues.`;
  const isV2 = model.specificationVersion === "v2";
  const output = z.object({ success: z.boolean() });
- const result = isV2 ? await validationAgent.streamVNext(iterationPrompt, {
- output
- }) : await validationAgent.stream(iterationPrompt, {
+ const result = isV2 ? await tryStreamWithJsonFallback(validationAgent, iterationPrompt, {
+ structuredOutput: {
+ schema: output
+ }
+ }) : await validationAgent.streamLegacy(iterationPrompt, {
  experimental_output: output
  });
  let iterationErrors = 0;
@@ -5111,8 +5093,10 @@ var planningIterationStep = createStep({
  projectStructure,
  research
  });
- const result = await planningAgent.generateVNext(planningPrompt, {
- output: PlanningAgentOutputSchema
+ const result = await planningAgent.generate(planningPrompt, {
+ structuredOutput: {
+ schema: PlanningAgentOutputSchema
+ }
  // maxSteps: 15,
  });
  const planResult = await result.object;
@@ -5821,8 +5805,10 @@ var workflowResearchStep = createStep({
  dependencies: inputData.dependencies,
  hasWorkflowsDir: inputData.structure.hasWorkflowsDir
  });
- const result = await researchAgent.generateVNext(researchPrompt, {
- output: WorkflowResearchResultSchema
+ const result = await researchAgent.generate(researchPrompt, {
+ structuredOutput: {
+ schema: WorkflowResearchResultSchema
+ }
  // stopWhen: stepCountIs(10),
  });
  const researchResult = await result.object;
@@ -5934,17 +5920,10 @@ ${workflowBuilderPrompts.validation.instructions}`
  resumeData
  });
  const originalInstructions = await executionAgent.getInstructions({ runtimeContext });
- const additionalInstructions = executionAgent.instructions;
- let enhancedInstructions = originalInstructions;
- if (additionalInstructions) {
- enhancedInstructions = `${originalInstructions}
-
- ${additionalInstructions}`;
- }
  const enhancedOptions = {
  stopWhen: stepCountIs(100),
  temperature: 0.3,
- instructions: enhancedInstructions
+ instructions: originalInstructions
  };
  let finalResult = null;
  let allTasksCompleted = false;
@@ -5973,7 +5952,7 @@ ${additionalInstructions}`;
  })}

  ${workflowBuilderPrompts.validation.instructions}`;
- const stream = await executionAgent.streamVNext(iterationPrompt, {
+ const stream = await executionAgent.stream(iterationPrompt, {
  structuredOutput: {
  schema: TaskExecutionIterationInputSchema(tasks.length),
  model
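Taken together, the AgentBuilder hunks amount to a rename of the entry points: the previous generate/stream become generateLegacy/streamLegacy, and the previous generateVNext/streamVNext take over the generate/stream names, with call sites switching on model.specificationVersion. A condensed sketch of the dispatch pattern repeated throughout this file (model, agentBuilder, and prompt are placeholders for values built elsewhere):

  // Mapping implied by this diff:
  //   generateVNext -> generate     generate (old) -> generateLegacy
  //   streamVNext   -> stream       stream (old)   -> streamLegacy
  const isV2 = model.specificationVersion === 'v2';
  const result = isV2
    ? await agentBuilder.stream(prompt)         // formerly streamVNext
    : await agentBuilder.streamLegacy(prompt);  // formerly stream
  for await (const chunk of result.fullStream) {
    // chunk handling is unchanged from the previous version
  }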