@mastra/core 0.24.1 → 0.24.2-alpha.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +15 -0
- package/dist/agent/agent.d.ts +1 -1
- package/dist/agent/agent.d.ts.map +1 -1
- package/dist/agent/agent.types.d.ts +2 -2
- package/dist/agent/agent.types.d.ts.map +1 -1
- package/dist/agent/index.cjs +13 -13
- package/dist/agent/index.js +2 -2
- package/dist/agent/input-processor/index.cjs +6 -6
- package/dist/agent/input-processor/index.js +1 -1
- package/dist/agent/message-list/index.d.ts.map +1 -1
- package/dist/agent/message-list/prompt/convert-file.d.ts +1 -1
- package/dist/agent/message-list/prompt/convert-file.d.ts.map +1 -1
- package/dist/agent/types.d.ts +1 -0
- package/dist/agent/types.d.ts.map +1 -1
- package/dist/agent/utils.d.ts.map +1 -1
- package/dist/agent/workflows/prepare-stream/index.d.ts +2 -1
- package/dist/agent/workflows/prepare-stream/index.d.ts.map +1 -1
- package/dist/agent/workflows/prepare-stream/map-results-step.d.ts +3 -1
- package/dist/agent/workflows/prepare-stream/map-results-step.d.ts.map +1 -1
- package/dist/agent/workflows/prepare-stream/prepare-memory-step.d.ts +2 -1
- package/dist/agent/workflows/prepare-stream/prepare-memory-step.d.ts.map +1 -1
- package/dist/agent/workflows/prepare-stream/prepare-tools-step.d.ts +2 -1
- package/dist/agent/workflows/prepare-stream/prepare-tools-step.d.ts.map +1 -1
- package/dist/agent/workflows/prepare-stream/stream-step.d.ts +3 -1
- package/dist/agent/workflows/prepare-stream/stream-step.d.ts.map +1 -1
- package/dist/ai-tracing/index.cjs +36 -36
- package/dist/ai-tracing/index.js +1 -1
- package/dist/{chunk-EDVPIVAB.js → chunk-4OPPKDPO.js} +3 -3
- package/dist/{chunk-EDVPIVAB.js.map → chunk-4OPPKDPO.js.map} +1 -1
- package/dist/{chunk-BCFLCIAX.cjs → chunk-B6RCPK2E.cjs} +173 -15
- package/dist/chunk-B6RCPK2E.cjs.map +1 -0
- package/dist/{chunk-UBVIPDVD.cjs → chunk-BC5S7YUC.cjs} +4 -4
- package/dist/{chunk-UBVIPDVD.cjs.map → chunk-BC5S7YUC.cjs.map} +1 -1
- package/dist/{chunk-DNYWTNND.js → chunk-DBVX3GFJ.js} +27 -22
- package/dist/chunk-DBVX3GFJ.js.map +1 -0
- package/dist/{chunk-6OGG24PY.cjs → chunk-DE7YOOVP.cjs} +55 -52
- package/dist/chunk-DE7YOOVP.cjs.map +1 -0
- package/dist/{chunk-II37LKES.js → chunk-EFNEFT4U.js} +173 -15
- package/dist/chunk-EFNEFT4U.js.map +1 -0
- package/dist/{chunk-HKT62GIP.js → chunk-FKV5666D.js} +3 -3
- package/dist/{chunk-HKT62GIP.js.map → chunk-FKV5666D.js.map} +1 -1
- package/dist/{chunk-LILCMG3L.cjs → chunk-GCMRHVCZ.cjs} +8 -8
- package/dist/{chunk-LILCMG3L.cjs.map → chunk-GCMRHVCZ.cjs.map} +1 -1
- package/dist/{chunk-MXLKXD6Q.js → chunk-IZJTEPQQ.js} +4 -4
- package/dist/{chunk-MXLKXD6Q.js.map → chunk-IZJTEPQQ.js.map} +1 -1
- package/dist/{chunk-Q7VW22SF.js → chunk-J2YC7IYA.js} +5 -5
- package/dist/{chunk-Q7VW22SF.js.map → chunk-J2YC7IYA.js.map} +1 -1
- package/dist/{chunk-TVZB5572.cjs → chunk-JBYJAIFU.cjs} +12 -12
- package/dist/chunk-JBYJAIFU.cjs.map +1 -0
- package/dist/{chunk-Z3BWWT7E.js → chunk-K7MEUZ3O.js} +3 -3
- package/dist/{chunk-Z3BWWT7E.js.map → chunk-K7MEUZ3O.js.map} +1 -1
- package/dist/{chunk-O5PROZMW.cjs → chunk-LZFCR2SE.cjs} +4 -4
- package/dist/chunk-LZFCR2SE.cjs.map +1 -0
- package/dist/{chunk-TPYSVJ5T.cjs → chunk-NJKNZXAS.cjs} +12 -12
- package/dist/{chunk-TPYSVJ5T.cjs.map → chunk-NJKNZXAS.cjs.map} +1 -1
- package/dist/{chunk-V66IXQAJ.js → chunk-OLERJ2OU.js} +3 -3
- package/dist/{chunk-V66IXQAJ.js.map → chunk-OLERJ2OU.js.map} +1 -1
- package/dist/{chunk-EJPCH5WB.cjs → chunk-RHZQQQZ4.cjs} +28 -23
- package/dist/chunk-RHZQQQZ4.cjs.map +1 -0
- package/dist/{chunk-ZVAWVX5J.js → chunk-SHPLHQFH.js} +20 -17
- package/dist/chunk-SHPLHQFH.js.map +1 -0
- package/dist/{chunk-VBQZFQXS.cjs → chunk-UD7DS7OY.cjs} +4 -4
- package/dist/{chunk-VBQZFQXS.cjs.map → chunk-UD7DS7OY.cjs.map} +1 -1
- package/dist/{chunk-CESZ2AOS.js → chunk-XC2OEALC.js} +5 -5
- package/dist/chunk-XC2OEALC.js.map +1 -0
- package/dist/{chunk-KDNLQBCW.cjs → chunk-YDFWVGCR.cjs} +11 -11
- package/dist/{chunk-KDNLQBCW.cjs.map → chunk-YDFWVGCR.cjs.map} +1 -1
- package/dist/{chunk-5D6TMUAG.cjs → chunk-YSQE5IHK.cjs} +6 -6
- package/dist/{chunk-5D6TMUAG.cjs.map → chunk-YSQE5IHK.cjs.map} +1 -1
- package/dist/{chunk-MBMPTC7F.js → chunk-ZIHEKHUB.js} +4 -4
- package/dist/chunk-ZIHEKHUB.js.map +1 -0
- package/dist/index.cjs +49 -49
- package/dist/index.js +8 -8
- package/dist/llm/index.cjs +7 -7
- package/dist/llm/index.js +1 -1
- package/dist/llm/model/aisdk/v5/model.d.ts +47 -0
- package/dist/llm/model/aisdk/v5/model.d.ts.map +1 -0
- package/dist/llm/model/is-v2-model.d.ts +3 -0
- package/dist/llm/model/is-v2-model.d.ts.map +1 -0
- package/dist/llm/model/model-method-from-agent.d.ts +4 -0
- package/dist/llm/model/model-method-from-agent.d.ts.map +1 -0
- package/dist/llm/model/model.loop.d.ts +2 -2
- package/dist/llm/model/model.loop.d.ts.map +1 -1
- package/dist/llm/model/model.loop.types.d.ts +2 -0
- package/dist/llm/model/model.loop.types.d.ts.map +1 -1
- package/dist/llm/model/resolve-model.d.ts +1 -1
- package/dist/llm/model/resolve-model.d.ts.map +1 -1
- package/dist/llm/model/router.d.ts +6 -4
- package/dist/llm/model/router.d.ts.map +1 -1
- package/dist/llm/model/shared.types.d.ts +10 -4
- package/dist/llm/model/shared.types.d.ts.map +1 -1
- package/dist/loop/index.cjs +2 -2
- package/dist/loop/index.js +1 -1
- package/dist/loop/test-utils/MastraLanguageModelV2Mock.d.ts +37 -0
- package/dist/loop/test-utils/MastraLanguageModelV2Mock.d.ts.map +1 -0
- package/dist/loop/test-utils/fullStream.d.ts.map +1 -1
- package/dist/loop/test-utils/generateText.d.ts.map +1 -1
- package/dist/loop/test-utils/options.d.ts.map +1 -1
- package/dist/loop/test-utils/resultObject.d.ts.map +1 -1
- package/dist/loop/test-utils/streamObject.d.ts.map +1 -1
- package/dist/loop/test-utils/telemetry.d.ts.map +1 -1
- package/dist/loop/test-utils/textStream.d.ts.map +1 -1
- package/dist/loop/test-utils/toUIMessageStream.d.ts.map +1 -1
- package/dist/loop/test-utils/tools.d.ts.map +1 -1
- package/dist/loop/test-utils/utils.d.ts +1 -1
- package/dist/loop/test-utils/utils.d.ts.map +1 -1
- package/dist/loop/types.d.ts +8 -3
- package/dist/loop/types.d.ts.map +1 -1
- package/dist/loop/workflows/agentic-execution/index.d.ts +12 -12
- package/dist/loop/workflows/agentic-execution/llm-execution-step.d.ts +9 -9
- package/dist/loop/workflows/agentic-execution/llm-execution-step.d.ts.map +1 -1
- package/dist/loop/workflows/agentic-execution/llm-mapping-step.d.ts +4 -4
- package/dist/loop/workflows/agentic-loop/index.d.ts +12 -12
- package/dist/loop/workflows/run-state.d.ts +2 -2
- package/dist/loop/workflows/run-state.d.ts.map +1 -1
- package/dist/loop/workflows/schema.d.ts +4 -4
- package/dist/mastra/index.cjs +2 -2
- package/dist/mastra/index.d.ts +2 -2
- package/dist/mastra/index.d.ts.map +1 -1
- package/dist/mastra/index.js +1 -1
- package/dist/memory/index.cjs +4 -4
- package/dist/memory/index.js +1 -1
- package/dist/processors/index.cjs +11 -11
- package/dist/processors/index.js +1 -1
- package/dist/relevance/index.cjs +4 -4
- package/dist/relevance/index.js +1 -1
- package/dist/scores/index.cjs +9 -9
- package/dist/scores/index.js +2 -2
- package/dist/scores/scoreTraces/index.cjs +8 -8
- package/dist/scores/scoreTraces/index.js +3 -3
- package/dist/storage/index.cjs +3 -3
- package/dist/storage/index.js +1 -1
- package/dist/stream/aisdk/v5/execute.d.ts +6 -3
- package/dist/stream/aisdk/v5/execute.d.ts.map +1 -1
- package/dist/stream/aisdk/v5/output.d.ts.map +1 -1
- package/dist/stream/base/output.d.ts.map +1 -1
- package/dist/stream/index.cjs +11 -11
- package/dist/stream/index.js +2 -2
- package/dist/stream/types.d.ts +4 -3
- package/dist/stream/types.d.ts.map +1 -1
- package/dist/test-utils/llm-mock.cjs +2 -2
- package/dist/test-utils/llm-mock.js +1 -1
- package/dist/tools/tool-builder/builder.d.ts.map +1 -1
- package/dist/utils.cjs +17 -17
- package/dist/utils.d.ts +1 -1
- package/dist/utils.d.ts.map +1 -1
- package/dist/utils.js +1 -1
- package/dist/workflows/evented/index.cjs +10 -10
- package/dist/workflows/evented/index.js +1 -1
- package/dist/workflows/index.cjs +14 -14
- package/dist/workflows/index.js +1 -1
- package/dist/workflows/legacy/index.cjs +22 -22
- package/dist/workflows/legacy/index.js +1 -1
- package/dist/workflows/types.d.ts +1 -1
- package/dist/workflows/types.d.ts.map +1 -1
- package/dist/workflows/workflow.d.ts +2 -2
- package/dist/workflows/workflow.d.ts.map +1 -1
- package/package.json +2 -2
- package/dist/chunk-6OGG24PY.cjs.map +0 -1
- package/dist/chunk-BCFLCIAX.cjs.map +0 -1
- package/dist/chunk-CESZ2AOS.js.map +0 -1
- package/dist/chunk-DNYWTNND.js.map +0 -1
- package/dist/chunk-EJPCH5WB.cjs.map +0 -1
- package/dist/chunk-II37LKES.js.map +0 -1
- package/dist/chunk-MBMPTC7F.js.map +0 -1
- package/dist/chunk-O5PROZMW.cjs.map +0 -1
- package/dist/chunk-TVZB5572.cjs.map +0 -1
- package/dist/chunk-ZVAWVX5J.js.map +0 -1
@@ -1,12 +1,12 @@
 import { DefaultVoice } from './chunk-WCHE6FJ7.js';
 import { STREAM_FORMAT_SYMBOL, EMITTER_SYMBOL } from './chunk-NLNKQD2T.js';
 import { InstrumentClass, Telemetry } from './chunk-BLUDYAPI.js';
-import { MessageList, DefaultGeneratedFile, DefaultGeneratedFileWithType } from './chunk-
-import { resolveModelConfig } from './chunk-
-import { MastraLLMV1 } from './chunk-
+import { MessageList, DefaultGeneratedFile, DefaultGeneratedFileWithType } from './chunk-DBVX3GFJ.js';
+import { resolveModelConfig } from './chunk-EFNEFT4U.js';
+import { MastraLLMV1 } from './chunk-OLERJ2OU.js';
 import { executeHook } from './chunk-TTELJD4F.js';
 import { zodToJsonSchema } from './chunk-PJKCPRYF.js';
-import { wrapMastra, selectFields, getOrCreateSpan, getValidTraceId, ensureToolProperties, makeCoreTool, createMastraProxy, ModelSpanTracker, delay } from './chunk-
+import { wrapMastra, selectFields, getOrCreateSpan, getValidTraceId, ensureToolProperties, makeCoreTool, createMastraProxy, ModelSpanTracker, delay } from './chunk-ZIHEKHUB.js';
 import { RuntimeContext } from './chunk-HLRWYUFN.js';
 import { MastraError, safeParseErrorObject, getErrorFromUnknown } from './chunk-PZUZNPFM.js';
 import { ToolStream } from './chunk-HGNRQ3OG.js';
@@ -199,7 +199,8 @@ var DelayedPromise=class{status={type:"pending"};_promise;_resolve=void 0;_rejec
function prepareToolsAndToolChoice({tools,toolChoice,activeTools}){if(Object.keys(tools||{}).length===0){return {tools:void 0,toolChoice:void 0};}const filteredTools=activeTools!=null?Object.entries(tools||{}).filter(([name])=>activeTools.includes(name)):Object.entries(tools||{});return {tools:filteredTools.map(([name,tool$1])=>{try{let inputSchema;if("inputSchema"in tool$1){inputSchema=tool$1.inputSchema;}else if("parameters"in tool$1){inputSchema=tool$1.parameters;}const sdkTool=tool({type:"function",...tool$1,inputSchema});const toolType=sdkTool?.type??"function";switch(toolType){case void 0:case "dynamic":case "function":return {type:"function",name,description:sdkTool.description,inputSchema:asSchema(sdkTool.inputSchema).jsonSchema,providerOptions:sdkTool.providerOptions};case "provider-defined":return {type:"provider-defined",name,// TODO: as any seems wrong here. are there cases where we don't have an id?
id:sdkTool.id,args:sdkTool.args};default:{const exhaustiveCheck=toolType;throw new Error(`Unsupported tool type: ${exhaustiveCheck}`);}}}catch(e){console.error("Error preparing tool",e);return null;}}).filter(tool=>tool!==null),toolChoice:toolChoice==null?{type:"auto"}:typeof toolChoice==="string"?{type:toolChoice}:{type:"tool",toolName:toolChoice.toolName}};}// src/stream/aisdk/v5/compat/consume-stream.ts
async function consumeStream({stream,onError}){const reader=stream.getReader();try{while(true){const{done}=await reader.read();if(done)break;}}catch(error){console.error("consumeStream error",error);onError?.(error);}finally{reader.releaseLock();}}// src/agent/agent.ts
-var import_fast_deep_equal2=__toESM(require_fast_deep_equal(),1);
+var import_fast_deep_equal2=__toESM(require_fast_deep_equal(),1);function isV2Model(model){return model.specificationVersion==="v2";}// src/llm/model/model.loop.ts
+async function consumeStream2({stream,onError}){const reader=stream.getReader();try{while(true){const{done}=await reader.read();if(done)break;}}catch(error){onError==null?void 0:onError(error);}finally{reader.releaseLock();}}// src/stream/RunOutput.ts
var WorkflowRunOutput=class{#status="running";#usageCount={inputTokens:0,outputTokens:0,totalTokens:0,cachedInputTokens:0,reasoningTokens:0};#consumptionStarted=false;#baseStream;#emitter=new EventEmitter();#bufferedChunks=[];#streamFinished=false;#streamError;#delayedPromises={usage:new DelayedPromise(),result:new DelayedPromise()};/**
* Unique identifier for this workflow run
*/runId;/**
@@ -401,10 +402,10 @@ case "reasoning-end":return {type:"reasoning-end",id:chunk.payload.id,providerMe
};case "tool-error":return {type:"tool-error",error:chunk.payload.error,input:chunk.payload.args,toolCallId:chunk.payload.toolCallId,providerExecuted:chunk.payload.providerExecuted,toolName:chunk.payload.toolName// providerMetadata: chunk.payload.providerMetadata, // AI v5 types don't show this?
};case "abort":return {type:"abort"};case "error":return {type:"error",error:chunk.payload.error};case "object":return {type:"object",object:chunk.object};default:if(chunk.type&&"payload"in chunk&&chunk.payload){return {type:chunk.type,...(chunk.payload||{})};}return;}}// src/stream/aisdk/v5/input.ts
var AISDKV5InputStream=class extends MastraModelInput{constructor({component,name}){super({component,name});}async transform({runId,stream,controller}){for await(const chunk of stream){const transformedChunk=convertFullStreamChunkToMastra(chunk,{runId});if(transformedChunk){controller.enqueue(transformedChunk);}}}};// src/stream/aisdk/v5/execute.ts
-
function omit(obj,keys){const newObj={...obj};for(const key of keys){delete newObj[key];}return newObj;}var hasLoggedModelSettingsAbortSignalDeprecation=false;function execute({runId,model,providerOptions,inputMessages,tools,toolChoice,options,onResult,modelStreamSpan,telemetry_settings,includeRawChunks,modelSettings,structuredOutput,headers,shouldThrowError}){if(modelSettings?.abortSignal&&!hasLoggedModelSettingsAbortSignalDeprecation){console.warn("[Deprecation Warning] Using `modelSettings.abortSignal` is deprecated. Please use top-level `abortSignal` instead. The `modelSettings.abortSignal` option will be removed in a future version.");hasLoggedModelSettingsAbortSignalDeprecation=true;}const v5=new AISDKV5InputStream({component:"LLM",name:model.modelId});const toolsAndToolChoice=prepareToolsAndToolChoice({tools,toolChoice,activeTools:options?.activeTools});if(modelStreamSpan&&toolsAndToolChoice?.tools?.length&&telemetry_settings?.recordOutputs!==false){modelStreamSpan.setAttributes({"stream.prompt.tools":toolsAndToolChoice?.tools?.map(tool=>JSON.stringify(tool))});}const structuredOutputMode=structuredOutput?.schema?structuredOutput?.model?"processor":"direct":void 0;const responseFormat=structuredOutput?.schema?getResponseFormat(structuredOutput?.schema):void 0;let prompt=inputMessages;if(structuredOutputMode==="direct"&&responseFormat?.type==="json"&&structuredOutput?.jsonPromptInjection){prompt=injectJsonInstructionIntoMessages({messages:inputMessages,schema:responseFormat.schema});}if(structuredOutputMode==="processor"&&responseFormat?.type==="json"&&responseFormat?.schema){prompt=injectJsonInstructionIntoMessages({messages:inputMessages,schema:responseFormat.schema,schemaPrefix:`Your response will be processed by another agent to extract structured data. Please ensure your response contains comprehensive information for all the following fields that will be extracted:
+
function omit(obj,keys){const newObj={...obj};for(const key of keys){delete newObj[key];}return newObj;}var hasLoggedModelSettingsAbortSignalDeprecation=false;function execute({runId,model,providerOptions,inputMessages,tools,toolChoice,options,onResult,modelStreamSpan,telemetry_settings,includeRawChunks,modelSettings,structuredOutput,headers,shouldThrowError,methodType}){if(modelSettings?.abortSignal&&!hasLoggedModelSettingsAbortSignalDeprecation){console.warn("[Deprecation Warning] Using `modelSettings.abortSignal` is deprecated. Please use top-level `abortSignal` instead. The `modelSettings.abortSignal` option will be removed in a future version.");hasLoggedModelSettingsAbortSignalDeprecation=true;}const v5=new AISDKV5InputStream({component:"LLM",name:model.modelId});const toolsAndToolChoice=prepareToolsAndToolChoice({tools,toolChoice,activeTools:options?.activeTools});if(modelStreamSpan&&toolsAndToolChoice?.tools?.length&&telemetry_settings?.recordOutputs!==false){modelStreamSpan.setAttributes({"stream.prompt.tools":toolsAndToolChoice?.tools?.map(tool=>JSON.stringify(tool))});}const structuredOutputMode=structuredOutput?.schema?structuredOutput?.model?"processor":"direct":void 0;const responseFormat=structuredOutput?.schema?getResponseFormat(structuredOutput?.schema):void 0;let prompt=inputMessages;if(structuredOutputMode==="direct"&&responseFormat?.type==="json"&&structuredOutput?.jsonPromptInjection){prompt=injectJsonInstructionIntoMessages({messages:inputMessages,schema:responseFormat.schema});}if(structuredOutputMode==="processor"&&responseFormat?.type==="json"&&responseFormat?.schema){prompt=injectJsonInstructionIntoMessages({messages:inputMessages,schema:responseFormat.schema,schemaPrefix:`Your response will be processed by another agent to extract structured data. Please ensure your response contains comprehensive information for all the following fields that will be extracted:
`,schemaSuffix:`
-
You don't need to format your response as JSON unless the user asks you to. Just ensure your natural language response includes relevant information for each field in the schema above.`});}const providerOptionsToUse=model.provider.startsWith("openai")&&responseFormat?.type==="json"&&!structuredOutput?.jsonPromptInjection?{...(providerOptions??{}),openai:{strictJsonSchema:true,...(providerOptions?.openai??{})}}:providerOptions;const stream=v5.initialize({runId,onResult,createStream:async()=>{try{const filteredModelSettings=omit(modelSettings||{},["maxRetries","headers","abortSignal"]);const abortSignal=options?.abortSignal||modelSettings?.abortSignal;const pRetry=await import('p-retry');return await pRetry.default(async()=>{const
+
You don't need to format your response as JSON unless the user asks you to. Just ensure your natural language response includes relevant information for each field in the schema above.`});}const providerOptionsToUse=model.provider.startsWith("openai")&&responseFormat?.type==="json"&&!structuredOutput?.jsonPromptInjection?{...(providerOptions??{}),openai:{strictJsonSchema:true,...(providerOptions?.openai??{})}}:providerOptions;const stream=v5.initialize({runId,onResult,createStream:async()=>{try{const filteredModelSettings=omit(modelSettings||{},["maxRetries","headers","abortSignal"]);const abortSignal=options?.abortSignal||modelSettings?.abortSignal;const pRetry=await import('p-retry');return await pRetry.default(async()=>{const fn=(methodType==="stream"?model.doStream:model.doGenerate).bind(model);const streamResult=await fn({...toolsAndToolChoice,prompt,providerOptions:providerOptionsToUse,abortSignal,includeRawChunks,responseFormat:structuredOutputMode==="direct"&&!structuredOutput?.jsonPromptInjection?responseFormat:void 0,...filteredModelSettings,headers});return streamResult;},{retries:modelSettings?.maxRetries??2,signal:abortSignal,shouldRetry(context){if(APICallError.isInstance(context.error)){return context.error.isRetryable;}return true;}});}catch(error){if(shouldThrowError){throw error;}return {stream:new ReadableStream({start:async controller=>{controller.enqueue({type:"error",error});controller.close();}}),warnings:[],request:{},rawResponse:{}};}}});return stream;}// src/stream/aisdk/v5/output-helpers.ts
var DefaultStepResult=class{content;finishReason;usage;warnings;request;response;providerMetadata;constructor({content,finishReason,usage,warnings,request,response,providerMetadata}){this.content=content;this.finishReason=finishReason;this.usage=usage;this.warnings=warnings;this.request=request;this.response=response;this.providerMetadata=providerMetadata;}get text(){return this.content.filter(part=>part.type==="text").map(part=>part.text).join("");}get reasoning(){return this.content.filter(part=>part.type==="reasoning");}get reasoningText(){return this.reasoning.length===0?void 0:this.reasoning.map(part=>part.text).join("");}get files(){return this.content.filter(part=>part.type==="file").map(part=>part.file);}get sources(){return this.content.filter(part=>part.type==="source");}get toolCalls(){return this.content.filter(part=>part.type==="tool-call");}get staticToolCalls(){return this.toolCalls.filter(toolCall=>toolCall.dynamic===false);}get dynamicToolCalls(){return this.toolCalls.filter(toolCall=>toolCall.dynamic===true);}get toolResults(){return this.content.filter(part=>part.type==="tool-result");}get staticToolResults(){return this.toolResults.filter(toolResult=>toolResult.dynamic===false);}get dynamicToolResults(){return this.toolResults.filter(toolResult=>toolResult.dynamic===true);}};// src/loop/workflows/run-state.ts
var AgenticRunState=class{#state;constructor({_internal,model}){this.#state={responseMetadata:{id:_internal?.generateId?.(),timestamp:_internal?.currentDate?.(),modelId:model.modelId,modelVersion:model.specificationVersion,modelProvider:model.provider,headers:void 0},modelMetadata:{modelId:model.modelId,modelVersion:model.specificationVersion,modelProvider:model.provider},isReasoning:false,isStreaming:false,providerOptions:void 0,hasToolCallStreaming:false,hasErrored:false,reasoningDeltas:[],textDeltas:[],stepResult:void 0};}setState(state){this.#state={...this.#state,...state};}get state(){return this.#state;}};// src/loop/workflows/agentic-execution/llm-execution-step.ts
async function processOutputStream({tools,messageId,messageList,outputStream,runState,options,controller,responseFromModel,includeRawChunks}){for await(const chunk of outputStream._getBaseStream()){if(!chunk){continue;}if(chunk.type=="object"||chunk.type=="object-result"){controller.enqueue(chunk);continue;}if(chunk.type!=="text-delta"&&chunk.type!=="tool-call"&&// not 100% sure about this being the right fix.
@@ -415,7 +416,7 @@ async function processOutputStream({tools,messageId,messageList,outputStream,run
// BUT does this cause other issues?
// Alternative solution: in message list allow combining text deltas together when the message source is "response" and the text parts are directly next to each other
// simple solution for now is to not flush text deltas on response-metadata
-
chunk.type!=="response-metadata"&&runState.state.isStreaming){if(runState.state.textDeltas.length){const textStartPayload=chunk.payload;const providerMetadata=textStartPayload.providerMetadata??runState.state.providerOptions;messageList.add({id:messageId,role:"assistant",content:[providerMetadata?{type:"text",text:runState.state.textDeltas.join(""),providerOptions:providerMetadata}:{type:"text",text:runState.state.textDeltas.join("")}]},"response");}runState.setState({isStreaming:false,textDeltas:[]});}if(chunk.type!=="reasoning-start"&&chunk.type!=="reasoning-delta"&&chunk.type!=="reasoning-end"&&chunk.type!=="redacted-reasoning"&&chunk.type!=="reasoning-signature"&&chunk.type!=="response-metadata"&&runState.state.isReasoning){runState.setState({isReasoning:false,reasoningDeltas:[]});}switch(chunk.type){case "response-metadata":runState.setState({responseMetadata:{id:chunk.payload.id,timestamp:chunk.payload.timestamp,modelId:chunk.payload.modelId,headers:chunk.payload.headers}});break;case "text-delta":{const textDeltasFromState=runState.state.textDeltas;textDeltasFromState.push(chunk.payload.text);runState.setState({textDeltas:textDeltasFromState,isStreaming:true});if(isControllerOpen(controller)){controller.enqueue(chunk);}break;}case "tool-call-input-streaming-start":{const tool=tools?.[chunk.payload.toolName]||Object.values(tools||{})?.find(tool2=>`id`in tool2&&tool2.id===chunk.payload.toolName);if(tool&&"onInputStart"in tool){try{await tool?.onInputStart?.({toolCallId:chunk.payload.toolCallId,messages:messageList.get.input.aiV5.model(),abortSignal:options?.abortSignal});}catch(error2){console.error("Error calling onInputStart",error2);}}if(isControllerOpen(controller)){controller.enqueue(chunk);}break;}case "tool-call-delta":{const tool=tools?.[chunk.payload.toolName||""]||Object.values(tools||{})?.find(tool2=>`id`in tool2&&tool2.id===chunk.payload.toolName);if(tool&&"onInputDelta"in tool){try{await tool?.onInputDelta?.({inputTextDelta:chunk.payload.argsTextDelta,toolCallId:chunk.payload.toolCallId,messages:messageList.get.input.aiV5.model(),abortSignal:options?.abortSignal});}catch(error2){console.error("Error calling onInputDelta",error2);}}if(isControllerOpen(controller)){controller.enqueue(chunk);}break;}case "reasoning-start":{runState.setState({isReasoning:true,reasoningDeltas:[],providerOptions:chunk.payload.providerMetadata??runState.state.providerOptions});if(Object.values(chunk.payload.providerMetadata||{}).find(v=>v?.redactedData)){messageList.add({id:messageId,role:"assistant",content:[{type:"reasoning",text:"",providerOptions:chunk.payload.providerMetadata??runState.state.providerOptions}]},"response");if(isControllerOpen(controller)){controller.enqueue(chunk);}break;}if(isControllerOpen(controller)){controller.enqueue(chunk);}break;}case "reasoning-delta":{const reasoningDeltasFromState=runState.state.reasoningDeltas;reasoningDeltasFromState.push(chunk.payload.text);runState.setState({isReasoning:true,reasoningDeltas:reasoningDeltasFromState,providerOptions:chunk.payload.providerMetadata??runState.state.providerOptions});if(isControllerOpen(controller)){controller.enqueue(chunk);}break;}case 
"reasoning-end":{if(runState.state.reasoningDeltas.length>0){messageList.add({id:messageId,role:"assistant",content:[{type:"reasoning",text:runState.state.reasoningDeltas.join(""),providerOptions:chunk.payload.providerMetadata??runState.state.providerOptions}]},"response");}runState.setState({isReasoning:false,reasoningDeltas:[]});if(isControllerOpen(controller)){controller.enqueue(chunk);}break;}case "file":messageList.add({id:messageId,role:"assistant",content:[{type:"file",data:chunk.payload.data,mimeType:chunk.payload.mimeType}]},"response");controller.enqueue(chunk);break;case "source":messageList.add({id:messageId,role:"assistant",content:{format:2,parts:[{type:"source",source:{sourceType:"url",id:chunk.payload.id,url:chunk.payload.url||"",title:chunk.payload.title,providerMetadata:chunk.payload.providerMetadata}}]},createdAt:/* @__PURE__ */new Date()},"response");controller.enqueue(chunk);break;case "finish":runState.setState({providerOptions:chunk.payload.metadata.providerMetadata,stepResult:{reason:chunk.payload.reason,logprobs:chunk.payload.logprobs,warnings:responseFromModel.warnings,totalUsage:chunk.payload.totalUsage,headers:responseFromModel.rawResponse?.headers,messageId,isContinued:!["stop","error"].includes(chunk.payload.stepResult.reason),request:responseFromModel.request}});break;case "error":if(isAbortError(chunk.payload.error)&&options?.abortSignal?.aborted){break;}runState.setState({hasErrored:true});runState.setState({stepResult:{isContinued:false,reason:"error"}});const error=getErrorFromUnknown(chunk.payload.error,{fallbackMessage:"Unknown error in agent stream"});controller.enqueue({...chunk,payload:{...chunk.payload,error}});await options?.onError?.({error});break;default:if(isControllerOpen(controller)){controller.enqueue(chunk);}}if(["text-delta","reasoning-delta","source","tool-call","tool-call-input-streaming-start","tool-call-delta","raw"].includes(chunk.type)){if(chunk.type==="raw"&&!includeRawChunks){continue;}await options?.onChunk?.(chunk);}if(runState.state.hasErrored){break;}}}function executeStreamWithFallbackModels(models){return async callback=>{let index=0;let finalResult;let done=false;for(const modelConfig of models){index++;const maxRetries=modelConfig.maxRetries||0;let attempt=0;if(done){break;}while(attempt<=maxRetries){try{const isLastModel=attempt===maxRetries&&index===models.length;const result=await callback(modelConfig.model,isLastModel);finalResult=result;done=true;break;}catch(err){attempt++;console.error(`Error executing model ${modelConfig.model.modelId}, attempt ${attempt}====`,err);if(attempt>maxRetries){break;}}}}if(typeof finalResult==="undefined"){console.error("Exhausted all fallback models and reached the maximum number of retries.");throw new Error("Exhausted all fallback models and reached the maximum number of retries.");}return finalResult;};}function createLLMExecutionStep({models,_internal,messageId,runId,modelStreamSpan,telemetry_settings,tools,toolChoice,messageList,includeRawChunks,modelSettings,providerOptions,options,toolCallStreaming,controller,structuredOutput,outputProcessors,headers,downloadRetries,downloadConcurrency,processorStates}){return createStep({id:"llm-execution",inputSchema:llmIterationOutputSchema,outputSchema:llmIterationOutputSchema,execute:async({inputData,bail,tracingContext})=>{let modelResult;let warnings;let request;let rawResponse;const{outputStream,callBail,runState}=await executeStreamWithFallbackModels(models)(async(model,isLastModel)=>{const runState2=new 
AgenticRunState({_internal,model});switch(model.specificationVersion){case "v2":{const messageListPromptArgs={downloadRetries,downloadConcurrency,supportedUrls:model?.supportedUrls};let inputMessages=await messageList.get.all.aiV5.llmPrompt(messageListPromptArgs);let stepModel=model;let stepToolChoice=toolChoice;let stepTools=tools;if(options?.prepareStep){try{const prepareStepResult=await options.prepareStep({stepNumber:inputData.output?.steps?.length||0,steps:inputData.output?.steps||[],model,messages:messageList.get.all.aiV5.model()});if(prepareStepResult){if(prepareStepResult.model){stepModel=prepareStepResult.model;}if(prepareStepResult.toolChoice){stepToolChoice=prepareStepResult.toolChoice;}if(prepareStepResult.activeTools&&stepTools){const activeToolsSet=new Set(prepareStepResult.activeTools);stepTools=Object.fromEntries(Object.entries(stepTools).filter(([toolName])=>activeToolsSet.has(toolName)));}if(prepareStepResult.messages){const newMessages=prepareStepResult.messages;const newMessageList=new MessageList();for(const message of newMessages){if(message.role==="system"){newMessageList.addSystem(message);}else if(message.role==="user"){newMessageList.add(message,"input");}else if(message.role==="assistant"||message.role==="tool"){newMessageList.add(message,"response");}}inputMessages=await newMessageList.get.all.aiV5.llmPrompt(messageListPromptArgs);}}}catch(error){console.error("Error in prepareStep callback:",error);}}modelResult=execute({runId,model:stepModel,providerOptions,inputMessages,tools:stepTools,toolChoice:stepToolChoice,options,modelSettings,telemetry_settings,includeRawChunks,structuredOutput,headers,onResult:({warnings:warningsFromStream,request:requestFromStream,rawResponse:rawResponseFromStream})=>{warnings=warningsFromStream;request=requestFromStream||{};rawResponse=rawResponseFromStream;if(!isControllerOpen(controller)){return;}controller.enqueue({runId,from:"AGENT"/* AGENT */,type:"step-start",payload:{request:request||{},warnings:warnings||[],messageId}});},modelStreamSpan,shouldThrowError:!isLastModel});break;}default:{throw new Error(`Unsupported model version: ${model.specificationVersion}`);}}const outputStream2=new MastraModelOutput({model:{modelId:model.modelId,provider:model.provider,version:model.specificationVersion},stream:modelResult,messageList,messageId,options:{runId,rootSpan:modelStreamSpan,toolCallStreaming,telemetry_settings,includeRawChunks,structuredOutput,outputProcessors,isLLMExecutionStep:true,tracingContext,processorStates}});try{await processOutputStream({outputStream:outputStream2,includeRawChunks,tools,messageId,messageList,runState:runState2,options,controller,responseFromModel:{warnings,request,rawResponse}});}catch(error){console.error("Error in LLM Execution Step",error);if(isAbortError(error)&&options?.abortSignal?.aborted){await options?.onAbort?.({steps:inputData?.output?.steps??[]});if(isControllerOpen(controller)){controller.enqueue({type:"abort",runId,from:"AGENT"/* AGENT */,payload:{}});}return {callBail:true,outputStream:outputStream2,runState:runState2};}if(isLastModel){if(isControllerOpen(controller)){controller.enqueue({type:"error",runId,from:"AGENT"/* AGENT */,payload:{error}});}runState2.setState({hasErrored:true,stepResult:{isContinued:false,reason:"error"}});}else {throw error;}}return {outputStream:outputStream2,callBail:false,runState:runState2};});if(callBail){const usage2=outputStream._getImmediateUsage();const responseMetadata2=runState.state.responseMetadata;const text2=outputStream._getImmediateText();return 
bail({messageId,stepResult:{reason:"abort",warnings,isContinued:false},metadata:{providerMetadata:runState.state.providerOptions,...responseMetadata2,modelMetadata:runState.state.modelMetadata,headers:rawResponse?.headers,request},output:{text:text2,toolCalls:[],usage:usage2??inputData.output?.usage,steps:[]},messages:{all:messageList.get.all.aiV5.model(),user:messageList.get.input.aiV5.model(),nonUser:messageList.get.response.aiV5.model()}});}if(outputStream.tripwire){runState.setState({stepResult:{isContinued:false,reason:"abort"}});}const toolCalls=outputStream._getImmediateToolCalls()?.map(chunk=>{return chunk.payload;});if(toolCalls.length>0){const assistantContent=[...toolCalls.map(toolCall=>{return {type:"tool-call",toolCallId:toolCall.toolCallId,toolName:toolCall.toolName,args:toolCall.args};})];messageList.add({id:messageId,role:"assistant",content:assistantContent},"response");}const finishReason=runState?.state?.stepResult?.reason??outputStream._getImmediateFinishReason();const hasErrored=runState.state.hasErrored;const usage=outputStream._getImmediateUsage();const responseMetadata=runState.state.responseMetadata;const text=outputStream._getImmediateText();const object=outputStream._getImmediateObject();const tripwireTriggered=outputStream.tripwire;const steps=inputData.output?.steps||[];const existingResponseCount=inputData.messages?.nonUser?.length||0;const allResponseContent=messageList.get.response.aiV5.modelContent(steps.length);const currentIterationContent=allResponseContent.slice(existingResponseCount);steps.push(new DefaultStepResult({warnings:outputStream._getImmediateWarnings(),providerMetadata:runState.state.providerOptions,finishReason:runState.state.stepResult?.reason,content:currentIterationContent,response:{...responseMetadata,...rawResponse,messages:messageList.get.response.aiV5.model()},request,usage:outputStream._getImmediateUsage()}));const messages={all:messageList.get.all.aiV5.model(),user:messageList.get.input.aiV5.model(),nonUser:messageList.get.response.aiV5.model()};return {messageId,stepResult:{reason:tripwireTriggered?"abort":hasErrored?"error":finishReason,warnings,isContinued:tripwireTriggered?false:!["stop","error"].includes(finishReason)},metadata:{providerMetadata:runState.state.providerOptions,...responseMetadata,...rawResponse,modelMetadata:runState.state.modelMetadata,headers:rawResponse?.headers,request},output:{text,toolCalls,usage:usage??inputData.output?.usage,steps,...(object?{object}:{})},messages};}});}// src/loop/workflows/agentic-execution/llm-mapping-step.ts
+
chunk.type!=="response-metadata"&&runState.state.isStreaming){if(runState.state.textDeltas.length){const textStartPayload=chunk.payload;const providerMetadata=textStartPayload.providerMetadata??runState.state.providerOptions;messageList.add({id:messageId,role:"assistant",content:[providerMetadata?{type:"text",text:runState.state.textDeltas.join(""),providerOptions:providerMetadata}:{type:"text",text:runState.state.textDeltas.join("")}]},"response");}runState.setState({isStreaming:false,textDeltas:[]});}if(chunk.type!=="reasoning-start"&&chunk.type!=="reasoning-delta"&&chunk.type!=="reasoning-end"&&chunk.type!=="redacted-reasoning"&&chunk.type!=="reasoning-signature"&&chunk.type!=="response-metadata"&&runState.state.isReasoning){runState.setState({isReasoning:false,reasoningDeltas:[]});}switch(chunk.type){case "response-metadata":runState.setState({responseMetadata:{id:chunk.payload.id,timestamp:chunk.payload.timestamp,modelId:chunk.payload.modelId,headers:chunk.payload.headers}});break;case "text-delta":{const textDeltasFromState=runState.state.textDeltas;textDeltasFromState.push(chunk.payload.text);runState.setState({textDeltas:textDeltasFromState,isStreaming:true});if(isControllerOpen(controller)){controller.enqueue(chunk);}break;}case "tool-call-input-streaming-start":{const tool=tools?.[chunk.payload.toolName]||Object.values(tools||{})?.find(tool2=>`id`in tool2&&tool2.id===chunk.payload.toolName);if(tool&&"onInputStart"in tool){try{await tool?.onInputStart?.({toolCallId:chunk.payload.toolCallId,messages:messageList.get.input.aiV5.model(),abortSignal:options?.abortSignal});}catch(error2){console.error("Error calling onInputStart",error2);}}if(isControllerOpen(controller)){controller.enqueue(chunk);}break;}case "tool-call-delta":{const tool=tools?.[chunk.payload.toolName||""]||Object.values(tools||{})?.find(tool2=>`id`in tool2&&tool2.id===chunk.payload.toolName);if(tool&&"onInputDelta"in tool){try{await tool?.onInputDelta?.({inputTextDelta:chunk.payload.argsTextDelta,toolCallId:chunk.payload.toolCallId,messages:messageList.get.input.aiV5.model(),abortSignal:options?.abortSignal});}catch(error2){console.error("Error calling onInputDelta",error2);}}if(isControllerOpen(controller)){controller.enqueue(chunk);}break;}case "reasoning-start":{runState.setState({isReasoning:true,reasoningDeltas:[],providerOptions:chunk.payload.providerMetadata??runState.state.providerOptions});if(Object.values(chunk.payload.providerMetadata||{}).find(v=>v?.redactedData)){messageList.add({id:messageId,role:"assistant",content:[{type:"reasoning",text:"",providerOptions:chunk.payload.providerMetadata??runState.state.providerOptions}]},"response");if(isControllerOpen(controller)){controller.enqueue(chunk);}break;}if(isControllerOpen(controller)){controller.enqueue(chunk);}break;}case "reasoning-delta":{const reasoningDeltasFromState=runState.state.reasoningDeltas;reasoningDeltasFromState.push(chunk.payload.text);runState.setState({isReasoning:true,reasoningDeltas:reasoningDeltasFromState,providerOptions:chunk.payload.providerMetadata??runState.state.providerOptions});if(isControllerOpen(controller)){controller.enqueue(chunk);}break;}case 
"reasoning-end":{if(runState.state.reasoningDeltas.length>0){messageList.add({id:messageId,role:"assistant",content:[{type:"reasoning",text:runState.state.reasoningDeltas.join(""),providerOptions:chunk.payload.providerMetadata??runState.state.providerOptions}]},"response");}runState.setState({isReasoning:false,reasoningDeltas:[]});if(isControllerOpen(controller)){controller.enqueue(chunk);}break;}case "file":messageList.add({id:messageId,role:"assistant",content:[{type:"file",data:chunk.payload.data,mimeType:chunk.payload.mimeType}]},"response");controller.enqueue(chunk);break;case "source":messageList.add({id:messageId,role:"assistant",content:{format:2,parts:[{type:"source",source:{sourceType:"url",id:chunk.payload.id,url:chunk.payload.url||"",title:chunk.payload.title,providerMetadata:chunk.payload.providerMetadata}}]},createdAt:/* @__PURE__ */new Date()},"response");controller.enqueue(chunk);break;case "finish":runState.setState({providerOptions:chunk.payload.metadata.providerMetadata,stepResult:{reason:chunk.payload.reason,logprobs:chunk.payload.logprobs,warnings:responseFromModel.warnings,totalUsage:chunk.payload.totalUsage,headers:responseFromModel.rawResponse?.headers,messageId,isContinued:!["stop","error"].includes(chunk.payload.stepResult.reason),request:responseFromModel.request}});break;case "error":if(isAbortError(chunk.payload.error)&&options?.abortSignal?.aborted){break;}runState.setState({hasErrored:true});runState.setState({stepResult:{isContinued:false,reason:"error"}});const error=getErrorFromUnknown(chunk.payload.error,{fallbackMessage:"Unknown error in agent stream"});controller.enqueue({...chunk,payload:{...chunk.payload,error}});await options?.onError?.({error});break;default:if(isControllerOpen(controller)){controller.enqueue(chunk);}}if(["text-delta","reasoning-delta","source","tool-call","tool-call-input-streaming-start","tool-call-delta","raw"].includes(chunk.type)){if(chunk.type==="raw"&&!includeRawChunks){continue;}await options?.onChunk?.(chunk);}if(runState.state.hasErrored){break;}}}function executeStreamWithFallbackModels(models){return async callback=>{let index=0;let finalResult;let done=false;for(const modelConfig of models){index++;const maxRetries=modelConfig.maxRetries||0;let attempt=0;if(done){break;}while(attempt<=maxRetries){try{const isLastModel=attempt===maxRetries&&index===models.length;const result=await callback(modelConfig.model,isLastModel);finalResult=result;done=true;break;}catch(err){attempt++;console.error(`Error executing model ${modelConfig.model.modelId}, attempt ${attempt}====`,err);if(attempt>maxRetries){break;}}}}if(typeof finalResult==="undefined"){console.error("Exhausted all fallback models and reached the maximum number of retries.");throw new Error("Exhausted all fallback models and reached the maximum number of retries.");}return finalResult;};}function createLLMExecutionStep({models,_internal,messageId,runId,modelStreamSpan,telemetry_settings,tools,toolChoice,messageList,includeRawChunks,modelSettings,providerOptions,options,toolCallStreaming,controller,structuredOutput,outputProcessors,headers,downloadRetries,downloadConcurrency,processorStates,methodType}){return createStep({id:"llm-execution",inputSchema:llmIterationOutputSchema,outputSchema:llmIterationOutputSchema,execute:async({inputData,bail,tracingContext})=>{let modelResult;let warnings;let request;let rawResponse;const{outputStream,callBail,runState}=await executeStreamWithFallbackModels(models)(async(model,isLastModel)=>{const runState2=new 
AgenticRunState({_internal,model});switch(model.specificationVersion){case "v2":{const messageListPromptArgs={downloadRetries,downloadConcurrency,supportedUrls:model?.supportedUrls};let inputMessages=await messageList.get.all.aiV5.llmPrompt(messageListPromptArgs);let stepModel=model;let stepToolChoice=toolChoice;let stepTools=tools;if(options?.prepareStep){try{const prepareStepResult=await options.prepareStep({stepNumber:inputData.output?.steps?.length||0,steps:inputData.output?.steps||[],model,messages:messageList.get.all.aiV5.model()});if(prepareStepResult){if(prepareStepResult.model){stepModel=prepareStepResult.model;}if(prepareStepResult.toolChoice){stepToolChoice=prepareStepResult.toolChoice;}if(prepareStepResult.activeTools&&stepTools){const activeToolsSet=new Set(prepareStepResult.activeTools);stepTools=Object.fromEntries(Object.entries(stepTools).filter(([toolName])=>activeToolsSet.has(toolName)));}if(prepareStepResult.messages){const newMessages=prepareStepResult.messages;const newMessageList=new MessageList();for(const message of newMessages){if(message.role==="system"){newMessageList.addSystem(message);}else if(message.role==="user"){newMessageList.add(message,"input");}else if(message.role==="assistant"||message.role==="tool"){newMessageList.add(message,"response");}}inputMessages=await newMessageList.get.all.aiV5.llmPrompt(messageListPromptArgs);}}}catch(error){console.error("Error in prepareStep callback:",error);}}modelResult=execute({runId,model:stepModel,providerOptions,inputMessages,tools:stepTools,toolChoice:stepToolChoice,options,modelSettings,telemetry_settings,includeRawChunks,structuredOutput,headers,methodType,onResult:({warnings:warningsFromStream,request:requestFromStream,rawResponse:rawResponseFromStream})=>{warnings=warningsFromStream;request=requestFromStream||{};rawResponse=rawResponseFromStream;if(!isControllerOpen(controller)){return;}controller.enqueue({runId,from:"AGENT"/* AGENT */,type:"step-start",payload:{request:request||{},warnings:warnings||[],messageId}});},modelStreamSpan,shouldThrowError:!isLastModel});break;}default:{throw new Error(`Unsupported model version: ${model.specificationVersion}`);}}const outputStream2=new MastraModelOutput({model:{modelId:model.modelId,provider:model.provider,version:model.specificationVersion},stream:modelResult,messageList,messageId,options:{runId,rootSpan:modelStreamSpan,toolCallStreaming,telemetry_settings,includeRawChunks,structuredOutput,outputProcessors,isLLMExecutionStep:true,tracingContext,processorStates}});try{await processOutputStream({outputStream:outputStream2,includeRawChunks,tools,messageId,messageList,runState:runState2,options,controller,responseFromModel:{warnings,request,rawResponse}});}catch(error){console.error("Error in LLM Execution Step",error);if(isAbortError(error)&&options?.abortSignal?.aborted){await options?.onAbort?.({steps:inputData?.output?.steps??[]});if(isControllerOpen(controller)){controller.enqueue({type:"abort",runId,from:"AGENT"/* AGENT */,payload:{}});}return {callBail:true,outputStream:outputStream2,runState:runState2};}if(isLastModel){if(isControllerOpen(controller)){controller.enqueue({type:"error",runId,from:"AGENT"/* AGENT */,payload:{error}});}runState2.setState({hasErrored:true,stepResult:{isContinued:false,reason:"error"}});}else {throw error;}}return {outputStream:outputStream2,callBail:false,runState:runState2};});if(callBail){const usage2=outputStream._getImmediateUsage();const responseMetadata2=runState.state.responseMetadata;const 
text2=outputStream._getImmediateText();return bail({messageId,stepResult:{reason:"abort",warnings,isContinued:false},metadata:{providerMetadata:runState.state.providerOptions,...responseMetadata2,modelMetadata:runState.state.modelMetadata,headers:rawResponse?.headers,request},output:{text:text2,toolCalls:[],usage:usage2??inputData.output?.usage,steps:[]},messages:{all:messageList.get.all.aiV5.model(),user:messageList.get.input.aiV5.model(),nonUser:messageList.get.response.aiV5.model()}});}if(outputStream.tripwire){runState.setState({stepResult:{isContinued:false,reason:"abort"}});}const toolCalls=outputStream._getImmediateToolCalls()?.map(chunk=>{return chunk.payload;});if(toolCalls.length>0){const assistantContent=[...toolCalls.map(toolCall=>{return {type:"tool-call",toolCallId:toolCall.toolCallId,toolName:toolCall.toolName,args:toolCall.args};})];messageList.add({id:messageId,role:"assistant",content:assistantContent},"response");}const finishReason=runState?.state?.stepResult?.reason??outputStream._getImmediateFinishReason();const hasErrored=runState.state.hasErrored;const usage=outputStream._getImmediateUsage();const responseMetadata=runState.state.responseMetadata;const text=outputStream._getImmediateText();const object=outputStream._getImmediateObject();const tripwireTriggered=outputStream.tripwire;const steps=inputData.output?.steps||[];const existingResponseCount=inputData.messages?.nonUser?.length||0;const allResponseContent=messageList.get.response.aiV5.modelContent(steps.length);const currentIterationContent=allResponseContent.slice(existingResponseCount);steps.push(new DefaultStepResult({warnings:outputStream._getImmediateWarnings(),providerMetadata:runState.state.providerOptions,finishReason:runState.state.stepResult?.reason,content:currentIterationContent,response:{...responseMetadata,...rawResponse,messages:messageList.get.response.aiV5.model()},request,usage:outputStream._getImmediateUsage()}));const messages={all:messageList.get.all.aiV5.model(),user:messageList.get.input.aiV5.model(),nonUser:messageList.get.response.aiV5.model()};return {messageId,stepResult:{reason:tripwireTriggered?"abort":hasErrored?"error":finishReason,warnings,isContinued:tripwireTriggered?false:!["stop","error"].includes(finishReason)},metadata:{providerMetadata:runState.state.providerOptions,...responseMetadata,...rawResponse,modelMetadata:runState.state.modelMetadata,headers:rawResponse?.headers,request},output:{text,toolCalls,usage:usage??inputData.output?.usage,steps,...(object?{object}:{})},messages};}});}// src/loop/workflows/agentic-execution/llm-mapping-step.ts
function createLLMMappingStep({models,telemetry_settings,_internal,modelStreamSpan,...rest},llmExecutionStep){return createStep({id:"llmExecutionMappingStep",inputSchema:z9.array(toolCallOutputSchema),outputSchema:llmIterationOutputSchema,execute:async({inputData,getStepResult:getStepResult3,bail})=>{const initialResult=getStepResult3(llmExecutionStep);if(inputData?.every(toolCall=>toolCall?.result===void 0)){const errorResults=inputData.filter(toolCall=>toolCall?.error);const toolResultMessageId=rest.experimental_generateMessageId?.()||_internal?.generateId?.();if(errorResults?.length){errorResults.forEach(toolCall=>{const chunk={type:"tool-error",runId:rest.runId,from:"AGENT"/* AGENT */,payload:{error:toolCall.error,args:toolCall.args,toolCallId:toolCall.toolCallId,toolName:toolCall.toolName,providerMetadata:toolCall.providerMetadata}};rest.controller.enqueue(chunk);});rest.messageList.add({id:toolResultMessageId,role:"tool",content:errorResults.map(toolCall=>{return {type:"tool-result",args:toolCall.args,toolCallId:toolCall.toolCallId,toolName:toolCall.toolName,result:{tool_execution_error:toolCall.error?.message??toolCall.error}};})},"response");}initialResult.stepResult.isContinued=false;return bail(initialResult);}if(inputData?.length){for(const toolCall of inputData){const chunk={type:"tool-result",runId:rest.runId,from:"AGENT"/* AGENT */,payload:{args:toolCall.args,toolCallId:toolCall.toolCallId,toolName:toolCall.toolName,result:toolCall.result,providerMetadata:toolCall.providerMetadata,providerExecuted:toolCall.providerExecuted}};rest.controller.enqueue(chunk);if(initialResult?.metadata?.modelVersion==="v2"){await rest.options?.onChunk?.({chunk:convertMastraChunkToAISDKv5({chunk})});}const toolResultMessageId=rest.experimental_generateMessageId?.()||_internal?.generateId?.();rest.messageList.add({id:toolResultMessageId,role:"tool",content:inputData.map(toolCall2=>{return {type:"tool-result",args:toolCall2.args,toolCallId:toolCall2.toolCallId,toolName:toolCall2.toolName,result:toolCall2.result};})},"response");}return {...initialResult,messages:{all:rest.messageList.get.all.aiV5.model(),user:rest.messageList.get.input.aiV5.model(),nonUser:rest.messageList.get.response.aiV5.model()}};}}});}// src/loop/workflows/agentic-execution/tool-call-step.ts
function createToolCallStep({tools,messageList,options,telemetry_settings,writer,controller,runId,streamState,modelSpanTracker}){return createStep({id:"toolCallStep",inputSchema:toolCallInputSchema,outputSchema:toolCallOutputSchema,execute:async({inputData,suspend,resumeData,runtimeContext})=>{if(inputData.providerExecuted){const tracer2=getTracer({isEnabled:telemetry_settings?.isEnabled,tracer:telemetry_settings?.tracer});const span2=tracer2.startSpan("mastra.stream.toolCall").setAttributes({...assembleOperationName({operationId:"mastra.stream.toolCall",telemetry:telemetry_settings}),"stream.toolCall.toolName":inputData.toolName,"stream.toolCall.toolCallId":inputData.toolCallId,"stream.toolCall.args":JSON.stringify(inputData.args),"stream.toolCall.providerExecuted":true});if(inputData.output){span2.setAttributes({"stream.toolCall.result":JSON.stringify(inputData.output)});}span2.end();return {...inputData,result:inputData.output};}const tool=tools?.[inputData.toolName]||Object.values(tools||{})?.find(tool2=>`id`in tool2&&tool2.id===inputData.toolName);if(!tool){throw new Error(`Tool ${inputData.toolName} not found`);}if(tool&&"onInputAvailable"in tool){try{await tool?.onInputAvailable?.({toolCallId:inputData.toolCallId,input:inputData.args,messages:messageList.get.input.aiV5.model(),abortSignal:options?.abortSignal});}catch(error){console.error("Error calling onInputAvailable",error);}}if(!tool.execute){return inputData;}const tracer=getTracer({isEnabled:telemetry_settings?.isEnabled,tracer:telemetry_settings?.tracer});const span=tracer.startSpan("mastra.stream.toolCall").setAttributes({...assembleOperationName({operationId:"mastra.stream.toolCall",telemetry:telemetry_settings}),"stream.toolCall.toolName":inputData.toolName,"stream.toolCall.toolCallId":inputData.toolCallId,"stream.toolCall.args":JSON.stringify(inputData.args)});try{const requireToolApproval=runtimeContext.get("__mastra_requireToolApproval");if(requireToolApproval||tool.requireApproval){if(!resumeData){controller.enqueue({type:"tool-call-approval",runId,from:"AGENT"/* AGENT */,payload:{toolCallId:inputData.toolCallId,toolName:inputData.toolName,args:inputData.args}});return suspend({requireToolApproval:{toolCallId:inputData.toolCallId,toolName:inputData.toolName,args:inputData.args},__streamState:streamState.serialize()},{resumeLabel:inputData.toolCallId});}else {if(!resumeData.approved){span.end();span.setAttributes({"stream.toolCall.result":"Tool call was not approved by the user"});return {result:"Tool call was not approved by the user",...inputData};}}}const toolOptions={abortSignal:options?.abortSignal,toolCallId:inputData.toolCallId,messages:messageList.get.input.aiV5.model(),writableStream:writer,// Pass current step span as parent for tool call spans
tracingContext:modelSpanTracker?.getTracingContext(),suspend:async suspendPayload=>{controller.enqueue({type:"tool-call-suspended",runId,from:"AGENT"/* AGENT */,payload:{toolCallId:inputData.toolCallId,toolName:inputData.toolName,suspendPayload}});return await suspend({toolCallSuspended:suspendPayload,__streamState:streamState.serialize()},{resumeLabel:inputData.toolCallId});},resumeData};const result=await tool.execute(inputData.args,toolOptions);span.setAttributes({"stream.toolCall.result":JSON.stringify(result)});span.end();return {result,...inputData};}catch(error){span.setStatus({code:2,message:error?.message??error});span.recordException(error);return {error,...inputData};}}});}// src/loop/workflows/agentic-execution/index.ts
@@ -431,7 +432,7 @@ payload:typedInputData});}}modelStreamSpan.setAttributes({"stream.response.id":t
function isControllerOpen(controller){return controller.desiredSize!==0&&controller.desiredSize!==null;}function workflowLoopStream({resumeContext,requireToolApproval,telemetry_settings,models,toolChoice,modelSettings,_internal,modelStreamSpan,messageId,runId,messageList,startTimestamp,streamState,agentId,toolCallId,...rest}){return new ReadableStream$1({start:async controller=>{const writer=new WritableStream({write:chunk=>{controller.enqueue(chunk);}});modelStreamSpan.setAttributes({...(telemetry_settings?.recordInputs!==false?{"stream.prompt.toolChoice":toolChoice?JSON.stringify(toolChoice):"auto"}:{})});const agenticLoopWorkflow=createAgenticLoopWorkflow({resumeContext,messageId,models,telemetry_settings,_internal,modelSettings,toolChoice,modelStreamSpan,controller,writer,runId,messageList,startTimestamp,streamState,agentId,...rest});if(rest.mastra){agenticLoopWorkflow.__registerMastra(rest.mastra);}const initialData={messageId,messages:{all:messageList.get.all.aiV5.model(),user:messageList.get.input.aiV5.model(),nonUser:[]},output:{steps:[],usage:{inputTokens:0,outputTokens:0,totalTokens:0}},metadata:{},stepResult:{reason:"undefined",warnings:[],isContinued:true,totalUsage:{inputTokens:0,outputTokens:0,totalTokens:0}}};const msToFirstChunk=_internal?.now?.()-startTimestamp;modelStreamSpan.addEvent("ai.stream.firstChunk",{"ai.response.msToFirstChunk":msToFirstChunk});modelStreamSpan.setAttributes({"stream.response.timestamp":new Date(startTimestamp).toISOString(),"stream.response.msToFirstChunk":msToFirstChunk});if(!resumeContext){controller.enqueue({type:"start",runId,from:"AGENT"/* AGENT */,payload:{id:agentId}});}const run=await agenticLoopWorkflow.createRunAsync({runId});const runtimeContext=new RuntimeContext();if(requireToolApproval){runtimeContext.set("__mastra_requireToolApproval",true);}const executionResult=resumeContext?await run.resume({resumeData:resumeContext.resumeData,tracingContext:rest.modelSpanTracker?.getTracingContext(),label:toolCallId}):await run.start({inputData:initialData,tracingContext:rest.modelSpanTracker?.getTracingContext(),runtimeContext});if(executionResult.status!=="success"){controller.close();return;}if(executionResult.result.stepResult?.reason==="abort"){controller.close();return;}controller.enqueue({type:"finish",runId,from:"AGENT"/* AGENT */,payload:{...executionResult.result,stepResult:{...executionResult.result.stepResult,// @ts-ignore we add 'abort' for tripwires so the type is not compatible
reason:executionResult.result.stepResult.reason}}});const msToFinish=(_internal?.now?.()??Date.now())-startTimestamp;modelStreamSpan.addEvent("ai.stream.finish");modelStreamSpan.setAttributes({"stream.response.msToFinish":msToFinish,"stream.response.avgOutputTokensPerSecond":1e3*(executionResult?.result?.output?.usage?.outputTokens??0)/msToFinish});controller.close();}});}// src/loop/loop.ts
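On finish, workflowLoopStream records stream.response.avgOutputTokensPerSecond as 1000 * outputTokens / msToFinish. A small sketch of that calculation:
// Illustrative sketch, not from the package: the throughput attribute set on the model span.
function avgOutputTokensPerSecond(outputTokens: number, msToFinish: number): number {
  return (1000 * outputTokens) / msToFinish;
}
console.log(avgOutputTokensPerSecond(512, 4000)); // 128 tokens per second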
function loop({resumeContext,models,logger,runId,idGenerator,telemetry_settings,messageList,includeRawChunks,modelSettings,tools,_internal,mode="stream",outputProcessors,returnScorerData,requireToolApproval,agentId,...rest}){let loggerToUse=logger||new ConsoleLogger({level:"debug"});if(models.length===0||!models[0]){const mastraError=new MastraError({id:"LOOP_MODELS_EMPTY",domain:"LLM"/* LLM */,category:"USER"/* USER */});loggerToUse.trackException(mastraError);loggerToUse.error(mastraError.toString());throw mastraError;}const firstModel=models[0];let runIdToUse=runId;if(!runIdToUse){runIdToUse=idGenerator?.()||crypto.randomUUID();}const internalToUse={now:_internal?.now||(()=>Date.now()),generateId:_internal?.generateId||(()=>generateId()),currentDate:_internal?.currentDate||(()=>/* @__PURE__ */new Date())};let startTimestamp=internalToUse.now?.();const{rootSpan}=getRootSpan({operationId:mode==="stream"?`mastra.stream`:`mastra.generate`,model:{modelId:firstModel.model.modelId,provider:firstModel.model.provider},modelSettings,headers:modelSettings?.headers??rest.headers,telemetry_settings});rootSpan.setAttributes({...(telemetry_settings?.recordOutputs!==false?{"stream.prompt.messages":JSON.stringify(messageList.get.input.aiV5.model())}:{})});const{rootSpan:modelStreamSpan}=getRootSpan({operationId:`mastra.${mode}.aisdk.doStream`,model:{modelId:firstModel.model.modelId,provider:firstModel.model.provider},modelSettings,headers:modelSettings?.headers??rest.headers,telemetry_settings});const messageId=rest.experimental_generateMessageId?.()||internalToUse.generateId?.();let modelOutput;const serializeStreamState=()=>{return modelOutput?.serializeState();};const deserializeStreamState=state=>{modelOutput?.deserializeState(state);};const processorStates=outputProcessors&&outputProcessors.length>0?/* @__PURE__ */new Map():void 0;const workflowLoopProps={resumeContext,models,runId:runIdToUse,logger:loggerToUse,startTimestamp,messageList,includeRawChunks:!!includeRawChunks,_internal:internalToUse,tools,modelStreamSpan,telemetry_settings,modelSettings,outputProcessors,messageId,agentId,requireToolApproval,streamState:{serialize:serializeStreamState,deserialize:deserializeStreamState},processorStates,...rest};const existingSnapshot=resumeContext?.snapshot;let initialStreamState;if(existingSnapshot){for(const key in existingSnapshot?.context){const step=existingSnapshot?.context[key];if(step&&step.status==="suspended"&&step.suspendPayload?.__streamState){initialStreamState=step.suspendPayload?.__streamState;break;}}}const baseStream=workflowLoopStream(workflowLoopProps);const stream=rest.modelSpanTracker?.wrapStream(baseStream)??baseStream;modelOutput=new MastraModelOutput({model:{modelId:firstModel.model.modelId,provider:firstModel.model.provider,version:firstModel.model.specificationVersion},stream,messageList,messageId,options:{runId:runIdToUse,telemetry_settings,rootSpan,toolCallStreaming:rest.toolCallStreaming,onFinish:rest.options?.onFinish,onStepFinish:rest.options?.onStepFinish,includeRawChunks:!!includeRawChunks,structuredOutput:rest.structuredOutput,outputProcessors,returnScorerData,tracingContext:rest.modelSpanTracker?.getTracingContext()},initialState:initialStreamState});return createDestructurableOutput(modelOutput);}// src/llm/model/model.loop.ts
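When loop is called with a resumeContext carrying a workflow snapshot, it scans the snapshot context for the first suspended step whose suspendPayload contains a serialized __streamState and feeds it to MastraModelOutput as the initial state. A sketch of that scan over a simplified snapshot shape (assumed for illustration):
// Illustrative sketch, not from the package: recovering serialized stream state from a snapshot.
type StepSnapshot = { status: string; suspendPayload?: { __streamState?: unknown } };

function findInitialStreamState(context?: Record<string, StepSnapshot>): unknown {
  for (const key in context ?? {}) {
    const step = context?.[key];
    if (step?.status === 'suspended' && step.suspendPayload?.__streamState) {
      return step.suspendPayload.__streamState;
    }
  }
  return undefined;
}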
-
var MastraLLMVNext=class extends MastraBase{#models;#mastra;#options;#firstModel;constructor({mastra,models,options}){super({name:"aisdk"});this.#options=options;if(mastra){this.#mastra=mastra;if(mastra.getLogger()){this.__setLogger(this.#mastra.getLogger());}}if(models.length===0||!models[0]){const mastraError=new MastraError({id:"LLM_LOOP_MODELS_EMPTY",domain:"LLM"/* LLM */,category:"USER"/* USER */});this.logger.trackException(mastraError);this.logger.error(mastraError.toString());throw mastraError;}else {this.#models=models;this.#firstModel=models[0];}}__registerPrimitives(p){if(p.telemetry){this.__setTelemetry(p.telemetry);}if(p.logger){this.__setLogger(p.logger);}}__registerMastra(p){this.#mastra=p;}getProvider(){return this.#firstModel.model.provider;}getModelId(){return this.#firstModel.model.modelId;}getModel(){return this.#firstModel.model;}_applySchemaCompat(schema){const model=this.#firstModel.model;const schemaCompatLayers=[];if(model){const modelInfo={modelId:model.modelId,supportsStructuredOutputs:true,provider:model.provider};schemaCompatLayers.push(new OpenAIReasoningSchemaCompatLayer(modelInfo),new OpenAISchemaCompatLayer(modelInfo),new GoogleSchemaCompatLayer(modelInfo),new AnthropicSchemaCompatLayer(modelInfo),new DeepSeekSchemaCompatLayer(modelInfo),new MetaSchemaCompatLayer(modelInfo));}return applyCompatLayer({schema,compatLayers:schemaCompatLayers,mode:"aiSdkSchema"});}convertToMessages(messages){if(Array.isArray(messages)){return messages.map(m=>{if(typeof m==="string"){return {role:"user",content:m};}return m;});}return [{role:"user",content:messages}];}stream({resumeContext,runId,stopWhen=stepCountIs(5),maxSteps,tools={},modelSettings,toolChoice="auto",telemetry_settings,threadId,resourceId,structuredOutput,options,outputProcessors,returnScorerData,providerOptions,tracingContext,messageList,requireToolApproval,_internal,agentId,toolCallId}){let stopWhenToUse;if(maxSteps&&typeof maxSteps==="number"){stopWhenToUse=stepCountIs(maxSteps);}else {stopWhenToUse=stopWhen;}const messages=messageList.get.all.aiV5.model();const firstModel=this.#firstModel.model;this.logger.debug(`[LLM] - Streaming text`,{runId,threadId,resourceId,messages,tools:Object.keys(tools||{})});const llmAISpan=tracingContext?.currentSpan?.createChildSpan({name:`llm: '${firstModel.modelId}'`,type:"model_generation"/* MODEL_GENERATION */,input:{messages:[...messageList.getSystemMessages(),...messages]},attributes:{model:firstModel.modelId,provider:firstModel.provider,streaming:true,parameters:modelSettings},metadata:{runId,threadId,resourceId},tracingPolicy:this.#options?.tracingPolicy});const modelSpanTracker=new ModelSpanTracker(llmAISpan);try{const loopOptions={mastra:this.#mastra,resumeContext,runId,toolCallId,messageList,models:this.#models,tools,stopWhen:stopWhenToUse,toolChoice,modelSettings,providerOptions,telemetry_settings:{...this.experimental_telemetry,...telemetry_settings},_internal,structuredOutput,outputProcessors,returnScorerData,modelSpanTracker,requireToolApproval,agentId,options:{...options,onStepFinish:async props=>{try{await options?.onStepFinish?.({...props,runId});}catch(e){const mastraError=new MastraError({id:"LLM_STREAM_ON_STEP_FINISH_CALLBACK_EXECUTION_FAILED",domain:"LLM"/* LLM */,category:"USER"/* USER 
*/,details:{modelId:props.model?.modelId,modelProvider:props.model?.provider,runId:runId??"unknown",threadId:threadId??"unknown",resourceId:resourceId??"unknown",finishReason:props?.finishReason,toolCalls:props?.toolCalls?JSON.stringify(props.toolCalls):"",toolResults:props?.toolResults?JSON.stringify(props.toolResults):"",usage:props?.usage?JSON.stringify(props.usage):""}},e);modelSpanTracker?.reportGenerationError({error:mastraError});this.logger.trackException(mastraError);throw mastraError;}this.logger.debug("[LLM] - Stream Step Change:",{text:props?.text,toolCalls:props?.toolCalls,toolResults:props?.toolResults,finishReason:props?.finishReason,usage:props?.usage,runId});if(props?.response?.headers?.["x-ratelimit-remaining-tokens"]&&parseInt(props?.response?.headers?.["x-ratelimit-remaining-tokens"],10)<2e3){this.logger.warn("Rate limit approaching, waiting 10 seconds",{runId});await delay(10*1e3);}},onFinish:async props=>{modelSpanTracker?.endGeneration({output:{files:props?.files,object:props?.object,reasoning:props?.reasoning,reasoningText:props?.reasoningText,sources:props?.sources,text:props?.text,warnings:props?.warnings},attributes:{finishReason:props?.finishReason,usage:{inputTokens:props?.totalUsage?.inputTokens,outputTokens:props?.totalUsage?.outputTokens,totalTokens:props?.totalUsage?.totalTokens,reasoningTokens:props?.totalUsage?.reasoningTokens,cachedInputTokens:props?.totalUsage?.cachedInputTokens}}});try{await options?.onFinish?.({...props,runId});}catch(e){const mastraError=new MastraError({id:"LLM_STREAM_ON_FINISH_CALLBACK_EXECUTION_FAILED",domain:"LLM"/* LLM */,category:"USER"/* USER */,details:{modelId:props.model?.modelId,modelProvider:props.model?.provider,runId:runId??"unknown",threadId:threadId??"unknown",resourceId:resourceId??"unknown",finishReason:props?.finishReason,toolCalls:props?.toolCalls?JSON.stringify(props.toolCalls):"",toolResults:props?.toolResults?JSON.stringify(props.toolResults):"",usage:props?.usage?JSON.stringify(props.usage):""}},e);modelSpanTracker?.reportGenerationError({error:mastraError});this.logger.trackException(mastraError);throw mastraError;}this.logger.debug("[LLM] - Stream Finished:",{text:props?.text,toolCalls:props?.toolCalls,toolResults:props?.toolResults,finishReason:props?.finishReason,usage:props?.usage,runId,threadId,resourceId});}}};return loop(loopOptions);}catch(e){const mastraError=new MastraError({id:"LLM_STREAM_TEXT_AI_SDK_EXECUTION_FAILED",domain:"LLM"/* LLM */,category:"THIRD_PARTY"/* THIRD_PARTY */,details:{modelId:firstModel.modelId,modelProvider:firstModel.provider,runId:runId??"unknown",threadId:threadId??"unknown",resourceId:resourceId??"unknown"}},e);modelSpanTracker?.reportGenerationError({error:mastraError});throw mastraError;}}};// src/loop/network/index.ts
+
var MastraLLMVNext=class extends MastraBase{#models;#mastra;#options;#firstModel;constructor({mastra,models,options}){super({name:"aisdk"});this.#options=options;if(mastra){this.#mastra=mastra;if(mastra.getLogger()){this.__setLogger(this.#mastra.getLogger());}}if(models.length===0||!models[0]){const mastraError=new MastraError({id:"LLM_LOOP_MODELS_EMPTY",domain:"LLM"/* LLM */,category:"USER"/* USER */});this.logger.trackException(mastraError);this.logger.error(mastraError.toString());throw mastraError;}else {this.#models=models;this.#firstModel=models[0];}}__registerPrimitives(p){if(p.telemetry){this.__setTelemetry(p.telemetry);}if(p.logger){this.__setLogger(p.logger);}}__registerMastra(p){this.#mastra=p;}getProvider(){return this.#firstModel.model.provider;}getModelId(){return this.#firstModel.model.modelId;}getModel(){return this.#firstModel.model;}_applySchemaCompat(schema){const model=this.#firstModel.model;const schemaCompatLayers=[];if(model){const modelInfo={modelId:model.modelId,supportsStructuredOutputs:true,provider:model.provider};schemaCompatLayers.push(new OpenAIReasoningSchemaCompatLayer(modelInfo),new OpenAISchemaCompatLayer(modelInfo),new GoogleSchemaCompatLayer(modelInfo),new AnthropicSchemaCompatLayer(modelInfo),new DeepSeekSchemaCompatLayer(modelInfo),new MetaSchemaCompatLayer(modelInfo));}return applyCompatLayer({schema,compatLayers:schemaCompatLayers,mode:"aiSdkSchema"});}convertToMessages(messages){if(Array.isArray(messages)){return messages.map(m=>{if(typeof m==="string"){return {role:"user",content:m};}return m;});}return [{role:"user",content:messages}];}stream({resumeContext,runId,stopWhen=stepCountIs(5),maxSteps,tools={},modelSettings,toolChoice="auto",telemetry_settings,threadId,resourceId,structuredOutput,options,outputProcessors,returnScorerData,providerOptions,tracingContext,messageList,requireToolApproval,_internal,agentId,toolCallId,methodType}){let stopWhenToUse;if(maxSteps&&typeof maxSteps==="number"){stopWhenToUse=stepCountIs(maxSteps);}else {stopWhenToUse=stopWhen;}const messages=messageList.get.all.aiV5.model();const firstModel=this.#firstModel.model;this.logger.debug(`[LLM] - Streaming text`,{runId,threadId,resourceId,messages,tools:Object.keys(tools||{})});const llmAISpan=tracingContext?.currentSpan?.createChildSpan({name:`llm: '${firstModel.modelId}'`,type:"model_generation"/* MODEL_GENERATION */,input:{messages:[...messageList.getSystemMessages(),...messages]},attributes:{model:firstModel.modelId,provider:firstModel.provider,streaming:true,parameters:modelSettings},metadata:{runId,threadId,resourceId},tracingPolicy:this.#options?.tracingPolicy});const modelSpanTracker=new ModelSpanTracker(llmAISpan);try{const loopOptions={mastra:this.#mastra,resumeContext,runId,toolCallId,messageList,models:this.#models,tools,stopWhen:stopWhenToUse,toolChoice,modelSettings,providerOptions,telemetry_settings:{...this.experimental_telemetry,...telemetry_settings},_internal,structuredOutput,outputProcessors,returnScorerData,modelSpanTracker,requireToolApproval,agentId,methodType,options:{...options,onStepFinish:async props=>{try{await options?.onStepFinish?.({...props,runId});}catch(e){const mastraError=new MastraError({id:"LLM_STREAM_ON_STEP_FINISH_CALLBACK_EXECUTION_FAILED",domain:"LLM"/* LLM */,category:"USER"/* USER 
*/,details:{modelId:props.model?.modelId,modelProvider:props.model?.provider,runId:runId??"unknown",threadId:threadId??"unknown",resourceId:resourceId??"unknown",finishReason:props?.finishReason,toolCalls:props?.toolCalls?JSON.stringify(props.toolCalls):"",toolResults:props?.toolResults?JSON.stringify(props.toolResults):"",usage:props?.usage?JSON.stringify(props.usage):""}},e);modelSpanTracker?.reportGenerationError({error:mastraError});this.logger.trackException(mastraError);throw mastraError;}this.logger.debug("[LLM] - Stream Step Change:",{text:props?.text,toolCalls:props?.toolCalls,toolResults:props?.toolResults,finishReason:props?.finishReason,usage:props?.usage,runId});if(props?.response?.headers?.["x-ratelimit-remaining-tokens"]&&parseInt(props?.response?.headers?.["x-ratelimit-remaining-tokens"],10)<2e3){this.logger.warn("Rate limit approaching, waiting 10 seconds",{runId});await delay(10*1e3);}},onFinish:async props=>{modelSpanTracker?.endGeneration({output:{files:props?.files,object:props?.object,reasoning:props?.reasoning,reasoningText:props?.reasoningText,sources:props?.sources,text:props?.text,warnings:props?.warnings},attributes:{finishReason:props?.finishReason,usage:{inputTokens:props?.totalUsage?.inputTokens,outputTokens:props?.totalUsage?.outputTokens,totalTokens:props?.totalUsage?.totalTokens,reasoningTokens:props?.totalUsage?.reasoningTokens,cachedInputTokens:props?.totalUsage?.cachedInputTokens}}});try{await options?.onFinish?.({...props,runId});}catch(e){const mastraError=new MastraError({id:"LLM_STREAM_ON_FINISH_CALLBACK_EXECUTION_FAILED",domain:"LLM"/* LLM */,category:"USER"/* USER */,details:{modelId:props.model?.modelId,modelProvider:props.model?.provider,runId:runId??"unknown",threadId:threadId??"unknown",resourceId:resourceId??"unknown",finishReason:props?.finishReason,toolCalls:props?.toolCalls?JSON.stringify(props.toolCalls):"",toolResults:props?.toolResults?JSON.stringify(props.toolResults):"",usage:props?.usage?JSON.stringify(props.usage):""}},e);modelSpanTracker?.reportGenerationError({error:mastraError});this.logger.trackException(mastraError);throw mastraError;}this.logger.debug("[LLM] - Stream Finished:",{text:props?.text,toolCalls:props?.toolCalls,toolResults:props?.toolResults,finishReason:props?.finishReason,usage:props?.usage,runId,threadId,resourceId});}}};return loop(loopOptions);}catch(e){const mastraError=new MastraError({id:"LLM_STREAM_TEXT_AI_SDK_EXECUTION_FAILED",domain:"LLM"/* LLM */,category:"THIRD_PARTY"/* THIRD_PARTY */,details:{modelId:firstModel.modelId,modelProvider:firstModel.provider,runId:runId??"unknown",threadId:threadId??"unknown",resourceId:resourceId??"unknown"}},e);modelSpanTracker?.reportGenerationError({error:mastraError});throw mastraError;}}};// src/loop/network/index.ts
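The change to MastraLLMVNext.stream in this chunk is the new methodType option, which is forwarded into loop alongside the existing arguments. The step-limit handling is unchanged: a numeric maxSteps wins over stopWhen, which itself defaults to stepCountIs(5). A sketch of that precedence, using a simplified stand-in for the AI SDK's stepCountIs:
// Illustrative sketch, not from the package: how maxSteps overrides stopWhen.
type StopCondition = { maxStepCount: number };
const stepCountIs = (n: number): StopCondition => ({ maxStepCount: n }); // simplified stand-in

function resolveStopWhen(maxSteps?: number, stopWhen: StopCondition = stepCountIs(5)): StopCondition {
  return maxSteps && typeof maxSteps === 'number' ? stepCountIs(maxSteps) : stopWhen;
}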
var MastraAgentNetworkStream=class extends ReadableStream$1{#usageCount={inputTokens:0,outputTokens:0,totalTokens:0};#streamPromise;#run;constructor({createStream,run}){const deferredPromise={promise:null,resolve:null,reject:null};deferredPromise.promise=new Promise((resolve,reject)=>{deferredPromise.resolve=resolve;deferredPromise.reject=reject;});const updateUsageCount=usage=>{this.#usageCount.inputTokens+=parseInt(usage?.inputTokens?.toString()??"0",10);this.#usageCount.outputTokens+=parseInt(usage?.outputTokens?.toString()??"0",10);this.#usageCount.totalTokens+=parseInt(usage?.totalTokens?.toString()??"0",10);};super({start:async controller=>{const writer=new WritableStream({write:chunk=>{if(chunk.type==="step-output"&&chunk.payload?.output?.from==="AGENT"&&chunk.payload?.output?.type==="finish"||chunk.type==="step-output"&&chunk.payload?.output?.from==="WORKFLOW"&&chunk.payload?.output?.type==="finish"){const output=chunk.payload?.output;if(output&&"payload"in output&&output.payload){const finishPayload=output.payload;if("usage"in finishPayload&&finishPayload.usage){updateUsageCount(finishPayload.usage);}}}controller.enqueue(chunk);}});const stream=await createStream(writer);const getInnerChunk=chunk=>{if(chunk.type==="workflow-step-output"){return getInnerChunk(chunk.payload.output);}return chunk;};for await(const chunk of stream){if(chunk.type==="workflow-step-output"){const innerChunk=getInnerChunk(chunk);if(innerChunk.type==="routing-agent-end"||innerChunk.type==="agent-execution-end"||innerChunk.type==="workflow-execution-end"){if(innerChunk.payload?.usage){updateUsageCount(innerChunk.payload.usage);}}if(innerChunk.type==="network-execution-event-step-finish"){const finishPayload={...innerChunk.payload,usage:this.#usageCount};controller.enqueue({...innerChunk,payload:finishPayload});}else {controller.enqueue(innerChunk);}}}controller.close();deferredPromise.resolve();}});this.#run=run;this.#streamPromise=deferredPromise;}get status(){return this.#streamPromise.promise.then(()=>this.#run._getExecutionResults()).then(res=>res.status);}get result(){return this.#streamPromise.promise.then(()=>this.#run._getExecutionResults());}get usage(){return this.#streamPromise.promise.then(()=>this.#usageCount);}};// src/loop/types.ts
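MastraAgentNetworkStream keeps a running usage total across nested agent and workflow finish chunks, coercing each counter to an integer before adding. A sketch of that accumulator over a reduced usage shape:
// Illustrative sketch, not from the package: the running usage counter kept by the network stream.
interface Usage { inputTokens?: number | string; outputTokens?: number | string; totalTokens?: number | string }

function addUsage(total: { inputTokens: number; outputTokens: number; totalTokens: number }, usage?: Usage) {
  total.inputTokens += parseInt(usage?.inputTokens?.toString() ?? '0', 10);
  total.outputTokens += parseInt(usage?.outputTokens?.toString() ?? '0', 10);
  total.totalTokens += parseInt(usage?.totalTokens?.toString() ?? '0', 10);
  return total;
}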
var PRIMITIVE_TYPES=z9.enum(["agent","workflow","none","tool"]);// src/loop/network/index.ts
async function getRoutingAgent({runtimeContext,agent}){const instructionsToUse=await agent.getInstructions({runtimeContext});const agentsToUse=await agent.listAgents({runtimeContext});const workflowsToUse=await agent.getWorkflows({runtimeContext});const toolsToUse=await agent.getTools({runtimeContext});const model=await agent.getModel({runtimeContext});const memoryToUse=await agent.getMemory({runtimeContext});const agentList=Object.entries(agentsToUse).map(([name,agent2])=>{return ` - **${name}**: ${agent2.getDescription()}`;}).join("\n");const workflowList=Object.entries(workflowsToUse).map(([name,workflow])=>{return ` - **${name}**: ${workflow.description}, input schema: ${JSON.stringify(zodToJsonSchema(workflow.inputSchema))}`;}).join("\n");const memoryTools=await memoryToUse?.getTools?.();const toolList=Object.entries({...toolsToUse,...memoryTools}).map(([name,tool])=>{return ` - **${name}**: ${tool.description}, input schema: ${JSON.stringify(zodToJsonSchema(tool.inputSchema||z9.object({})))}`;}).join("\n");const instructions=`
@@ -599,6 +600,7 @@ var SaveQueueManager=class _SaveQueueManager{logger;debounceMs;memory;static MAX
* @param threadId - The ID of the thread whose messages are being saved.
* @param memoryConfig - Optional memory configuration for saving.
*/async flushMessages(messageList,threadId,memoryConfig){if(!threadId)return;this.clearDebounce(threadId);return this.enqueueSave(threadId,messageList,memoryConfig);}};// src/agent/workflows/prepare-stream/index.ts
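SaveQueueManager.flushMessages is a no-op without a thread id; otherwise it cancels any pending debounce for that thread and saves immediately. A reduced sketch of that debounce-then-flush pattern (the pendingTimers/save names are hypothetical):
// Illustrative sketch, not from the package: immediate flush of a debounced per-thread save.
const pendingTimers = new Map<string, ReturnType<typeof setTimeout>>();

async function flush(threadId: string | undefined, save: (threadId: string) => Promise<void>) {
  if (!threadId) return;
  const timer = pendingTimers.get(threadId);
  if (timer) { clearTimeout(timer); pendingTimers.delete(threadId); } // clearDebounce
  return save(threadId);                                              // enqueueSave
}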
+
function getModelMethodFromAgentMethod(methodType){if(methodType==="generate"||methodType==="generateLegacy"){return "generate";}else if(methodType==="stream"||methodType==="streamLegacy"){return "stream";}else {throw new MastraError({id:"INVALID_METHOD_TYPE",domain:"AGENT"/* AGENT */,category:"USER"/* USER */});}}// src/processors/processors/structured-output.ts
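getModelMethodFromAgentMethod, newly added in this chunk, collapses the agent-level method types to the two model-level ones: "generate" and "generateLegacy" map to "generate", "stream" and "streamLegacy" map to "stream", and anything else throws. A usage sketch of the same mapping:
// Illustrative sketch, not from the package: the agent-method to model-method mapping.
type AgentMethod = 'generate' | 'generateLegacy' | 'stream' | 'streamLegacy';
type ModelMethod = 'generate' | 'stream';

function toModelMethod(methodType: AgentMethod): ModelMethod {
  if (methodType === 'generate' || methodType === 'generateLegacy') return 'generate';
  if (methodType === 'stream' || methodType === 'streamLegacy') return 'stream';
  throw new Error('Invalid method type');
}

toModelMethod('streamLegacy'); // 'stream'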
var STRUCTURED_OUTPUT_PROCESSOR_NAME="structured-output";var StructuredOutputProcessor=class{name=STRUCTURED_OUTPUT_PROCESSOR_NAME;schema;structuringAgent;errorStrategy;fallbackValue;isStructuringAgentStreamStarted=false;jsonPromptInjection;constructor(options){if(!options.schema){throw new MastraError({id:"STRUCTURED_OUTPUT_PROCESSOR_SCHEMA_REQUIRED",domain:"AGENT"/* AGENT */,category:"USER"/* USER */,text:"StructuredOutputProcessor requires a schema to be provided"});}if(!options.model){throw new MastraError({id:"STRUCTURED_OUTPUT_PROCESSOR_MODEL_REQUIRED",domain:"AGENT"/* AGENT */,category:"USER"/* USER */,text:"StructuredOutputProcessor requires a model to be provided either in options or as fallback"});}this.schema=options.schema;this.errorStrategy=options.errorStrategy??"strict";this.fallbackValue=options.fallbackValue;this.jsonPromptInjection=options.jsonPromptInjection;this.structuringAgent=new Agent({name:"structured-output-structurer",instructions:options.instructions||this.generateInstructions(),model:options.model});}async processOutputStream(args){const{part,state,streamParts,abort,tracingContext}=args;const controller=state.controller;switch(part.type){case "finish":await this.processAndEmitStructuredOutput(streamParts,controller,abort,tracingContext);return part;default:return part;}}async processAndEmitStructuredOutput(streamParts,controller,abort,tracingContext){if(this.isStructuringAgentStreamStarted)return;this.isStructuringAgentStreamStarted=true;try{const structuringPrompt=this.buildStructuringPrompt(streamParts);const prompt=`Extract and structure the key information from the following text according to the specified schema. Keep the original meaning and details:
${structuringPrompt}`;const structuringAgentStream=await this.structuringAgent.stream(prompt,{structuredOutput:{schema:this.schema,jsonPromptInjection:this.jsonPromptInjection},tracingContext});const excludedChunkTypes=["start","finish","text-start","text-delta","text-end","step-start","step-finish"];for await(const chunk of structuringAgentStream.fullStream){if(excludedChunkTypes.includes(chunk.type)||chunk.type.startsWith("data-")){continue;}if(chunk.type==="error"){this.handleError("Structuring failed","Internal agent did not generate structured output",abort);if(this.errorStrategy==="warn"){break;}if(this.errorStrategy==="fallback"&&this.fallbackValue!==void 0){const fallbackChunk={runId:chunk.runId,from:"AGENT"/* AGENT */,type:"object-result",object:this.fallbackValue,metadata:{from:"structured-output",fallback:true}};controller.enqueue(fallbackChunk);break;}}const newChunk={...chunk,metadata:{from:"structured-output"}};controller.enqueue(newChunk);}}catch(error){this.handleError("Structured output processing failed",error instanceof Error?error.message:"Unknown error",abort);}}/**
@@ -670,7 +672,7 @@ Look for:
${this.customPatterns.length>0?`Additional custom patterns to detect: ${this.customPatterns.join(", ")}`:""}
Be thorough but avoid false positives. Only flag content that genuinely represents a security risk.`;}};// src/agent/workflows/prepare-stream/map-results-step.ts
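createMapResultsStep (the removed and added versions follow) resolves output processors from the call options first, then from the agent's configuration, which may be a static array or a function of the runtime context; when options.structuredOutput.model is set, a StructuredOutputProcessor is appended. A sketch of that resolution order with simplified types:
// Illustrative sketch, not from the package: resolving the effective output processors.
type Processor = { name: string };
type ProcessorsConfig = Processor[] | ((ctx: unknown) => Promise<Processor[]> | Processor[]);

async function resolveProcessors(
  fromOptions: Processor[] | undefined,
  fromAgent: ProcessorsConfig | undefined,
  ctx: unknown,
): Promise<Processor[]> {
  if (fromOptions) return fromOptions;  // explicit options win
  if (!fromAgent) return [];
  return typeof fromAgent === 'function' ? await fromAgent(ctx) : fromAgent;
}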
-
function createMapResultsStep({capabilities,options,resourceId,runId,runtimeContext,memory,memoryConfig,saveQueueManager,agentAISpan,instructions,agentId}){return async({inputData,bail,tracingContext})=>{const toolsData=inputData["prepare-tools-step"];const memoryData=inputData["prepare-memory-step"];const result={...options,tools:toolsData.convertedTools,toolChoice:options.toolChoice,thread:memoryData.thread,threadId:memoryData.thread?.id,resourceId,runtimeContext,onStepFinish:async props=>{if(options.savePerStep){if(!memoryData.threadExists&&memory&&memoryData.thread){await memory.createThread({threadId:memoryData.thread?.id,title:memoryData.thread?.title,metadata:memoryData.thread?.metadata,resourceId:memoryData.thread?.resourceId,memoryConfig});memoryData.threadExists=true;}await capabilities.saveStepMessages({saveQueueManager,result:props,messageList:memoryData.messageList,threadId:memoryData.thread?.id,memoryConfig,runId});}return options.onStepFinish?.({...props,runId});},...(memoryData.tripwire&&{tripwire:memoryData.tripwire,tripwireReason:memoryData.tripwireReason})};if(result.tripwire){const agentModel=await capabilities.getModel({runtimeContext:result.runtimeContext});const modelOutput=await getModelOutputForTripwire({tripwireReason:result.tripwireReason,runId,tracingContext,options,model:agentModel,messageList:memoryData.messageList});return bail(modelOutput);}let effectiveOutputProcessors=options.outputProcessors||(capabilities.outputProcessors?typeof capabilities.outputProcessors==="function"?await capabilities.outputProcessors({runtimeContext:result.runtimeContext}):capabilities.outputProcessors:[]);if(options.structuredOutput?.model){const structuredProcessor=new StructuredOutputProcessor(options.structuredOutput);effectiveOutputProcessors=effectiveOutputProcessors?[...effectiveOutputProcessors,structuredProcessor]:[structuredProcessor];}const messageList=memoryData.messageList;const loopOptions={agentId,runtimeContext:result.runtimeContext,tracingContext:{currentSpan:agentAISpan},runId,toolChoice:result.toolChoice,tools:result.tools,resourceId:result.resourceId,threadId:result.threadId,stopWhen:result.stopWhen,maxSteps:result.maxSteps,providerOptions:result.providerOptions,options:{...(options.prepareStep&&{prepareStep:options.prepareStep}),onFinish:async payload=>{if(payload.finishReason==="error"){capabilities.logger.error("Error in agent stream",{error:payload.error,runId});return;}try{const outputText=messageList.get.all.core().map(m=>m.content).join("\n");await capabilities.executeOnFinish({result:payload,outputText,instructions,thread:result.thread,threadId:result.threadId,readOnlyMemory:options.memory?.readOnly,resourceId,memoryConfig,runtimeContext,agentAISpan,runId,messageList,threadExists:memoryData.threadExists,structuredOutput:!!options.structuredOutput?.schema,saveQueueManager,overrideScorers:options.scorers});}catch(e){capabilities.logger.error("Error saving memory on finish",{error:e,runId});}await options?.onFinish?.({...payload,runId,messages:messageList.get.response.aiV5.model(),usage:payload.usage,totalUsage:payload.totalUsage});},onStepFinish:result.onStepFinish,onChunk:options.onChunk,onError:options.onError,onAbort:options.onAbort,activeTools:options.activeTools,abortSignal:options.abortSignal},structuredOutput:options.structuredOutput,outputProcessors:effectiveOutputProcessors,modelSettings:{temperature:0,...(options.modelSettings||{})},messageList:memoryData.messageList};return loopOptions;};}// 
src/agent/workflows/prepare-stream/prepare-memory-step.ts
+
function createMapResultsStep({capabilities,options,resourceId,runId,runtimeContext,memory,memoryConfig,saveQueueManager,agentAISpan,instructions,agentId,methodType}){return async({inputData,bail,tracingContext})=>{const toolsData=inputData["prepare-tools-step"];const memoryData=inputData["prepare-memory-step"];const result={...options,tools:toolsData.convertedTools,toolChoice:options.toolChoice,thread:memoryData.thread,threadId:memoryData.thread?.id,resourceId,runtimeContext,onStepFinish:async props=>{if(options.savePerStep){if(!memoryData.threadExists&&memory&&memoryData.thread){await memory.createThread({threadId:memoryData.thread?.id,title:memoryData.thread?.title,metadata:memoryData.thread?.metadata,resourceId:memoryData.thread?.resourceId,memoryConfig});memoryData.threadExists=true;}await capabilities.saveStepMessages({saveQueueManager,result:props,messageList:memoryData.messageList,threadId:memoryData.thread?.id,memoryConfig,runId});}return options.onStepFinish?.({...props,runId});},...(memoryData.tripwire&&{tripwire:memoryData.tripwire,tripwireReason:memoryData.tripwireReason})};if(result.tripwire){const agentModel=await capabilities.getModel({runtimeContext:result.runtimeContext});const modelOutput=await getModelOutputForTripwire({tripwireReason:result.tripwireReason,runId,tracingContext,options,model:agentModel,messageList:memoryData.messageList});return bail(modelOutput);}let effectiveOutputProcessors=options.outputProcessors||(capabilities.outputProcessors?typeof capabilities.outputProcessors==="function"?await capabilities.outputProcessors({runtimeContext:result.runtimeContext}):capabilities.outputProcessors:[]);if(options.structuredOutput?.model){const structuredProcessor=new StructuredOutputProcessor(options.structuredOutput);effectiveOutputProcessors=effectiveOutputProcessors?[...effectiveOutputProcessors,structuredProcessor]:[structuredProcessor];}const messageList=memoryData.messageList;const modelMethodType=getModelMethodFromAgentMethod(methodType);const loopOptions={methodType:modelMethodType,agentId,runtimeContext:result.runtimeContext,tracingContext:{currentSpan:agentAISpan},runId,toolChoice:result.toolChoice,tools:result.tools,resourceId:result.resourceId,threadId:result.threadId,stopWhen:result.stopWhen,maxSteps:result.maxSteps,providerOptions:result.providerOptions,options:{...(options.prepareStep&&{prepareStep:options.prepareStep}),onFinish:async payload=>{if(payload.finishReason==="error"){capabilities.logger.error("Error in agent stream",{error:payload.error,runId});return;}try{const outputText=messageList.get.all.core().map(m=>m.content).join("\n");await capabilities.executeOnFinish({result:payload,outputText,instructions,thread:result.thread,threadId:result.threadId,readOnlyMemory:options.memory?.readOnly,resourceId,memoryConfig,runtimeContext,agentAISpan,runId,messageList,threadExists:memoryData.threadExists,structuredOutput:!!options.structuredOutput?.schema,saveQueueManager,overrideScorers:options.scorers});}catch(e){capabilities.logger.error("Error saving memory on finish",{error:e,runId});}await 
options?.onFinish?.({...payload,runId,messages:messageList.get.response.aiV5.model(),usage:payload.usage,totalUsage:payload.totalUsage});},onStepFinish:result.onStepFinish,onChunk:options.onChunk,onError:options.onError,onAbort:options.onAbort,activeTools:options.activeTools,abortSignal:options.abortSignal},structuredOutput:options.structuredOutput,outputProcessors:effectiveOutputProcessors,modelSettings:{temperature:0,...(options.modelSettings||{})},messageList:memoryData.messageList};return loopOptions;};}// src/agent/workflows/prepare-stream/prepare-memory-step.ts
var import_fast_deep_equal=__toESM(require_fast_deep_equal(),1);var coreToolSchema=z.object({id:z.string().optional(),description:z.string().optional(),parameters:z.union([z.record(z.string(),z.any()),// JSON Schema as object
z.any()// Zod schema or other schema types - validated at tool execution
]),outputSchema:z.union([z.record(z.string(),z.any()),z.any()]).optional(),execute:z.function(z.tuple([z.any(),z.any()]),z.promise(z.any())).optional(),type:z.union([z.literal("function"),z.literal("provider-defined"),z.undefined()]).optional(),args:z.record(z.string(),z.any()).optional()});var storageThreadSchema=z.object({id:z.string(),title:z.string().optional(),resourceId:z.string(),createdAt:z.date(),updatedAt:z.date(),metadata:z.record(z.string(),z.any()).optional()});var prepareToolsStepOutputSchema=z.object({convertedTools:z.record(z.string(),coreToolSchema)});var prepareMemoryStepOutputSchema=z.object({threadExists:z.boolean(),thread:storageThreadSchema.optional(),messageList:z.instanceof(MessageList),tripwire:z.boolean().optional(),tripwireReason:z.string().optional()});// src/agent/workflows/prepare-stream/prepare-memory-step.ts
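storageThreadSchema requires id, resourceId, and createdAt/updatedAt dates, with optional title and metadata. A parse sketch against an equivalent schema (zod assumed available, as in this chunk):
// Illustrative sketch, not from the package: parsing a thread against an equivalent schema.
import { z } from 'zod';

const thread = z.object({
  id: z.string(),
  title: z.string().optional(),
  resourceId: z.string(),
  createdAt: z.date(),
  updatedAt: z.date(),
  metadata: z.record(z.string(), z.any()).optional(),
});

thread.parse({ id: 't-1', resourceId: 'user-1', createdAt: new Date(), updatedAt: new Date() }); // ok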
@@ -685,9 +687,9 @@ the following messages are from ${ymd}
`)??void 0;const processedMemoryMessages=await memory.processMessages({messages:messageList.get.remembered.v1(),newMessages:messageList.get.input.v1(),systemMessage,memorySystemMessage:memorySystemMessage||void 0});const processedList=new MessageList({threadId:threadObject.id,resourceId,generateMessageId:capabilities.generateMessageId,// @ts-ignore Flag for agent network messages
_agentNetworkAppend:capabilities._agentNetworkAppend});addSystemMessage(processedList,instructions);processedList.addSystem(memorySystemMessage).addSystem(systemMessages).add(options.context||[],"context");addSystemMessage(processedList,options.system,"user-provided");processedList.add(processedMemoryMessages,"memory").add(messageList.get.input.v2(),"user");return {thread:threadObject,messageList:processedList,...(tripwireTriggered&&{tripwire:true,tripwireReason}),threadExists:!!existingThread};}});}// src/agent/workflows/prepare-stream/prepare-tools-step.ts
function createPrepareToolsStep({capabilities,options,threadFromArgs,resourceId,runId,runtimeContext,agentAISpan,methodType,memory}){return createStep({id:"prepare-tools-step",inputSchema:z.object({}),outputSchema:prepareToolsStepOutputSchema,execute:async()=>{const toolEnhancements=[options?.toolsets&&Object.keys(options?.toolsets||{}).length>0?`toolsets present (${Object.keys(options?.toolsets||{}).length} tools)`:void 0,memory&&resourceId?"memory and resourceId available":void 0].filter(Boolean).join(", ");capabilities.logger.debug(`[Agent:${capabilities.agentName}] - Enhancing tools: ${toolEnhancements}`,{runId,toolsets:options?.toolsets?Object.keys(options?.toolsets):void 0,clientTools:options?.clientTools?Object.keys(options?.clientTools):void 0,hasMemory:!!memory,hasResourceId:!!resourceId});const threadId=threadFromArgs?.id;const convertedTools=await capabilities.convertTools({toolsets:options?.toolsets,clientTools:options?.clientTools,threadId,resourceId,runId,runtimeContext,tracingContext:{currentSpan:agentAISpan},writableStream:options.writableStream,methodType});return {convertedTools};}});}// src/agent/workflows/prepare-stream/stream-step.ts
-
function createStreamStep({capabilities,runId,returnScorerData,format="mastra",requireToolApproval,resumeContext,agentId,toolCallId}){return createStep({id:"stream-text-step",inputSchema:z.any(),// tried to type this in various ways but it's too complex
-
outputSchema:z.union([z.instanceof(MastraModelOutput),z.instanceof(AISDKV5OutputStream)]),execute:async({inputData,tracingContext})=>{const validatedInputData=inputData;capabilities.logger.debug(`Starting agent ${capabilities.agentName} llm stream call`,{runId});const processors=validatedInputData.outputProcessors||(capabilities.outputProcessors?typeof capabilities.outputProcessors==="function"?await capabilities.outputProcessors({runtimeContext:validatedInputData.runtimeContext||new RuntimeContext()}):capabilities.outputProcessors:[]);const streamResult=capabilities.llm.stream({...validatedInputData,outputProcessors:processors,returnScorerData,tracingContext,requireToolApproval,resumeContext,_internal:{generateId:capabilities.generateMessageId},agentId,toolCallId});if(format==="aisdk"){return streamResult.aisdk.v5;}return streamResult;}});}// src/agent/workflows/prepare-stream/index.ts
-
function createPrepareStreamWorkflow({capabilities,options,threadFromArgs,resourceId,runId,runtimeContext,agentAISpan,methodType,format,instructions,memoryConfig,memory,saveQueueManager,returnScorerData,requireToolApproval,resumeContext,agentId,toolCallId}){const prepareToolsStep=createPrepareToolsStep({capabilities,options,threadFromArgs,resourceId,runId,runtimeContext,agentAISpan,methodType,memory});const prepareMemoryStep2=createPrepareMemoryStep({capabilities,options,threadFromArgs,resourceId,runId,runtimeContext,instructions,memoryConfig,memory});const streamStep=createStreamStep({capabilities,runId,returnScorerData,format,requireToolApproval,resumeContext,agentId,toolCallId});const mapResultsStep=createMapResultsStep({capabilities,options,resourceId,runId,runtimeContext,memory,memoryConfig,saveQueueManager,agentAISpan,instructions,agentId});return createWorkflow({id:"execution-workflow",inputSchema:z.object({}),outputSchema:z.union([z.instanceof(MastraModelOutput),z.instanceof(AISDKV5OutputStream)]),steps:[prepareToolsStep,prepareMemoryStep2,streamStep],options:{tracingPolicy:{internal:1/* WORKFLOW */}}}).parallel([prepareToolsStep,prepareMemoryStep2]).map(mapResultsStep).then(streamStep).commit();}// src/agent/agent.ts
+
function createStreamStep({capabilities,runId,returnScorerData,format="mastra",requireToolApproval,resumeContext,agentId,toolCallId,methodType}){return createStep({id:"stream-text-step",inputSchema:z.any(),// tried to type this in various ways but it's too complex
+
outputSchema:z.union([z.instanceof(MastraModelOutput),z.instanceof(AISDKV5OutputStream)]),execute:async({inputData,tracingContext})=>{const validatedInputData=inputData;capabilities.logger.debug(`Starting agent ${capabilities.agentName} llm stream call`,{runId});const processors=validatedInputData.outputProcessors||(capabilities.outputProcessors?typeof capabilities.outputProcessors==="function"?await capabilities.outputProcessors({runtimeContext:validatedInputData.runtimeContext||new RuntimeContext()}):capabilities.outputProcessors:[]);const modelMethodType=getModelMethodFromAgentMethod(methodType);const streamResult=capabilities.llm.stream({...validatedInputData,outputProcessors:processors,returnScorerData,tracingContext,requireToolApproval,resumeContext,_internal:{generateId:capabilities.generateMessageId},agentId,toolCallId,methodType:modelMethodType});if(format==="aisdk"){return streamResult.aisdk.v5;}return streamResult;}});}// src/agent/workflows/prepare-stream/index.ts
+
function createPrepareStreamWorkflow({capabilities,options,threadFromArgs,resourceId,runId,runtimeContext,agentAISpan,methodType,format,instructions,memoryConfig,memory,saveQueueManager,returnScorerData,requireToolApproval,resumeContext,agentId,toolCallId}){const prepareToolsStep=createPrepareToolsStep({capabilities,options,threadFromArgs,resourceId,runId,runtimeContext,agentAISpan,methodType,memory});const prepareMemoryStep2=createPrepareMemoryStep({capabilities,options,threadFromArgs,resourceId,runId,runtimeContext,instructions,memoryConfig,memory});const streamStep=createStreamStep({capabilities,runId,returnScorerData,format,requireToolApproval,resumeContext,agentId,toolCallId,methodType});const mapResultsStep=createMapResultsStep({capabilities,options,resourceId,runId,runtimeContext,memory,memoryConfig,saveQueueManager,agentAISpan,instructions,agentId,methodType});return createWorkflow({id:"execution-workflow",inputSchema:z.object({}),outputSchema:z.union([z.instanceof(MastraModelOutput),z.instanceof(AISDKV5OutputStream)]),steps:[prepareToolsStep,prepareMemoryStep2,streamStep],options:{tracingPolicy:{internal:1/* WORKFLOW */}}}).parallel([prepareToolsStep,prepareMemoryStep2]).map(mapResultsStep).then(streamStep).commit();}// src/agent/agent.ts
function resolveMaybePromise(value,cb){if(value instanceof Promise||value!=null&&typeof value.then==="function"){return Promise.resolve(value).then(cb);}return cb(value);}function resolveThreadIdFromArgs(args){if(args?.memory?.thread){if(typeof args.memory.thread==="string")return {id:args.memory.thread};if(typeof args.memory.thread==="object"&&args.memory.thread.id)return args.memory.thread;}if(args?.threadId)return {id:args.threadId};return void 0;}var _Agent_decorators,_init,_a;_Agent_decorators=[InstrumentClass({prefix:"agent",excludeMethods:["hasOwnMemory","getMemory","__primitive","__registerMastra","__registerPrimitives","__runInputProcessors","__runOutputProcessors","_wrapToolsWithAITracing","getProcessorRunner","__setTools","__setLogger","__setTelemetry","log","listAgents","getModel","getInstructions","getTools","getLLM","getWorkflows","getDefaultGenerateOptions","getDefaultStreamOptions","getDescription","getScorers","getVoice"]})];var Agent=class extends(_a=MastraBase){id;name;#instructions;#description;model;#originalModel;maxRetries;#mastra;#memory;#workflows;#defaultGenerateOptions;#defaultStreamOptions;#defaultVNextStreamOptions;#tools;evals;#scorers;#agents;#voice;#inputProcessors;#outputProcessors;#options;// This flag is for agent network messages. We should change the agent network formatting and remove this flag after.
_agentNetworkAppend=false;/**
* Creates a new Agent instance with the specified configuration.
@@ -881,7 +883,7 @@ _agentNetworkAppend=false;/**
* Set the concrete tools for the agent
* @param tools
* @internal
-
*/__setTools(tools){this.#tools=tools;this.logger.debug(`[Agents:${this.name}] Tools set for agent ${this.name}`,{model:this.model,name:this.name});}async generateTitleFromUserMessage({message,runtimeContext=new RuntimeContext(),tracingContext,model,instructions}){const llm=await this.getLLM({runtimeContext,model});const normMessage=new MessageList().add(message,"user").get.all.ui().at(-1);if(!normMessage){throw new Error(`Could not generate title from input ${JSON.stringify(message)}`);}const partsToGen=[];for(const part of normMessage.parts){if(part.type===`text`){partsToGen.push(part);}else if(part.type===`source`){partsToGen.push({type:"text",text:`User added URL: ${part.source.url.substring(0,100)}`});}else if(part.type===`file`){partsToGen.push({type:"text",text:`User added ${part.mimeType} file: ${part.data.substring(0,100)}`});}}const systemInstructions=await this.resolveTitleInstructions(runtimeContext,instructions);let text="";if(llm.getModel().specificationVersion==="v2"){const messageList=new MessageList().add([{role:"system",content:systemInstructions}],"system").add([{role:"user",content:JSON.stringify(partsToGen)}],"input");const result=llm.stream({runtimeContext,tracingContext,messageList,agentId:this.id});text=await result.text;}else {const result=await llm.__text({runtimeContext,tracingContext,messages:[{role:"system",content:systemInstructions},{role:"user",content:JSON.stringify(partsToGen)}]});text=result.text;}const cleanedText=text.replace(/<think>[\s\S]*?<\/think>/g,"").trim();return cleanedText;}getMostRecentUserMessage(messages){const userMessages=messages.filter(message=>message.role==="user");return userMessages.at(-1);}async genTitle(userMessage,runtimeContext,tracingContext,model,instructions){try{if(userMessage){const normMessage=new MessageList().add(userMessage,"user").get.all.ui().at(-1);if(normMessage){return await this.generateTitleFromUserMessage({message:normMessage,runtimeContext,tracingContext,model,instructions});}}return `New Thread ${(/* @__PURE__ */new Date()).toISOString()}`;}catch(e){this.logger.error("Error generating title:",e);return void 0;}}__setMemory(memory){this.#memory=memory;}/* @deprecated use agent.getMemory() and query memory directly */async fetchMemory({threadId,thread:passedThread,memoryConfig,resourceId,runId,userMessages,systemMessage,messageList=new MessageList({threadId,resourceId}),runtimeContext=new RuntimeContext()}){const memory=await this.getMemory({runtimeContext});if(memory){const thread=passedThread??(await memory.getThreadById({threadId}));if(!thread){return {threadId:threadId||"",messages:userMessages||[]};}if(userMessages&&userMessages.length>0){messageList.add(userMessages,"memory");}if(systemMessage?.role==="system"){messageList.addSystem(systemMessage,"memory");}const[memoryMessages,memorySystemMessage]=threadId&&memory?await Promise.all([memory.rememberMessages({threadId,resourceId,config:memoryConfig,vectorMessageSearch:messageList.getLatestUserContent()||""}).then(r=>r.messagesV2),memory.getSystemMessage({threadId,memoryConfig})]):[[],null];this.logger.debug("Fetched messages from memory",{threadId,runId,fetchedCount:memoryMessages.length});if(memorySystemMessage){messageList.addSystem(memorySystemMessage,"memory");}messageList.add(memoryMessages,"memory");const systemMessages=messageList.getSystemMessages()?.map(m=>m.content)?.join(`
+
*/__setTools(tools){this.#tools=tools;this.logger.debug(`[Agents:${this.name}] Tools set for agent ${this.name}`,{model:this.model,name:this.name});}async generateTitleFromUserMessage({message,runtimeContext=new RuntimeContext(),tracingContext,model,instructions}){const llm=await this.getLLM({runtimeContext,model});const normMessage=new MessageList().add(message,"user").get.all.ui().at(-1);if(!normMessage){throw new Error(`Could not generate title from input ${JSON.stringify(message)}`);}const partsToGen=[];for(const part of normMessage.parts){if(part.type===`text`){partsToGen.push(part);}else if(part.type===`source`){partsToGen.push({type:"text",text:`User added URL: ${part.source.url.substring(0,100)}`});}else if(part.type===`file`){partsToGen.push({type:"text",text:`User added ${part.mimeType} file: ${part.data.substring(0,100)}`});}}const systemInstructions=await this.resolveTitleInstructions(runtimeContext,instructions);let text="";if(llm.getModel().specificationVersion==="v2"){const messageList=new MessageList().add([{role:"system",content:systemInstructions}],"system").add([{role:"user",content:JSON.stringify(partsToGen)}],"input");const result=llm.stream({methodType:"generate",runtimeContext,tracingContext,messageList,agentId:this.id});text=await result.text;}else {const result=await llm.__text({runtimeContext,tracingContext,messages:[{role:"system",content:systemInstructions},{role:"user",content:JSON.stringify(partsToGen)}]});text=result.text;}const cleanedText=text.replace(/<think>[\s\S]*?<\/think>/g,"").trim();return cleanedText;}getMostRecentUserMessage(messages){const userMessages=messages.filter(message=>message.role==="user");return userMessages.at(-1);}async genTitle(userMessage,runtimeContext,tracingContext,model,instructions){try{if(userMessage){const normMessage=new MessageList().add(userMessage,"user").get.all.ui().at(-1);if(normMessage){return await this.generateTitleFromUserMessage({message:normMessage,runtimeContext,tracingContext,model,instructions});}}return `New Thread ${(/* @__PURE__ */new Date()).toISOString()}`;}catch(e){this.logger.error("Error generating title:",e);return void 0;}}__setMemory(memory){this.#memory=memory;}/* @deprecated use agent.getMemory() and query memory directly */async fetchMemory({threadId,thread:passedThread,memoryConfig,resourceId,runId,userMessages,systemMessage,messageList=new MessageList({threadId,resourceId}),runtimeContext=new RuntimeContext()}){const memory=await this.getMemory({runtimeContext});if(memory){const thread=passedThread??(await memory.getThreadById({threadId}));if(!thread){return {threadId:threadId||"",messages:userMessages||[]};}if(userMessages&&userMessages.length>0){messageList.add(userMessages,"memory");}if(systemMessage?.role==="system"){messageList.addSystem(systemMessage,"memory");}const[memoryMessages,memorySystemMessage]=threadId&&memory?await Promise.all([memory.rememberMessages({threadId,resourceId,config:memoryConfig,vectorMessageSearch:messageList.getLatestUserContent()||""}).then(r=>r.messagesV2),memory.getSystemMessage({threadId,memoryConfig})]):[[],null];this.logger.debug("Fetched messages from memory",{threadId,runId,fetchedCount:memoryMessages.length});if(memorySystemMessage){messageList.addSystem(memorySystemMessage,"memory");}messageList.add(memoryMessages,"memory");const systemMessages=messageList.getSystemMessages()?.map(m=>m.content)?.join(`
`)??void 0;const newMessages=messageList.get.input.v1();const processedMemoryMessages=await memory.processMessages({// these will be processed
messages:messageList.get.remembered.v1(),// these are here for inspecting but shouldn't be returned by the processor
// - ex TokenLimiter needs to measure all tokens even though it's only processing remembered messages
@@ -965,7 +967,8 @@ _agentNetworkAppend:this._agentNetworkAppend}).add(result.response.messages,"res
*/async prepareLLMOptions(messages,options,methodType){const{context,memoryOptions:memoryConfigFromArgs,resourceId:resourceIdFromArgs,maxSteps,onStepFinish,toolsets,clientTools,temperature,toolChoice="auto",runtimeContext=new RuntimeContext(),tracingContext,tracingOptions,savePerStep,writableStream,...args}=options;const threadFromArgs=resolveThreadIdFromArgs({threadId:args.threadId,memory:args.memory});const resourceId=args.memory?.resource||resourceIdFromArgs;const memoryConfig=args.memory?.options||memoryConfigFromArgs;if(resourceId&&threadFromArgs&&!this.hasOwnMemory()){this.logger.warn(`[Agent:${this.name}] - No memory is configured but resourceId and threadId were passed in args. This will not work.`);}const runId=args.runId||this.#mastra?.generateId()||randomUUID();const instructions=args.instructions||(await this.getInstructions({runtimeContext}));const llm=await this.getLLM({runtimeContext});const activeSpan=Telemetry.getActiveSpan();const baggageEntries={};if(threadFromArgs?.id){if(activeSpan){activeSpan.setAttribute("threadId",threadFromArgs.id);}baggageEntries.threadId={value:threadFromArgs.id};}if(resourceId){if(activeSpan){activeSpan.setAttribute("resourceId",resourceId);}baggageEntries.resourceId={value:resourceId};}if(Object.keys(baggageEntries).length>0){Telemetry.setBaggage(baggageEntries);}const memory=await this.getMemory({runtimeContext});const saveQueueManager=new SaveQueueManager({logger:this.logger,memory});const{before,after}=this.__primitive({messages,instructions,context,thread:threadFromArgs,memoryConfig,resourceId,runId,toolsets,clientTools,runtimeContext,saveQueueManager,writableStream,methodType,tracingContext,tracingOptions});let messageList;let thread;let threadExists;return {llm,before:async()=>{const beforeResult=await before();const{messageObjects,convertedTools,agentAISpan}=beforeResult;threadExists=beforeResult.threadExists||false;messageList=beforeResult.messageList;thread=beforeResult.thread;const threadId=thread?.id;const result={...options,messages:messageObjects,tools:convertedTools,runId,temperature,toolChoice,threadId,resourceId,runtimeContext,onStepFinish:async props=>{if(savePerStep){if(!threadExists&&memory&&thread){await memory.createThread({threadId,title:thread.title,metadata:thread.metadata,resourceId:thread.resourceId,memoryConfig});threadExists=true;}await this.saveStepMessages({saveQueueManager,result:props,messageList,threadId,memoryConfig,runId});}return onStepFinish?.({...props,runId});},...(beforeResult.tripwire&&{tripwire:beforeResult.tripwire,tripwireReason:beforeResult.tripwireReason}),...args,agentAISpan};return result;},after:async({result,outputText,structuredOutput=false,agentAISpan,overrideScorers})=>{const afterResult=await after({result,outputText,threadId:thread?.id,thread,memoryConfig,runId,messageList,structuredOutput,threadExists,agentAISpan,overrideScorers});return afterResult;}};}/**
* Resolves and prepares model configurations for the LLM.
* @internal
-
*/async prepareModels(runtimeContext,model){if(model||!Array.isArray(this.model)){const modelToUse=model??this.model;const resolvedModel=typeof modelToUse==="function"?await modelToUse({runtimeContext,mastra:this.#mastra}):modelToUse;if(resolvedModel
+
*/async prepareModels(runtimeContext,model){if(model||!Array.isArray(this.model)){const modelToUse=model??this.model;const resolvedModel=typeof modelToUse==="function"?await modelToUse({runtimeContext,mastra:this.#mastra}):modelToUse;if(resolvedModel?.specificationVersion!=="v2"){const mastraError=new MastraError({id:"AGENT_PREPARE_MODELS_INCOMPATIBLE_WITH_MODEL_ARRAY_V1",domain:"AGENT"/* AGENT */,category:"USER"/* USER */,details:{agentName:this.name},text:`[Agent:${this.name}] - Only v2 models are allowed when an array of models is provided`});this.logger.trackException(mastraError);this.logger.error(mastraError.toString());throw mastraError;}return [{id:"main",// TODO fix type check
+
model:resolvedModel,maxRetries:this.maxRetries??0,enabled:true}];}const models=await Promise.all(this.model.map(async modelConfig=>{const model2=await this.resolveModelConfig(modelConfig.model,runtimeContext);if(!isV2Model(model2)){const mastraError=new MastraError({id:"AGENT_PREPARE_MODELS_INCOMPATIBLE_WITH_MODEL_ARRAY_V1",domain:"AGENT"/* AGENT */,category:"USER"/* USER */,details:{agentName:this.name},text:`[Agent:${this.name}] - Only v2 models are allowed when an array of models is provided`});this.logger.trackException(mastraError);this.logger.error(mastraError.toString());throw mastraError;}const modelId=modelConfig.id||model2.modelId;if(!modelId){const mastraError=new MastraError({id:"AGENT_PREPARE_MODELS_MISSING_MODEL_ID",domain:"AGENT"/* AGENT */,category:"USER"/* USER */,details:{agentName:this.name},text:`[Agent:${this.name}] - Unable to determine model ID. Please provide an explicit ID in the model configuration.`});this.logger.trackException(mastraError);this.logger.error(mastraError.toString());throw mastraError;}return {id:modelId,model:model2,maxRetries:modelConfig.maxRetries??0,enabled:modelConfig.enabled??true};}));return models;}/**
* Merges telemetry wrapper with default onFinish callback when needed
* @internal
*/#mergeOnFinishWithTelemetry(streamOptions,defaultStreamOptions){let finalOnFinish=streamOptions?.onFinish||defaultStreamOptions.onFinish;if(streamOptions?.onFinish&&streamOptions.onFinish.__hasOriginalOnFinish===false&&defaultStreamOptions.onFinish){const telemetryWrapper=streamOptions.onFinish;const defaultCallback=defaultStreamOptions.onFinish;finalOnFinish=async data=>{await telemetryWrapper(data);await defaultCallback(data);};}return finalOnFinish;}/**
@@ -1212,5 +1215,5 @@ providerMetadata:baseFinishStep.providerMetadata,text:baseFinishStep.text,warnin
/** @internal */_getImmediateToolCalls(){return this.#toolCalls;}/** @internal */_getImmediateToolResults(){return this.#toolResults;}/** @internal */_getImmediateText(){return this.#bufferedText.join("");}/** @internal */_getImmediateObject(){return this.#bufferedObject;}/** @internal */_getImmediateUsage(){return this.#usageCount;}/** @internal */_getImmediateWarnings(){return this.#warnings;}/** @internal */_getImmediateFinishReason(){return this.#finishReason;}/** @internal */_getBaseStream(){return this.#baseStream;}#getTotalUsage(){let total=this.#usageCount.totalTokens;if(total===void 0){const input=this.#usageCount.inputTokens??0;const output=this.#usageCount.outputTokens??0;const reasoning=this.#usageCount.reasoningTokens??0;total=input+output+reasoning;}return {inputTokens:this.#usageCount.inputTokens,outputTokens:this.#usageCount.outputTokens,totalTokens:total,reasoningTokens:this.#usageCount.reasoningTokens,cachedInputTokens:this.#usageCount.cachedInputTokens};}#emitChunk(chunk){this.#bufferedChunks.push(chunk);this.#emitter.emit("chunk",chunk);}#createEventedStream(){const self=this;return new ReadableStream$1({start(controller){self.#bufferedChunks.forEach(chunk=>{controller.enqueue(chunk);});if(self.#streamFinished){controller.close();return;}const chunkHandler=chunk=>{controller.enqueue(chunk);};const finishHandler=()=>{self.#emitter.off("chunk",chunkHandler);self.#emitter.off("finish",finishHandler);controller.close();};self.#emitter.on("chunk",chunkHandler);self.#emitter.on("finish",finishHandler);},pull(_controller){if(!self.#consumptionStarted){void self.consumeStream();}},cancel(){self.#emitter.removeAllListeners();}});}get status(){return this.#status;}serializeState(){return {status:this.#status,bufferedSteps:this.#bufferedSteps,bufferedReasoningDetails:this.#bufferedReasoningDetails,bufferedByStep:this.#bufferedByStep,bufferedText:this.#bufferedText,bufferedTextChunks:this.#bufferedTextChunks,bufferedSources:this.#bufferedSources,bufferedReasoning:this.#bufferedReasoning,bufferedFiles:this.#bufferedFiles,toolCallArgsDeltas:this.#toolCallArgsDeltas,toolCallDeltaIdNameMap:this.#toolCallDeltaIdNameMap,toolCalls:this.#toolCalls,toolResults:this.#toolResults,warnings:this.#warnings,finishReason:this.#finishReason,request:this.#request,usageCount:this.#usageCount,tripwire:this.#tripwire,tripwireReason:this.#tripwireReason,messageList:this.messageList.serialize()};}deserializeState(state){this.#status=state.status;this.#bufferedSteps=state.bufferedSteps;this.#bufferedReasoningDetails=state.bufferedReasoningDetails;this.#bufferedByStep=state.bufferedByStep;this.#bufferedText=state.bufferedText;this.#bufferedTextChunks=state.bufferedTextChunks;this.#bufferedSources=state.bufferedSources;this.#bufferedReasoning=state.bufferedReasoning;this.#bufferedFiles=state.bufferedFiles;this.#toolCallArgsDeltas=state.toolCallArgsDeltas;this.#toolCallDeltaIdNameMap=state.toolCallDeltaIdNameMap;this.#toolCalls=state.toolCalls;this.#toolResults=state.toolResults;this.#warnings=state.warnings;this.#finishReason=state.finishReason;this.#request=state.request;this.#usageCount=state.usageCount;this.#tripwire=state.tripwire;this.#tripwireReason=state.tripwireReason;this.messageList=this.messageList.deserialize(state.messageList);}};
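MastraModelOutput.serializeState and deserializeState round-trip the buffered stream state (text, tool calls, usage, message list) so a suspended run, such as a tool call awaiting approval, can resume without losing what was already streamed. A minimal round-trip sketch over a reduced state shape:
// Illustrative sketch, not from the package: a reduced serialize/deserialize round trip.
interface StreamState { bufferedText: string[]; toolCalls: unknown[]; usage: { totalTokens: number } }

class TinyOutput {
  private state: StreamState = { bufferedText: [], toolCalls: [], usage: { totalTokens: 0 } };
  serializeState(): StreamState { return structuredClone(this.state); }
  deserializeState(state: StreamState): void { this.state = structuredClone(state); }
}

const suspended = new TinyOutput();
const snapshot = suspended.serializeState(); // captured when the run suspends
const resumed = new TinyOutput();
resumed.deserializeState(snapshot);          // restored when the run resumes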
export { AISDKV5OutputStream, Agent, BatchPartsProcessor, ChunkFrom, DefaultExecutionEngine, ExecutionEngine, LanguageDetector, LanguageDetectorInputProcessor, LegacyStep, LegacyWorkflow, MastraAgentNetworkStream, MastraModelOutput, ModerationInputProcessor, ModerationProcessor, PIIDetector, PIIDetectorInputProcessor, ProcessorState, PromptInjectionDetector, PromptInjectionDetectorInputProcessor, Run, StructuredOutputProcessor, SystemPromptScrubber, TokenLimiterProcessor, TripWire, UnicodeNormalizer, UnicodeNormalizerInputProcessor, WhenConditionReturnValue, Workflow, WorkflowRunOutput, agentToStep, cloneStep, cloneWorkflow, convertFullStreamChunkToUIMessageStream, convertMastraChunkToAISDKv5, createStep, createWorkflow, getActivePathsAndStatus, getResultActivePaths, getResumeLabelsByStepId, getStepResult, getStepResult2, getSuspendedPaths, getZodErrors, isAgent, isConditionalKey, isErrorEvent, isFinalState, isLimboState, isTransitionEvent, isVariableReference, isWorkflow, loop, mapVariable, mergeChildValue, recursivelyCheckForFinalState, resolveVariables, tryGenerateWithJsonFallback, tryStreamWithJsonFallback, updateStepInHierarchy, validateStepInput, workflowToStep };
-
//# sourceMappingURL=chunk-
-
//# sourceMappingURL=chunk-
+
//# sourceMappingURL=chunk-SHPLHQFH.js.map
+
//# sourceMappingURL=chunk-SHPLHQFH.js.map