@n8n/n8n-nodes-langchain 1.120.2 → 1.121.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. package/dist/credentials/VercelAiGatewayApi.credentials.js +1 -1
  2. package/dist/credentials/VercelAiGatewayApi.credentials.js.map +1 -1
  3. package/dist/known/credentials.json +1 -0
  4. package/dist/known/nodes.json +4 -0
  5. package/dist/nodes/agents/Agent/V3/AgentV3.node.js +1 -1
  6. package/dist/nodes/agents/Agent/V3/AgentV3.node.js.map +1 -1
  7. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/execute.js +20 -399
  8. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/execute.js.map +1 -1
  9. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/buildExecutionContext.js +74 -0
  10. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/buildExecutionContext.js.map +1 -0
  11. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/buildResponseMetadata.js +37 -0
  12. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/buildResponseMetadata.js.map +1 -0
  13. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/checkMaxIterations.js +40 -0
  14. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/checkMaxIterations.js.map +1 -0
  15. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/createAgentSequence.js +61 -0
  16. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/createAgentSequence.js.map +1 -0
  17. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/executeBatch.js +88 -0
  18. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/executeBatch.js.map +1 -0
  19. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/finalizeResult.js +58 -0
  20. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/finalizeResult.js.map +1 -0
  21. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/index.js +50 -0
  22. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/index.js.map +1 -0
  23. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/prepareItemContext.js +66 -0
  24. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/prepareItemContext.js.map +1 -0
  25. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/runAgent.js +99 -0
  26. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/runAgent.js.map +1 -0
  27. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/types.js +17 -0
  28. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/types.js.map +1 -0
  29. package/dist/nodes/agents/Agent/agents/ToolsAgent/common.js +55 -19
  30. package/dist/nodes/agents/Agent/agents/ToolsAgent/common.js.map +1 -1
  31. package/dist/nodes/document_loaders/DocumentGithubLoader/DocumentGithubLoader.node.js +1 -0
  32. package/dist/nodes/document_loaders/DocumentGithubLoader/DocumentGithubLoader.node.js.map +1 -1
  33. package/dist/nodes/mcp/McpClient/McpClient.node.js +335 -0
  34. package/dist/nodes/mcp/McpClient/McpClient.node.js.map +1 -0
  35. package/dist/nodes/mcp/McpClient/listSearch.js +58 -0
  36. package/dist/nodes/mcp/McpClient/listSearch.js.map +1 -0
  37. package/dist/nodes/mcp/McpClient/resourceMapping.js +61 -0
  38. package/dist/nodes/mcp/McpClient/resourceMapping.js.map +1 -0
  39. package/dist/nodes/mcp/McpClient/utils.js +248 -0
  40. package/dist/nodes/mcp/McpClient/utils.js.map +1 -0
  41. package/dist/nodes/mcp/McpClientTool/McpClientTool.node.js +13 -55
  42. package/dist/nodes/mcp/McpClientTool/McpClientTool.node.js.map +1 -1
  43. package/dist/nodes/mcp/McpClientTool/loadOptions.js +2 -3
  44. package/dist/nodes/mcp/McpClientTool/loadOptions.js.map +1 -1
  45. package/dist/nodes/mcp/McpClientTool/types.js.map +1 -1
  46. package/dist/nodes/mcp/McpClientTool/utils.js +2 -174
  47. package/dist/nodes/mcp/McpClientTool/utils.js.map +1 -1
  48. package/dist/nodes/mcp/{McpClientTool → shared}/descriptions.js +40 -0
  49. package/dist/nodes/mcp/shared/descriptions.js.map +1 -0
  50. package/dist/nodes/mcp/shared/types.js +17 -0
  51. package/dist/nodes/mcp/shared/types.js.map +1 -0
  52. package/dist/nodes/mcp/shared/utils.js +231 -0
  53. package/dist/nodes/mcp/shared/utils.js.map +1 -0
  54. package/dist/nodes/tools/ToolHttpRequest/utils.js +5 -11
  55. package/dist/nodes/tools/ToolHttpRequest/utils.js.map +1 -1
  56. package/dist/nodes/vendors/OpenAi/OpenAi.node.js +3 -2
  57. package/dist/nodes/vendors/OpenAi/OpenAi.node.js.map +1 -1
  58. package/dist/nodes/vendors/OpenAi/v1/actions/audio/index.js +2 -2
  59. package/dist/nodes/vendors/OpenAi/v1/actions/audio/index.js.map +1 -1
  60. package/dist/nodes/vendors/OpenAi/v2/OpenAiV2.node.js +1 -1
  61. package/dist/nodes/vendors/OpenAi/v2/OpenAiV2.node.js.map +1 -1
  62. package/dist/nodes/vendors/OpenAi/v2/actions/audio/index.js +2 -2
  63. package/dist/nodes/vendors/OpenAi/v2/actions/audio/index.js.map +1 -1
  64. package/dist/nodes/vendors/OpenAi/v2/actions/text/classify.operation.js +12 -3
  65. package/dist/nodes/vendors/OpenAi/v2/actions/text/classify.operation.js.map +1 -1
  66. package/dist/types/credentials.json +2 -2
  67. package/dist/types/nodes.json +5 -4
  68. package/dist/utils/agent-execution/buildSteps.js +77 -0
  69. package/dist/utils/agent-execution/buildSteps.js.map +1 -0
  70. package/dist/utils/agent-execution/createEngineRequests.js +48 -0
  71. package/dist/utils/agent-execution/createEngineRequests.js.map +1 -0
  72. package/dist/utils/agent-execution/index.js +42 -0
  73. package/dist/utils/agent-execution/index.js.map +1 -0
  74. package/dist/utils/agent-execution/memoryManagement.js +66 -0
  75. package/dist/utils/agent-execution/memoryManagement.js.map +1 -0
  76. package/dist/utils/agent-execution/processEventStream.js +128 -0
  77. package/dist/utils/agent-execution/processEventStream.js.map +1 -0
  78. package/dist/utils/agent-execution/types.js +17 -0
  79. package/dist/utils/agent-execution/types.js.map +1 -0
  80. package/package.json +11 -9
  81. package/dist/nodes/mcp/McpClientTool/descriptions.js.map +0 -1

package/dist/credentials/VercelAiGatewayApi.credentials.js
@@ -25,7 +25,7 @@ class VercelAiGatewayApi {
  constructor() {
  this.name = "vercelAiGatewayApi";
  this.displayName = "Vercel AI Gateway";
- this.documentationUrl = "vercelaigateway";
+ this.documentationUrl = "vercel";
  this.properties = [
  {
  displayName: "API Key or OIDC Token",

package/dist/credentials/VercelAiGatewayApi.credentials.js.map
@@ -1 +1 @@
- {"version":3,"sources":["../../credentials/VercelAiGatewayApi.credentials.ts"],"sourcesContent":["import type {\n\tIAuthenticateGeneric,\n\tICredentialTestRequest,\n\tICredentialType,\n\tINodeProperties,\n} from 'n8n-workflow';\n\nexport class VercelAiGatewayApi implements ICredentialType {\n\tname = 'vercelAiGatewayApi';\n\n\tdisplayName = 'Vercel AI Gateway';\n\n\tdocumentationUrl = 'vercelaigateway';\n\n\tproperties: INodeProperties[] = [\n\t\t{\n\t\t\tdisplayName: 'API Key or OIDC Token',\n\t\t\tname: 'apiKey',\n\t\t\ttype: 'string',\n\t\t\ttypeOptions: { password: true },\n\t\t\trequired: true,\n\t\t\tdefault: '',\n\t\t\tdescription: 'Your credentials for the Vercel AI Gateway',\n\t\t},\n\t\t{\n\t\t\tdisplayName: 'Base URL',\n\t\t\tname: 'url',\n\t\t\ttype: 'string',\n\t\t\trequired: true,\n\t\t\tdefault: 'https://ai-gateway.vercel.sh/v1',\n\t\t\tdescription: 'The base URL for your Vercel AI Gateway instance',\n\t\t\tplaceholder: 'https://ai-gateway.vercel.sh/v1',\n\t\t},\n\t];\n\n\tauthenticate: IAuthenticateGeneric = {\n\t\ttype: 'generic',\n\t\tproperties: {\n\t\t\theaders: {\n\t\t\t\tAuthorization: '=Bearer {{$credentials.apiKey}}',\n\t\t\t\t'http-referer': 'https://n8n.io/',\n\t\t\t\t'x-title': 'n8n',\n\t\t\t},\n\t\t},\n\t};\n\n\ttest: ICredentialTestRequest = {\n\t\trequest: {\n\t\t\tbaseURL: '={{ $credentials.url }}',\n\t\t\turl: '/chat/completions',\n\t\t\tmethod: 'POST',\n\t\t\theaders: {\n\t\t\t\t'http-referer': 'https://n8n.io/',\n\t\t\t\t'x-title': 'n8n',\n\t\t\t},\n\t\t\tbody: {\n\t\t\t\tmodel: 'openai/gpt-4.1-nano',\n\t\t\t\tmessages: [{ role: 'user', content: 'test' }],\n\t\t\t\tmax_tokens: 1,\n\t\t\t},\n\t\t},\n\t};\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAOO,MAAM,mBAA8C;AAAA,EAApD;AACN,gBAAO;AAEP,uBAAc;AAEd,4BAAmB;AAEnB,sBAAgC;AAAA,MAC/B;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,aAAa,EAAE,UAAU,KAAK;AAAA,QAC9B,UAAU;AAAA,QACV,SAAS;AAAA,QACT,aAAa;AAAA,MACd;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,UAAU;AAAA,QACV,SAAS;AAAA,QACT,aAAa;AAAA,QACb,aAAa;AAAA,MACd;AAAA,IACD;AAEA,wBAAqC;AAAA,MACpC,MAAM;AAAA,MACN,YAAY;AAAA,QACX,SAAS;AAAA,UACR,eAAe;AAAA,UACf,gBAAgB;AAAA,UAChB,WAAW;AAAA,QACZ;AAAA,MACD;AAAA,IACD;AAEA,gBAA+B;AAAA,MAC9B,SAAS;AAAA,QACR,SAAS;AAAA,QACT,KAAK;AAAA,QACL,QAAQ;AAAA,QACR,SAAS;AAAA,UACR,gBAAgB;AAAA,UAChB,WAAW;AAAA,QACZ;AAAA,QACA,MAAM;AAAA,UACL,OAAO;AAAA,UACP,UAAU,CAAC,EAAE,MAAM,QAAQ,SAAS,OAAO,CAAC;AAAA,UAC5C,YAAY;AAAA,QACb;AAAA,MACD;AAAA,IACD;AAAA;AACD;","names":[]}
+ {"version":3,"sources":["../../credentials/VercelAiGatewayApi.credentials.ts"],"sourcesContent":["import type {\n\tIAuthenticateGeneric,\n\tICredentialTestRequest,\n\tICredentialType,\n\tINodeProperties,\n} from 'n8n-workflow';\n\nexport class VercelAiGatewayApi implements ICredentialType {\n\tname = 'vercelAiGatewayApi';\n\n\tdisplayName = 'Vercel AI Gateway';\n\n\tdocumentationUrl = 'vercel';\n\n\tproperties: INodeProperties[] = [\n\t\t{\n\t\t\tdisplayName: 'API Key or OIDC Token',\n\t\t\tname: 'apiKey',\n\t\t\ttype: 'string',\n\t\t\ttypeOptions: { password: true },\n\t\t\trequired: true,\n\t\t\tdefault: '',\n\t\t\tdescription: 'Your credentials for the Vercel AI Gateway',\n\t\t},\n\t\t{\n\t\t\tdisplayName: 'Base URL',\n\t\t\tname: 'url',\n\t\t\ttype: 'string',\n\t\t\trequired: true,\n\t\t\tdefault: 'https://ai-gateway.vercel.sh/v1',\n\t\t\tdescription: 'The base URL for your Vercel AI Gateway instance',\n\t\t\tplaceholder: 'https://ai-gateway.vercel.sh/v1',\n\t\t},\n\t];\n\n\tauthenticate: IAuthenticateGeneric = {\n\t\ttype: 'generic',\n\t\tproperties: {\n\t\t\theaders: {\n\t\t\t\tAuthorization: '=Bearer {{$credentials.apiKey}}',\n\t\t\t\t'http-referer': 'https://n8n.io/',\n\t\t\t\t'x-title': 'n8n',\n\t\t\t},\n\t\t},\n\t};\n\n\ttest: ICredentialTestRequest = {\n\t\trequest: {\n\t\t\tbaseURL: '={{ $credentials.url }}',\n\t\t\turl: '/chat/completions',\n\t\t\tmethod: 'POST',\n\t\t\theaders: {\n\t\t\t\t'http-referer': 'https://n8n.io/',\n\t\t\t\t'x-title': 'n8n',\n\t\t\t},\n\t\t\tbody: {\n\t\t\t\tmodel: 'openai/gpt-4.1-nano',\n\t\t\t\tmessages: [{ role: 'user', content: 'test' }],\n\t\t\t\tmax_tokens: 1,\n\t\t\t},\n\t\t},\n\t};\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAOO,MAAM,mBAA8C;AAAA,EAApD;AACN,gBAAO;AAEP,uBAAc;AAEd,4BAAmB;AAEnB,sBAAgC;AAAA,MAC/B;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,aAAa,EAAE,UAAU,KAAK;AAAA,QAC9B,UAAU;AAAA,QACV,SAAS;AAAA,QACT,aAAa;AAAA,MACd;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,UAAU;AAAA,QACV,SAAS;AAAA,QACT,aAAa;AAAA,QACb,aAAa;AAAA,MACd;AAAA,IACD;AAEA,wBAAqC;AAAA,MACpC,MAAM;AAAA,MACN,YAAY;AAAA,QACX,SAAS;AAAA,UACR,eAAe;AAAA,UACf,gBAAgB;AAAA,UAChB,WAAW;AAAA,QACZ;AAAA,MACD;AAAA,IACD;AAEA,gBAA+B;AAAA,MAC9B,SAAS;AAAA,QACR,SAAS;AAAA,QACT,KAAK;AAAA,QACL,QAAQ;AAAA,QACR,SAAS;AAAA,UACR,gBAAgB;AAAA,UAChB,WAAW;AAAA,QACZ;AAAA,QACA,MAAM;AAAA,UACL,OAAO;AAAA,UACP,UAAU,CAAC,EAAE,MAAM,QAAQ,SAAS,OAAO,CAAC;AAAA,UAC5C,YAAY;AAAA,QACb;AAAA,MACD;AAAA,IACD;AAAA;AACD;","names":[]}

package/dist/known/credentials.json
@@ -80,6 +80,7 @@
  "oAuth2Api"
  ],
  "supportedNodes": [
+ "mcpClient",
  "mcpClientTool"
  ]
  },

package/dist/known/nodes.json
@@ -191,6 +191,10 @@
  "className": "LmOpenHuggingFaceInference",
  "sourcePath": "dist/nodes/llms/LMOpenHuggingFaceInference/LmOpenHuggingFaceInference.node.js"
  },
+ "mcpClient": {
+ "className": "McpClient",
+ "sourcePath": "dist/nodes/mcp/McpClient/McpClient.node.js"
+ },
  "mcpClientTool": {
  "className": "McpClientTool",
  "sourcePath": "dist/nodes/mcp/McpClientTool/McpClientTool.node.js"

package/dist/nodes/agents/Agent/V3/AgentV3.node.js
@@ -21,8 +21,8 @@ __export(AgentV3_node_exports, {
  AgentV3: () => AgentV3
  });
  module.exports = __toCommonJS(AgentV3_node_exports);
- var import_descriptions = require("../../../../utils/descriptions");
  var import_n8n_workflow = require("n8n-workflow");
+ var import_descriptions = require("../../../../utils/descriptions");
  var import_description = require("../agents/ToolsAgent/V3/description");
  var import_execute = require("../agents/ToolsAgent/V3/execute");
  var import_utils = require("../utils");

package/dist/nodes/agents/Agent/V3/AgentV3.node.js.map
@@ -1 +1 @@
- {"version":3,"sources":["../../../../../nodes/agents/Agent/V3/AgentV3.node.ts"],"sourcesContent":["import {\n\tpromptTypeOptions,\n\ttextFromGuardrailsNode,\n\ttextFromPreviousNode,\n\ttextInput,\n} from '@utils/descriptions';\nimport { NodeConnectionTypes } from 'n8n-workflow';\nimport type {\n\tIExecuteFunctions,\n\tINodeExecutionData,\n\tINodeType,\n\tINodeTypeDescription,\n\tINodeTypeBaseDescription,\n\tEngineResponse,\n\tEngineRequest,\n} from 'n8n-workflow';\n\nimport { toolsAgentProperties } from '../agents/ToolsAgent/V3/description';\nimport type { RequestResponseMetadata } from '../agents/ToolsAgent/V3/execute';\nimport { toolsAgentExecute } from '../agents/ToolsAgent/V3/execute';\nimport { getInputs } from '../utils';\n\nexport class AgentV3 implements INodeType {\n\tdescription: INodeTypeDescription;\n\n\tconstructor(baseDescription: INodeTypeBaseDescription) {\n\t\tthis.description = {\n\t\t\t...baseDescription,\n\t\t\tversion: [3],\n\t\t\tdefaults: {\n\t\t\t\tname: 'AI Agent',\n\t\t\t\tcolor: '#404040',\n\t\t\t},\n\t\t\tinputs: `={{\n\t\t\t\t((hasOutputParser, needsFallback) => {\n\t\t\t\t\t${getInputs.toString()};\n\t\t\t\t\treturn getInputs(true, hasOutputParser, needsFallback);\n\t\t\t\t})($parameter.hasOutputParser === undefined || $parameter.hasOutputParser === true, $parameter.needsFallback !== undefined && $parameter.needsFallback === true)\n\t\t\t}}`,\n\t\t\toutputs: [NodeConnectionTypes.Main],\n\t\t\tproperties: [\n\t\t\t\t{\n\t\t\t\t\tdisplayName:\n\t\t\t\t\t\t'Tip: Get a feel for agents with our quick <a href=\"https://docs.n8n.io/advanced-ai/intro-tutorial/\" target=\"_blank\">tutorial</a> or see an <a href=\"/workflows/templates/1954\" target=\"_blank\">example</a> of how this node works',\n\t\t\t\t\tname: 'aiAgentStarterCallout',\n\t\t\t\t\ttype: 'callout',\n\t\t\t\t\tdefault: '',\n\t\t\t\t},\n\t\t\t\tpromptTypeOptions,\n\t\t\t\t{\n\t\t\t\t\t...textFromGuardrailsNode,\n\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\tshow: {\n\t\t\t\t\t\t\tpromptType: ['guardrails'],\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t...textFromPreviousNode,\n\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\tshow: {\n\t\t\t\t\t\t\tpromptType: ['auto'],\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t...textInput,\n\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\tshow: {\n\t\t\t\t\t\t\tpromptType: ['define'],\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tdisplayName: 'Require Specific Output Format',\n\t\t\t\t\tname: 'hasOutputParser',\n\t\t\t\t\ttype: 'boolean',\n\t\t\t\t\tdefault: false,\n\t\t\t\t\tnoDataExpression: true,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tdisplayName: `Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='${NodeConnectionTypes.AiOutputParser}'>output parser</a> on the canvas to specify the output format you require`,\n\t\t\t\t\tname: 'notice',\n\t\t\t\t\ttype: 'notice',\n\t\t\t\t\tdefault: '',\n\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\tshow: {\n\t\t\t\t\t\t\thasOutputParser: [true],\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tdisplayName: 'Enable Fallback Model',\n\t\t\t\t\tname: 'needsFallback',\n\t\t\t\t\ttype: 'boolean',\n\t\t\t\t\tdefault: false,\n\t\t\t\t\tnoDataExpression: true,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tdisplayName:\n\t\t\t\t\t\t'Connect an additional language model on the canvas to use it as a fallback if the main model fails',\n\t\t\t\t\tname: 'fallbackNotice',\n\t\t\t\t\ttype: 'notice',\n\t\t\t\t\tdefault: '',\n\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\tshow: 
{\n\t\t\t\t\t\t\tneedsFallback: [true],\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\ttoolsAgentProperties,\n\t\t\t],\n\t\t\thints: [\n\t\t\t\t{\n\t\t\t\t\tmessage:\n\t\t\t\t\t\t'You are using streaming responses. Make sure to set the response mode to \"Streaming Response\" on the connected trigger node.',\n\t\t\t\t\ttype: 'warning',\n\t\t\t\t\tlocation: 'outputPane',\n\t\t\t\t\twhenToDisplay: 'afterExecution',\n\t\t\t\t\tdisplayCondition: '={{ $parameter[\"enableStreaming\"] === true }}',\n\t\t\t\t},\n\t\t\t],\n\t\t};\n\t}\n\n\tasync execute(\n\t\tthis: IExecuteFunctions,\n\t\tresponse?: EngineResponse<RequestResponseMetadata>,\n\t): Promise<INodeExecutionData[][] | EngineRequest<RequestResponseMetadata>> {\n\t\treturn await toolsAgentExecute.call(this, response);\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,0BAKO;AACP,0BAAoC;AAWpC,yBAAqC;AAErC,qBAAkC;AAClC,mBAA0B;AAEnB,MAAM,QAA6B;AAAA,EAGzC,YAAY,iBAA2C;AACtD,SAAK,cAAc;AAAA,MAClB,GAAG;AAAA,MACH,SAAS,CAAC,CAAC;AAAA,MACX,UAAU;AAAA,QACT,MAAM;AAAA,QACN,OAAO;AAAA,MACR;AAAA,MACA,QAAQ;AAAA;AAAA,OAEJ,uBAAU,SAAS,CAAC;AAAA;AAAA;AAAA;AAAA,MAIxB,SAAS,CAAC,wCAAoB,IAAI;AAAA,MAClC,YAAY;AAAA,QACX;AAAA,UACC,aACC;AAAA,UACD,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,QACV;AAAA,QACA;AAAA,QACA;AAAA,UACC,GAAG;AAAA,UACH,gBAAgB;AAAA,YACf,MAAM;AAAA,cACL,YAAY,CAAC,YAAY;AAAA,YAC1B;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,UACC,GAAG;AAAA,UACH,gBAAgB;AAAA,YACf,MAAM;AAAA,cACL,YAAY,CAAC,MAAM;AAAA,YACpB;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,UACC,GAAG;AAAA,UACH,gBAAgB;AAAA,YACf,MAAM;AAAA,cACL,YAAY,CAAC,QAAQ;AAAA,YACtB;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,UACT,kBAAkB;AAAA,QACnB;AAAA,QACA;AAAA,UACC,aAAa,8FAA8F,wCAAoB,cAAc;AAAA,UAC7I,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,UACT,gBAAgB;AAAA,YACf,MAAM;AAAA,cACL,iBAAiB,CAAC,IAAI;AAAA,YACvB;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,UACT,kBAAkB;AAAA,QACnB;AAAA,QACA;AAAA,UACC,aACC;AAAA,UACD,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,UACT,gBAAgB;AAAA,YACf,MAAM;AAAA,cACL,eAAe,CAAC,IAAI;AAAA,YACrB;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,MACD;AAAA,MACA,OAAO;AAAA,QACN;AAAA,UACC,SACC;AAAA,UACD,MAAM;AAAA,UACN,UAAU;AAAA,UACV,eAAe;AAAA,UACf,kBAAkB;AAAA,QACnB;AAAA,MACD;AAAA,IACD;AAAA,EACD;AAAA,EAEA,MAAM,QAEL,UAC2E;AAC3E,WAAO,MAAM,iCAAkB,KAAK,MAAM,QAAQ;AAAA,EACnD;AACD;","names":[]}
+ {"version":3,"sources":["../../../../../nodes/agents/Agent/V3/AgentV3.node.ts"],"sourcesContent":["import { NodeConnectionTypes } from 'n8n-workflow';\nimport type {\n\tIExecuteFunctions,\n\tINodeExecutionData,\n\tINodeType,\n\tINodeTypeDescription,\n\tINodeTypeBaseDescription,\n\tEngineResponse,\n\tEngineRequest,\n} from 'n8n-workflow';\n\nimport {\n\tpromptTypeOptions,\n\ttextFromGuardrailsNode,\n\ttextFromPreviousNode,\n\ttextInput,\n} from '@utils/descriptions';\n\nimport { toolsAgentProperties } from '../agents/ToolsAgent/V3/description';\nimport type { RequestResponseMetadata } from '../agents/ToolsAgent/V3/execute';\nimport { toolsAgentExecute } from '../agents/ToolsAgent/V3/execute';\nimport { getInputs } from '../utils';\n\nexport class AgentV3 implements INodeType {\n\tdescription: INodeTypeDescription;\n\n\tconstructor(baseDescription: INodeTypeBaseDescription) {\n\t\tthis.description = {\n\t\t\t...baseDescription,\n\t\t\tversion: [3],\n\t\t\tdefaults: {\n\t\t\t\tname: 'AI Agent',\n\t\t\t\tcolor: '#404040',\n\t\t\t},\n\t\t\tinputs: `={{\n\t\t\t\t((hasOutputParser, needsFallback) => {\n\t\t\t\t\t${getInputs.toString()};\n\t\t\t\t\treturn getInputs(true, hasOutputParser, needsFallback);\n\t\t\t\t})($parameter.hasOutputParser === undefined || $parameter.hasOutputParser === true, $parameter.needsFallback !== undefined && $parameter.needsFallback === true)\n\t\t\t}}`,\n\t\t\toutputs: [NodeConnectionTypes.Main],\n\t\t\tproperties: [\n\t\t\t\t{\n\t\t\t\t\tdisplayName:\n\t\t\t\t\t\t'Tip: Get a feel for agents with our quick <a href=\"https://docs.n8n.io/advanced-ai/intro-tutorial/\" target=\"_blank\">tutorial</a> or see an <a href=\"/workflows/templates/1954\" target=\"_blank\">example</a> of how this node works',\n\t\t\t\t\tname: 'aiAgentStarterCallout',\n\t\t\t\t\ttype: 'callout',\n\t\t\t\t\tdefault: '',\n\t\t\t\t},\n\t\t\t\tpromptTypeOptions,\n\t\t\t\t{\n\t\t\t\t\t...textFromGuardrailsNode,\n\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\tshow: {\n\t\t\t\t\t\t\tpromptType: ['guardrails'],\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t...textFromPreviousNode,\n\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\tshow: {\n\t\t\t\t\t\t\tpromptType: ['auto'],\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t...textInput,\n\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\tshow: {\n\t\t\t\t\t\t\tpromptType: ['define'],\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tdisplayName: 'Require Specific Output Format',\n\t\t\t\t\tname: 'hasOutputParser',\n\t\t\t\t\ttype: 'boolean',\n\t\t\t\t\tdefault: false,\n\t\t\t\t\tnoDataExpression: true,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tdisplayName: `Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='${NodeConnectionTypes.AiOutputParser}'>output parser</a> on the canvas to specify the output format you require`,\n\t\t\t\t\tname: 'notice',\n\t\t\t\t\ttype: 'notice',\n\t\t\t\t\tdefault: '',\n\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\tshow: {\n\t\t\t\t\t\t\thasOutputParser: [true],\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tdisplayName: 'Enable Fallback Model',\n\t\t\t\t\tname: 'needsFallback',\n\t\t\t\t\ttype: 'boolean',\n\t\t\t\t\tdefault: false,\n\t\t\t\t\tnoDataExpression: true,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tdisplayName:\n\t\t\t\t\t\t'Connect an additional language model on the canvas to use it as a fallback if the main model fails',\n\t\t\t\t\tname: 'fallbackNotice',\n\t\t\t\t\ttype: 'notice',\n\t\t\t\t\tdefault: '',\n\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\tshow: 
{\n\t\t\t\t\t\t\tneedsFallback: [true],\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\ttoolsAgentProperties,\n\t\t\t],\n\t\t\thints: [\n\t\t\t\t{\n\t\t\t\t\tmessage:\n\t\t\t\t\t\t'You are using streaming responses. Make sure to set the response mode to \"Streaming Response\" on the connected trigger node.',\n\t\t\t\t\ttype: 'warning',\n\t\t\t\t\tlocation: 'outputPane',\n\t\t\t\t\twhenToDisplay: 'afterExecution',\n\t\t\t\t\tdisplayCondition: '={{ $parameter[\"enableStreaming\"] === true }}',\n\t\t\t\t},\n\t\t\t],\n\t\t};\n\t}\n\n\tasync execute(\n\t\tthis: IExecuteFunctions,\n\t\tresponse?: EngineResponse<RequestResponseMetadata>,\n\t): Promise<INodeExecutionData[][] | EngineRequest<RequestResponseMetadata>> {\n\t\treturn await toolsAgentExecute.call(this, response);\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,0BAAoC;AAWpC,0BAKO;AAEP,yBAAqC;AAErC,qBAAkC;AAClC,mBAA0B;AAEnB,MAAM,QAA6B;AAAA,EAGzC,YAAY,iBAA2C;AACtD,SAAK,cAAc;AAAA,MAClB,GAAG;AAAA,MACH,SAAS,CAAC,CAAC;AAAA,MACX,UAAU;AAAA,QACT,MAAM;AAAA,QACN,OAAO;AAAA,MACR;AAAA,MACA,QAAQ;AAAA;AAAA,OAEJ,uBAAU,SAAS,CAAC;AAAA;AAAA;AAAA;AAAA,MAIxB,SAAS,CAAC,wCAAoB,IAAI;AAAA,MAClC,YAAY;AAAA,QACX;AAAA,UACC,aACC;AAAA,UACD,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,QACV;AAAA,QACA;AAAA,QACA;AAAA,UACC,GAAG;AAAA,UACH,gBAAgB;AAAA,YACf,MAAM;AAAA,cACL,YAAY,CAAC,YAAY;AAAA,YAC1B;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,UACC,GAAG;AAAA,UACH,gBAAgB;AAAA,YACf,MAAM;AAAA,cACL,YAAY,CAAC,MAAM;AAAA,YACpB;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,UACC,GAAG;AAAA,UACH,gBAAgB;AAAA,YACf,MAAM;AAAA,cACL,YAAY,CAAC,QAAQ;AAAA,YACtB;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,UACT,kBAAkB;AAAA,QACnB;AAAA,QACA;AAAA,UACC,aAAa,8FAA8F,wCAAoB,cAAc;AAAA,UAC7I,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,UACT,gBAAgB;AAAA,YACf,MAAM;AAAA,cACL,iBAAiB,CAAC,IAAI;AAAA,YACvB;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,UACT,kBAAkB;AAAA,QACnB;AAAA,QACA;AAAA,UACC,aACC;AAAA,UACD,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,UACT,gBAAgB;AAAA,YACf,MAAM;AAAA,cACL,eAAe,CAAC,IAAI;AAAA,YACrB;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,MACD;AAAA,MACA,OAAO;AAAA,QACN;AAAA,UACC,SACC;AAAA,UACD,MAAM;AAAA,UACN,UAAU;AAAA,UACV,eAAe;AAAA,UACf,kBAAkB;AAAA,QACnB;AAAA,MACD;AAAA,IACD;AAAA,EACD;AAAA,EAEA,MAAM,QAEL,UAC2E;AAC3E,WAAO,MAAM,iCAAkB,KAAK,MAAM,QAAQ;AAAA,EACnD;AACD;","names":[]}

package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/execute.js
@@ -1,9 +1,7 @@
  "use strict";
- var __create = Object.create;
  var __defProp = Object.defineProperty;
  var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
  var __getOwnPropNames = Object.getOwnPropertyNames;
- var __getProtoOf = Object.getPrototypeOf;
  var __hasOwnProp = Object.prototype.hasOwnProperty;
  var __export = (target, all) => {
  for (var name in all)
@@ -17,403 +15,41 @@ var __copyProps = (to, from, except, desc) => {
  }
  return to;
  };
- var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
- // If the importer is in node compatibility mode or this is not an ESM
- // file that has been converted to a CommonJS file using a Babel-
- // compatible transform (i.e. "__esModule" has not been set), then set
- // "default" to the CommonJS "module.exports" for node compatibility.
- isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
- mod
- ));
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
  var execute_exports = {};
  __export(execute_exports, {
  toolsAgentExecute: () => toolsAgentExecute
  });
  module.exports = __toCommonJS(execute_exports);
- var import_messages = require("@langchain/core/messages");
- var import_runnables = require("@langchain/core/runnables");
- var import_agents = require("langchain/agents");
- var import_omit = __toESM(require("lodash/omit"));
  var import_n8n_workflow = require("n8n-workflow");
- var import_node_assert = __toESM(require("node:assert"));
- var import_helpers = require("../../../../../../utils/helpers");
- var import_N8nOutputParser = require("../../../../../../utils/output_parsers/N8nOutputParser");
- var import_common = require("../common");
- var import_prompt = require("../prompt");
- async function createEngineRequests(toolCalls, itemIndex, tools) {
- return toolCalls.map((toolCall) => {
- const foundTool = tools.find((tool) => tool.name === toolCall.tool);
- if (!foundTool) return;
- const nodeName = foundTool.metadata?.sourceNodeName;
- const input = foundTool.metadata?.isFromToolkit ? { ...toolCall.toolInput, tool: toolCall.tool } : toolCall.toolInput;
- return {
- nodeName,
- input,
- type: import_n8n_workflow.NodeConnectionTypes.AiTool,
- id: toolCall.toolCallId,
- metadata: {
- itemIndex
- }
- };
- });
- }
- function getAllTools(model, tools) {
- const modelTools = model.metadata?.tools ?? [];
- const allTools = [...tools, ...modelTools];
- return allTools;
- }
- function createAgentSequence(model, tools, prompt, _options, outputParser, memory, fallbackModel) {
- const agent = (0, import_agents.createToolCallingAgent)({
- llm: model,
- tools: getAllTools(model, tools),
- prompt,
- streamRunnable: false
- });
- let fallbackAgent;
- if (fallbackModel) {
- fallbackAgent = (0, import_agents.createToolCallingAgent)({
- llm: fallbackModel,
- tools: getAllTools(fallbackModel, tools),
- prompt,
- streamRunnable: false
- });
- }
- const runnableAgent = import_runnables.RunnableSequence.from([
- fallbackAgent ? agent.withFallbacks([fallbackAgent]) : agent,
- (0, import_common.getAgentStepsParser)(outputParser, memory),
- import_common.fixEmptyContentMessage
- ]);
- runnableAgent.singleAction = true;
- runnableAgent.streamRunnable = false;
- return runnableAgent;
- }
- async function processEventStream(ctx, eventStream, itemIndex, returnIntermediateSteps = false, memory, input) {
- const agentResult = {
- output: ""
- };
- if (returnIntermediateSteps) {
- agentResult.intermediateSteps = [];
- }
- const toolCalls = [];
- ctx.sendChunk("begin", itemIndex);
- for await (const event of eventStream) {
- switch (event.event) {
- case "on_chat_model_stream":
- const chunk = event.data?.chunk;
- if (chunk?.content) {
- const chunkContent = chunk.content;
- let chunkText = "";
- if (Array.isArray(chunkContent)) {
- for (const message of chunkContent) {
- if (message?.type === "text") {
- chunkText += message?.text;
- }
- }
- } else if (typeof chunkContent === "string") {
- chunkText = chunkContent;
- }
- ctx.sendChunk("item", itemIndex, chunkText);
- agentResult.output += chunkText;
- }
- break;
- case "on_chat_model_end":
- if (event.data) {
- const chatModelData = event.data;
- const output = chatModelData.output;
- if (output?.tool_calls && output.tool_calls.length > 0) {
- for (const toolCall of output.tool_calls) {
- toolCalls.push({
- tool: toolCall.name,
- toolInput: toolCall.args,
- toolCallId: toolCall.id || "unknown",
- type: toolCall.type || "tool_call",
- log: output.content || `Calling ${toolCall.name} with input: ${JSON.stringify(toolCall.args)}`,
- messageLog: [output]
- });
- }
- if (returnIntermediateSteps) {
- for (const toolCall of output.tool_calls) {
- agentResult.intermediateSteps.push({
- action: {
- tool: toolCall.name,
- toolInput: toolCall.args,
- log: output.content || `Calling ${toolCall.name} with input: ${JSON.stringify(toolCall.args)}`,
- messageLog: [output],
- // Include the full LLM response
- toolCallId: toolCall.id || "unknown",
- type: toolCall.type || "tool_call"
- }
- });
- }
- }
- }
- }
- break;
- case "on_tool_end":
- if (returnIntermediateSteps && event.data && agentResult.intermediateSteps.length > 0) {
- const toolData = event.data;
- const matchingStep = agentResult.intermediateSteps.find(
- (step) => !step.observation && step.action.tool === event.name
- );
- if (matchingStep) {
- matchingStep.observation = toolData.output || "";
- }
- }
- break;
- default:
- break;
- }
- }
- ctx.sendChunk("end", itemIndex);
- if (memory && input && agentResult.output) {
- await memory.saveContext({ input }, { output: agentResult.output });
- }
- if (toolCalls.length > 0) {
- agentResult.toolCalls = toolCalls;
- }
- return agentResult;
- }
- function buildSteps(response, itemIndex) {
- const steps = [];
- if (response) {
- const responses = response?.actionResponses ?? [];
- if (response.metadata?.previousRequests) {
- steps.push(...response.metadata.previousRequests);
- }
- for (const tool of responses) {
- if (tool.action?.metadata?.itemIndex !== itemIndex) continue;
- const toolInput = {
- ...tool.action.input,
- id: tool.action.id
- };
- if (!toolInput || !tool.data) {
- continue;
- }
- const step = steps.find((step2) => step2.action.toolCallId === toolInput.id);
- if (step) {
- continue;
- }
- const syntheticAIMessage = new import_messages.AIMessage({
- content: `Calling ${tool.action.nodeName} with input: ${JSON.stringify(toolInput)}`,
- tool_calls: [
- {
- id: toolInput?.id ?? "reconstructed_call",
- name: (0, import_n8n_workflow.nodeNameToToolName)(tool.action.nodeName),
- args: toolInput,
- type: "tool_call"
- }
- ]
- });
- const toolResult = {
- action: {
- tool: (0, import_n8n_workflow.nodeNameToToolName)(tool.action.nodeName),
- toolInput: toolInput.input || {},
- log: toolInput.log || syntheticAIMessage.content,
- messageLog: [syntheticAIMessage],
- toolCallId: toolInput?.id,
- type: toolInput.type || "tool_call"
- },
- observation: JSON.stringify(tool.data?.data?.ai_tool?.[0]?.map((item) => item?.json) ?? "")
- };
- steps.push(toolResult);
- }
- }
- return steps;
- }
+ var import_helpers = require("./helpers");
  async function toolsAgentExecute(response) {
  this.logger.debug("Executing Tools Agent V3");
+ const maxIterations = this.getNodeParameter("options.maxIterations", 0, 10);
+ (0, import_helpers.checkMaxIterations)(response, maxIterations, this.getNode());
  const returnData = [];
  let request = void 0;
- const items = this.getInputData();
- const batchSize = this.getNodeParameter("options.batching.batchSize", 0, 1);
- const delayBetweenBatches = this.getNodeParameter(
- "options.batching.delayBetweenBatches",
- 0,
- 0
- );
- const needsFallback = this.getNodeParameter("needsFallback", 0, false);
- const memory = await (0, import_common.getOptionalMemory)(this);
- const model = await (0, import_common.getChatModel)(this, 0);
- (0, import_node_assert.default)(model, "Please connect a model to the Chat Model input");
- const fallbackModel = needsFallback ? await (0, import_common.getChatModel)(this, 1) : null;
- if (needsFallback && !fallbackModel) {
- throw new import_n8n_workflow.NodeOperationError(
- this.getNode(),
- "Please connect a model to the Fallback Model input or disable the fallback option"
- );
- }
+ const executionContext = await (0, import_helpers.buildExecutionContext)(this);
+ const { items, batchSize, delayBetweenBatches, model, fallbackModel, memory } = executionContext;
  for (let i = 0; i < items.length; i += batchSize) {
  const batch = items.slice(i, i + batchSize);
- const batchPromises = batch.map(async (_item, batchItemIndex) => {
- const itemIndex = i + batchItemIndex;
- if (response && response?.metadata?.itemIndex === itemIndex) {
- return null;
- }
- const steps = buildSteps(response, itemIndex);
- const input = (0, import_helpers.getPromptInputByType)({
- ctx: this,
- i: itemIndex,
- inputKey: "text",
- promptTypeKey: "promptType"
- });
- if (input === void 0) {
- throw new import_n8n_workflow.NodeOperationError(this.getNode(), 'The "text" parameter is empty.');
- }
- const outputParser2 = await (0, import_N8nOutputParser.getOptionalOutputParser)(this, itemIndex);
- const tools = await (0, import_common.getTools)(this, outputParser2);
- const options = this.getNodeParameter("options", itemIndex);
- if (options.enableStreaming === void 0) {
- options.enableStreaming = true;
- }
- const messages = await (0, import_common.prepareMessages)(this, itemIndex, {
- systemMessage: options.systemMessage,
- passthroughBinaryImages: options.passthroughBinaryImages ?? true,
- outputParser: outputParser2
- });
- const prompt = (0, import_common.preparePrompt)(messages);
- const executor = createAgentSequence(
- model,
- tools,
- prompt,
- options,
- outputParser2,
- memory,
- fallbackModel
- );
- const invokeParams = {
- steps,
- input,
- system_message: options.systemMessage ?? import_prompt.SYSTEM_MESSAGE,
- formatting_instructions: "IMPORTANT: For your response to user, you MUST use the `format_final_json_response` tool with your complete answer formatted according to the required schema. Do not attempt to format the JSON manually - always use this tool. Your response will be rejected if it is not properly formatted through this tool. Only use this tool once you are ready to provide your final answer."
- };
- const executeOptions = { signal: this.getExecutionCancelSignal() };
- const isStreamingAvailable = "isStreaming" in this ? this.isStreaming?.() : void 0;
- if ("isStreaming" in this && options.enableStreaming && isStreamingAvailable && this.getNode().typeVersion >= 2.1) {
- let chatHistory = void 0;
- if (memory) {
- chatHistory = await loadChatHistory(memory, model, options.maxTokensFromMemory);
- }
- const eventStream = executor.streamEvents(
- {
- ...invokeParams,
- chat_history: chatHistory
- },
- {
- version: "v2",
- ...executeOptions
- }
- );
- const result = await processEventStream(
- this,
- eventStream,
- itemIndex,
- options.returnIntermediateSteps,
- memory,
- input
- );
- if (result.toolCalls && result.toolCalls.length > 0) {
- const currentIteration = (response?.metadata?.iterationCount ?? 0) + 1;
- if (options.maxIterations && currentIteration > options.maxIterations) {
- throw new import_n8n_workflow.NodeOperationError(this.getNode(), "Maximum iterations reached");
- }
- const actions = await createEngineRequests(result.toolCalls, itemIndex, tools);
- return {
- actions,
- metadata: {
- previousRequests: buildSteps(response, itemIndex),
- iterationCount: currentIteration
- }
- };
- }
- return result;
+ const { returnData: batchReturnData, request: batchRequest } = await (0, import_helpers.executeBatch)(
+ this,
+ batch,
+ i,
+ model,
+ fallbackModel,
+ memory,
+ response
+ );
+ returnData.push.apply(returnData, batchReturnData);
+ if (batchRequest) {
+ if (!request) {
+ request = batchRequest;
  } else {
- let chatHistory = void 0;
- if (memory) {
- chatHistory = await loadChatHistory(memory, model, options.maxTokensFromMemory);
- }
- const modelResponse = await executor.invoke({
- ...invokeParams,
- chat_history: chatHistory
- });
- if ("returnValues" in modelResponse) {
- if (memory && input && modelResponse.returnValues.output) {
- let fullOutput = modelResponse.returnValues.output;
- if (steps.length > 0) {
- const toolContext = steps.map(
- (step) => `Tool: ${step.action.tool}, Input: ${JSON.stringify(step.action.toolInput)}, Result: ${step.observation}`
- ).join("; ");
- fullOutput = `[Used tools: ${toolContext}] ${fullOutput}`;
- }
- await memory.saveContext({ input }, { output: fullOutput });
- }
- const result = { ...modelResponse.returnValues };
- if (options.returnIntermediateSteps && steps.length > 0) {
- result.intermediateSteps = steps;
- }
- return result;
- }
- const currentIteration = (response?.metadata?.iterationCount ?? 0) + 1;
- if (options.maxIterations && currentIteration > options.maxIterations) {
- throw new import_n8n_workflow.NodeOperationError(this.getNode(), "Maximum iterations reached");
- }
- const actions = await createEngineRequests(modelResponse, itemIndex, tools);
- return {
- actions,
- metadata: {
- previousRequests: buildSteps(response, itemIndex),
- iterationCount: currentIteration
- }
- };
- }
- });
- const batchResults = await Promise.allSettled(batchPromises);
- const outputParser = await (0, import_N8nOutputParser.getOptionalOutputParser)(this, 0);
- batchResults.forEach((result, index) => {
- const itemIndex = i + index;
- if (result.status === "rejected") {
- const error = result.reason;
- if (this.continueOnFail()) {
- returnData.push({
- json: { error: error.message },
- pairedItem: { item: itemIndex }
- });
- return;
- } else {
- throw new import_n8n_workflow.NodeOperationError(this.getNode(), error);
- }
- }
- const response2 = result.value;
- if ("actions" in response2) {
- if (!request) {
- request = {
- actions: response2.actions,
- metadata: response2.metadata
- };
- } else {
- request.actions.push(...response2.actions);
- }
- return;
- }
- if (memory && outputParser) {
- const parsedOutput = (0, import_n8n_workflow.jsonParse)(
- response2.output
- );
- response2.output = parsedOutput?.output ?? parsedOutput;
+ request.actions.push.apply(request.actions, batchRequest.actions);
  }
- const itemResult = {
- json: (0, import_omit.default)(
- response2,
- "system_message",
- "formatting_instructions",
- "input",
- "chat_history",
- "agent_scratchpad"
- ),
- pairedItem: { item: itemIndex }
- };
- returnData.push(itemResult);
- });
+ }
  if (i + batchSize < items.length && delayBetweenBatches > 0) {
  await (0, import_n8n_workflow.sleep)(delayBetweenBatches);
  }
@@ -423,21 +59,6 @@ async function toolsAgentExecute(response) {
  }
  return [returnData];
  }
- async function loadChatHistory(memory, model, maxTokensFromMemory) {
- const memoryVariables = await memory.loadMemoryVariables({});
- let chatHistory = memoryVariables["chat_history"];
- if (maxTokensFromMemory) {
- chatHistory = await (0, import_messages.trimMessages)(chatHistory, {
- strategy: "last",
- maxTokens: maxTokensFromMemory,
- tokenCounter: model,
- includeSystem: true,
- startOn: "human",
- allowPartial: true
- });
- }
- return chatHistory;
- }
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
  toolsAgentExecute
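
Taken together, the added lines above replace the inlined agent logic with helpers from dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/ (`buildExecutionContext`, `checkMaxIterations`, `executeBatch`, plus the split-out `processEventStream`, `createAgentSequence`, and related modules listed in the file table). A rough TypeScript sketch of the new control flow, with placeholder types and a hedged final return (the lines between the batching loop and `return [returnData];` are not shown in this hunk), looks like this:

// Sketch of the refactored toolsAgentExecute, reconstructed from the "+" lines.
// Types are simplified placeholders; the real helpers live next to execute.js.
import { buildExecutionContext, checkMaxIterations, executeBatch } from './helpers';

type EngineRequestLike = { actions: unknown[]; metadata?: unknown };

export async function toolsAgentExecute(this: any, response?: any) {
  this.logger.debug('Executing Tools Agent V3');

  // Iteration guard now runs once, up front, instead of per item.
  const maxIterations = this.getNodeParameter('options.maxIterations', 0, 10);
  checkMaxIterations(response, maxIterations, this.getNode());

  const returnData: unknown[] = [];
  let request: EngineRequestLike | undefined;

  // Model, fallback model, memory, and batching options are resolved in one helper.
  const { items, batchSize, delayBetweenBatches, model, fallbackModel, memory } =
    await buildExecutionContext(this);

  for (let i = 0; i < items.length; i += batchSize) {
    const batch = items.slice(i, i + batchSize);
    const { returnData: batchReturnData, request: batchRequest } = await executeBatch(
      this, batch, i, model, fallbackModel, memory, response,
    );
    returnData.push(...batchReturnData);
    // Tool-call requests from every batch are merged into a single engine request.
    if (batchRequest) {
      if (!request) request = batchRequest;
      else request.actions.push(...batchRequest.actions);
    }
    if (i + batchSize < items.length && delayBetweenBatches > 0) {
      await new Promise((resolve) => setTimeout(resolve, delayBetweenBatches));
    }
  }

  // Assumed: the engine request wins when present, otherwise the items are returned.
  return request ?? [returnData];
}

The per-item work that used to live in this file (prompt preparation, streaming, memory handling, engine-request construction) now sits behind `executeBatch` and the other helper modules, which is why this hunk is almost entirely deletions.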

package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/execute.js.map
@@ -1 +1 @@
- {"version":3,"sources":["../../../../../../../nodes/agents/Agent/agents/ToolsAgent/V3/execute.ts"],"sourcesContent":["import type { StreamEvent } from '@langchain/core/dist/tracers/event_stream';\nimport type { IterableReadableStream } from '@langchain/core/dist/utils/stream';\nimport type { BaseChatModel } from '@langchain/core/language_models/chat_models';\nimport type { AIMessageChunk, BaseMessage, MessageContentText } from '@langchain/core/messages';\nimport { AIMessage, trimMessages } from '@langchain/core/messages';\nimport type { ToolCall } from '@langchain/core/messages/tool';\nimport type { ChatPromptTemplate } from '@langchain/core/prompts';\nimport { RunnableSequence } from '@langchain/core/runnables';\nimport { type AgentRunnableSequence, createToolCallingAgent } from 'langchain/agents';\nimport type { BaseChatMemory } from 'langchain/memory';\nimport type { DynamicStructuredTool, Tool } from 'langchain/tools';\nimport omit from 'lodash/omit';\nimport {\n\tjsonParse,\n\tNodeConnectionTypes,\n\tnodeNameToToolName,\n\tNodeOperationError,\n\tsleep,\n} from 'n8n-workflow';\nimport type {\n\tEngineRequest,\n\tGenericValue,\n\tIDataObject,\n\tIExecuteFunctions,\n\tINodeExecutionData,\n\tISupplyDataFunctions,\n\tEngineResponse,\n} from 'n8n-workflow';\nimport assert from 'node:assert';\n\nimport { getPromptInputByType } from '@utils/helpers';\nimport {\n\tgetOptionalOutputParser,\n\ttype N8nOutputParser,\n} from '@utils/output_parsers/N8nOutputParser';\n\nimport {\n\tfixEmptyContentMessage,\n\tgetAgentStepsParser,\n\tgetChatModel,\n\tgetOptionalMemory,\n\tgetTools,\n\tprepareMessages,\n\tpreparePrompt,\n} from '../common';\nimport { SYSTEM_MESSAGE } from '../prompt';\n\ntype ToolCallRequest = {\n\ttool: string;\n\ttoolInput: Record<string, unknown>;\n\ttoolCallId: string;\n\ttype?: string;\n\tlog?: string;\n\tmessageLog?: unknown[];\n};\n\nasync function createEngineRequests(\n\ttoolCalls: ToolCallRequest[],\n\titemIndex: number,\n\ttools: Array<DynamicStructuredTool | Tool>,\n) {\n\treturn toolCalls.map((toolCall) => {\n\t\t// First try to get from metadata (for toolkit tools)\n\t\tconst foundTool = tools.find((tool) => tool.name === toolCall.tool);\n\n\t\tif (!foundTool) return;\n\n\t\tconst nodeName = foundTool.metadata?.sourceNodeName;\n\n\t\t// For toolkit tools, include the tool name so the node knows which tool to execute\n\t\tconst input = foundTool.metadata?.isFromToolkit\n\t\t\t? { ...toolCall.toolInput, tool: toolCall.tool }\n\t\t\t: toolCall.toolInput;\n\n\t\treturn {\n\t\t\tnodeName,\n\t\t\tinput,\n\t\t\ttype: NodeConnectionTypes.AiTool,\n\t\t\tid: toolCall.toolCallId,\n\t\t\tmetadata: {\n\t\t\t\titemIndex,\n\t\t\t},\n\t\t};\n\t});\n}\n\n/**\n * Uses provided tools and tried to get tools from model metadata\n * Some chat model nodes can define built-in tools in their metadata\n */\nfunction getAllTools(model: BaseChatModel, tools: Array<DynamicStructuredTool | Tool>) {\n\tconst modelTools = (model.metadata?.tools as Tool[]) ?? 
[];\n\tconst allTools = [...tools, ...modelTools];\n\treturn allTools;\n}\n\n/**\n * Creates an agent executor with the given configuration\n */\nfunction createAgentSequence(\n\tmodel: BaseChatModel,\n\ttools: Array<DynamicStructuredTool | Tool>,\n\tprompt: ChatPromptTemplate,\n\t_options: { maxIterations?: number; returnIntermediateSteps?: boolean },\n\toutputParser?: N8nOutputParser,\n\tmemory?: BaseChatMemory,\n\tfallbackModel?: BaseChatModel | null,\n) {\n\tconst agent = createToolCallingAgent({\n\t\tllm: model,\n\t\ttools: getAllTools(model, tools),\n\t\tprompt,\n\t\tstreamRunnable: false,\n\t});\n\n\tlet fallbackAgent: AgentRunnableSequence | undefined;\n\tif (fallbackModel) {\n\t\tfallbackAgent = createToolCallingAgent({\n\t\t\tllm: fallbackModel,\n\t\t\ttools: getAllTools(fallbackModel, tools),\n\t\t\tprompt,\n\t\t\tstreamRunnable: false,\n\t\t});\n\t}\n\tconst runnableAgent = RunnableSequence.from([\n\t\tfallbackAgent ? agent.withFallbacks([fallbackAgent]) : agent,\n\t\tgetAgentStepsParser(outputParser, memory),\n\t\tfixEmptyContentMessage,\n\t]) as AgentRunnableSequence;\n\n\trunnableAgent.singleAction = true;\n\trunnableAgent.streamRunnable = false;\n\n\treturn runnableAgent;\n}\n\ntype IntermediateStep = {\n\taction: {\n\t\ttool: string;\n\t\ttoolInput: Record<string, unknown>;\n\t\tlog: string;\n\t\tmessageLog: unknown[];\n\t\ttoolCallId: string;\n\t\ttype: string;\n\t};\n\tobservation?: string;\n};\n\ntype AgentResult = {\n\toutput: string;\n\tintermediateSteps?: IntermediateStep[];\n\ttoolCalls?: ToolCallRequest[];\n};\n\nasync function processEventStream(\n\tctx: IExecuteFunctions,\n\teventStream: IterableReadableStream<StreamEvent>,\n\titemIndex: number,\n\treturnIntermediateSteps: boolean = false,\n\tmemory?: BaseChatMemory,\n\tinput?: string,\n): Promise<AgentResult> {\n\tconst agentResult: AgentResult = {\n\t\toutput: '',\n\t};\n\n\tif (returnIntermediateSteps) {\n\t\tagentResult.intermediateSteps = [];\n\t}\n\n\tconst toolCalls: ToolCallRequest[] = [];\n\n\tctx.sendChunk('begin', itemIndex);\n\tfor await (const event of eventStream) {\n\t\t// Stream chat model tokens as they come in\n\t\tswitch (event.event) {\n\t\t\tcase 'on_chat_model_stream':\n\t\t\t\tconst chunk = event.data?.chunk as AIMessageChunk;\n\t\t\t\tif (chunk?.content) {\n\t\t\t\t\tconst chunkContent = chunk.content;\n\t\t\t\t\tlet chunkText = '';\n\t\t\t\t\tif (Array.isArray(chunkContent)) {\n\t\t\t\t\t\tfor (const message of chunkContent) {\n\t\t\t\t\t\t\tif (message?.type === 'text') {\n\t\t\t\t\t\t\t\tchunkText += (message as MessageContentText)?.text;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t} else if (typeof chunkContent === 'string') {\n\t\t\t\t\t\tchunkText = chunkContent;\n\t\t\t\t\t}\n\t\t\t\t\tctx.sendChunk('item', itemIndex, chunkText);\n\n\t\t\t\t\tagentResult.output += chunkText;\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\tcase 'on_chat_model_end':\n\t\t\t\t// Capture full LLM response with tool calls for intermediate steps\n\t\t\t\tif (event.data) {\n\t\t\t\t\tconst chatModelData = event.data as {\n\t\t\t\t\t\toutput?: { tool_calls?: ToolCall[]; content?: string };\n\t\t\t\t\t};\n\t\t\t\t\tconst output = chatModelData.output;\n\n\t\t\t\t\t// Check if this LLM response contains tool calls\n\t\t\t\t\tif (output?.tool_calls && output.tool_calls.length > 0) {\n\t\t\t\t\t\t// Collect tool calls for request building\n\t\t\t\t\t\tfor (const toolCall of output.tool_calls) {\n\t\t\t\t\t\t\ttoolCalls.push({\n\t\t\t\t\t\t\t\ttool: toolCall.name,\n\t\t\t\t\t\t\t\ttoolInput: 
toolCall.args,\n\t\t\t\t\t\t\t\ttoolCallId: toolCall.id || 'unknown',\n\t\t\t\t\t\t\t\ttype: toolCall.type || 'tool_call',\n\t\t\t\t\t\t\t\tlog:\n\t\t\t\t\t\t\t\t\toutput.content ||\n\t\t\t\t\t\t\t\t\t`Calling ${toolCall.name} with input: ${JSON.stringify(toolCall.args)}`,\n\t\t\t\t\t\t\t\tmessageLog: [output],\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// Also add to intermediate steps if needed\n\t\t\t\t\t\tif (returnIntermediateSteps) {\n\t\t\t\t\t\t\tfor (const toolCall of output.tool_calls) {\n\t\t\t\t\t\t\t\tagentResult.intermediateSteps!.push({\n\t\t\t\t\t\t\t\t\taction: {\n\t\t\t\t\t\t\t\t\t\ttool: toolCall.name,\n\t\t\t\t\t\t\t\t\t\ttoolInput: toolCall.args,\n\t\t\t\t\t\t\t\t\t\tlog:\n\t\t\t\t\t\t\t\t\t\t\toutput.content ||\n\t\t\t\t\t\t\t\t\t\t\t`Calling ${toolCall.name} with input: ${JSON.stringify(toolCall.args)}`,\n\t\t\t\t\t\t\t\t\t\tmessageLog: [output], // Include the full LLM response\n\t\t\t\t\t\t\t\t\t\ttoolCallId: toolCall.id || 'unknown',\n\t\t\t\t\t\t\t\t\t\ttype: toolCall.type || 'tool_call',\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\tcase 'on_tool_end':\n\t\t\t\t// Capture tool execution results and match with action\n\t\t\t\tif (returnIntermediateSteps && event.data && agentResult.intermediateSteps!.length > 0) {\n\t\t\t\t\tconst toolData = event.data as { output?: string };\n\t\t\t\t\t// Find the matching intermediate step for this tool call\n\t\t\t\t\tconst matchingStep = agentResult.intermediateSteps!.find(\n\t\t\t\t\t\t(step) => !step.observation && step.action.tool === event.name,\n\t\t\t\t\t);\n\t\t\t\t\tif (matchingStep) {\n\t\t\t\t\t\tmatchingStep.observation = toolData.output || '';\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\tdefault:\n\t\t\t\tbreak;\n\t\t}\n\t}\n\tctx.sendChunk('end', itemIndex);\n\n\t// Save conversation to memory if memory is connected\n\tif (memory && input && agentResult.output) {\n\t\tawait memory.saveContext({ input }, { output: agentResult.output });\n\t}\n\n\t// Include collected tool calls in the result\n\tif (toolCalls.length > 0) {\n\t\tagentResult.toolCalls = toolCalls;\n\t}\n\n\treturn agentResult;\n}\n\nexport type RequestResponseMetadata = {\n\titemIndex?: number;\n\tpreviousRequests: ToolCallData[];\n\titerationCount?: number;\n};\n\ntype ToolCallData = {\n\taction: {\n\t\ttool: string;\n\t\ttoolInput: Record<string, unknown>;\n\t\tlog: string | number | true | object;\n\t\ttoolCallId: IDataObject | GenericValue | GenericValue[] | IDataObject[];\n\t\ttype: string | number | true | object;\n\t};\n\tobservation: string;\n};\n\nfunction buildSteps(\n\tresponse: EngineResponse<RequestResponseMetadata> | undefined,\n\titemIndex: number,\n): ToolCallData[] {\n\tconst steps: ToolCallData[] = [];\n\n\tif (response) {\n\t\tconst responses = response?.actionResponses ?? 
[];\n\n\t\tif (response.metadata?.previousRequests) {\n\t\t\tsteps.push(...response.metadata.previousRequests);\n\t\t}\n\n\t\tfor (const tool of responses) {\n\t\t\tif (tool.action?.metadata?.itemIndex !== itemIndex) continue;\n\n\t\t\tconst toolInput: IDataObject = {\n\t\t\t\t...tool.action.input,\n\t\t\t\tid: tool.action.id,\n\t\t\t};\n\t\t\tif (!toolInput || !tool.data) {\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\tconst step = steps.find((step) => step.action.toolCallId === toolInput.id);\n\t\t\tif (step) {\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\t// Create a synthetic AI message for the messageLog\n\t\t\t// This represents the AI's decision to call the tool\n\t\t\tconst syntheticAIMessage = new AIMessage({\n\t\t\t\tcontent: `Calling ${tool.action.nodeName} with input: ${JSON.stringify(toolInput)}`,\n\t\t\t\ttool_calls: [\n\t\t\t\t\t{\n\t\t\t\t\t\tid: (toolInput?.id as string) ?? 'reconstructed_call',\n\t\t\t\t\t\tname: nodeNameToToolName(tool.action.nodeName),\n\t\t\t\t\t\targs: toolInput,\n\t\t\t\t\t\ttype: 'tool_call',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t});\n\n\t\t\tconst toolResult = {\n\t\t\t\taction: {\n\t\t\t\t\ttool: nodeNameToToolName(tool.action.nodeName),\n\t\t\t\t\ttoolInput: (toolInput.input as IDataObject) || {},\n\t\t\t\t\tlog: toolInput.log || syntheticAIMessage.content,\n\t\t\t\t\tmessageLog: [syntheticAIMessage],\n\t\t\t\t\ttoolCallId: toolInput?.id,\n\t\t\t\t\ttype: toolInput.type || 'tool_call',\n\t\t\t\t},\n\t\t\t\tobservation: JSON.stringify(tool.data?.data?.ai_tool?.[0]?.map((item) => item?.json) ?? ''),\n\t\t\t};\n\n\t\t\tsteps.push(toolResult);\n\t\t}\n\t}\n\treturn steps;\n}\n\n/* -----------------------------------------------------------\n Main Executor Function\n----------------------------------------------------------- */\n/**\n * The main executor method for the Tools Agent.\n *\n * This function retrieves necessary components (model, memory, tools), prepares the prompt,\n * creates the agent, and processes each input item. The error handling for each item is also\n * managed here based on the node's continueOnFail setting.\n *\n * @param this Execute context. SupplyDataContext is passed when agent is as a tool\n *\n * @returns The array of execution data for all processed items\n */\nexport async function toolsAgentExecute(\n\tthis: IExecuteFunctions | ISupplyDataFunctions,\n\tresponse?: EngineResponse<RequestResponseMetadata>,\n): Promise<INodeExecutionData[][] | EngineRequest<RequestResponseMetadata>> {\n\tthis.logger.debug('Executing Tools Agent V3');\n\n\tconst returnData: INodeExecutionData[] = [];\n\tlet request: EngineRequest<RequestResponseMetadata> | undefined = undefined;\n\n\tconst items = this.getInputData();\n\tconst batchSize = this.getNodeParameter('options.batching.batchSize', 0, 1) as number;\n\tconst delayBetweenBatches = this.getNodeParameter(\n\t\t'options.batching.delayBetweenBatches',\n\t\t0,\n\t\t0,\n\t) as number;\n\tconst needsFallback = this.getNodeParameter('needsFallback', 0, false) as boolean;\n\tconst memory = await getOptionalMemory(this);\n\tconst model = await getChatModel(this, 0);\n\tassert(model, 'Please connect a model to the Chat Model input');\n\tconst fallbackModel = needsFallback ? 
await getChatModel(this, 1) : null;\n\n\tif (needsFallback && !fallbackModel) {\n\t\tthrow new NodeOperationError(\n\t\t\tthis.getNode(),\n\t\t\t'Please connect a model to the Fallback Model input or disable the fallback option',\n\t\t);\n\t}\n\n\tfor (let i = 0; i < items.length; i += batchSize) {\n\t\tconst batch = items.slice(i, i + batchSize);\n\t\tconst batchPromises = batch.map(async (_item, batchItemIndex) => {\n\t\t\tconst itemIndex = i + batchItemIndex;\n\n\t\t\tif (response && response?.metadata?.itemIndex === itemIndex) {\n\t\t\t\treturn null;\n\t\t\t}\n\n\t\t\tconst steps = buildSteps(response, itemIndex);\n\n\t\t\tconst input = getPromptInputByType({\n\t\t\t\tctx: this,\n\t\t\t\ti: itemIndex,\n\t\t\t\tinputKey: 'text',\n\t\t\t\tpromptTypeKey: 'promptType',\n\t\t\t});\n\t\t\tif (input === undefined) {\n\t\t\t\tthrow new NodeOperationError(this.getNode(), 'The \"text\" parameter is empty.');\n\t\t\t}\n\t\t\tconst outputParser = await getOptionalOutputParser(this, itemIndex);\n\t\t\tconst tools = await getTools(this, outputParser);\n\t\t\tconst options = this.getNodeParameter('options', itemIndex) as {\n\t\t\t\tsystemMessage?: string;\n\t\t\t\tmaxIterations?: number;\n\t\t\t\treturnIntermediateSteps?: boolean;\n\t\t\t\tpassthroughBinaryImages?: boolean;\n\t\t\t\tenableStreaming?: boolean;\n\t\t\t\tmaxTokensFromMemory?: number;\n\t\t\t};\n\n\t\t\tif (options.enableStreaming === undefined) {\n\t\t\t\toptions.enableStreaming = true;\n\t\t\t}\n\n\t\t\t// Prepare the prompt messages and prompt template.\n\t\t\tconst messages = await prepareMessages(this, itemIndex, {\n\t\t\t\tsystemMessage: options.systemMessage,\n\t\t\t\tpassthroughBinaryImages: options.passthroughBinaryImages ?? true,\n\t\t\t\toutputParser,\n\t\t\t});\n\t\t\tconst prompt: ChatPromptTemplate = preparePrompt(messages);\n\n\t\t\t// Create executors for primary and fallback models\n\t\t\tconst executor = createAgentSequence(\n\t\t\t\tmodel,\n\t\t\t\ttools,\n\t\t\t\tprompt,\n\t\t\t\toptions,\n\t\t\t\toutputParser,\n\t\t\t\tmemory,\n\t\t\t\tfallbackModel,\n\t\t\t);\n\t\t\t// Invoke with fallback logic\n\t\t\tconst invokeParams = {\n\t\t\t\tsteps,\n\t\t\t\tinput,\n\t\t\t\tsystem_message: options.systemMessage ?? SYSTEM_MESSAGE,\n\t\t\t\tformatting_instructions:\n\t\t\t\t\t'IMPORTANT: For your response to user, you MUST use the `format_final_json_response` tool with your complete answer formatted according to the required schema. Do not attempt to format the JSON manually - always use this tool. Your response will be rejected if it is not properly formatted through this tool. Only use this tool once you are ready to provide your final answer.',\n\t\t\t};\n\t\t\tconst executeOptions = { signal: this.getExecutionCancelSignal() };\n\n\t\t\t// Check if streaming is actually available\n\t\t\tconst isStreamingAvailable = 'isStreaming' in this ? 
this.isStreaming?.() : undefined;\n\n\t\t\tif (\n\t\t\t\t'isStreaming' in this &&\n\t\t\t\toptions.enableStreaming &&\n\t\t\t\tisStreamingAvailable &&\n\t\t\t\tthis.getNode().typeVersion >= 2.1\n\t\t\t) {\n\t\t\t\tlet chatHistory: BaseMessage[] | undefined = undefined;\n\t\t\t\tif (memory) {\n\t\t\t\t\t// Load memory variables to respect context window length\n\t\t\t\t\tchatHistory = await loadChatHistory(memory, model, options.maxTokensFromMemory);\n\t\t\t\t}\n\t\t\t\tconst eventStream = executor.streamEvents(\n\t\t\t\t\t{\n\t\t\t\t\t\t...invokeParams,\n\t\t\t\t\t\tchat_history: chatHistory,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tversion: 'v2',\n\t\t\t\t\t\t...executeOptions,\n\t\t\t\t\t},\n\t\t\t\t);\n\n\t\t\t\tconst result = await processEventStream(\n\t\t\t\t\tthis,\n\t\t\t\t\teventStream,\n\t\t\t\t\titemIndex,\n\t\t\t\t\toptions.returnIntermediateSteps,\n\t\t\t\t\tmemory,\n\t\t\t\t\tinput,\n\t\t\t\t);\n\n\t\t\t\t// If result contains tool calls, build the request object like the normal flow\n\t\t\t\tif (result.toolCalls && result.toolCalls.length > 0) {\n\t\t\t\t\tconst currentIteration = (response?.metadata?.iterationCount ?? 0) + 1;\n\n\t\t\t\t\t// Check if we've exceeded maxIterations\n\t\t\t\t\tif (options.maxIterations && currentIteration > options.maxIterations) {\n\t\t\t\t\t\tthrow new NodeOperationError(this.getNode(), 'Maximum iterations reached');\n\t\t\t\t\t}\n\n\t\t\t\t\tconst actions = await createEngineRequests(result.toolCalls, itemIndex, tools);\n\n\t\t\t\t\treturn {\n\t\t\t\t\t\tactions,\n\t\t\t\t\t\tmetadata: {\n\t\t\t\t\t\t\tpreviousRequests: buildSteps(response, itemIndex),\n\t\t\t\t\t\t\titerationCount: currentIteration,\n\t\t\t\t\t\t},\n\t\t\t\t\t};\n\t\t\t\t}\n\n\t\t\t\treturn result;\n\t\t\t} else {\n\t\t\t\t// Handle regular execution\n\t\t\t\tlet chatHistory: BaseMessage[] | undefined = undefined;\n\t\t\t\tif (memory) {\n\t\t\t\t\t// Load memory variables to respect context window length\n\t\t\t\t\tchatHistory = await loadChatHistory(memory, model, options.maxTokensFromMemory);\n\t\t\t\t}\n\t\t\t\tconst modelResponse = await executor.invoke({\n\t\t\t\t\t...invokeParams,\n\t\t\t\t\tchat_history: chatHistory,\n\t\t\t\t});\n\n\t\t\t\tif ('returnValues' in modelResponse) {\n\t\t\t\t\t// Save conversation to memory including any tool call context\n\t\t\t\t\tif (memory && input && modelResponse.returnValues.output) {\n\t\t\t\t\t\t// If there were tool calls in this conversation, include them in the context\n\t\t\t\t\t\tlet fullOutput = modelResponse.returnValues.output as string;\n\n\t\t\t\t\t\tif (steps.length > 0) {\n\t\t\t\t\t\t\t// Include tool call information in the conversation context\n\t\t\t\t\t\t\tconst toolContext = steps\n\t\t\t\t\t\t\t\t.map(\n\t\t\t\t\t\t\t\t\t(step) =>\n\t\t\t\t\t\t\t\t\t\t`Tool: ${step.action.tool}, Input: ${JSON.stringify(step.action.toolInput)}, Result: ${step.observation}`,\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\t.join('; ');\n\t\t\t\t\t\t\tfullOutput = `[Used tools: ${toolContext}] ${fullOutput}`;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tawait memory.saveContext({ input }, { output: fullOutput });\n\t\t\t\t\t}\n\t\t\t\t\t// Include intermediate steps if requested\n\t\t\t\t\tconst result = { ...modelResponse.returnValues };\n\t\t\t\t\tif (options.returnIntermediateSteps && steps.length > 0) {\n\t\t\t\t\t\tresult.intermediateSteps = steps;\n\t\t\t\t\t}\n\t\t\t\t\treturn result;\n\t\t\t\t}\n\n\t\t\t\tconst currentIteration = (response?.metadata?.iterationCount ?? 
0) + 1;\n\n\t\t\t\t// Check if we've exceeded maxIterations\n\t\t\t\tif (options.maxIterations && currentIteration > options.maxIterations) {\n\t\t\t\t\tthrow new NodeOperationError(this.getNode(), 'Maximum iterations reached');\n\t\t\t\t}\n\n\t\t\t\tconst actions = await createEngineRequests(modelResponse, itemIndex, tools);\n\n\t\t\t\treturn {\n\t\t\t\t\tactions,\n\t\t\t\t\tmetadata: {\n\t\t\t\t\t\tpreviousRequests: buildSteps(response, itemIndex),\n\t\t\t\t\t\titerationCount: currentIteration,\n\t\t\t\t\t},\n\t\t\t\t};\n\t\t\t}\n\t\t});\n\n\t\tconst batchResults = await Promise.allSettled(batchPromises);\n\t\t// This is only used to check if the output parser is connected\n\t\t// so we can parse the output if needed. Actual output parsing is done in the loop above\n\t\tconst outputParser = await getOptionalOutputParser(this, 0);\n\t\tbatchResults.forEach((result, index) => {\n\t\t\tconst itemIndex = i + index;\n\t\t\tif (result.status === 'rejected') {\n\t\t\t\tconst error = result.reason as Error;\n\t\t\t\tif (this.continueOnFail()) {\n\t\t\t\t\treturnData.push({\n\t\t\t\t\t\tjson: { error: error.message },\n\t\t\t\t\t\tpairedItem: { item: itemIndex },\n\t\t\t\t\t} as INodeExecutionData);\n\t\t\t\t\treturn;\n\t\t\t\t} else {\n\t\t\t\t\tthrow new NodeOperationError(this.getNode(), error);\n\t\t\t\t}\n\t\t\t}\n\t\t\tconst response = result.value;\n\n\t\t\tif ('actions' in response) {\n\t\t\t\tif (!request) {\n\t\t\t\t\trequest = {\n\t\t\t\t\t\tactions: response.actions,\n\t\t\t\t\t\tmetadata: response.metadata,\n\t\t\t\t\t};\n\t\t\t\t} else {\n\t\t\t\t\trequest.actions.push(...response.actions);\n\t\t\t\t}\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// If memory and outputParser are connected, parse the output.\n\t\t\tif (memory && outputParser) {\n\t\t\t\tconst parsedOutput = jsonParse<{ output: Record<string, unknown> }>(\n\t\t\t\t\tresponse.output as string,\n\t\t\t\t);\n\t\t\t\tresponse.output = parsedOutput?.output ?? 
parsedOutput;\n\t\t\t}\n\n\t\t\t// Omit internal keys before returning the result.\n\t\t\tconst itemResult: INodeExecutionData = {\n\t\t\t\tjson: omit(\n\t\t\t\t\tresponse,\n\t\t\t\t\t'system_message',\n\t\t\t\t\t'formatting_instructions',\n\t\t\t\t\t'input',\n\t\t\t\t\t'chat_history',\n\t\t\t\t\t'agent_scratchpad',\n\t\t\t\t),\n\t\t\t\tpairedItem: { item: itemIndex },\n\t\t\t};\n\n\t\t\treturnData.push(itemResult);\n\t\t});\n\n\t\tif (i + batchSize < items.length && delayBetweenBatches > 0) {\n\t\t\tawait sleep(delayBetweenBatches);\n\t\t}\n\t}\n\t// Check if we have any Request objects (tool calls)\n\tif (request) {\n\t\treturn request;\n\t}\n\n\t// Otherwise return execution data\n\treturn [returnData];\n}\nasync function loadChatHistory(\n\tmemory: BaseChatMemory,\n\tmodel: BaseChatModel,\n\tmaxTokensFromMemory?: number,\n): Promise<BaseMessage[]> {\n\tconst memoryVariables = await memory.loadMemoryVariables({});\n\tlet chatHistory = memoryVariables['chat_history'] as BaseMessage[];\n\n\tif (maxTokensFromMemory) {\n\t\tchatHistory = await trimMessages(chatHistory, {\n\t\t\tstrategy: 'last',\n\t\t\tmaxTokens: maxTokensFromMemory,\n\t\t\ttokenCounter: model,\n\t\t\tincludeSystem: true,\n\t\t\tstartOn: 'human',\n\t\t\tallowPartial: true,\n\t\t});\n\t}\n\n\treturn chatHistory;\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAIA,sBAAwC;AAGxC,uBAAiC;AACjC,oBAAmE;AAGnE,kBAAiB;AACjB,0BAMO;AAUP,yBAAmB;AAEnB,qBAAqC;AACrC,6BAGO;AAEP,oBAQO;AACP,oBAA+B;AAW/B,eAAe,qBACd,WACA,WACA,OACC;AACD,SAAO,UAAU,IAAI,CAAC,aAAa;AAElC,UAAM,YAAY,MAAM,KAAK,CAAC,SAAS,KAAK,SAAS,SAAS,IAAI;AAElE,QAAI,CAAC,UAAW;AAEhB,UAAM,WAAW,UAAU,UAAU;AAGrC,UAAM,QAAQ,UAAU,UAAU,gBAC/B,EAAE,GAAG,SAAS,WAAW,MAAM,SAAS,KAAK,IAC7C,SAAS;AAEZ,WAAO;AAAA,MACN;AAAA,MACA;AAAA,MACA,MAAM,wCAAoB;AAAA,MAC1B,IAAI,SAAS;AAAA,MACb,UAAU;AAAA,QACT;AAAA,MACD;AAAA,IACD;AAAA,EACD,CAAC;AACF;AAMA,SAAS,YAAY,OAAsB,OAA4C;AACtF,QAAM,aAAc,MAAM,UAAU,SAAoB,CAAC;AACzD,QAAM,WAAW,CAAC,GAAG,OAAO,GAAG,UAAU;AACzC,SAAO;AACR;AAKA,SAAS,oBACR,OACA,OACA,QACA,UACA,cACA,QACA,eACC;AACD,QAAM,YAAQ,sCAAuB;AAAA,IACpC,KAAK;AAAA,IACL,OAAO,YAAY,OAAO,KAAK;AAAA,IAC/B;AAAA,IACA,gBAAgB;AAAA,EACjB,CAAC;AAED,MAAI;AACJ,MAAI,eAAe;AAClB,wBAAgB,sCAAuB;AAAA,MACtC,KAAK;AAAA,MACL,OAAO,YAAY,eAAe,KAAK;AAAA,MACvC;AAAA,MACA,gBAAgB;AAAA,IACjB,CAAC;AAAA,EACF;AACA,QAAM,gBAAgB,kCAAiB,KAAK;AAAA,IAC3C,gBAAgB,MAAM,cAAc,CAAC,aAAa,CAAC,IAAI;AAAA,QACvD,mCAAoB,cAAc,MAAM;AAAA,IACxC;AAAA,EACD,CAAC;AAED,gBAAc,eAAe;AAC7B,gBAAc,iBAAiB;AAE/B,SAAO;AACR;AAoBA,eAAe,mBACd,KACA,aACA,WACA,0BAAmC,OACnC,QACA,OACuB;AACvB,QAAM,cAA2B;AAAA,IAChC,QAAQ;AAAA,EACT;AAEA,MAAI,yBAAyB;AAC5B,gBAAY,oBAAoB,CAAC;AAAA,EAClC;AAEA,QAAM,YAA+B,CAAC;AAEtC,MAAI,UAAU,SAAS,SAAS;AAChC,mBAAiB,SAAS,aAAa;AAEtC,YAAQ,MAAM,OAAO;AAAA,MACpB,KAAK;AACJ,cAAM,QAAQ,MAAM,MAAM;AAC1B,YAAI,OAAO,SAAS;AACnB,gBAAM,eAAe,MAAM;AAC3B,cAAI,YAAY;AAChB,cAAI,MAAM,QAAQ,YAAY,GAAG;AAChC,uBAAW,WAAW,cAAc;AACnC,kBAAI,SAAS,SAAS,QAAQ;AAC7B,6BAAc,SAAgC;AAAA,cAC/C;AAAA,YACD;AAAA,UACD,WAAW,OAAO,iBAAiB,UAAU;AAC5C,wBAAY;AAAA,UACb;AACA,cAAI,UAAU,QAAQ,WAAW,SAAS;AAE1C,sBAAY,UAAU;AAAA,QACvB;AACA;AAAA,MACD,KAAK;AAEJ,YAAI,MAAM,MAAM;AACf,gBAAM,gBAAgB,MAAM;AAG5B,gBAAM,SAAS,cAAc;AAG7B,cAAI,QAAQ,cAAc,OAAO,WAAW,SAAS,GAAG;AAEvD,uBAAW,YAAY,OAAO,YAAY;AACzC,wBAAU,KAAK;AAAA,gBACd,MAAM,SAAS;AAAA,gBACf,WAAW,SAAS;AAAA,gBACpB,YAAY,SAAS,MAAM;AAAA,gBAC3B,MAAM,SAAS,QAAQ;AAAA,gBACvB,KACC,OAAO,WACP,WAAW,SAAS,IAAI,gBAAgB,KAAK,UAAU,SAAS,IAAI,CAAC;AAAA,gBACtE,YAAY,CAAC,MAAM;AAAA,cACpB,CAAC;AAAA,YACF;AAGA,gBAAI,yBAAyB;AAC5B,yBAAW,YAAY,OAAO,YAAY;AACzC,4BAAY,kBAAmB,KAAK;AAAA,kBACnC,QAAQ;AAAA,oBACP,MAAM,S
AAS;AAAA,oBACf,WAAW,SAAS;AAAA,oBACpB,KACC,OAAO,WACP,WAAW,SAAS,IAAI,gBAAgB,KAAK,UAAU,SAAS,IAAI,CAAC;AAAA,oBACtE,YAAY,CAAC,MAAM;AAAA;AAAA,oBACnB,YAAY,SAAS,MAAM;AAAA,oBAC3B,MAAM,SAAS,QAAQ;AAAA,kBACxB;AAAA,gBACD,CAAC;AAAA,cACF;AAAA,YACD;AAAA,UACD;AAAA,QACD;AACA;AAAA,MACD,KAAK;AAEJ,YAAI,2BAA2B,MAAM,QAAQ,YAAY,kBAAmB,SAAS,GAAG;AACvF,gBAAM,WAAW,MAAM;AAEvB,gBAAM,eAAe,YAAY,kBAAmB;AAAA,YACnD,CAAC,SAAS,CAAC,KAAK,eAAe,KAAK,OAAO,SAAS,MAAM;AAAA,UAC3D;AACA,cAAI,cAAc;AACjB,yBAAa,cAAc,SAAS,UAAU;AAAA,UAC/C;AAAA,QACD;AACA;AAAA,MACD;AACC;AAAA,IACF;AAAA,EACD;AACA,MAAI,UAAU,OAAO,SAAS;AAG9B,MAAI,UAAU,SAAS,YAAY,QAAQ;AAC1C,UAAM,OAAO,YAAY,EAAE,MAAM,GAAG,EAAE,QAAQ,YAAY,OAAO,CAAC;AAAA,EACnE;AAGA,MAAI,UAAU,SAAS,GAAG;AACzB,gBAAY,YAAY;AAAA,EACzB;AAEA,SAAO;AACR;AAmBA,SAAS,WACR,UACA,WACiB;AACjB,QAAM,QAAwB,CAAC;AAE/B,MAAI,UAAU;AACb,UAAM,YAAY,UAAU,mBAAmB,CAAC;AAEhD,QAAI,SAAS,UAAU,kBAAkB;AACxC,YAAM,KAAK,GAAG,SAAS,SAAS,gBAAgB;AAAA,IACjD;AAEA,eAAW,QAAQ,WAAW;AAC7B,UAAI,KAAK,QAAQ,UAAU,cAAc,UAAW;AAEpD,YAAM,YAAyB;AAAA,QAC9B,GAAG,KAAK,OAAO;AAAA,QACf,IAAI,KAAK,OAAO;AAAA,MACjB;AACA,UAAI,CAAC,aAAa,CAAC,KAAK,MAAM;AAC7B;AAAA,MACD;AAEA,YAAM,OAAO,MAAM,KAAK,CAACA,UAASA,MAAK,OAAO,eAAe,UAAU,EAAE;AACzE,UAAI,MAAM;AACT;AAAA,MACD;AAGA,YAAM,qBAAqB,IAAI,0BAAU;AAAA,QACxC,SAAS,WAAW,KAAK,OAAO,QAAQ,gBAAgB,KAAK,UAAU,SAAS,CAAC;AAAA,QACjF,YAAY;AAAA,UACX;AAAA,YACC,IAAK,WAAW,MAAiB;AAAA,YACjC,UAAM,wCAAmB,KAAK,OAAO,QAAQ;AAAA,YAC7C,MAAM;AAAA,YACN,MAAM;AAAA,UACP;AAAA,QACD;AAAA,MACD,CAAC;AAED,YAAM,aAAa;AAAA,QAClB,QAAQ;AAAA,UACP,UAAM,wCAAmB,KAAK,OAAO,QAAQ;AAAA,UAC7C,WAAY,UAAU,SAAyB,CAAC;AAAA,UAChD,KAAK,UAAU,OAAO,mBAAmB;AAAA,UACzC,YAAY,CAAC,kBAAkB;AAAA,UAC/B,YAAY,WAAW;AAAA,UACvB,MAAM,UAAU,QAAQ;AAAA,QACzB;AAAA,QACA,aAAa,KAAK,UAAU,KAAK,MAAM,MAAM,UAAU,CAAC,GAAG,IAAI,CAAC,SAAS,MAAM,IAAI,KAAK,EAAE;AAAA,MAC3F;AAEA,YAAM,KAAK,UAAU;AAAA,IACtB;AAAA,EACD;AACA,SAAO;AACR;AAgBA,eAAsB,kBAErB,UAC2E;AAC3E,OAAK,OAAO,MAAM,0BAA0B;AAE5C,QAAM,aAAmC,CAAC;AAC1C,MAAI,UAA8D;AAElE,QAAM,QAAQ,KAAK,aAAa;AAChC,QAAM,YAAY,KAAK,iBAAiB,8BAA8B,GAAG,CAAC;AAC1E,QAAM,sBAAsB,KAAK;AAAA,IAChC;AAAA,IACA;AAAA,IACA;AAAA,EACD;AACA,QAAM,gBAAgB,KAAK,iBAAiB,iBAAiB,GAAG,KAAK;AACrE,QAAM,SAAS,UAAM,iCAAkB,IAAI;AAC3C,QAAM,QAAQ,UAAM,4BAAa,MAAM,CAAC;AACxC,yBAAAC,SAAO,OAAO,gDAAgD;AAC9D,QAAM,gBAAgB,gBAAgB,UAAM,4BAAa,MAAM,CAAC,IAAI;AAEpE,MAAI,iBAAiB,CAAC,eAAe;AACpC,UAAM,IAAI;AAAA,MACT,KAAK,QAAQ;AAAA,MACb;AAAA,IACD;AAAA,EACD;AAEA,WAAS,IAAI,GAAG,IAAI,MAAM,QAAQ,KAAK,WAAW;AACjD,UAAM,QAAQ,MAAM,MAAM,GAAG,IAAI,SAAS;AAC1C,UAAM,gBAAgB,MAAM,IAAI,OAAO,OAAO,mBAAmB;AAChE,YAAM,YAAY,IAAI;AAEtB,UAAI,YAAY,UAAU,UAAU,cAAc,WAAW;AAC5D,eAAO;AAAA,MACR;AAEA,YAAM,QAAQ,WAAW,UAAU,SAAS;AAE5C,YAAM,YAAQ,qCAAqB;AAAA,QAClC,KAAK;AAAA,QACL,GAAG;AAAA,QACH,UAAU;AAAA,QACV,eAAe;AAAA,MAChB,CAAC;AACD,UAAI,UAAU,QAAW;AACxB,cAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,gCAAgC;AAAA,MAC9E;AACA,YAAMC,gBAAe,UAAM,gDAAwB,MAAM,SAAS;AAClE,YAAM,QAAQ,UAAM,wBAAS,MAAMA,aAAY;AAC/C,YAAM,UAAU,KAAK,iBAAiB,WAAW,SAAS;AAS1D,UAAI,QAAQ,oBAAoB,QAAW;AAC1C,gBAAQ,kBAAkB;AAAA,MAC3B;AAGA,YAAM,WAAW,UAAM,+BAAgB,MAAM,WAAW;AAAA,QACvD,eAAe,QAAQ;AAAA,QACvB,yBAAyB,QAAQ,2BAA2B;AAAA,QAC5D,cAAAA;AAAA,MACD,CAAC;AACD,YAAM,aAA6B,6BAAc,QAAQ;AAGzD,YAAM,WAAW;AAAA,QAChB;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACAA;AAAA,QACA;AAAA,QACA;AAAA,MACD;AAEA,YAAM,eAAe;AAAA,QACpB;AAAA,QACA;AAAA,QACA,gBAAgB,QAAQ,iBAAiB;AAAA,QACzC,yBACC;AAAA,MACF;AACA,YAAM,iBAAiB,EAAE,QAAQ,KAAK,yBAAyB,EAAE;AAGjE,YAAM,uBAAuB,iBAAiB,OAAO,KAAK,cAAc,IAAI;AAE5E,UACC,iBAAiB,QACjB,QAAQ,mBACR,wBACA,KAAK,QAAQ,EAAE,eAAe,KAC7B;AACD,YAAI,cAAyC;AAC7C,YAAI,QAAQ;AAEX,wBAAc,MAAM,gBAAgB,QAAQ,OAAO,QAAQ,mBAAmB;AAAA,QAC/E;AACA,cAAM,cAAc,SAAS;AAAA,UAC5B;AAAA,YACC,GAAG;AAAA,YA
CH,cAAc;AAAA,UACf;AAAA,UACA;AAAA,YACC,SAAS;AAAA,YACT,GAAG;AAAA,UACJ;AAAA,QACD;AAEA,cAAM,SAAS,MAAM;AAAA,UACpB;AAAA,UACA;AAAA,UACA;AAAA,UACA,QAAQ;AAAA,UACR;AAAA,UACA;AAAA,QACD;AAGA,YAAI,OAAO,aAAa,OAAO,UAAU,SAAS,GAAG;AACpD,gBAAM,oBAAoB,UAAU,UAAU,kBAAkB,KAAK;AAGrE,cAAI,QAAQ,iBAAiB,mBAAmB,QAAQ,eAAe;AACtE,kBAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,4BAA4B;AAAA,UAC1E;AAEA,gBAAM,UAAU,MAAM,qBAAqB,OAAO,WAAW,WAAW,KAAK;AAE7E,iBAAO;AAAA,YACN;AAAA,YACA,UAAU;AAAA,cACT,kBAAkB,WAAW,UAAU,SAAS;AAAA,cAChD,gBAAgB;AAAA,YACjB;AAAA,UACD;AAAA,QACD;AAEA,eAAO;AAAA,MACR,OAAO;AAEN,YAAI,cAAyC;AAC7C,YAAI,QAAQ;AAEX,wBAAc,MAAM,gBAAgB,QAAQ,OAAO,QAAQ,mBAAmB;AAAA,QAC/E;AACA,cAAM,gBAAgB,MAAM,SAAS,OAAO;AAAA,UAC3C,GAAG;AAAA,UACH,cAAc;AAAA,QACf,CAAC;AAED,YAAI,kBAAkB,eAAe;AAEpC,cAAI,UAAU,SAAS,cAAc,aAAa,QAAQ;AAEzD,gBAAI,aAAa,cAAc,aAAa;AAE5C,gBAAI,MAAM,SAAS,GAAG;AAErB,oBAAM,cAAc,MAClB;AAAA,gBACA,CAAC,SACA,SAAS,KAAK,OAAO,IAAI,YAAY,KAAK,UAAU,KAAK,OAAO,SAAS,CAAC,aAAa,KAAK,WAAW;AAAA,cACzG,EACC,KAAK,IAAI;AACX,2BAAa,gBAAgB,WAAW,KAAK,UAAU;AAAA,YACxD;AAEA,kBAAM,OAAO,YAAY,EAAE,MAAM,GAAG,EAAE,QAAQ,WAAW,CAAC;AAAA,UAC3D;AAEA,gBAAM,SAAS,EAAE,GAAG,cAAc,aAAa;AAC/C,cAAI,QAAQ,2BAA2B,MAAM,SAAS,GAAG;AACxD,mBAAO,oBAAoB;AAAA,UAC5B;AACA,iBAAO;AAAA,QACR;AAEA,cAAM,oBAAoB,UAAU,UAAU,kBAAkB,KAAK;AAGrE,YAAI,QAAQ,iBAAiB,mBAAmB,QAAQ,eAAe;AACtE,gBAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,4BAA4B;AAAA,QAC1E;AAEA,cAAM,UAAU,MAAM,qBAAqB,eAAe,WAAW,KAAK;AAE1E,eAAO;AAAA,UACN;AAAA,UACA,UAAU;AAAA,YACT,kBAAkB,WAAW,UAAU,SAAS;AAAA,YAChD,gBAAgB;AAAA,UACjB;AAAA,QACD;AAAA,MACD;AAAA,IACD,CAAC;AAED,UAAM,eAAe,MAAM,QAAQ,WAAW,aAAa;AAG3D,UAAM,eAAe,UAAM,gDAAwB,MAAM,CAAC;AAC1D,iBAAa,QAAQ,CAAC,QAAQ,UAAU;AACvC,YAAM,YAAY,IAAI;AACtB,UAAI,OAAO,WAAW,YAAY;AACjC,cAAM,QAAQ,OAAO;AACrB,YAAI,KAAK,eAAe,GAAG;AAC1B,qBAAW,KAAK;AAAA,YACf,MAAM,EAAE,OAAO,MAAM,QAAQ;AAAA,YAC7B,YAAY,EAAE,MAAM,UAAU;AAAA,UAC/B,CAAuB;AACvB;AAAA,QACD,OAAO;AACN,gBAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,KAAK;AAAA,QACnD;AAAA,MACD;AACA,YAAMC,YAAW,OAAO;AAExB,UAAI,aAAaA,WAAU;AAC1B,YAAI,CAAC,SAAS;AACb,oBAAU;AAAA,YACT,SAASA,UAAS;AAAA,YAClB,UAAUA,UAAS;AAAA,UACpB;AAAA,QACD,OAAO;AACN,kBAAQ,QAAQ,KAAK,GAAGA,UAAS,OAAO;AAAA,QACzC;AACA;AAAA,MACD;AAGA,UAAI,UAAU,cAAc;AAC3B,cAAM,mBAAe;AAAA,UACpBA,UAAS;AAAA,QACV;AACA,QAAAA,UAAS,SAAS,cAAc,UAAU;AAAA,MAC3C;AAGA,YAAM,aAAiC;AAAA,QACtC,UAAM,YAAAC;AAAA,UACLD;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,QACD;AAAA,QACA,YAAY,EAAE,MAAM,UAAU;AAAA,MAC/B;AAEA,iBAAW,KAAK,UAAU;AAAA,IAC3B,CAAC;AAED,QAAI,IAAI,YAAY,MAAM,UAAU,sBAAsB,GAAG;AAC5D,gBAAM,2BAAM,mBAAmB;AAAA,IAChC;AAAA,EACD;AAEA,MAAI,SAAS;AACZ,WAAO;AAAA,EACR;AAGA,SAAO,CAAC,UAAU;AACnB;AACA,eAAe,gBACd,QACA,OACA,qBACyB;AACzB,QAAM,kBAAkB,MAAM,OAAO,oBAAoB,CAAC,CAAC;AAC3D,MAAI,cAAc,gBAAgB,cAAc;AAEhD,MAAI,qBAAqB;AACxB,kBAAc,UAAM,8BAAa,aAAa;AAAA,MAC7C,UAAU;AAAA,MACV,WAAW;AAAA,MACX,cAAc;AAAA,MACd,eAAe;AAAA,MACf,SAAS;AAAA,MACT,cAAc;AAAA,IACf,CAAC;AAAA,EACF;AAEA,SAAO;AACR;","names":["step","assert","outputParser","response","omit"]}
+ {"version":3,"sources":["../../../../../../../nodes/agents/Agent/agents/ToolsAgent/V3/execute.ts"],"sourcesContent":["import { sleep } from 'n8n-workflow';\nimport type {\n\tEngineRequest,\n\tIExecuteFunctions,\n\tINodeExecutionData,\n\tISupplyDataFunctions,\n\tEngineResponse,\n} from 'n8n-workflow';\n\nimport { buildExecutionContext, executeBatch, checkMaxIterations } from './helpers';\nimport type { RequestResponseMetadata } from './types';\n\n/* -----------------------------------------------------------\n Main Executor Function\n----------------------------------------------------------- */\n/**\n * The main executor method for the Tools Agent V3.\n *\n * This function orchestrates the execution across input batches, handling:\n * - Building shared execution context (models, memory, batching config)\n * - Processing items in batches with continue-on-fail logic\n * - Returning either tool call requests or node output data\n *\n * @param this Execute context. SupplyDataContext is passed when agent is used as a tool\n * @param response Optional engine response containing tool call results from previous execution\n * @returns Array of execution data for all processed items, or engine request for tool calls\n */\nexport async function toolsAgentExecute(\n\tthis: IExecuteFunctions | ISupplyDataFunctions,\n\tresponse?: EngineResponse<RequestResponseMetadata>,\n): Promise<INodeExecutionData[][] | EngineRequest<RequestResponseMetadata>> {\n\tthis.logger.debug('Executing Tools Agent V3');\n\n\t// Check max iterations if this is a continuation of a previous execution\n\tconst maxIterations = this.getNodeParameter('options.maxIterations', 0, 10) as number;\n\tcheckMaxIterations(response, maxIterations, this.getNode());\n\n\tconst returnData: INodeExecutionData[] = [];\n\tlet request: EngineRequest<RequestResponseMetadata> | undefined = undefined;\n\n\t// Build execution context with shared configuration\n\tconst executionContext = await buildExecutionContext(this);\n\tconst { items, batchSize, delayBetweenBatches, model, fallbackModel, memory } = executionContext;\n\n\t// Process items in batches\n\tfor (let i = 0; i < items.length; i += batchSize) {\n\t\tconst batch = items.slice(i, i + batchSize);\n\n\t\tconst { returnData: batchReturnData, request: batchRequest } = await executeBatch(\n\t\t\tthis,\n\t\t\tbatch,\n\t\t\ti,\n\t\t\tmodel,\n\t\t\tfallbackModel,\n\t\t\tmemory,\n\t\t\tresponse,\n\t\t);\n\n\t\t// Collect results from batch\n\t\treturnData.push.apply(returnData, batchReturnData);\n\n\t\t// Collect requests from batch\n\t\tif (batchRequest) {\n\t\t\tif (!request) {\n\t\t\t\trequest = batchRequest;\n\t\t\t} else {\n\t\t\t\trequest.actions.push.apply(request.actions, batchRequest.actions);\n\t\t\t}\n\t\t}\n\n\t\t// Apply delay between batches if configured\n\t\tif (i + batchSize < items.length && delayBetweenBatches > 0) {\n\t\t\tawait sleep(delayBetweenBatches);\n\t\t}\n\t}\n\n\t// Return tool call request if any tools need to be executed\n\tif (request) {\n\t\treturn request;\n\t}\n\n\t// Otherwise return execution data\n\treturn [returnData];\n}\n\n// Re-export types for backwards compatibility\nexport type { RequestResponseMetadata } from 
'./types';\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,0BAAsB;AAStB,qBAAwE;AAkBxE,eAAsB,kBAErB,UAC2E;AAC3E,OAAK,OAAO,MAAM,0BAA0B;AAG5C,QAAM,gBAAgB,KAAK,iBAAiB,yBAAyB,GAAG,EAAE;AAC1E,yCAAmB,UAAU,eAAe,KAAK,QAAQ,CAAC;AAE1D,QAAM,aAAmC,CAAC;AAC1C,MAAI,UAA8D;AAGlE,QAAM,mBAAmB,UAAM,sCAAsB,IAAI;AACzD,QAAM,EAAE,OAAO,WAAW,qBAAqB,OAAO,eAAe,OAAO,IAAI;AAGhF,WAAS,IAAI,GAAG,IAAI,MAAM,QAAQ,KAAK,WAAW;AACjD,UAAM,QAAQ,MAAM,MAAM,GAAG,IAAI,SAAS;AAE1C,UAAM,EAAE,YAAY,iBAAiB,SAAS,aAAa,IAAI,UAAM;AAAA,MACpE;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACD;AAGA,eAAW,KAAK,MAAM,YAAY,eAAe;AAGjD,QAAI,cAAc;AACjB,UAAI,CAAC,SAAS;AACb,kBAAU;AAAA,MACX,OAAO;AACN,gBAAQ,QAAQ,KAAK,MAAM,QAAQ,SAAS,aAAa,OAAO;AAAA,MACjE;AAAA,IACD;AAGA,QAAI,IAAI,YAAY,MAAM,UAAU,sBAAsB,GAAG;AAC5D,gBAAM,2BAAM,mBAAmB;AAAA,IAChC;AAAA,EACD;AAGA,MAAI,SAAS;AACZ,WAAO;AAAA,EACR;AAGA,SAAO,CAAC,UAAU;AACnB;","names":[]}
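The replaced and added source maps above embed the old and new `execute.ts` in their `sourcesContent` fields: the previous monolithic executor is split across the new `helpers/` modules listed in the file index (`buildExecutionContext`, `executeBatch`, `checkMaxIterations`, and related files). Because the escaped JSON is hard to read, the TypeScript sketch below restates the new executor's orchestration. It is adapted from the embedded `sourcesContent` string and lightly condensed (for example, array spreads instead of `push.apply`), so treat it as an illustrative sketch rather than the verbatim published file; the helper names and call shapes are taken from that embedded source.

import { sleep } from 'n8n-workflow';
import type {
  EngineRequest,
  EngineResponse,
  IExecuteFunctions,
  INodeExecutionData,
  ISupplyDataFunctions,
} from 'n8n-workflow';

import { buildExecutionContext, executeBatch, checkMaxIterations } from './helpers';
import type { RequestResponseMetadata } from './types';

// Sketch of the refactored V3 Tools Agent executor: the batching loop stays here,
// while per-item work now lives in the helpers/ modules introduced in this release.
export async function toolsAgentExecute(
  this: IExecuteFunctions | ISupplyDataFunctions,
  response?: EngineResponse<RequestResponseMetadata>,
): Promise<INodeExecutionData[][] | EngineRequest<RequestResponseMetadata>> {
  // Abort early if a continued execution has exceeded options.maxIterations.
  const maxIterations = this.getNodeParameter('options.maxIterations', 0, 10) as number;
  checkMaxIterations(response, maxIterations, this.getNode());

  const returnData: INodeExecutionData[] = [];
  let request: EngineRequest<RequestResponseMetadata> | undefined;

  // Shared configuration: input items, batching options, models, and memory.
  const { items, batchSize, delayBetweenBatches, model, fallbackModel, memory } =
    await buildExecutionContext(this);

  for (let i = 0; i < items.length; i += batchSize) {
    const batch = items.slice(i, i + batchSize);

    // Each batch yields finished items plus any pending tool-call request.
    const { returnData: batchReturnData, request: batchRequest } = await executeBatch(
      this,
      batch,
      i,
      model,
      fallbackModel,
      memory,
      response,
    );

    returnData.push(...batchReturnData);

    // Tool calls from all batches are merged into a single engine request.
    if (batchRequest) {
      if (!request) {
        request = batchRequest;
      } else {
        request.actions.push(...batchRequest.actions);
      }
    }

    if (i + batchSize < items.length && delayBetweenBatches > 0) {
      await sleep(delayBetweenBatches);
    }
  }

  // Pending tool calls take precedence over returning node output.
  if (request) {
    return request;
  }
  return [returnData];
}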