@n8n/n8n-nodes-langchain 1.91.1 → 1.92.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/dist/credentials/AnthropicApi.credentials.js +8 -1
  2. package/dist/credentials/AnthropicApi.credentials.js.map +1 -1
  3. package/dist/credentials/AzureEntraCognitiveServicesOAuth2Api.credentials.js +150 -0
  4. package/dist/credentials/AzureEntraCognitiveServicesOAuth2Api.credentials.js.map +1 -0
  5. package/dist/known/credentials.json +10 -0
  6. package/dist/nodes/agents/Agent/agents/ToolsAgent/description.js +23 -0
  7. package/dist/nodes/agents/Agent/agents/ToolsAgent/description.js.map +1 -1
  8. package/dist/nodes/agents/Agent/agents/ToolsAgent/execute.js +28 -13
  9. package/dist/nodes/agents/Agent/agents/ToolsAgent/execute.js.map +1 -1
  10. package/dist/nodes/chains/ChainLLM/ChainLlm.node.js +39 -21
  11. package/dist/nodes/chains/ChainLLM/ChainLlm.node.js.map +1 -1
  12. package/dist/nodes/chains/ChainLLM/methods/config.js +24 -0
  13. package/dist/nodes/chains/ChainLLM/methods/config.js.map +1 -1
  14. package/dist/nodes/chains/ChainRetrievalQA/ChainRetrievalQa.node.js +55 -15
  15. package/dist/nodes/chains/ChainRetrievalQA/ChainRetrievalQa.node.js.map +1 -1
  16. package/dist/nodes/chains/ChainSummarization/V2/ChainSummarizationV2.node.js +54 -11
  17. package/dist/nodes/chains/ChainSummarization/V2/ChainSummarizationV2.node.js.map +1 -1
  18. package/dist/nodes/chains/InformationExtractor/InformationExtractor.node.js +66 -24
  19. package/dist/nodes/chains/InformationExtractor/InformationExtractor.node.js.map +1 -1
  20. package/dist/nodes/chains/SentimentAnalysis/SentimentAnalysis.node.js +86 -27
  21. package/dist/nodes/chains/SentimentAnalysis/SentimentAnalysis.node.js.map +1 -1
  22. package/dist/nodes/chains/TextClassifier/TextClassifier.node.js +81 -47
  23. package/dist/nodes/chains/TextClassifier/TextClassifier.node.js.map +1 -1
  24. package/dist/nodes/llms/LMChatAnthropic/LmChatAnthropic.node.js +5 -1
  25. package/dist/nodes/llms/LMChatAnthropic/LmChatAnthropic.node.js.map +1 -1
  26. package/dist/nodes/llms/LMChatAnthropic/methods/searchModels.js +3 -1
  27. package/dist/nodes/llms/LMChatAnthropic/methods/searchModels.js.map +1 -1
  28. package/dist/nodes/llms/LmChatAzureOpenAi/LmChatAzureOpenAi.node.js +63 -123
  29. package/dist/nodes/llms/LmChatAzureOpenAi/LmChatAzureOpenAi.node.js.map +1 -1
  30. package/dist/nodes/llms/LmChatAzureOpenAi/credentials/N8nOAuth2TokenCredential.js +61 -0
  31. package/dist/nodes/llms/LmChatAzureOpenAi/credentials/N8nOAuth2TokenCredential.js.map +1 -0
  32. package/dist/nodes/llms/LmChatAzureOpenAi/credentials/api-key.js +53 -0
  33. package/dist/nodes/llms/LmChatAzureOpenAi/credentials/api-key.js.map +1 -0
  34. package/dist/nodes/llms/LmChatAzureOpenAi/credentials/oauth2.js +54 -0
  35. package/dist/nodes/llms/LmChatAzureOpenAi/credentials/oauth2.js.map +1 -0
  36. package/dist/nodes/llms/LmChatAzureOpenAi/properties.js +155 -0
  37. package/dist/nodes/llms/LmChatAzureOpenAi/properties.js.map +1 -0
  38. package/dist/nodes/llms/LmChatAzureOpenAi/types.js +42 -0
  39. package/dist/nodes/llms/LmChatAzureOpenAi/types.js.map +1 -0
  40. package/dist/types/credentials.json +2 -1
  41. package/dist/types/nodes.json +8 -8
  42. package/package.json +10 -7
@@ -34,6 +34,13 @@ class AnthropicApi {
  typeOptions: { password: true },
  required: true,
  default: ""
+ },
+ {
+ displayName: "Base URL",
+ name: "url",
+ type: "string",
+ default: "https://api.anthropic.com",
+ description: "Override the default base URL for the API"
  }
  ];
  this.authenticate = {
@@ -46,7 +53,7 @@ class AnthropicApi {
  };
  this.test = {
  request: {
- baseURL: "https://api.anthropic.com",
+ baseURL: "={{$credentials?.url}}",
  url: "/v1/messages",
  method: "POST",
  headers: {
@@ -1 +1 @@
- {"version":3,"sources":["../../credentials/AnthropicApi.credentials.ts"],"sourcesContent":["import type {\n\tIAuthenticateGeneric,\n\tICredentialTestRequest,\n\tICredentialType,\n\tINodeProperties,\n} from 'n8n-workflow';\n\nexport class AnthropicApi implements ICredentialType {\n\tname = 'anthropicApi';\n\n\tdisplayName = 'Anthropic';\n\n\tdocumentationUrl = 'anthropic';\n\n\tproperties: INodeProperties[] = [\n\t\t{\n\t\t\tdisplayName: 'API Key',\n\t\t\tname: 'apiKey',\n\t\t\ttype: 'string',\n\t\t\ttypeOptions: { password: true },\n\t\t\trequired: true,\n\t\t\tdefault: '',\n\t\t},\n\t];\n\n\tauthenticate: IAuthenticateGeneric = {\n\t\ttype: 'generic',\n\t\tproperties: {\n\t\t\theaders: {\n\t\t\t\t'x-api-key': '={{$credentials.apiKey}}',\n\t\t\t},\n\t\t},\n\t};\n\n\ttest: ICredentialTestRequest = {\n\t\trequest: {\n\t\t\tbaseURL: 'https://api.anthropic.com',\n\t\t\turl: '/v1/messages',\n\t\t\tmethod: 'POST',\n\t\t\theaders: {\n\t\t\t\t'anthropic-version': '2023-06-01',\n\t\t\t},\n\t\t\tbody: {\n\t\t\t\tmodel: 'claude-3-haiku-20240307',\n\t\t\t\tmessages: [{ role: 'user', content: 'Hey' }],\n\t\t\t\tmax_tokens: 1,\n\t\t\t},\n\t\t},\n\t};\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAOO,MAAM,aAAwC;AAAA,EAA9C;AACN,gBAAO;AAEP,uBAAc;AAEd,4BAAmB;AAEnB,sBAAgC;AAAA,MAC/B;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,aAAa,EAAE,UAAU,KAAK;AAAA,QAC9B,UAAU;AAAA,QACV,SAAS;AAAA,MACV;AAAA,IACD;AAEA,wBAAqC;AAAA,MACpC,MAAM;AAAA,MACN,YAAY;AAAA,QACX,SAAS;AAAA,UACR,aAAa;AAAA,QACd;AAAA,MACD;AAAA,IACD;AAEA,gBAA+B;AAAA,MAC9B,SAAS;AAAA,QACR,SAAS;AAAA,QACT,KAAK;AAAA,QACL,QAAQ;AAAA,QACR,SAAS;AAAA,UACR,qBAAqB;AAAA,QACtB;AAAA,QACA,MAAM;AAAA,UACL,OAAO;AAAA,UACP,UAAU,CAAC,EAAE,MAAM,QAAQ,SAAS,MAAM,CAAC;AAAA,UAC3C,YAAY;AAAA,QACb;AAAA,MACD;AAAA,IACD;AAAA;AACD;","names":[]}
+ {"version":3,"sources":["../../credentials/AnthropicApi.credentials.ts"],"sourcesContent":["import type {\n\tIAuthenticateGeneric,\n\tICredentialTestRequest,\n\tICredentialType,\n\tINodeProperties,\n} from 'n8n-workflow';\n\nexport class AnthropicApi implements ICredentialType {\n\tname = 'anthropicApi';\n\n\tdisplayName = 'Anthropic';\n\n\tdocumentationUrl = 'anthropic';\n\n\tproperties: INodeProperties[] = [\n\t\t{\n\t\t\tdisplayName: 'API Key',\n\t\t\tname: 'apiKey',\n\t\t\ttype: 'string',\n\t\t\ttypeOptions: { password: true },\n\t\t\trequired: true,\n\t\t\tdefault: '',\n\t\t},\n\t\t{\n\t\t\tdisplayName: 'Base URL',\n\t\t\tname: 'url',\n\t\t\ttype: 'string',\n\t\t\tdefault: 'https://api.anthropic.com',\n\t\t\tdescription: 'Override the default base URL for the API',\n\t\t},\n\t];\n\n\tauthenticate: IAuthenticateGeneric = {\n\t\ttype: 'generic',\n\t\tproperties: {\n\t\t\theaders: {\n\t\t\t\t'x-api-key': '={{$credentials.apiKey}}',\n\t\t\t},\n\t\t},\n\t};\n\n\ttest: ICredentialTestRequest = {\n\t\trequest: {\n\t\t\tbaseURL: '={{$credentials?.url}}',\n\t\t\turl: '/v1/messages',\n\t\t\tmethod: 'POST',\n\t\t\theaders: {\n\t\t\t\t'anthropic-version': '2023-06-01',\n\t\t\t},\n\t\t\tbody: {\n\t\t\t\tmodel: 'claude-3-haiku-20240307',\n\t\t\t\tmessages: [{ role: 'user', content: 'Hey' }],\n\t\t\t\tmax_tokens: 1,\n\t\t\t},\n\t\t},\n\t};\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAOO,MAAM,aAAwC;AAAA,EAA9C;AACN,gBAAO;AAEP,uBAAc;AAEd,4BAAmB;AAEnB,sBAAgC;AAAA,MAC/B;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,aAAa,EAAE,UAAU,KAAK;AAAA,QAC9B,UAAU;AAAA,QACV,SAAS;AAAA,MACV;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,MACd;AAAA,IACD;AAEA,wBAAqC;AAAA,MACpC,MAAM;AAAA,MACN,YAAY;AAAA,QACX,SAAS;AAAA,UACR,aAAa;AAAA,QACd;AAAA,MACD;AAAA,IACD;AAEA,gBAA+B;AAAA,MAC9B,SAAS;AAAA,QACR,SAAS;AAAA,QACT,KAAK;AAAA,QACL,QAAQ;AAAA,QACR,SAAS;AAAA,UACR,qBAAqB;AAAA,QACtB;AAAA,QACA,MAAM;AAAA,UACL,OAAO;AAAA,UACP,UAAU,CAAC,EAAE,MAAM,QAAQ,SAAS,MAAM,CAAC;AAAA,UAC3C,YAAY;AAAA,QACb;AAAA,MACD;AAAA,IACD;AAAA;AACD;","names":[]}
@@ -0,0 +1,150 @@
+ "use strict";
+ var __defProp = Object.defineProperty;
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+ var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __export = (target, all) => {
+ for (var name in all)
+ __defProp(target, name, { get: all[name], enumerable: true });
+ };
+ var __copyProps = (to, from, except, desc) => {
+ if (from && typeof from === "object" || typeof from === "function") {
+ for (let key of __getOwnPropNames(from))
+ if (!__hasOwnProp.call(to, key) && key !== except)
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+ }
+ return to;
+ };
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+ var AzureEntraCognitiveServicesOAuth2Api_credentials_exports = {};
+ __export(AzureEntraCognitiveServicesOAuth2Api_credentials_exports, {
+ AzureEntraCognitiveServicesOAuth2Api: () => AzureEntraCognitiveServicesOAuth2Api
+ });
+ module.exports = __toCommonJS(AzureEntraCognitiveServicesOAuth2Api_credentials_exports);
+ const defaultScopes = ["openid", "offline_access"];
+ class AzureEntraCognitiveServicesOAuth2Api {
+ constructor() {
+ this.name = "azureEntraCognitiveServicesOAuth2Api";
+ // eslint-disable-next-line n8n-nodes-base/cred-class-field-display-name-missing-oauth2
+ this.displayName = "Azure Entra ID (Azure Active Directory) API";
+ this.extends = ["oAuth2Api"];
+ this.documentationUrl = "azureEntraCognitiveServicesOAuth2Api";
+ this.properties = [
+ {
+ displayName: "Grant Type",
+ name: "grantType",
+ type: "hidden",
+ default: "authorizationCode"
+ },
+ {
+ displayName: "Resource Name",
+ name: "resourceName",
+ type: "string",
+ required: true,
+ default: ""
+ },
+ {
+ displayName: "API Version",
+ name: "apiVersion",
+ type: "string",
+ required: true,
+ default: "2024-12-01-preview"
+ },
+ {
+ displayName: "Endpoint",
+ name: "endpoint",
+ type: "string",
+ default: void 0,
+ placeholder: "https://westeurope.api.cognitive.microsoft.com"
+ },
+ {
+ displayName: "Tenant ID",
+ name: "tenantId",
+ type: "string",
+ default: "common",
+ description: 'Enter your Azure Tenant ID (Directory ID) or keep "common" for multi-tenant apps. Using a specific Tenant ID is generally recommended and required for certain authentication flows.',
+ placeholder: "e.g., xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx or common"
+ },
+ {
+ displayName: "Authorization URL",
+ name: "authUrl",
+ type: "string",
+ default: "https://login.microsoftonline.com/$TENANT_ID/oauth2/authorize"
+ },
+ {
+ displayName: "Access Token URL",
+ name: "accessTokenUrl",
+ type: "string",
+ default: "https://login.microsoftonline.com/$TENANT_ID/oauth2/token"
+ },
+ {
+ displayName: "Client ID",
+ name: "clientId",
+ type: "string",
+ required: true,
+ default: "",
+ description: "Client ID obtained from the Azure AD App Registration"
+ },
+ {
+ displayName: "Client Secret",
+ name: "clientSecret",
+ type: "string",
+ required: true,
+ typeOptions: { password: true },
+ default: "",
+ description: "Client Secret obtained from the Azure AD App Registration"
+ },
+ {
+ displayName: "Additional Body Properties",
+ name: "additionalBodyProperties",
+ type: "hidden",
+ default: '{"grant_type": "client_credentials", "resource": "https://cognitiveservices.azure.com/"}'
+ },
+ {
+ displayName: "Authentication",
+ name: "authentication",
+ type: "hidden",
+ default: "body"
+ },
+ {
+ displayName: "Custom Scopes",
+ name: "customScopes",
+ type: "boolean",
+ default: false,
+ description: 'Define custom scopes. You might need this if the default scopes are not sufficient or if you want to minimize permissions. Ensure you include "openid" and "offline_access".'
+ },
+ {
+ displayName: "Auth URI Query Parameters",
+ name: "authQueryParameters",
+ type: "hidden",
+ default: "",
+ description: "For some services additional query parameters have to be set which can be defined here",
+ placeholder: ""
+ },
+ {
+ displayName: "Enabled Scopes",
+ name: "enabledScopes",
+ type: "string",
+ displayOptions: {
+ show: {
+ customScopes: [true]
+ }
+ },
+ default: defaultScopes.join(" "),
+ placeholder: "openid offline_access",
+ description: "Space-separated list of scopes to request."
+ },
+ {
+ displayName: "Scope",
+ name: "scope",
+ type: "hidden",
+ default: '={{ $self.customScopes ? $self.enabledScopes : "' + defaultScopes.join(" ") + '"}}'
+ }
+ ];
+ }
+ }
+ // Annotate the CommonJS export names for ESM import in node:
+ 0 && (module.exports = {
+ AzureEntraCognitiveServicesOAuth2Api
+ });
+ //# sourceMappingURL=AzureEntraCognitiveServicesOAuth2Api.credentials.js.map
@@ -0,0 +1 @@
+ {"version":3,"sources":["../../credentials/AzureEntraCognitiveServicesOAuth2Api.credentials.ts"],"sourcesContent":["import type { ICredentialType, INodeProperties } from 'n8n-workflow';\n\nconst defaultScopes = ['openid', 'offline_access'];\n\nexport class AzureEntraCognitiveServicesOAuth2Api implements ICredentialType {\n\tname = 'azureEntraCognitiveServicesOAuth2Api';\n\n\t// eslint-disable-next-line n8n-nodes-base/cred-class-field-display-name-missing-oauth2\n\tdisplayName = 'Azure Entra ID (Azure Active Directory) API';\n\n\textends = ['oAuth2Api'];\n\n\tdocumentationUrl = 'azureEntraCognitiveServicesOAuth2Api';\n\n\tproperties: INodeProperties[] = [\n\t\t{\n\t\t\tdisplayName: 'Grant Type',\n\t\t\tname: 'grantType',\n\t\t\ttype: 'hidden',\n\t\t\tdefault: 'authorizationCode',\n\t\t},\n\t\t{\n\t\t\tdisplayName: 'Resource Name',\n\t\t\tname: 'resourceName',\n\t\t\ttype: 'string',\n\t\t\trequired: true,\n\t\t\tdefault: '',\n\t\t},\n\t\t{\n\t\t\tdisplayName: 'API Version',\n\t\t\tname: 'apiVersion',\n\t\t\ttype: 'string',\n\t\t\trequired: true,\n\t\t\tdefault: '2024-12-01-preview',\n\t\t},\n\t\t{\n\t\t\tdisplayName: 'Endpoint',\n\t\t\tname: 'endpoint',\n\t\t\ttype: 'string',\n\t\t\tdefault: undefined,\n\t\t\tplaceholder: 'https://westeurope.api.cognitive.microsoft.com',\n\t\t},\n\t\t{\n\t\t\tdisplayName: 'Tenant ID',\n\t\t\tname: 'tenantId',\n\t\t\ttype: 'string',\n\t\t\tdefault: 'common',\n\t\t\tdescription:\n\t\t\t\t'Enter your Azure Tenant ID (Directory ID) or keep \"common\" for multi-tenant apps. Using a specific Tenant ID is generally recommended and required for certain authentication flows.',\n\t\t\tplaceholder: 'e.g., xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx or common',\n\t\t},\n\t\t{\n\t\t\tdisplayName: 'Authorization URL',\n\t\t\tname: 'authUrl',\n\t\t\ttype: 'string',\n\t\t\tdefault: 'https://login.microsoftonline.com/$TENANT_ID/oauth2/authorize',\n\t\t},\n\t\t{\n\t\t\tdisplayName: 'Access Token URL',\n\t\t\tname: 'accessTokenUrl',\n\t\t\ttype: 'string',\n\t\t\tdefault: 'https://login.microsoftonline.com/$TENANT_ID/oauth2/token',\n\t\t},\n\t\t{\n\t\t\tdisplayName: 'Client ID',\n\t\t\tname: 'clientId',\n\t\t\ttype: 'string',\n\t\t\trequired: true,\n\t\t\tdefault: '',\n\t\t\tdescription: 'Client ID obtained from the Azure AD App Registration',\n\t\t},\n\t\t{\n\t\t\tdisplayName: 'Client Secret',\n\t\t\tname: 'clientSecret',\n\t\t\ttype: 'string',\n\t\t\trequired: true,\n\t\t\ttypeOptions: { password: true },\n\t\t\tdefault: '',\n\t\t\tdescription: 'Client Secret obtained from the Azure AD App Registration',\n\t\t},\n\t\t{\n\t\t\tdisplayName: 'Additional Body Properties',\n\t\t\tname: 'additionalBodyProperties',\n\t\t\ttype: 'hidden',\n\t\t\tdefault:\n\t\t\t\t'{\"grant_type\": \"client_credentials\", \"resource\": \"https://cognitiveservices.azure.com/\"}',\n\t\t},\n\t\t{\n\t\t\tdisplayName: 'Authentication',\n\t\t\tname: 'authentication',\n\t\t\ttype: 'hidden',\n\t\t\tdefault: 'body',\n\t\t},\n\t\t{\n\t\t\tdisplayName: 'Custom Scopes',\n\t\t\tname: 'customScopes',\n\t\t\ttype: 'boolean',\n\t\t\tdefault: false,\n\t\t\tdescription:\n\t\t\t\t'Define custom scopes. You might need this if the default scopes are not sufficient or if you want to minimize permissions. 
Ensure you include \"openid\" and \"offline_access\".',\n\t\t},\n\t\t{\n\t\t\tdisplayName: 'Auth URI Query Parameters',\n\t\t\tname: 'authQueryParameters',\n\t\t\ttype: 'hidden',\n\t\t\tdefault: '',\n\t\t\tdescription:\n\t\t\t\t'For some services additional query parameters have to be set which can be defined here',\n\t\t\tplaceholder: '',\n\t\t},\n\t\t{\n\t\t\tdisplayName: 'Enabled Scopes',\n\t\t\tname: 'enabledScopes',\n\t\t\ttype: 'string',\n\t\t\tdisplayOptions: {\n\t\t\t\tshow: {\n\t\t\t\t\tcustomScopes: [true],\n\t\t\t\t},\n\t\t\t},\n\t\t\tdefault: defaultScopes.join(' '),\n\t\t\tplaceholder: 'openid offline_access',\n\t\t\tdescription: 'Space-separated list of scopes to request.',\n\t\t},\n\t\t{\n\t\t\tdisplayName: 'Scope',\n\t\t\tname: 'scope',\n\t\t\ttype: 'hidden',\n\t\t\tdefault: '={{ $self.customScopes ? $self.enabledScopes : \"' + defaultScopes.join(' ') + '\"}}',\n\t\t},\n\t];\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAEA,MAAM,gBAAgB,CAAC,UAAU,gBAAgB;AAE1C,MAAM,qCAAgE;AAAA,EAAtE;AACN,gBAAO;AAGP;AAAA,uBAAc;AAEd,mBAAU,CAAC,WAAW;AAEtB,4BAAmB;AAEnB,sBAAgC;AAAA,MAC/B;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,MACV;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,UAAU;AAAA,QACV,SAAS;AAAA,MACV;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,UAAU;AAAA,QACV,SAAS;AAAA,MACV;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,MACd;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aACC;AAAA,QACD,aAAa;AAAA,MACd;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,MACV;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,MACV;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,UAAU;AAAA,QACV,SAAS;AAAA,QACT,aAAa;AAAA,MACd;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,UAAU;AAAA,QACV,aAAa,EAAE,UAAU,KAAK;AAAA,QAC9B,SAAS;AAAA,QACT,aAAa;AAAA,MACd;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SACC;AAAA,MACF;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,MACV;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aACC;AAAA,MACF;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aACC;AAAA,QACD,aAAa;AAAA,MACd;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,gBAAgB;AAAA,UACf,MAAM;AAAA,YACL,cAAc,CAAC,IAAI;AAAA,UACpB;AAAA,QACD;AAAA,QACA,SAAS,cAAc,KAAK,GAAG;AAAA,QAC/B,aAAa;AAAA,QACb,aAAa;AAAA,MACd;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS,qDAAqD,cAAc,KAAK,GAAG,IAAI;AAAA,MACzF;AAAA,IACD;AAAA;AACD;","names":[]}
@@ -14,6 +14,16 @@
  "lmChatAzureOpenAi"
  ]
  },
+ "azureEntraCognitiveServicesOAuth2Api": {
+ "className": "AzureEntraCognitiveServicesOAuth2Api",
+ "sourcePath": "dist/credentials/AzureEntraCognitiveServicesOAuth2Api.credentials.js",
+ "extends": [
+ "oAuth2Api"
+ ],
+ "supportedNodes": [
+ "lmChatAzureOpenAi"
+ ]
+ },
  "cohereApi": {
  "className": "CohereApi",
  "sourcePath": "dist/credentials/CohereApi.credentials.js",
@@ -65,6 +65,29 @@ const toolsAgentProperties = [
  type: "boolean",
  default: true,
  description: "Whether or not binary images should be automatically passed through to the agent as image type messages"
+ },
+ {
+ displayName: "Batch Processing",
+ name: "batching",
+ type: "collection",
+ description: "Batch processing options for rate limiting",
+ default: {},
+ options: [
+ {
+ displayName: "Batch Size",
+ name: "batchSize",
+ default: 1,
+ type: "number",
+ description: "How many items to process in parallel. This is useful for rate limiting, but will impact the ordering in the agents log output."
+ },
+ {
+ displayName: "Delay Between Batches",
+ name: "delayBetweenBatches",
+ default: 0,
+ type: "number",
+ description: "Delay in milliseconds between batches. This is useful for rate limiting."
+ }
+ ]
  }
  ]
  }
@@ -1 +1 @@
- {"version":3,"sources":["../../../../../../nodes/agents/Agent/agents/ToolsAgent/description.ts"],"sourcesContent":["import type { INodeProperties } from 'n8n-workflow';\n\nimport { SYSTEM_MESSAGE } from './prompt';\n\nexport const toolsAgentProperties: INodeProperties[] = [\n\t{\n\t\tdisplayName: 'Options',\n\t\tname: 'options',\n\t\ttype: 'collection',\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\tagent: ['toolsAgent'],\n\t\t\t},\n\t\t},\n\t\tdefault: {},\n\t\tplaceholder: 'Add Option',\n\t\toptions: [\n\t\t\t{\n\t\t\t\tdisplayName: 'System Message',\n\t\t\t\tname: 'systemMessage',\n\t\t\t\ttype: 'string',\n\t\t\t\tdefault: SYSTEM_MESSAGE,\n\t\t\t\tdescription: 'The message that will be sent to the agent before the conversation starts',\n\t\t\t\ttypeOptions: {\n\t\t\t\t\trows: 6,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Max Iterations',\n\t\t\t\tname: 'maxIterations',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: 10,\n\t\t\t\tdescription: 'The maximum number of iterations the agent will run before stopping',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Return Intermediate Steps',\n\t\t\t\tname: 'returnIntermediateSteps',\n\t\t\t\ttype: 'boolean',\n\t\t\t\tdefault: false,\n\t\t\t\tdescription: 'Whether or not the output should include intermediate steps the agent took',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Automatically Passthrough Binary Images',\n\t\t\t\tname: 'passthroughBinaryImages',\n\t\t\t\ttype: 'boolean',\n\t\t\t\tdefault: true,\n\t\t\t\tdescription:\n\t\t\t\t\t'Whether or not binary images should be automatically passed through to the agent as image type messages',\n\t\t\t},\n\t\t],\n\t},\n];\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAEA,oBAA+B;AAExB,MAAM,uBAA0C;AAAA,EACtD;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,OAAO,CAAC,YAAY;AAAA,MACrB;AAAA,IACD;AAAA,IACA,SAAS,CAAC;AAAA,IACV,aAAa;AAAA,IACb,SAAS;AAAA,MACR;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,QACb,aAAa;AAAA,UACZ,MAAM;AAAA,QACP;AAAA,MACD;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,MACd;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,MACd;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aACC;AAAA,MACF;AAAA,IACD;AAAA,EACD;AACD;","names":[]}
+ {"version":3,"sources":["../../../../../../nodes/agents/Agent/agents/ToolsAgent/description.ts"],"sourcesContent":["import type { INodeProperties } from 'n8n-workflow';\n\nimport { SYSTEM_MESSAGE } from './prompt';\n\nexport const toolsAgentProperties: INodeProperties[] = [\n\t{\n\t\tdisplayName: 'Options',\n\t\tname: 'options',\n\t\ttype: 'collection',\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\tagent: ['toolsAgent'],\n\t\t\t},\n\t\t},\n\t\tdefault: {},\n\t\tplaceholder: 'Add Option',\n\t\toptions: [\n\t\t\t{\n\t\t\t\tdisplayName: 'System Message',\n\t\t\t\tname: 'systemMessage',\n\t\t\t\ttype: 'string',\n\t\t\t\tdefault: SYSTEM_MESSAGE,\n\t\t\t\tdescription: 'The message that will be sent to the agent before the conversation starts',\n\t\t\t\ttypeOptions: {\n\t\t\t\t\trows: 6,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Max Iterations',\n\t\t\t\tname: 'maxIterations',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: 10,\n\t\t\t\tdescription: 'The maximum number of iterations the agent will run before stopping',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Return Intermediate Steps',\n\t\t\t\tname: 'returnIntermediateSteps',\n\t\t\t\ttype: 'boolean',\n\t\t\t\tdefault: false,\n\t\t\t\tdescription: 'Whether or not the output should include intermediate steps the agent took',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Automatically Passthrough Binary Images',\n\t\t\t\tname: 'passthroughBinaryImages',\n\t\t\t\ttype: 'boolean',\n\t\t\t\tdefault: true,\n\t\t\t\tdescription:\n\t\t\t\t\t'Whether or not binary images should be automatically passed through to the agent as image type messages',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Batch Processing',\n\t\t\t\tname: 'batching',\n\t\t\t\ttype: 'collection',\n\t\t\t\tdescription: 'Batch processing options for rate limiting',\n\t\t\t\tdefault: {},\n\t\t\t\toptions: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Batch Size',\n\t\t\t\t\t\tname: 'batchSize',\n\t\t\t\t\t\tdefault: 1,\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'How many items to process in parallel. This is useful for rate limiting, but will impact the ordering in the agents log output.',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Delay Between Batches',\n\t\t\t\t\t\tname: 'delayBetweenBatches',\n\t\t\t\t\t\tdefault: 0,\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t\tdescription: 'Delay in milliseconds between batches. This is useful for rate limiting.',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t],\n\t},\n];\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAEA,oBAA+B;AAExB,MAAM,uBAA0C;AAAA,EACtD;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,OAAO,CAAC,YAAY;AAAA,MACrB;AAAA,IACD;AAAA,IACA,SAAS,CAAC;AAAA,IACV,aAAa;AAAA,IACb,SAAS;AAAA,MACR;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,QACb,aAAa;AAAA,UACZ,MAAM;AAAA,QACP;AAAA,MACD;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,MACd;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,MACd;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aACC;AAAA,MACF;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,aAAa;AAAA,QACb,SAAS,CAAC;AAAA,QACV,SAAS;AAAA,UACR;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,SAAS;AAAA,YACT,MAAM;AAAA,YACN,aACC;AAAA,UACF;AAAA,UACA;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,SAAS;AAAA,YACT,MAAM;AAAA,YACN,aAAa;AAAA,UACd;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA,EACD;AACD;","names":[]}
@@ -208,11 +208,17 @@ async function toolsAgentExecute() {
  const returnData = [];
  const items = this.getInputData();
  const outputParser = await (0, import_N8nOutputParser.getOptionalOutputParser)(this);
+ const memory = await getOptionalMemory(this);
  const tools = await getTools(this, outputParser);
- for (let itemIndex = 0; itemIndex < items.length; itemIndex++) {
- try {
+ const { batchSize, delayBetweenBatches } = this.getNodeParameter("options.batching", 0, {
+ batchSize: 1,
+ delayBetweenBatches: 0
+ });
+ for (let i = 0; i < items.length; i += batchSize) {
+ const batch = items.slice(i, i + batchSize);
+ const batchPromises = batch.map(async (_item, batchItemIndex) => {
+ const itemIndex = i + batchItemIndex;
  const model = await getChatModel(this);
- const memory = await getOptionalMemory(this);
  const input = (0, import_helpers.getPromptInputByType)({
  ctx: this,
  i: itemIndex,
@@ -248,7 +254,7 @@ async function toolsAgentExecute() {
  returnIntermediateSteps: options.returnIntermediateSteps === true,
  maxIterations: options.maxIterations ?? 10
  });
- const response = await executor.invoke(
+ return await executor.invoke(
  {
  input,
  system_message: options.systemMessage ?? import_prompt.SYSTEM_MESSAGE,
@@ -256,6 +262,21 @@ async function toolsAgentExecute() {
  },
  { signal: this.getExecutionCancelSignal() }
  );
+ });
+ const batchResults = await Promise.allSettled(batchPromises);
+ batchResults.forEach((result, index) => {
+ if (result.status === "rejected") {
+ if (this.continueOnFail()) {
+ returnData.push({
+ json: { error: result.reason },
+ pairedItem: { item: index }
+ });
+ return;
+ } else {
+ throw new import_n8n_workflow.NodeOperationError(this.getNode(), result.reason);
+ }
+ }
+ const response = result.value;
  if (memory && outputParser) {
  const parsedOutput = (0, import_n8n_workflow.jsonParse)(
  response.output
@@ -273,15 +294,9 @@ async function toolsAgentExecute() {
  )
  };
  returnData.push(itemResult);
- } catch (error) {
- if (this.continueOnFail()) {
- returnData.push({
- json: { error: error.message },
- pairedItem: { item: itemIndex }
- });
- continue;
- }
- throw error;
+ });
+ if (i + batchSize < items.length && delayBetweenBatches > 0) {
+ await (0, import_n8n_workflow.sleep)(delayBetweenBatches);
  }
  }
  return [returnData];
@@ -1 +1 @@
- {"version":3,"sources":["../../../../../../nodes/agents/Agent/agents/ToolsAgent/execute.ts"],"sourcesContent":["import type { BaseChatMemory } from '@langchain/community/memory/chat_memory';\nimport type { BaseChatModel } from '@langchain/core/language_models/chat_models';\nimport { HumanMessage } from '@langchain/core/messages';\nimport type { BaseMessage } from '@langchain/core/messages';\nimport type { BaseMessagePromptTemplateLike } from '@langchain/core/prompts';\nimport { ChatPromptTemplate } from '@langchain/core/prompts';\nimport { RunnableSequence } from '@langchain/core/runnables';\nimport type { Tool } from '@langchain/core/tools';\nimport { DynamicStructuredTool } from '@langchain/core/tools';\nimport type { AgentAction, AgentFinish } from 'langchain/agents';\nimport { AgentExecutor, createToolCallingAgent } from 'langchain/agents';\nimport type { ToolsAgentAction } from 'langchain/dist/agents/tool_calling/output_parser';\nimport { omit } from 'lodash';\nimport { BINARY_ENCODING, jsonParse, NodeConnectionTypes, NodeOperationError } from 'n8n-workflow';\nimport type { IExecuteFunctions, INodeExecutionData } from 'n8n-workflow';\nimport type { ZodObject } from 'zod';\nimport { z } from 'zod';\n\nimport { isChatInstance, getPromptInputByType, getConnectedTools } from '@utils/helpers';\nimport {\n\tgetOptionalOutputParser,\n\ttype N8nOutputParser,\n} from '@utils/output_parsers/N8nOutputParser';\n\nimport { SYSTEM_MESSAGE } from './prompt';\n\n/* -----------------------------------------------------------\n Output Parser Helper\n----------------------------------------------------------- */\n/**\n * Retrieve the output parser schema.\n * If the parser does not return a valid schema, default to a schema with a single text field.\n */\nexport function getOutputParserSchema(\n\toutputParser: N8nOutputParser,\n\t// eslint-disable-next-line @typescript-eslint/no-explicit-any\n): ZodObject<any, any, any, any> {\n\tconst schema =\n\t\t// eslint-disable-next-line @typescript-eslint/no-explicit-any\n\t\t(outputParser.getSchema() as ZodObject<any, any, any, any>) ?? z.object({ text: z.string() });\n\treturn schema;\n}\n\n/* -----------------------------------------------------------\n Binary Data Helpers\n----------------------------------------------------------- */\n/**\n * Extracts binary image messages from the input data.\n * When operating in filesystem mode, the binary stream is first converted to a buffer.\n *\n * @param ctx - The execution context\n * @param itemIndex - The current item index\n * @returns A HumanMessage containing the binary image messages.\n */\nexport async function extractBinaryMessages(\n\tctx: IExecuteFunctions,\n\titemIndex: number,\n): Promise<HumanMessage> {\n\tconst binaryData = ctx.getInputData()?.[itemIndex]?.binary ?? {};\n\tconst binaryMessages = await Promise.all(\n\t\tObject.values(binaryData)\n\t\t\t.filter((data) => data.mimeType.startsWith('image/'))\n\t\t\t.map(async (data) => {\n\t\t\t\tlet binaryUrlString: string;\n\n\t\t\t\t// In filesystem mode we need to get binary stream by id before converting it to buffer\n\t\t\t\tif (data.id) {\n\t\t\t\t\tconst binaryBuffer = await ctx.helpers.binaryToBuffer(\n\t\t\t\t\t\tawait ctx.helpers.getBinaryStream(data.id),\n\t\t\t\t\t);\n\t\t\t\t\tbinaryUrlString = `data:${data.mimeType};base64,${Buffer.from(binaryBuffer).toString(\n\t\t\t\t\t\tBINARY_ENCODING,\n\t\t\t\t\t)}`;\n\t\t\t\t} else {\n\t\t\t\t\tbinaryUrlString = data.data.includes('base64')\n\t\t\t\t\t\t? 
data.data\n\t\t\t\t\t\t: `data:${data.mimeType};base64,${data.data}`;\n\t\t\t\t}\n\n\t\t\t\treturn {\n\t\t\t\t\ttype: 'image_url',\n\t\t\t\t\timage_url: {\n\t\t\t\t\t\turl: binaryUrlString,\n\t\t\t\t\t},\n\t\t\t\t};\n\t\t\t}),\n\t);\n\treturn new HumanMessage({\n\t\tcontent: [...binaryMessages],\n\t});\n}\n\n/* -----------------------------------------------------------\n Agent Output Format Helpers\n----------------------------------------------------------- */\n/**\n * Fixes empty content messages in agent steps.\n *\n * This function is necessary when using RunnableSequence.from in LangChain.\n * If a tool doesn't have any arguments, LangChain returns input: '' (empty string).\n * This can throw an error for some providers (like Anthropic) which expect the input to always be an object.\n * This function replaces empty string inputs with empty objects to prevent such errors.\n *\n * @param steps - The agent steps to fix\n * @returns The fixed agent steps\n */\nexport function fixEmptyContentMessage(\n\tsteps: AgentFinish | ToolsAgentAction[],\n): AgentFinish | ToolsAgentAction[] {\n\tif (!Array.isArray(steps)) return steps;\n\n\tsteps.forEach((step) => {\n\t\tif ('messageLog' in step && step.messageLog !== undefined) {\n\t\t\tif (Array.isArray(step.messageLog)) {\n\t\t\t\tstep.messageLog.forEach((message: BaseMessage) => {\n\t\t\t\t\tif ('content' in message && Array.isArray(message.content)) {\n\t\t\t\t\t\t// eslint-disable-next-line @typescript-eslint/no-unsafe-member-access\n\t\t\t\t\t\t(message.content as Array<{ input?: string | object }>).forEach((content) => {\n\t\t\t\t\t\t\tif (content.input === '') {\n\t\t\t\t\t\t\t\tcontent.input = {};\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t});\n\n\treturn steps;\n}\n\n/**\n * Ensures consistent handling of outputs regardless of the model used,\n * providing a unified output format for further processing.\n *\n * This method is necessary to handle different output formats from various language models.\n * Specifically, it checks if the agent step is the final step (contains returnValues) and determines\n * if the output is a simple string (e.g., from OpenAI models) or an array of outputs (e.g., from Anthropic models).\n *\n * Examples:\n * 1. Anthropic model output:\n * ```json\n * {\n * \"output\": [\n * {\n * \"index\": 0,\n * \"type\": \"text\",\n * \"text\": \"The result of the calculation is approximately 1001.8166...\"\n * }\n * ]\n * }\n *```\n * 2. 
OpenAI model output:\n * ```json\n * {\n * \"output\": \"The result of the calculation is approximately 1001.82...\"\n * }\n * ```\n *\n * @param steps - The agent finish or agent action steps.\n * @returns The modified agent finish steps or the original steps.\n */\nexport function handleAgentFinishOutput(\n\tsteps: AgentFinish | AgentAction[],\n): AgentFinish | AgentAction[] {\n\ttype AgentMultiOutputFinish = AgentFinish & {\n\t\treturnValues: { output: Array<{ text: string; type: string; index: number }> };\n\t};\n\tconst agentFinishSteps = steps as AgentMultiOutputFinish | AgentFinish;\n\n\tif (agentFinishSteps.returnValues) {\n\t\tconst isMultiOutput = Array.isArray(agentFinishSteps.returnValues?.output);\n\t\tif (isMultiOutput) {\n\t\t\t// If all items in the multi-output array are of type 'text', merge them into a single string\n\t\t\tconst multiOutputSteps = agentFinishSteps.returnValues.output as Array<{\n\t\t\t\tindex: number;\n\t\t\t\ttype: string;\n\t\t\t\ttext: string;\n\t\t\t}>;\n\t\t\tconst isTextOnly = multiOutputSteps.every((output) => 'text' in output);\n\t\t\tif (isTextOnly) {\n\t\t\t\tagentFinishSteps.returnValues.output = multiOutputSteps\n\t\t\t\t\t.map((output) => output.text)\n\t\t\t\t\t.join('\\n')\n\t\t\t\t\t.trim();\n\t\t\t}\n\t\t\treturn agentFinishSteps;\n\t\t}\n\t}\n\n\treturn agentFinishSteps;\n}\n\n/**\n * Wraps the parsed output so that it can be stored in memory.\n * If memory is connected, the output is stringified.\n *\n * @param output - The parsed output object\n * @param memory - The connected memory (if any)\n * @returns The formatted output object\n */\nexport function handleParsedStepOutput(\n\toutput: Record<string, unknown>,\n\tmemory?: BaseChatMemory,\n): { returnValues: Record<string, unknown>; log: string } {\n\treturn {\n\t\treturnValues: memory ? { output: JSON.stringify(output) } : output,\n\t\tlog: 'Final response formatted',\n\t};\n}\n\n/**\n * Parses agent steps using the provided output parser.\n * If the agent used the 'format_final_json_response' tool, the output is parsed accordingly.\n *\n * @param steps - The agent finish or action steps\n * @param outputParser - The output parser (if defined)\n * @param memory - The connected memory (if any)\n * @returns The parsed steps with the final output\n */\nexport const getAgentStepsParser =\n\t(outputParser?: N8nOutputParser, memory?: BaseChatMemory) =>\n\tasync (steps: AgentFinish | AgentAction[]): Promise<AgentFinish | AgentAction[]> => {\n\t\t// Check if the steps contain the 'format_final_json_response' tool invocation.\n\t\tif (Array.isArray(steps)) {\n\t\t\tconst responseParserTool = steps.find((step) => step.tool === 'format_final_json_response');\n\t\t\tif (responseParserTool && outputParser) {\n\t\t\t\tconst toolInput = responseParserTool.toolInput;\n\t\t\t\t// Ensure the tool input is a string\n\t\t\t\tconst parserInput = toolInput instanceof Object ? 
JSON.stringify(toolInput) : toolInput;\n\t\t\t\tconst returnValues = (await outputParser.parse(parserInput)) as Record<string, unknown>;\n\t\t\t\treturn handleParsedStepOutput(returnValues, memory);\n\t\t\t}\n\t\t}\n\n\t\t// Otherwise, if the steps contain a returnValues field, try to parse them manually.\n\t\tif (outputParser && typeof steps === 'object' && (steps as AgentFinish).returnValues) {\n\t\t\tconst finalResponse = (steps as AgentFinish).returnValues;\n\t\t\tlet parserInput: string;\n\n\t\t\tif (finalResponse instanceof Object) {\n\t\t\t\tif ('output' in finalResponse) {\n\t\t\t\t\ttry {\n\t\t\t\t\t\t// If the output is an object, we will try to parse it as JSON\n\t\t\t\t\t\t// this is because parser expects stringified JSON object like { \"output\": { .... } }\n\t\t\t\t\t\t// so we try to parse the output before wrapping it and then stringify it\n\t\t\t\t\t\tparserInput = JSON.stringify({ output: jsonParse(finalResponse.output) });\n\t\t\t\t\t} catch (error) {\n\t\t\t\t\t\t// Fallback to the raw output if parsing fails.\n\t\t\t\t\t\tparserInput = finalResponse.output;\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// If the output is not an object, we will stringify it as it is\n\t\t\t\t\tparserInput = JSON.stringify(finalResponse);\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tparserInput = finalResponse;\n\t\t\t}\n\n\t\t\tconst returnValues = (await outputParser.parse(parserInput)) as Record<string, unknown>;\n\t\t\treturn handleParsedStepOutput(returnValues, memory);\n\t\t}\n\n\t\treturn handleAgentFinishOutput(steps);\n\t};\n\n/* -----------------------------------------------------------\n Agent Setup Helpers\n----------------------------------------------------------- */\n/**\n * Retrieves the language model from the input connection.\n * Throws an error if the model is not a valid chat instance or does not support tools.\n *\n * @param ctx - The execution context\n * @returns The validated chat model\n */\nexport async function getChatModel(ctx: IExecuteFunctions): Promise<BaseChatModel> {\n\tconst model = await ctx.getInputConnectionData(NodeConnectionTypes.AiLanguageModel, 0);\n\tif (!isChatInstance(model) || !model.bindTools) {\n\t\tthrow new NodeOperationError(\n\t\t\tctx.getNode(),\n\t\t\t'Tools Agent requires Chat Model which supports Tools calling',\n\t\t);\n\t}\n\treturn model;\n}\n\n/**\n * Retrieves the memory instance from the input connection if it is connected\n *\n * @param ctx - The execution context\n * @returns The connected memory (if any)\n */\nexport async function getOptionalMemory(\n\tctx: IExecuteFunctions,\n): Promise<BaseChatMemory | undefined> {\n\treturn (await ctx.getInputConnectionData(NodeConnectionTypes.AiMemory, 0)) as\n\t\t| BaseChatMemory\n\t\t| undefined;\n}\n\n/**\n * Retrieves the connected tools and (if an output parser is defined)\n * appends a structured output parser tool.\n *\n * @param ctx - The execution context\n * @param outputParser - The optional output parser\n * @returns The array of connected tools\n */\nexport async function getTools(\n\tctx: IExecuteFunctions,\n\toutputParser?: N8nOutputParser,\n): Promise<Array<DynamicStructuredTool | Tool>> {\n\tconst tools = (await getConnectedTools(ctx, true, false)) as Array<DynamicStructuredTool | Tool>;\n\n\t// If an output parser is available, create a dynamic tool to validate the final output.\n\tif (outputParser) {\n\t\tconst schema = getOutputParserSchema(outputParser);\n\t\tconst structuredOutputParserTool = new DynamicStructuredTool({\n\t\t\tschema,\n\t\t\tname: 
'format_final_json_response',\n\t\t\tdescription:\n\t\t\t\t'Use this tool to format your final response to the user in a structured JSON format. This tool validates your output against a schema to ensure it meets the required format. ONLY use this tool when you have completed all necessary reasoning and are ready to provide your final answer. Do not use this tool for intermediate steps or for asking questions. The output from this tool will be directly returned to the user.',\n\t\t\t// We do not use a function here because we intercept the output with the parser.\n\t\t\tfunc: async () => '',\n\t\t});\n\t\ttools.push(structuredOutputParserTool);\n\t}\n\treturn tools;\n}\n\n/**\n * Prepares the prompt messages for the agent.\n *\n * @param ctx - The execution context\n * @param itemIndex - The current item index\n * @param options - Options containing systemMessage and other parameters\n * @returns The array of prompt messages\n */\nexport async function prepareMessages(\n\tctx: IExecuteFunctions,\n\titemIndex: number,\n\toptions: {\n\t\tsystemMessage?: string;\n\t\tpassthroughBinaryImages?: boolean;\n\t\toutputParser?: N8nOutputParser;\n\t},\n): Promise<BaseMessagePromptTemplateLike[]> {\n\tconst useSystemMessage = options.systemMessage ?? ctx.getNode().typeVersion < 1.9;\n\n\tconst messages: BaseMessagePromptTemplateLike[] = [];\n\n\tif (useSystemMessage) {\n\t\tmessages.push([\n\t\t\t'system',\n\t\t\t`{system_message}${options.outputParser ? '\\n\\n{formatting_instructions}' : ''}`,\n\t\t]);\n\t} else if (options.outputParser) {\n\t\tmessages.push(['system', '{formatting_instructions}']);\n\t}\n\n\tmessages.push(['placeholder', '{chat_history}'], ['human', '{input}']);\n\n\t// If there is binary data and the node option permits it, add a binary message\n\tconst hasBinaryData = ctx.getInputData()?.[itemIndex]?.binary !== undefined;\n\tif (hasBinaryData && options.passthroughBinaryImages) {\n\t\tconst binaryMessage = await extractBinaryMessages(ctx, itemIndex);\n\t\tif (binaryMessage.content.length !== 0) {\n\t\t\tmessages.push(binaryMessage);\n\t\t} else {\n\t\t\tctx.logger.debug('Not attaching binary message, since its content was empty');\n\t\t}\n\t}\n\n\t// We add the agent scratchpad last, so that the agent will not run in loops\n\t// by adding binary messages between each interaction\n\tmessages.push(['placeholder', '{agent_scratchpad}']);\n\treturn messages;\n}\n\n/**\n * Creates the chat prompt from messages.\n *\n * @param messages - The messages array\n * @returns The ChatPromptTemplate instance\n */\nexport function preparePrompt(messages: BaseMessagePromptTemplateLike[]): ChatPromptTemplate {\n\treturn ChatPromptTemplate.fromMessages(messages);\n}\n\n/* -----------------------------------------------------------\n Main Executor Function\n----------------------------------------------------------- */\n/**\n * The main executor method for the Tools Agent.\n *\n * This function retrieves necessary components (model, memory, tools), prepares the prompt,\n * creates the agent, and processes each input item. 
The error handling for each item is also\n * managed here based on the node's continueOnFail setting.\n *\n * @returns The array of execution data for all processed items\n */\nexport async function toolsAgentExecute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> {\n\tthis.logger.debug('Executing Tools Agent');\n\n\tconst returnData: INodeExecutionData[] = [];\n\tconst items = this.getInputData();\n\tconst outputParser = await getOptionalOutputParser(this);\n\tconst tools = await getTools(this, outputParser);\n\n\tfor (let itemIndex = 0; itemIndex < items.length; itemIndex++) {\n\t\ttry {\n\t\t\tconst model = await getChatModel(this);\n\t\t\tconst memory = await getOptionalMemory(this);\n\n\t\t\tconst input = getPromptInputByType({\n\t\t\t\tctx: this,\n\t\t\t\ti: itemIndex,\n\t\t\t\tinputKey: 'text',\n\t\t\t\tpromptTypeKey: 'promptType',\n\t\t\t});\n\t\t\tif (input === undefined) {\n\t\t\t\tthrow new NodeOperationError(this.getNode(), 'The “text” parameter is empty.');\n\t\t\t}\n\n\t\t\tconst options = this.getNodeParameter('options', itemIndex, {}) as {\n\t\t\t\tsystemMessage?: string;\n\t\t\t\tmaxIterations?: number;\n\t\t\t\treturnIntermediateSteps?: boolean;\n\t\t\t\tpassthroughBinaryImages?: boolean;\n\t\t\t};\n\n\t\t\t// Prepare the prompt messages and prompt template.\n\t\t\tconst messages = await prepareMessages(this, itemIndex, {\n\t\t\t\tsystemMessage: options.systemMessage,\n\t\t\t\tpassthroughBinaryImages: options.passthroughBinaryImages ?? true,\n\t\t\t\toutputParser,\n\t\t\t});\n\t\t\tconst prompt = preparePrompt(messages);\n\n\t\t\t// Create the base agent that calls tools.\n\t\t\tconst agent = createToolCallingAgent({\n\t\t\t\tllm: model,\n\t\t\t\ttools,\n\t\t\t\tprompt,\n\t\t\t\tstreamRunnable: false,\n\t\t\t});\n\t\t\tagent.streamRunnable = false;\n\t\t\t// Wrap the agent with parsers and fixes.\n\t\t\tconst runnableAgent = RunnableSequence.from([\n\t\t\t\tagent,\n\t\t\t\tgetAgentStepsParser(outputParser, memory),\n\t\t\t\tfixEmptyContentMessage,\n\t\t\t]);\n\t\t\tconst executor = AgentExecutor.fromAgentAndTools({\n\t\t\t\tagent: runnableAgent,\n\t\t\t\tmemory,\n\t\t\t\ttools,\n\t\t\t\treturnIntermediateSteps: options.returnIntermediateSteps === true,\n\t\t\t\tmaxIterations: options.maxIterations ?? 10,\n\t\t\t});\n\n\t\t\t// Invoke the executor with the given input and system message.\n\t\t\tconst response = await executor.invoke(\n\t\t\t\t{\n\t\t\t\t\tinput,\n\t\t\t\t\tsystem_message: options.systemMessage ?? SYSTEM_MESSAGE,\n\t\t\t\t\tformatting_instructions:\n\t\t\t\t\t\t'IMPORTANT: For your response to user, you MUST use the `format_final_json_response` tool with your complete answer formatted according to the required schema. Do not attempt to format the JSON manually - always use this tool. Your response will be rejected if it is not properly formatted through this tool. Only use this tool once you are ready to provide your final answer.',\n\t\t\t\t},\n\t\t\t\t{ signal: this.getExecutionCancelSignal() },\n\t\t\t);\n\n\t\t\t// If memory and outputParser are connected, parse the output.\n\t\t\tif (memory && outputParser) {\n\t\t\t\tconst parsedOutput = jsonParse<{ output: Record<string, unknown> }>(\n\t\t\t\t\tresponse.output as string,\n\t\t\t\t);\n\t\t\t\tresponse.output = parsedOutput?.output ?? 
parsedOutput;\n\t\t\t}\n\n\t\t\t// Omit internal keys before returning the result.\n\t\t\tconst itemResult = {\n\t\t\t\tjson: omit(\n\t\t\t\t\tresponse,\n\t\t\t\t\t'system_message',\n\t\t\t\t\t'formatting_instructions',\n\t\t\t\t\t'input',\n\t\t\t\t\t'chat_history',\n\t\t\t\t\t'agent_scratchpad',\n\t\t\t\t),\n\t\t\t};\n\n\t\t\treturnData.push(itemResult);\n\t\t} catch (error) {\n\t\t\tif (this.continueOnFail()) {\n\t\t\t\treturnData.push({\n\t\t\t\t\tjson: { error: error.message },\n\t\t\t\t\tpairedItem: { item: itemIndex },\n\t\t\t\t});\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tthrow error;\n\t\t}\n\t}\n\n\treturn [returnData];\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAEA,sBAA6B;AAG7B,qBAAmC;AACnC,uBAAiC;AAEjC,mBAAsC;AAEtC,oBAAsD;AAEtD,oBAAqB;AACrB,0BAAoF;AAGpF,iBAAkB;AAElB,qBAAwE;AACxE,6BAGO;AAEP,oBAA+B;AASxB,SAAS,sBACf,cAEgC;AAChC,QAAM;AAAA;AAAA,IAEJ,aAAa,UAAU,KAAuC,aAAE,OAAO,EAAE,MAAM,aAAE,OAAO,EAAE,CAAC;AAAA;AAC7F,SAAO;AACR;AAaA,eAAsB,sBACrB,KACA,WACwB;AACxB,QAAM,aAAa,IAAI,aAAa,IAAI,SAAS,GAAG,UAAU,CAAC;AAC/D,QAAM,iBAAiB,MAAM,QAAQ;AAAA,IACpC,OAAO,OAAO,UAAU,EACtB,OAAO,CAAC,SAAS,KAAK,SAAS,WAAW,QAAQ,CAAC,EACnD,IAAI,OAAO,SAAS;AACpB,UAAI;AAGJ,UAAI,KAAK,IAAI;AACZ,cAAM,eAAe,MAAM,IAAI,QAAQ;AAAA,UACtC,MAAM,IAAI,QAAQ,gBAAgB,KAAK,EAAE;AAAA,QAC1C;AACA,0BAAkB,QAAQ,KAAK,QAAQ,WAAW,OAAO,KAAK,YAAY,EAAE;AAAA,UAC3E;AAAA,QACD,CAAC;AAAA,MACF,OAAO;AACN,0BAAkB,KAAK,KAAK,SAAS,QAAQ,IAC1C,KAAK,OACL,QAAQ,KAAK,QAAQ,WAAW,KAAK,IAAI;AAAA,MAC7C;AAEA,aAAO;AAAA,QACN,MAAM;AAAA,QACN,WAAW;AAAA,UACV,KAAK;AAAA,QACN;AAAA,MACD;AAAA,IACD,CAAC;AAAA,EACH;AACA,SAAO,IAAI,6BAAa;AAAA,IACvB,SAAS,CAAC,GAAG,cAAc;AAAA,EAC5B,CAAC;AACF;AAgBO,SAAS,uBACf,OACmC;AACnC,MAAI,CAAC,MAAM,QAAQ,KAAK,EAAG,QAAO;AAElC,QAAM,QAAQ,CAAC,SAAS;AACvB,QAAI,gBAAgB,QAAQ,KAAK,eAAe,QAAW;AAC1D,UAAI,MAAM,QAAQ,KAAK,UAAU,GAAG;AACnC,aAAK,WAAW,QAAQ,CAAC,YAAyB;AACjD,cAAI,aAAa,WAAW,MAAM,QAAQ,QAAQ,OAAO,GAAG;AAE3D,YAAC,QAAQ,QAA+C,QAAQ,CAAC,YAAY;AAC5E,kBAAI,QAAQ,UAAU,IAAI;AACzB,wBAAQ,QAAQ,CAAC;AAAA,cAClB;AAAA,YACD,CAAC;AAAA,UACF;AAAA,QACD,CAAC;AAAA,MACF;AAAA,IACD;AAAA,EACD,CAAC;AAED,SAAO;AACR;AAiCO,SAAS,wBACf,OAC8B;AAI9B,QAAM,mBAAmB;AAEzB,MAAI,iBAAiB,cAAc;AAClC,UAAM,gBAAgB,MAAM,QAAQ,iBAAiB,cAAc,MAAM;AACzE,QAAI,eAAe;AAElB,YAAM,mBAAmB,iBAAiB,aAAa;AAKvD,YAAM,aAAa,iBAAiB,MAAM,CAAC,WAAW,UAAU,MAAM;AACtE,UAAI,YAAY;AACf,yBAAiB,aAAa,SAAS,iBACrC,IAAI,CAAC,WAAW,OAAO,IAAI,EAC3B,KAAK,IAAI,EACT,KAAK;AAAA,MACR;AACA,aAAO;AAAA,IACR;AAAA,EACD;AAEA,SAAO;AACR;AAUO,SAAS,uBACf,QACA,QACyD;AACzD,SAAO;AAAA,IACN,cAAc,SAAS,EAAE,QAAQ,KAAK,UAAU,MAAM,EAAE,IAAI;AAAA,IAC5D,KAAK;AAAA,EACN;AACD;AAWO,MAAM,sBACZ,CAAC,cAAgC,WACjC,OAAO,UAA6E;AAEnF,MAAI,MAAM,QAAQ,KAAK,GAAG;AACzB,UAAM,qBAAqB,MAAM,KAAK,CAAC,SAAS,KAAK,SAAS,4BAA4B;AAC1F,QAAI,sBAAsB,cAAc;AACvC,YAAM,YAAY,mBAAmB;AAErC,YAAM,cAAc,qBAAqB,SAAS,KAAK,UAAU,SAAS,IAAI;AAC9E,YAAM,eAAgB,MAAM,aAAa,MAAM,WAAW;AAC1D,aAAO,uBAAuB,cAAc,MAAM;AAAA,IACnD;AAAA,EACD;AAGA,MAAI,gBAAgB,OAAO,UAAU,YAAa,MAAsB,cAAc;AACrF,UAAM,gBAAiB,MAAsB;AAC7C,QAAI;AAEJ,QAAI,yBAAyB,QAAQ;AACpC,UAAI,YAAY,eAAe;AAC9B,YAAI;AAIH,wBAAc,KAAK,UAAU,EAAE,YAAQ,+BAAU,cAAc,MAAM,EAAE,CAAC;AAAA,QACzE,SAAS,OAAO;AAEf,wBAAc,cAAc;AAAA,QAC7B;AAAA,MACD,OAAO;AAEN,sBAAc,KAAK,UAAU,aAAa;AAAA,MAC3C;AAAA,IACD,OAAO;AACN,oBAAc;AAAA,IACf;AAEA,UAAM,eAAgB,MAAM,aAAa,MAAM,WAAW;AAC1D,WAAO,uBAAuB,cAAc,MAAM;AAAA,EACnD;AAEA,SAAO,wBAAwB,KAAK;AACrC;AAYD,eAAsB,aAAa,KAAgD;AAClF,QAAM,QAAQ,MAAM,IAAI,uBAAuB,wCAAoB,iBAAiB,CAAC;AACrF,MAAI,KAAC,+BAAe,KAAK,KAAK,CAAC,MAAM,WAAW;AAC/C,UAAM,IAAI;AAAA,MACT,IAAI,QAAQ;AAAA,MACZ;AAAA,IACD;AAAA,EACD;AACA,SAAO;AACR;AAQA,eAAsB,kBACrB,KACsC;
AACtC,SAAQ,MAAM,IAAI,uBAAuB,wCAAoB,UAAU,CAAC;AAGzE;AAUA,eAAsB,SACrB,KACA,cAC+C;AAC/C,QAAM,QAAS,UAAM,kCAAkB,KAAK,MAAM,KAAK;AAGvD,MAAI,cAAc;AACjB,UAAM,SAAS,sBAAsB,YAAY;AACjD,UAAM,6BAA6B,IAAI,mCAAsB;AAAA,MAC5D;AAAA,MACA,MAAM;AAAA,MACN,aACC;AAAA;AAAA,MAED,MAAM,YAAY;AAAA,IACnB,CAAC;AACD,UAAM,KAAK,0BAA0B;AAAA,EACtC;AACA,SAAO;AACR;AAUA,eAAsB,gBACrB,KACA,WACA,SAK2C;AAC3C,QAAM,mBAAmB,QAAQ,iBAAiB,IAAI,QAAQ,EAAE,cAAc;AAE9E,QAAM,WAA4C,CAAC;AAEnD,MAAI,kBAAkB;AACrB,aAAS,KAAK;AAAA,MACb;AAAA,MACA,mBAAmB,QAAQ,eAAe,kCAAkC,EAAE;AAAA,IAC/E,CAAC;AAAA,EACF,WAAW,QAAQ,cAAc;AAChC,aAAS,KAAK,CAAC,UAAU,2BAA2B,CAAC;AAAA,EACtD;AAEA,WAAS,KAAK,CAAC,eAAe,gBAAgB,GAAG,CAAC,SAAS,SAAS,CAAC;AAGrE,QAAM,gBAAgB,IAAI,aAAa,IAAI,SAAS,GAAG,WAAW;AAClE,MAAI,iBAAiB,QAAQ,yBAAyB;AACrD,UAAM,gBAAgB,MAAM,sBAAsB,KAAK,SAAS;AAChE,QAAI,cAAc,QAAQ,WAAW,GAAG;AACvC,eAAS,KAAK,aAAa;AAAA,IAC5B,OAAO;AACN,UAAI,OAAO,MAAM,2DAA2D;AAAA,IAC7E;AAAA,EACD;AAIA,WAAS,KAAK,CAAC,eAAe,oBAAoB,CAAC;AACnD,SAAO;AACR;AAQO,SAAS,cAAc,UAA+D;AAC5F,SAAO,kCAAmB,aAAa,QAAQ;AAChD;AAcA,eAAsB,oBAA4E;AACjG,OAAK,OAAO,MAAM,uBAAuB;AAEzC,QAAM,aAAmC,CAAC;AAC1C,QAAM,QAAQ,KAAK,aAAa;AAChC,QAAM,eAAe,UAAM,gDAAwB,IAAI;AACvD,QAAM,QAAQ,MAAM,SAAS,MAAM,YAAY;AAE/C,WAAS,YAAY,GAAG,YAAY,MAAM,QAAQ,aAAa;AAC9D,QAAI;AACH,YAAM,QAAQ,MAAM,aAAa,IAAI;AACrC,YAAM,SAAS,MAAM,kBAAkB,IAAI;AAE3C,YAAM,YAAQ,qCAAqB;AAAA,QAClC,KAAK;AAAA,QACL,GAAG;AAAA,QACH,UAAU;AAAA,QACV,eAAe;AAAA,MAChB,CAAC;AACD,UAAI,UAAU,QAAW;AACxB,cAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,0CAAgC;AAAA,MAC9E;AAEA,YAAM,UAAU,KAAK,iBAAiB,WAAW,WAAW,CAAC,CAAC;AAQ9D,YAAM,WAAW,MAAM,gBAAgB,MAAM,WAAW;AAAA,QACvD,eAAe,QAAQ;AAAA,QACvB,yBAAyB,QAAQ,2BAA2B;AAAA,QAC5D;AAAA,MACD,CAAC;AACD,YAAM,SAAS,cAAc,QAAQ;AAGrC,YAAM,YAAQ,sCAAuB;AAAA,QACpC,KAAK;AAAA,QACL;AAAA,QACA;AAAA,QACA,gBAAgB;AAAA,MACjB,CAAC;AACD,YAAM,iBAAiB;AAEvB,YAAM,gBAAgB,kCAAiB,KAAK;AAAA,QAC3C;AAAA,QACA,oBAAoB,cAAc,MAAM;AAAA,QACxC;AAAA,MACD,CAAC;AACD,YAAM,WAAW,4BAAc,kBAAkB;AAAA,QAChD,OAAO;AAAA,QACP;AAAA,QACA;AAAA,QACA,yBAAyB,QAAQ,4BAA4B;AAAA,QAC7D,eAAe,QAAQ,iBAAiB;AAAA,MACzC,CAAC;AAGD,YAAM,WAAW,MAAM,SAAS;AAAA,QAC/B;AAAA,UACC;AAAA,UACA,gBAAgB,QAAQ,iBAAiB;AAAA,UACzC,yBACC;AAAA,QACF;AAAA,QACA,EAAE,QAAQ,KAAK,yBAAyB,EAAE;AAAA,MAC3C;AAGA,UAAI,UAAU,cAAc;AAC3B,cAAM,mBAAe;AAAA,UACpB,SAAS;AAAA,QACV;AACA,iBAAS,SAAS,cAAc,UAAU;AAAA,MAC3C;AAGA,YAAM,aAAa;AAAA,QAClB,UAAM;AAAA,UACL;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,QACD;AAAA,MACD;AAEA,iBAAW,KAAK,UAAU;AAAA,IAC3B,SAAS,OAAO;AACf,UAAI,KAAK,eAAe,GAAG;AAC1B,mBAAW,KAAK;AAAA,UACf,MAAM,EAAE,OAAO,MAAM,QAAQ;AAAA,UAC7B,YAAY,EAAE,MAAM,UAAU;AAAA,QAC/B,CAAC;AACD;AAAA,MACD;AACA,YAAM;AAAA,IACP;AAAA,EACD;AAEA,SAAO,CAAC,UAAU;AACnB;","names":[]}
+ {"version":3,"sources":["../../../../../../nodes/agents/Agent/agents/ToolsAgent/execute.ts"],"sourcesContent":["import type { BaseChatMemory } from '@langchain/community/memory/chat_memory';\nimport type { BaseChatModel } from '@langchain/core/language_models/chat_models';\nimport { HumanMessage } from '@langchain/core/messages';\nimport type { BaseMessage } from '@langchain/core/messages';\nimport type { BaseMessagePromptTemplateLike } from '@langchain/core/prompts';\nimport { ChatPromptTemplate } from '@langchain/core/prompts';\nimport { RunnableSequence } from '@langchain/core/runnables';\nimport type { Tool } from '@langchain/core/tools';\nimport { DynamicStructuredTool } from '@langchain/core/tools';\nimport type { AgentAction, AgentFinish } from 'langchain/agents';\nimport { AgentExecutor, createToolCallingAgent } from 'langchain/agents';\nimport type { ToolsAgentAction } from 'langchain/dist/agents/tool_calling/output_parser';\nimport { omit } from 'lodash';\nimport {\n\tBINARY_ENCODING,\n\tjsonParse,\n\tNodeConnectionTypes,\n\tNodeOperationError,\n\tsleep,\n} from 'n8n-workflow';\nimport type { IExecuteFunctions, INodeExecutionData } from 'n8n-workflow';\nimport type { ZodObject } from 'zod';\nimport { z } from 'zod';\n\nimport { isChatInstance, getPromptInputByType, getConnectedTools } from '@utils/helpers';\nimport {\n\tgetOptionalOutputParser,\n\ttype N8nOutputParser,\n} from '@utils/output_parsers/N8nOutputParser';\n\nimport { SYSTEM_MESSAGE } from './prompt';\n\n/* -----------------------------------------------------------\n Output Parser Helper\n----------------------------------------------------------- */\n/**\n * Retrieve the output parser schema.\n * If the parser does not return a valid schema, default to a schema with a single text field.\n */\nexport function getOutputParserSchema(\n\toutputParser: N8nOutputParser,\n\t// eslint-disable-next-line @typescript-eslint/no-explicit-any\n): ZodObject<any, any, any, any> {\n\tconst schema =\n\t\t// eslint-disable-next-line @typescript-eslint/no-explicit-any\n\t\t(outputParser.getSchema() as ZodObject<any, any, any, any>) ?? z.object({ text: z.string() });\n\treturn schema;\n}\n\n/* -----------------------------------------------------------\n Binary Data Helpers\n----------------------------------------------------------- */\n/**\n * Extracts binary image messages from the input data.\n * When operating in filesystem mode, the binary stream is first converted to a buffer.\n *\n * @param ctx - The execution context\n * @param itemIndex - The current item index\n * @returns A HumanMessage containing the binary image messages.\n */\nexport async function extractBinaryMessages(\n\tctx: IExecuteFunctions,\n\titemIndex: number,\n): Promise<HumanMessage> {\n\tconst binaryData = ctx.getInputData()?.[itemIndex]?.binary ?? {};\n\tconst binaryMessages = await Promise.all(\n\t\tObject.values(binaryData)\n\t\t\t.filter((data) => data.mimeType.startsWith('image/'))\n\t\t\t.map(async (data) => {\n\t\t\t\tlet binaryUrlString: string;\n\n\t\t\t\t// In filesystem mode we need to get binary stream by id before converting it to buffer\n\t\t\t\tif (data.id) {\n\t\t\t\t\tconst binaryBuffer = await ctx.helpers.binaryToBuffer(\n\t\t\t\t\t\tawait ctx.helpers.getBinaryStream(data.id),\n\t\t\t\t\t);\n\t\t\t\t\tbinaryUrlString = `data:${data.mimeType};base64,${Buffer.from(binaryBuffer).toString(\n\t\t\t\t\t\tBINARY_ENCODING,\n\t\t\t\t\t)}`;\n\t\t\t\t} else {\n\t\t\t\t\tbinaryUrlString = data.data.includes('base64')\n\t\t\t\t\t\t? 
data.data\n\t\t\t\t\t\t: `data:${data.mimeType};base64,${data.data}`;\n\t\t\t\t}\n\n\t\t\t\treturn {\n\t\t\t\t\ttype: 'image_url',\n\t\t\t\t\timage_url: {\n\t\t\t\t\t\turl: binaryUrlString,\n\t\t\t\t\t},\n\t\t\t\t};\n\t\t\t}),\n\t);\n\treturn new HumanMessage({\n\t\tcontent: [...binaryMessages],\n\t});\n}\n\n/* -----------------------------------------------------------\n Agent Output Format Helpers\n----------------------------------------------------------- */\n/**\n * Fixes empty content messages in agent steps.\n *\n * This function is necessary when using RunnableSequence.from in LangChain.\n * If a tool doesn't have any arguments, LangChain returns input: '' (empty string).\n * This can throw an error for some providers (like Anthropic) which expect the input to always be an object.\n * This function replaces empty string inputs with empty objects to prevent such errors.\n *\n * @param steps - The agent steps to fix\n * @returns The fixed agent steps\n */\nexport function fixEmptyContentMessage(\n\tsteps: AgentFinish | ToolsAgentAction[],\n): AgentFinish | ToolsAgentAction[] {\n\tif (!Array.isArray(steps)) return steps;\n\n\tsteps.forEach((step) => {\n\t\tif ('messageLog' in step && step.messageLog !== undefined) {\n\t\t\tif (Array.isArray(step.messageLog)) {\n\t\t\t\tstep.messageLog.forEach((message: BaseMessage) => {\n\t\t\t\t\tif ('content' in message && Array.isArray(message.content)) {\n\t\t\t\t\t\t// eslint-disable-next-line @typescript-eslint/no-unsafe-member-access\n\t\t\t\t\t\t(message.content as Array<{ input?: string | object }>).forEach((content) => {\n\t\t\t\t\t\t\tif (content.input === '') {\n\t\t\t\t\t\t\t\tcontent.input = {};\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t});\n\n\treturn steps;\n}\n\n/**\n * Ensures consistent handling of outputs regardless of the model used,\n * providing a unified output format for further processing.\n *\n * This method is necessary to handle different output formats from various language models.\n * Specifically, it checks if the agent step is the final step (contains returnValues) and determines\n * if the output is a simple string (e.g., from OpenAI models) or an array of outputs (e.g., from Anthropic models).\n *\n * Examples:\n * 1. Anthropic model output:\n * ```json\n * {\n * \"output\": [\n * {\n * \"index\": 0,\n * \"type\": \"text\",\n * \"text\": \"The result of the calculation is approximately 1001.8166...\"\n * }\n * ]\n * }\n *```\n * 2. 
OpenAI model output:\n * ```json\n * {\n * \"output\": \"The result of the calculation is approximately 1001.82...\"\n * }\n * ```\n *\n * @param steps - The agent finish or agent action steps.\n * @returns The modified agent finish steps or the original steps.\n */\nexport function handleAgentFinishOutput(\n\tsteps: AgentFinish | AgentAction[],\n): AgentFinish | AgentAction[] {\n\ttype AgentMultiOutputFinish = AgentFinish & {\n\t\treturnValues: { output: Array<{ text: string; type: string; index: number }> };\n\t};\n\tconst agentFinishSteps = steps as AgentMultiOutputFinish | AgentFinish;\n\n\tif (agentFinishSteps.returnValues) {\n\t\tconst isMultiOutput = Array.isArray(agentFinishSteps.returnValues?.output);\n\t\tif (isMultiOutput) {\n\t\t\t// If all items in the multi-output array are of type 'text', merge them into a single string\n\t\t\tconst multiOutputSteps = agentFinishSteps.returnValues.output as Array<{\n\t\t\t\tindex: number;\n\t\t\t\ttype: string;\n\t\t\t\ttext: string;\n\t\t\t}>;\n\t\t\tconst isTextOnly = multiOutputSteps.every((output) => 'text' in output);\n\t\t\tif (isTextOnly) {\n\t\t\t\tagentFinishSteps.returnValues.output = multiOutputSteps\n\t\t\t\t\t.map((output) => output.text)\n\t\t\t\t\t.join('\\n')\n\t\t\t\t\t.trim();\n\t\t\t}\n\t\t\treturn agentFinishSteps;\n\t\t}\n\t}\n\n\treturn agentFinishSteps;\n}\n\n/**\n * Wraps the parsed output so that it can be stored in memory.\n * If memory is connected, the output is stringified.\n *\n * @param output - The parsed output object\n * @param memory - The connected memory (if any)\n * @returns The formatted output object\n */\nexport function handleParsedStepOutput(\n\toutput: Record<string, unknown>,\n\tmemory?: BaseChatMemory,\n): { returnValues: Record<string, unknown>; log: string } {\n\treturn {\n\t\treturnValues: memory ? { output: JSON.stringify(output) } : output,\n\t\tlog: 'Final response formatted',\n\t};\n}\n\n/**\n * Parses agent steps using the provided output parser.\n * If the agent used the 'format_final_json_response' tool, the output is parsed accordingly.\n *\n * @param steps - The agent finish or action steps\n * @param outputParser - The output parser (if defined)\n * @param memory - The connected memory (if any)\n * @returns The parsed steps with the final output\n */\nexport const getAgentStepsParser =\n\t(outputParser?: N8nOutputParser, memory?: BaseChatMemory) =>\n\tasync (steps: AgentFinish | AgentAction[]): Promise<AgentFinish | AgentAction[]> => {\n\t\t// Check if the steps contain the 'format_final_json_response' tool invocation.\n\t\tif (Array.isArray(steps)) {\n\t\t\tconst responseParserTool = steps.find((step) => step.tool === 'format_final_json_response');\n\t\t\tif (responseParserTool && outputParser) {\n\t\t\t\tconst toolInput = responseParserTool.toolInput;\n\t\t\t\t// Ensure the tool input is a string\n\t\t\t\tconst parserInput = toolInput instanceof Object ? 
JSON.stringify(toolInput) : toolInput;\n\t\t\t\tconst returnValues = (await outputParser.parse(parserInput)) as Record<string, unknown>;\n\t\t\t\treturn handleParsedStepOutput(returnValues, memory);\n\t\t\t}\n\t\t}\n\n\t\t// Otherwise, if the steps contain a returnValues field, try to parse them manually.\n\t\tif (outputParser && typeof steps === 'object' && (steps as AgentFinish).returnValues) {\n\t\t\tconst finalResponse = (steps as AgentFinish).returnValues;\n\t\t\tlet parserInput: string;\n\n\t\t\tif (finalResponse instanceof Object) {\n\t\t\t\tif ('output' in finalResponse) {\n\t\t\t\t\ttry {\n\t\t\t\t\t\t// If the output is an object, we will try to parse it as JSON\n\t\t\t\t\t\t// this is because parser expects stringified JSON object like { \"output\": { .... } }\n\t\t\t\t\t\t// so we try to parse the output before wrapping it and then stringify it\n\t\t\t\t\t\tparserInput = JSON.stringify({ output: jsonParse(finalResponse.output) });\n\t\t\t\t\t} catch (error) {\n\t\t\t\t\t\t// Fallback to the raw output if parsing fails.\n\t\t\t\t\t\tparserInput = finalResponse.output;\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// If the output is not an object, we will stringify it as it is\n\t\t\t\t\tparserInput = JSON.stringify(finalResponse);\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tparserInput = finalResponse;\n\t\t\t}\n\n\t\t\tconst returnValues = (await outputParser.parse(parserInput)) as Record<string, unknown>;\n\t\t\treturn handleParsedStepOutput(returnValues, memory);\n\t\t}\n\n\t\treturn handleAgentFinishOutput(steps);\n\t};\n\n/* -----------------------------------------------------------\n Agent Setup Helpers\n----------------------------------------------------------- */\n/**\n * Retrieves the language model from the input connection.\n * Throws an error if the model is not a valid chat instance or does not support tools.\n *\n * @param ctx - The execution context\n * @returns The validated chat model\n */\nexport async function getChatModel(ctx: IExecuteFunctions): Promise<BaseChatModel> {\n\tconst model = await ctx.getInputConnectionData(NodeConnectionTypes.AiLanguageModel, 0);\n\tif (!isChatInstance(model) || !model.bindTools) {\n\t\tthrow new NodeOperationError(\n\t\t\tctx.getNode(),\n\t\t\t'Tools Agent requires Chat Model which supports Tools calling',\n\t\t);\n\t}\n\treturn model;\n}\n\n/**\n * Retrieves the memory instance from the input connection if it is connected\n *\n * @param ctx - The execution context\n * @returns The connected memory (if any)\n */\nexport async function getOptionalMemory(\n\tctx: IExecuteFunctions,\n): Promise<BaseChatMemory | undefined> {\n\treturn (await ctx.getInputConnectionData(NodeConnectionTypes.AiMemory, 0)) as\n\t\t| BaseChatMemory\n\t\t| undefined;\n}\n\n/**\n * Retrieves the connected tools and (if an output parser is defined)\n * appends a structured output parser tool.\n *\n * @param ctx - The execution context\n * @param outputParser - The optional output parser\n * @returns The array of connected tools\n */\nexport async function getTools(\n\tctx: IExecuteFunctions,\n\toutputParser?: N8nOutputParser,\n): Promise<Array<DynamicStructuredTool | Tool>> {\n\tconst tools = (await getConnectedTools(ctx, true, false)) as Array<DynamicStructuredTool | Tool>;\n\n\t// If an output parser is available, create a dynamic tool to validate the final output.\n\tif (outputParser) {\n\t\tconst schema = getOutputParserSchema(outputParser);\n\t\tconst structuredOutputParserTool = new DynamicStructuredTool({\n\t\t\tschema,\n\t\t\tname: 
'format_final_json_response',\n\t\t\tdescription:\n\t\t\t\t'Use this tool to format your final response to the user in a structured JSON format. This tool validates your output against a schema to ensure it meets the required format. ONLY use this tool when you have completed all necessary reasoning and are ready to provide your final answer. Do not use this tool for intermediate steps or for asking questions. The output from this tool will be directly returned to the user.',\n\t\t\t// We do not use a function here because we intercept the output with the parser.\n\t\t\tfunc: async () => '',\n\t\t});\n\t\ttools.push(structuredOutputParserTool);\n\t}\n\treturn tools;\n}\n\n/**\n * Prepares the prompt messages for the agent.\n *\n * @param ctx - The execution context\n * @param itemIndex - The current item index\n * @param options - Options containing systemMessage and other parameters\n * @returns The array of prompt messages\n */\nexport async function prepareMessages(\n\tctx: IExecuteFunctions,\n\titemIndex: number,\n\toptions: {\n\t\tsystemMessage?: string;\n\t\tpassthroughBinaryImages?: boolean;\n\t\toutputParser?: N8nOutputParser;\n\t},\n): Promise<BaseMessagePromptTemplateLike[]> {\n\tconst useSystemMessage = options.systemMessage ?? ctx.getNode().typeVersion < 1.9;\n\n\tconst messages: BaseMessagePromptTemplateLike[] = [];\n\n\tif (useSystemMessage) {\n\t\tmessages.push([\n\t\t\t'system',\n\t\t\t`{system_message}${options.outputParser ? '\\n\\n{formatting_instructions}' : ''}`,\n\t\t]);\n\t} else if (options.outputParser) {\n\t\tmessages.push(['system', '{formatting_instructions}']);\n\t}\n\n\tmessages.push(['placeholder', '{chat_history}'], ['human', '{input}']);\n\n\t// If there is binary data and the node option permits it, add a binary message\n\tconst hasBinaryData = ctx.getInputData()?.[itemIndex]?.binary !== undefined;\n\tif (hasBinaryData && options.passthroughBinaryImages) {\n\t\tconst binaryMessage = await extractBinaryMessages(ctx, itemIndex);\n\t\tif (binaryMessage.content.length !== 0) {\n\t\t\tmessages.push(binaryMessage);\n\t\t} else {\n\t\t\tctx.logger.debug('Not attaching binary message, since its content was empty');\n\t\t}\n\t}\n\n\t// We add the agent scratchpad last, so that the agent will not run in loops\n\t// by adding binary messages between each interaction\n\tmessages.push(['placeholder', '{agent_scratchpad}']);\n\treturn messages;\n}\n\n/**\n * Creates the chat prompt from messages.\n *\n * @param messages - The messages array\n * @returns The ChatPromptTemplate instance\n */\nexport function preparePrompt(messages: BaseMessagePromptTemplateLike[]): ChatPromptTemplate {\n\treturn ChatPromptTemplate.fromMessages(messages);\n}\n\n/* -----------------------------------------------------------\n Main Executor Function\n----------------------------------------------------------- */\n/**\n * The main executor method for the Tools Agent.\n *\n * This function retrieves necessary components (model, memory, tools), prepares the prompt,\n * creates the agent, and processes each input item. 
The error handling for each item is also\n * managed here based on the node's continueOnFail setting.\n *\n * @returns The array of execution data for all processed items\n */\nexport async function toolsAgentExecute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> {\n\tthis.logger.debug('Executing Tools Agent');\n\n\tconst returnData: INodeExecutionData[] = [];\n\tconst items = this.getInputData();\n\tconst outputParser = await getOptionalOutputParser(this);\n\tconst memory = await getOptionalMemory(this);\n\tconst tools = await getTools(this, outputParser);\n\tconst { batchSize, delayBetweenBatches } = this.getNodeParameter('options.batching', 0, {\n\t\tbatchSize: 1,\n\t\tdelayBetweenBatches: 0,\n\t}) as {\n\t\tbatchSize: number;\n\t\tdelayBetweenBatches: number;\n\t};\n\n\tfor (let i = 0; i < items.length; i += batchSize) {\n\t\tconst batch = items.slice(i, i + batchSize);\n\t\tconst batchPromises = batch.map(async (_item, batchItemIndex) => {\n\t\t\tconst itemIndex = i + batchItemIndex;\n\t\t\tconst model = await getChatModel(this);\n\n\t\t\tconst input = getPromptInputByType({\n\t\t\t\tctx: this,\n\t\t\t\ti: itemIndex,\n\t\t\t\tinputKey: 'text',\n\t\t\t\tpromptTypeKey: 'promptType',\n\t\t\t});\n\t\t\tif (input === undefined) {\n\t\t\t\tthrow new NodeOperationError(this.getNode(), 'The “text” parameter is empty.');\n\t\t\t}\n\n\t\t\tconst options = this.getNodeParameter('options', itemIndex, {}) as {\n\t\t\t\tsystemMessage?: string;\n\t\t\t\tmaxIterations?: number;\n\t\t\t\treturnIntermediateSteps?: boolean;\n\t\t\t\tpassthroughBinaryImages?: boolean;\n\t\t\t};\n\n\t\t\t// Prepare the prompt messages and prompt template.\n\t\t\tconst messages = await prepareMessages(this, itemIndex, {\n\t\t\t\tsystemMessage: options.systemMessage,\n\t\t\t\tpassthroughBinaryImages: options.passthroughBinaryImages ?? true,\n\t\t\t\toutputParser,\n\t\t\t});\n\t\t\tconst prompt = preparePrompt(messages);\n\n\t\t\t// Create the base agent that calls tools.\n\t\t\tconst agent = createToolCallingAgent({\n\t\t\t\tllm: model,\n\t\t\t\ttools,\n\t\t\t\tprompt,\n\t\t\t\tstreamRunnable: false,\n\t\t\t});\n\t\t\tagent.streamRunnable = false;\n\t\t\t// Wrap the agent with parsers and fixes.\n\t\t\tconst runnableAgent = RunnableSequence.from([\n\t\t\t\tagent,\n\t\t\t\tgetAgentStepsParser(outputParser, memory),\n\t\t\t\tfixEmptyContentMessage,\n\t\t\t]);\n\t\t\tconst executor = AgentExecutor.fromAgentAndTools({\n\t\t\t\tagent: runnableAgent,\n\t\t\t\tmemory,\n\t\t\t\ttools,\n\t\t\t\treturnIntermediateSteps: options.returnIntermediateSteps === true,\n\t\t\t\tmaxIterations: options.maxIterations ?? 10,\n\t\t\t});\n\n\t\t\t// Invoke the executor with the given input and system message.\n\t\t\treturn await executor.invoke(\n\t\t\t\t{\n\t\t\t\t\tinput,\n\t\t\t\t\tsystem_message: options.systemMessage ?? SYSTEM_MESSAGE,\n\t\t\t\t\tformatting_instructions:\n\t\t\t\t\t\t'IMPORTANT: For your response to user, you MUST use the `format_final_json_response` tool with your complete answer formatted according to the required schema. Do not attempt to format the JSON manually - always use this tool. Your response will be rejected if it is not properly formatted through this tool. 
Only use this tool once you are ready to provide your final answer.',\n\t\t\t\t},\n\t\t\t\t{ signal: this.getExecutionCancelSignal() },\n\t\t\t);\n\t\t});\n\t\tconst batchResults = await Promise.allSettled(batchPromises);\n\n\t\tbatchResults.forEach((result, index) => {\n\t\t\tif (result.status === 'rejected') {\n\t\t\t\tif (this.continueOnFail()) {\n\t\t\t\t\treturnData.push({\n\t\t\t\t\t\tjson: { error: result.reason as string },\n\t\t\t\t\t\tpairedItem: { item: index },\n\t\t\t\t\t});\n\t\t\t\t\treturn;\n\t\t\t\t} else {\n\t\t\t\t\tthrow new NodeOperationError(this.getNode(), result.reason);\n\t\t\t\t}\n\t\t\t}\n\t\t\tconst response = result.value;\n\t\t\t// If memory and outputParser are connected, parse the output.\n\t\t\tif (memory && outputParser) {\n\t\t\t\tconst parsedOutput = jsonParse<{ output: Record<string, unknown> }>(\n\t\t\t\t\tresponse.output as string,\n\t\t\t\t);\n\t\t\t\tresponse.output = parsedOutput?.output ?? parsedOutput;\n\t\t\t}\n\n\t\t\t// Omit internal keys before returning the result.\n\t\t\tconst itemResult = {\n\t\t\t\tjson: omit(\n\t\t\t\t\tresponse,\n\t\t\t\t\t'system_message',\n\t\t\t\t\t'formatting_instructions',\n\t\t\t\t\t'input',\n\t\t\t\t\t'chat_history',\n\t\t\t\t\t'agent_scratchpad',\n\t\t\t\t),\n\t\t\t};\n\n\t\t\treturnData.push(itemResult);\n\t\t});\n\n\t\tif (i + batchSize < items.length && delayBetweenBatches > 0) {\n\t\t\tawait sleep(delayBetweenBatches);\n\t\t}\n\t}\n\n\treturn [returnData];\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAEA,sBAA6B;AAG7B,qBAAmC;AACnC,uBAAiC;AAEjC,mBAAsC;AAEtC,oBAAsD;AAEtD,oBAAqB;AACrB,0BAMO;AAGP,iBAAkB;AAElB,qBAAwE;AACxE,6BAGO;AAEP,oBAA+B;AASxB,SAAS,sBACf,cAEgC;AAChC,QAAM;AAAA;AAAA,IAEJ,aAAa,UAAU,KAAuC,aAAE,OAAO,EAAE,MAAM,aAAE,OAAO,EAAE,CAAC;AAAA;AAC7F,SAAO;AACR;AAaA,eAAsB,sBACrB,KACA,WACwB;AACxB,QAAM,aAAa,IAAI,aAAa,IAAI,SAAS,GAAG,UAAU,CAAC;AAC/D,QAAM,iBAAiB,MAAM,QAAQ;AAAA,IACpC,OAAO,OAAO,UAAU,EACtB,OAAO,CAAC,SAAS,KAAK,SAAS,WAAW,QAAQ,CAAC,EACnD,IAAI,OAAO,SAAS;AACpB,UAAI;AAGJ,UAAI,KAAK,IAAI;AACZ,cAAM,eAAe,MAAM,IAAI,QAAQ;AAAA,UACtC,MAAM,IAAI,QAAQ,gBAAgB,KAAK,EAAE;AAAA,QAC1C;AACA,0BAAkB,QAAQ,KAAK,QAAQ,WAAW,OAAO,KAAK,YAAY,EAAE;AAAA,UAC3E;AAAA,QACD,CAAC;AAAA,MACF,OAAO;AACN,0BAAkB,KAAK,KAAK,SAAS,QAAQ,IAC1C,KAAK,OACL,QAAQ,KAAK,QAAQ,WAAW,KAAK,IAAI;AAAA,MAC7C;AAEA,aAAO;AAAA,QACN,MAAM;AAAA,QACN,WAAW;AAAA,UACV,KAAK;AAAA,QACN;AAAA,MACD;AAAA,IACD,CAAC;AAAA,EACH;AACA,SAAO,IAAI,6BAAa;AAAA,IACvB,SAAS,CAAC,GAAG,cAAc;AAAA,EAC5B,CAAC;AACF;AAgBO,SAAS,uBACf,OACmC;AACnC,MAAI,CAAC,MAAM,QAAQ,KAAK,EAAG,QAAO;AAElC,QAAM,QAAQ,CAAC,SAAS;AACvB,QAAI,gBAAgB,QAAQ,KAAK,eAAe,QAAW;AAC1D,UAAI,MAAM,QAAQ,KAAK,UAAU,GAAG;AACnC,aAAK,WAAW,QAAQ,CAAC,YAAyB;AACjD,cAAI,aAAa,WAAW,MAAM,QAAQ,QAAQ,OAAO,GAAG;AAE3D,YAAC,QAAQ,QAA+C,QAAQ,CAAC,YAAY;AAC5E,kBAAI,QAAQ,UAAU,IAAI;AACzB,wBAAQ,QAAQ,CAAC;AAAA,cAClB;AAAA,YACD,CAAC;AAAA,UACF;AAAA,QACD,CAAC;AAAA,MACF;AAAA,IACD;AAAA,EACD,CAAC;AAED,SAAO;AACR;AAiCO,SAAS,wBACf,OAC8B;AAI9B,QAAM,mBAAmB;AAEzB,MAAI,iBAAiB,cAAc;AAClC,UAAM,gBAAgB,MAAM,QAAQ,iBAAiB,cAAc,MAAM;AACzE,QAAI,eAAe;AAElB,YAAM,mBAAmB,iBAAiB,aAAa;AAKvD,YAAM,aAAa,iBAAiB,MAAM,CAAC,WAAW,UAAU,MAAM;AACtE,UAAI,YAAY;AACf,yBAAiB,aAAa,SAAS,iBACrC,IAAI,CAAC,WAAW,OAAO,IAAI,EAC3B,KAAK,IAAI,EACT,KAAK;AAAA,MACR;AACA,aAAO;AAAA,IACR;AAAA,EACD;AAEA,SAAO;AACR;AAUO,SAAS,uBACf,QACA,QACyD;AACzD,SAAO;AAAA,IACN,cAAc,SAAS,EAAE,QAAQ,KAAK,UAAU,MAAM,EAAE,IAAI;AAAA,IAC5D,KAAK;AAAA,EACN;AACD;AAWO,MAAM,sBACZ,CAAC,cAAgC,WACjC,OAAO,UAA6E;AAEnF,MAAI,MAAM,QAAQ,KAAK,GAAG;AACzB,UAAM,qBAAqB,MAAM,KAAK,CAAC,SAAS,KAAK,SAAS,4BAA4B;AAC1
F,QAAI,sBAAsB,cAAc;AACvC,YAAM,YAAY,mBAAmB;AAErC,YAAM,cAAc,qBAAqB,SAAS,KAAK,UAAU,SAAS,IAAI;AAC9E,YAAM,eAAgB,MAAM,aAAa,MAAM,WAAW;AAC1D,aAAO,uBAAuB,cAAc,MAAM;AAAA,IACnD;AAAA,EACD;AAGA,MAAI,gBAAgB,OAAO,UAAU,YAAa,MAAsB,cAAc;AACrF,UAAM,gBAAiB,MAAsB;AAC7C,QAAI;AAEJ,QAAI,yBAAyB,QAAQ;AACpC,UAAI,YAAY,eAAe;AAC9B,YAAI;AAIH,wBAAc,KAAK,UAAU,EAAE,YAAQ,+BAAU,cAAc,MAAM,EAAE,CAAC;AAAA,QACzE,SAAS,OAAO;AAEf,wBAAc,cAAc;AAAA,QAC7B;AAAA,MACD,OAAO;AAEN,sBAAc,KAAK,UAAU,aAAa;AAAA,MAC3C;AAAA,IACD,OAAO;AACN,oBAAc;AAAA,IACf;AAEA,UAAM,eAAgB,MAAM,aAAa,MAAM,WAAW;AAC1D,WAAO,uBAAuB,cAAc,MAAM;AAAA,EACnD;AAEA,SAAO,wBAAwB,KAAK;AACrC;AAYD,eAAsB,aAAa,KAAgD;AAClF,QAAM,QAAQ,MAAM,IAAI,uBAAuB,wCAAoB,iBAAiB,CAAC;AACrF,MAAI,KAAC,+BAAe,KAAK,KAAK,CAAC,MAAM,WAAW;AAC/C,UAAM,IAAI;AAAA,MACT,IAAI,QAAQ;AAAA,MACZ;AAAA,IACD;AAAA,EACD;AACA,SAAO;AACR;AAQA,eAAsB,kBACrB,KACsC;AACtC,SAAQ,MAAM,IAAI,uBAAuB,wCAAoB,UAAU,CAAC;AAGzE;AAUA,eAAsB,SACrB,KACA,cAC+C;AAC/C,QAAM,QAAS,UAAM,kCAAkB,KAAK,MAAM,KAAK;AAGvD,MAAI,cAAc;AACjB,UAAM,SAAS,sBAAsB,YAAY;AACjD,UAAM,6BAA6B,IAAI,mCAAsB;AAAA,MAC5D;AAAA,MACA,MAAM;AAAA,MACN,aACC;AAAA;AAAA,MAED,MAAM,YAAY;AAAA,IACnB,CAAC;AACD,UAAM,KAAK,0BAA0B;AAAA,EACtC;AACA,SAAO;AACR;AAUA,eAAsB,gBACrB,KACA,WACA,SAK2C;AAC3C,QAAM,mBAAmB,QAAQ,iBAAiB,IAAI,QAAQ,EAAE,cAAc;AAE9E,QAAM,WAA4C,CAAC;AAEnD,MAAI,kBAAkB;AACrB,aAAS,KAAK;AAAA,MACb;AAAA,MACA,mBAAmB,QAAQ,eAAe,kCAAkC,EAAE;AAAA,IAC/E,CAAC;AAAA,EACF,WAAW,QAAQ,cAAc;AAChC,aAAS,KAAK,CAAC,UAAU,2BAA2B,CAAC;AAAA,EACtD;AAEA,WAAS,KAAK,CAAC,eAAe,gBAAgB,GAAG,CAAC,SAAS,SAAS,CAAC;AAGrE,QAAM,gBAAgB,IAAI,aAAa,IAAI,SAAS,GAAG,WAAW;AAClE,MAAI,iBAAiB,QAAQ,yBAAyB;AACrD,UAAM,gBAAgB,MAAM,sBAAsB,KAAK,SAAS;AAChE,QAAI,cAAc,QAAQ,WAAW,GAAG;AACvC,eAAS,KAAK,aAAa;AAAA,IAC5B,OAAO;AACN,UAAI,OAAO,MAAM,2DAA2D;AAAA,IAC7E;AAAA,EACD;AAIA,WAAS,KAAK,CAAC,eAAe,oBAAoB,CAAC;AACnD,SAAO;AACR;AAQO,SAAS,cAAc,UAA+D;AAC5F,SAAO,kCAAmB,aAAa,QAAQ;AAChD;AAcA,eAAsB,oBAA4E;AACjG,OAAK,OAAO,MAAM,uBAAuB;AAEzC,QAAM,aAAmC,CAAC;AAC1C,QAAM,QAAQ,KAAK,aAAa;AAChC,QAAM,eAAe,UAAM,gDAAwB,IAAI;AACvD,QAAM,SAAS,MAAM,kBAAkB,IAAI;AAC3C,QAAM,QAAQ,MAAM,SAAS,MAAM,YAAY;AAC/C,QAAM,EAAE,WAAW,oBAAoB,IAAI,KAAK,iBAAiB,oBAAoB,GAAG;AAAA,IACvF,WAAW;AAAA,IACX,qBAAqB;AAAA,EACtB,CAAC;AAKD,WAAS,IAAI,GAAG,IAAI,MAAM,QAAQ,KAAK,WAAW;AACjD,UAAM,QAAQ,MAAM,MAAM,GAAG,IAAI,SAAS;AAC1C,UAAM,gBAAgB,MAAM,IAAI,OAAO,OAAO,mBAAmB;AAChE,YAAM,YAAY,IAAI;AACtB,YAAM,QAAQ,MAAM,aAAa,IAAI;AAErC,YAAM,YAAQ,qCAAqB;AAAA,QAClC,KAAK;AAAA,QACL,GAAG;AAAA,QACH,UAAU;AAAA,QACV,eAAe;AAAA,MAChB,CAAC;AACD,UAAI,UAAU,QAAW;AACxB,cAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,0CAAgC;AAAA,MAC9E;AAEA,YAAM,UAAU,KAAK,iBAAiB,WAAW,WAAW,CAAC,CAAC;AAQ9D,YAAM,WAAW,MAAM,gBAAgB,MAAM,WAAW;AAAA,QACvD,eAAe,QAAQ;AAAA,QACvB,yBAAyB,QAAQ,2BAA2B;AAAA,QAC5D;AAAA,MACD,CAAC;AACD,YAAM,SAAS,cAAc,QAAQ;AAGrC,YAAM,YAAQ,sCAAuB;AAAA,QACpC,KAAK;AAAA,QACL;AAAA,QACA;AAAA,QACA,gBAAgB;AAAA,MACjB,CAAC;AACD,YAAM,iBAAiB;AAEvB,YAAM,gBAAgB,kCAAiB,KAAK;AAAA,QAC3C;AAAA,QACA,oBAAoB,cAAc,MAAM;AAAA,QACxC;AAAA,MACD,CAAC;AACD,YAAM,WAAW,4BAAc,kBAAkB;AAAA,QAChD,OAAO;AAAA,QACP;AAAA,QACA;AAAA,QACA,yBAAyB,QAAQ,4BAA4B;AAAA,QAC7D,eAAe,QAAQ,iBAAiB;AAAA,MACzC,CAAC;AAGD,aAAO,MAAM,SAAS;AAAA,QACrB;AAAA,UACC;AAAA,UACA,gBAAgB,QAAQ,iBAAiB;AAAA,UACzC,yBACC;AAAA,QACF;AAAA,QACA,EAAE,QAAQ,KAAK,yBAAyB,EAAE;AAAA,MAC3C;AAAA,IACD,CAAC;AACD,UAAM,eAAe,MAAM,QAAQ,WAAW,aAAa;AAE3D,iBAAa,QAAQ,CAAC,QAAQ,UAAU;AACvC,UAAI,OAAO,WAAW,YAAY;AACjC,YAAI,KAAK,eAAe,GAAG;AAC1B,qBAAW,KAAK;AAAA,YACf,MAAM,EAAE,OAAO,OAAO,OAAiB;AAAA,YACvC,YAAY,EAAE,MAAM,MAAM;AAAA,UAC3B,CAAC;AACD;AAAA,QACD,OAAO;AACN,gBAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,OAAO,MAAM;AAAA,QAC3D;AAAA,MACD;AACA,YAAM,WAAW,OAAO;AAExB,UAAI,UAAU,cAAc;AAC3B,cAAM,mBAAe;AAAA,UA
CpB,SAAS;AAAA,QACV;AACA,iBAAS,SAAS,cAAc,UAAU;AAAA,MAC3C;AAGA,YAAM,aAAa;AAAA,QAClB,UAAM;AAAA,UACL;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,QACD;AAAA,MACD;AAEA,iBAAW,KAAK,UAAU;AAAA,IAC3B,CAAC;AAED,QAAI,IAAI,YAAY,MAAM,UAAU,sBAAsB,GAAG;AAC5D,gBAAM,2BAAM,mBAAmB;AAAA,IAChC;AAAA,EACD;AAEA,SAAO,CAAC,UAAU;AACnB;","names":[]}
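The updated ToolsAgent source embedded in the map above slices the input items into batches, runs each batch through Promise.allSettled, and optionally sleeps between batches (falling back to batchSize 1 and no delay when the option is absent). A minimal, self-contained TypeScript sketch of that pattern, with illustrative names (Item, runInBatches, processItem) rather than the node's actual helpers:

	// Hypothetical stand-ins for the node's per-item work; names are illustrative only.
	type Item = { json: Record<string, unknown> };

	const sleep = async (ms: number): Promise<void> =>
		await new Promise<void>((resolve) => setTimeout(resolve, ms));

	async function runInBatches(
		items: Item[],
		processItem: (item: Item, itemIndex: number) => Promise<unknown>,
		batchSize = 1,
		delayBetweenBatches = 0,
	): Promise<Array<PromiseSettledResult<unknown>>> {
		const results: Array<PromiseSettledResult<unknown>> = [];

		for (let i = 0; i < items.length; i += batchSize) {
			const batch = items.slice(i, i + batchSize);

			// Items inside a batch run concurrently; allSettled keeps individual failures
			// from aborting the rest of the batch.
			const batchResults = await Promise.allSettled(
				batch.map(async (item, batchItemIndex) => await processItem(item, i + batchItemIndex)),
			);
			results.push(...batchResults);

			// Optional pause before the next batch, used for rate limiting.
			if (i + batchSize < items.length && delayBetweenBatches > 0) {
				await sleep(delayBetweenBatches);
			}
		}

		return results;
	}

For example, runInBatches(items, work, 100, 1000) would process 100 items at a time with a one-second pause between batches, matching the Basic LLM Chain option defaults shown further down in this diff.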
@@ -67,13 +67,19 @@ class ChainLlm
  this.logger.debug("Executing Basic LLM Chain");
  const items = this.getInputData();
  const returnData = [];
- for (let itemIndex = 0; itemIndex < items.length; itemIndex++) {
- try {
+ const { batchSize, delayBetweenBatches } = this.getNodeParameter("batching", 0, {
+ batchSize: 100,
+ delayBetweenBatches: 0
+ });
+ const outputParser = await (0, import_N8nOutputParser.getOptionalOutputParser)(this);
+ for (let i = 0; i < items.length; i += batchSize) {
+ const batch = items.slice(i, i + batchSize);
+ const batchPromises = batch.map(async (_item, batchItemIndex) => {
+ const itemIndex = i + batchItemIndex;
  const llm = await this.getInputConnectionData(
  import_n8n_workflow.NodeConnectionTypes.AiLanguageModel,
  0
  );
- const outputParser = await (0, import_N8nOutputParser.getOptionalOutputParser)(this);
  let prompt;
  if (this.getNode().typeVersion <= 1.3) {
  prompt = this.getNodeParameter("prompt", itemIndex);
@@ -93,7 +99,7 @@ class ChainLlm
  itemIndex,
  []
  );
- const responses = await (0, import_methods.executeChain)({
+ return await (0, import_methods.executeChain)({
  context: this,
  itemIndex,
  query: prompt,
@@ -101,27 +107,39 @@ class ChainLlm
  outputParser,
  messages
  });
- const shouldUnwrapObjects = this.getNode().typeVersion >= 1.6 || !!outputParser;
+ });
+ const batchResults = await Promise.allSettled(batchPromises);
+ batchResults.forEach((promiseResult, batchItemIndex) => {
+ const itemIndex = i + batchItemIndex;
+ if (promiseResult.status === "rejected") {
+ const error = promiseResult.reason;
+ if (error instanceof import_n8n_workflow.NodeApiError && (0, import_error_handling.isOpenAiError)(error.cause)) {
+ const openAiErrorCode = error.cause.error?.code;
+ if (openAiErrorCode) {
+ const customMessage = (0, import_error_handling.getCustomErrorMessage)(openAiErrorCode);
+ if (customMessage) {
+ error.message = customMessage;
+ }
+ }
+ }
+ if (this.continueOnFail()) {
+ returnData.push({
+ json: { error: error.message },
+ pairedItem: { item: itemIndex }
+ });
+ return;
+ }
+ throw new import_n8n_workflow.NodeOperationError(this.getNode(), error);
+ }
+ const responses = promiseResult.value;
  responses.forEach((response) => {
  returnData.push({
- json: (0, import_methods.formatResponse)(response, shouldUnwrapObjects)
+ json: (0, import_methods.formatResponse)(response, this.getNode().typeVersion >= 1.6 || !!outputParser)
  });
  });
- } catch (error) {
- if (error instanceof import_n8n_workflow.NodeApiError && (0, import_error_handling.isOpenAiError)(error.cause)) {
- const openAiErrorCode = error.cause.error?.code;
- if (openAiErrorCode) {
- const customMessage = (0, import_error_handling.getCustomErrorMessage)(openAiErrorCode);
- if (customMessage) {
- error.message = customMessage;
- }
- }
- }
- if (this.continueOnFail()) {
- returnData.push({ json: { error: error.message }, pairedItem: { item: itemIndex } });
- continue;
- }
- throw error;
+ });
+ if (i + batchSize < items.length && delayBetweenBatches > 0) {
+ await (0, import_n8n_workflow.sleep)(delayBetweenBatches);
  }
  }
  return [returnData];
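Because each batch now resolves through Promise.allSettled, per-item failures surface as rejected results rather than thrown exceptions, and the node inspects them after the batch completes: continueOnFail turns a rejection into an error item that stays paired with its input index, otherwise the first failure is rethrown as a NodeOperationError. A reduced sketch of that mapping (a plain Error and a boolean flag are simplifications, not the node's API):

	interface ErrorItem {
		json: { error: string };
		pairedItem: { item: number };
	}

	// Turns rejected batch results into error items, or rethrows the first failure.
	function collectBatchErrors(
		batchResults: Array<PromiseSettledResult<unknown>>,
		firstItemIndex: number,
		continueOnFail: boolean,
	): ErrorItem[] {
		const errorItems: ErrorItem[] = [];

		batchResults.forEach((result, batchItemIndex) => {
			if (result.status !== 'rejected') return;

			const error =
				result.reason instanceof Error ? result.reason : new Error(String(result.reason));

			if (!continueOnFail) {
				// The node wraps this in a NodeOperationError; a plain rethrow keeps the sketch generic.
				throw error;
			}

			errorItems.push({
				json: { error: error.message },
				pairedItem: { item: firstItemIndex + batchItemIndex },
			});
		});

		return errorItems;
	}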
@@ -1 +1 @@
- {"version":3,"sources":["../../../../nodes/chains/ChainLLM/ChainLlm.node.ts"],"sourcesContent":["import type { BaseLanguageModel } from '@langchain/core/language_models/base';\nimport type {\n\tIExecuteFunctions,\n\tINodeExecutionData,\n\tINodeType,\n\tINodeTypeDescription,\n} from 'n8n-workflow';\nimport { NodeApiError, NodeConnectionTypes, NodeOperationError } from 'n8n-workflow';\n\nimport { getPromptInputByType } from '@utils/helpers';\nimport { getOptionalOutputParser } from '@utils/output_parsers/N8nOutputParser';\n\n// Import from centralized module\nimport {\n\texecuteChain,\n\tformatResponse,\n\tgetInputs,\n\tnodeProperties,\n\ttype MessageTemplate,\n} from './methods';\nimport {\n\tgetCustomErrorMessage as getCustomOpenAiErrorMessage,\n\tisOpenAiError,\n} from '../../vendors/OpenAi/helpers/error-handling';\n\n/**\n * Basic LLM Chain Node Implementation\n * Allows connecting to language models with optional structured output parsing\n */\nexport class ChainLlm implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'Basic LLM Chain',\n\t\tname: 'chainLlm',\n\t\ticon: 'fa:link',\n\t\ticonColor: 'black',\n\t\tgroup: ['transform'],\n\t\tversion: [1, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6],\n\t\tdescription: 'A simple chain to prompt a large language model',\n\t\tdefaults: {\n\t\t\tname: 'Basic LLM Chain',\n\t\t\tcolor: '#909298',\n\t\t},\n\t\tcodex: {\n\t\t\talias: ['LangChain'],\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Chains', 'Root Nodes'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.chainllm/',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\t\tinputs: `={{ ((parameter) => { ${getInputs.toString()}; return getInputs(parameter) })($parameter) }}`,\n\t\toutputs: [NodeConnectionTypes.Main],\n\t\tcredentials: [],\n\t\tproperties: nodeProperties,\n\t};\n\n\t/**\n\t * Main execution method for the node\n\t */\n\tasync execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> {\n\t\tthis.logger.debug('Executing Basic LLM Chain');\n\t\tconst items = this.getInputData();\n\t\tconst returnData: INodeExecutionData[] = [];\n\n\t\t// Process each input item\n\t\tfor (let itemIndex = 0; itemIndex < items.length; itemIndex++) {\n\t\t\ttry {\n\t\t\t\t// Get the language model\n\t\t\t\tconst llm = (await this.getInputConnectionData(\n\t\t\t\t\tNodeConnectionTypes.AiLanguageModel,\n\t\t\t\t\t0,\n\t\t\t\t)) as BaseLanguageModel;\n\n\t\t\t\t// Get output parser if configured\n\t\t\t\tconst outputParser = await getOptionalOutputParser(this);\n\n\t\t\t\t// Get user prompt based on node version\n\t\t\t\tlet prompt: string;\n\n\t\t\t\tif (this.getNode().typeVersion <= 1.3) {\n\t\t\t\t\tprompt = this.getNodeParameter('prompt', itemIndex) as string;\n\t\t\t\t} else {\n\t\t\t\t\tprompt = getPromptInputByType({\n\t\t\t\t\t\tctx: this,\n\t\t\t\t\t\ti: itemIndex,\n\t\t\t\t\t\tinputKey: 'text',\n\t\t\t\t\t\tpromptTypeKey: 'promptType',\n\t\t\t\t\t});\n\t\t\t\t}\n\n\t\t\t\t// Validate prompt\n\t\t\t\tif (prompt === undefined) {\n\t\t\t\t\tthrow new NodeOperationError(this.getNode(), \"The 'prompt' parameter is empty.\");\n\t\t\t\t}\n\n\t\t\t\t// Get chat messages if configured\n\t\t\t\tconst messages = this.getNodeParameter(\n\t\t\t\t\t'messages.messageValues',\n\t\t\t\t\titemIndex,\n\t\t\t\t\t[],\n\t\t\t\t) as MessageTemplate[];\n\n\t\t\t\t// Execute the chain\n\t\t\t\tconst responses = await executeChain({\n\t\t\t\t\tcontext: 
this,\n\t\t\t\t\titemIndex,\n\t\t\t\t\tquery: prompt,\n\t\t\t\t\tllm,\n\t\t\t\t\toutputParser,\n\t\t\t\t\tmessages,\n\t\t\t\t});\n\n\t\t\t\t// If the node version is 1.6(and LLM is using `response_format: json_object`) or higher or an output parser is configured,\n\t\t\t\t// we unwrap the response and return the object directly as JSON\n\t\t\t\tconst shouldUnwrapObjects = this.getNode().typeVersion >= 1.6 || !!outputParser;\n\t\t\t\t// Process each response and add to return data\n\t\t\t\tresponses.forEach((response) => {\n\t\t\t\t\treturnData.push({\n\t\t\t\t\t\tjson: formatResponse(response, shouldUnwrapObjects),\n\t\t\t\t\t});\n\t\t\t\t});\n\t\t\t} catch (error) {\n\t\t\t\t// Handle OpenAI specific rate limit errors\n\t\t\t\tif (error instanceof NodeApiError && isOpenAiError(error.cause)) {\n\t\t\t\t\tconst openAiErrorCode: string | undefined = (error.cause as any).error?.code;\n\t\t\t\t\tif (openAiErrorCode) {\n\t\t\t\t\t\tconst customMessage = getCustomOpenAiErrorMessage(openAiErrorCode);\n\t\t\t\t\t\tif (customMessage) {\n\t\t\t\t\t\t\terror.message = customMessage;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Continue on failure if configured\n\t\t\t\tif (this.continueOnFail()) {\n\t\t\t\t\treturnData.push({ json: { error: error.message }, pairedItem: { item: itemIndex } });\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tthrow error;\n\t\t\t}\n\t\t}\n\n\t\treturn [returnData];\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAOA,0BAAsE;AAEtE,qBAAqC;AACrC,6BAAwC;AAGxC,qBAMO;AACP,4BAGO;AAMA,MAAM,SAA8B;AAAA,EAApC;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA,MACb,MAAM;AAAA,MACN,MAAM;AAAA,MACN,WAAW;AAAA,MACX,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS,CAAC,GAAG,KAAK,KAAK,KAAK,KAAK,KAAK,GAAG;AAAA,MACzC,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,QACN,OAAO;AAAA,MACR;AAAA,MACA,OAAO;AAAA,QACN,OAAO,CAAC,WAAW;AAAA,QACnB,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,UAAU,YAAY;AAAA,QAC5B;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MACA,QAAQ,yBAAyB,yBAAU,SAAS,CAAC;AAAA,MACrD,SAAS,CAAC,wCAAoB,IAAI;AAAA,MAClC,aAAa,CAAC;AAAA,MACd,YAAY;AAAA,IACb;AAAA;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,UAAkE;AACvE,SAAK,OAAO,MAAM,2BAA2B;AAC7C,UAAM,QAAQ,KAAK,aAAa;AAChC,UAAM,aAAmC,CAAC;AAG1C,aAAS,YAAY,GAAG,YAAY,MAAM,QAAQ,aAAa;AAC9D,UAAI;AAEH,cAAM,MAAO,MAAM,KAAK;AAAA,UACvB,wCAAoB;AAAA,UACpB;AAAA,QACD;AAGA,cAAM,eAAe,UAAM,gDAAwB,IAAI;AAGvD,YAAI;AAEJ,YAAI,KAAK,QAAQ,EAAE,eAAe,KAAK;AACtC,mBAAS,KAAK,iBAAiB,UAAU,SAAS;AAAA,QACnD,OAAO;AACN,uBAAS,qCAAqB;AAAA,YAC7B,KAAK;AAAA,YACL,GAAG;AAAA,YACH,UAAU;AAAA,YACV,eAAe;AAAA,UAChB,CAAC;AAAA,QACF;AAGA,YAAI,WAAW,QAAW;AACzB,gBAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,kCAAkC;AAAA,QAChF;AAGA,cAAM,WAAW,KAAK;AAAA,UACrB;AAAA,UACA;AAAA,UACA,CAAC;AAAA,QACF;AAGA,cAAM,YAAY,UAAM,6BAAa;AAAA,UACpC,SAAS;AAAA,UACT;AAAA,UACA,OAAO;AAAA,UACP;AAAA,UACA;AAAA,UACA;AAAA,QACD,CAAC;AAID,cAAM,sBAAsB,KAAK,QAAQ,EAAE,eAAe,OAAO,CAAC,CAAC;AAEnE,kBAAU,QAAQ,CAAC,aAAa;AAC/B,qBAAW,KAAK;AAAA,YACf,UAAM,+BAAe,UAAU,mBAAmB;AAAA,UACnD,CAAC;AAAA,QACF,CAAC;AAAA,MACF,SAAS,OAAO;AAEf,YAAI,iBAAiB,wCAAgB,qCAAc,MAAM,KAAK,GAAG;AAChE,gBAAM,kBAAuC,MAAM,MAAc,OAAO;AACxE,cAAI,iBAAiB;AACpB,kBAAM,oBAAgB,sBAAAA,uBAA4B,eAAe;AACjE,gBAAI,eAAe;AAClB,oBAAM,UAAU;AAAA,YACjB;AAAA,UACD;AAAA,QACD;AAGA,YAAI,KAAK,eAAe,GAAG;AAC1B,qBAAW,KAAK,EAAE,MAAM,EAAE,OAAO,MAAM,QAAQ,GAAG,YAAY,EAAE,MAAM,UAAU,EAAE,CAAC;AACnF;AAAA,QACD;AAEA,cAAM;AAAA,MACP;AAAA,IACD;AAEA,WAAO,CAAC,UAAU;AAAA,EACnB;AACD;","names":["getCustomOpenAiErrorMessage"]}
+ {"version":3,"sources":["../../../../nodes/chains/ChainLLM/ChainLlm.node.ts"],"sourcesContent":["import type { BaseLanguageModel } from '@langchain/core/language_models/base';\nimport type {\n\tIExecuteFunctions,\n\tINodeExecutionData,\n\tINodeType,\n\tINodeTypeDescription,\n} from 'n8n-workflow';\nimport { NodeApiError, NodeConnectionTypes, NodeOperationError, sleep } from 'n8n-workflow';\n\nimport { getPromptInputByType } from '@utils/helpers';\nimport { getOptionalOutputParser } from '@utils/output_parsers/N8nOutputParser';\n\n// Import from centralized module\nimport {\n\texecuteChain,\n\tformatResponse,\n\tgetInputs,\n\tnodeProperties,\n\ttype MessageTemplate,\n} from './methods';\nimport {\n\tgetCustomErrorMessage as getCustomOpenAiErrorMessage,\n\tisOpenAiError,\n} from '../../vendors/OpenAi/helpers/error-handling';\n\n/**\n * Basic LLM Chain Node Implementation\n * Allows connecting to language models with optional structured output parsing\n */\nexport class ChainLlm implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'Basic LLM Chain',\n\t\tname: 'chainLlm',\n\t\ticon: 'fa:link',\n\t\ticonColor: 'black',\n\t\tgroup: ['transform'],\n\t\tversion: [1, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6],\n\t\tdescription: 'A simple chain to prompt a large language model',\n\t\tdefaults: {\n\t\t\tname: 'Basic LLM Chain',\n\t\t\tcolor: '#909298',\n\t\t},\n\t\tcodex: {\n\t\t\talias: ['LangChain'],\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Chains', 'Root Nodes'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.chainllm/',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\t\tinputs: `={{ ((parameter) => { ${getInputs.toString()}; return getInputs(parameter) })($parameter) }}`,\n\t\toutputs: [NodeConnectionTypes.Main],\n\t\tcredentials: [],\n\t\tproperties: nodeProperties,\n\t};\n\n\t/**\n\t * Main execution method for the node\n\t */\n\tasync execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> {\n\t\tthis.logger.debug('Executing Basic LLM Chain');\n\t\tconst items = this.getInputData();\n\t\tconst returnData: INodeExecutionData[] = [];\n\t\tconst { batchSize, delayBetweenBatches } = this.getNodeParameter('batching', 0, {\n\t\t\tbatchSize: 100,\n\t\t\tdelayBetweenBatches: 0,\n\t\t}) as {\n\t\t\tbatchSize: number;\n\t\t\tdelayBetweenBatches: number;\n\t\t};\n\t\t// Get output parser if configured\n\t\tconst outputParser = await getOptionalOutputParser(this);\n\n\t\t// Process items in batches\n\t\tfor (let i = 0; i < items.length; i += batchSize) {\n\t\t\tconst batch = items.slice(i, i + batchSize);\n\t\t\tconst batchPromises = batch.map(async (_item, batchItemIndex) => {\n\t\t\t\tconst itemIndex = i + batchItemIndex;\n\n\t\t\t\t// Get the language model\n\t\t\t\tconst llm = (await this.getInputConnectionData(\n\t\t\t\t\tNodeConnectionTypes.AiLanguageModel,\n\t\t\t\t\t0,\n\t\t\t\t)) as BaseLanguageModel;\n\n\t\t\t\t// Get user prompt based on node version\n\t\t\t\tlet prompt: string;\n\n\t\t\t\tif (this.getNode().typeVersion <= 1.3) {\n\t\t\t\t\tprompt = this.getNodeParameter('prompt', itemIndex) as string;\n\t\t\t\t} else {\n\t\t\t\t\tprompt = getPromptInputByType({\n\t\t\t\t\t\tctx: this,\n\t\t\t\t\t\ti: itemIndex,\n\t\t\t\t\t\tinputKey: 'text',\n\t\t\t\t\t\tpromptTypeKey: 'promptType',\n\t\t\t\t\t});\n\t\t\t\t}\n\n\t\t\t\t// Validate prompt\n\t\t\t\tif (prompt === undefined) {\n\t\t\t\t\tthrow new 
NodeOperationError(this.getNode(), \"The 'prompt' parameter is empty.\");\n\t\t\t\t}\n\n\t\t\t\t// Get chat messages if configured\n\t\t\t\tconst messages = this.getNodeParameter(\n\t\t\t\t\t'messages.messageValues',\n\t\t\t\t\titemIndex,\n\t\t\t\t\t[],\n\t\t\t\t) as MessageTemplate[];\n\n\t\t\t\treturn (await executeChain({\n\t\t\t\t\tcontext: this,\n\t\t\t\t\titemIndex,\n\t\t\t\t\tquery: prompt,\n\t\t\t\t\tllm,\n\t\t\t\t\toutputParser,\n\t\t\t\t\tmessages,\n\t\t\t\t})) as object[];\n\t\t\t});\n\n\t\t\tconst batchResults = await Promise.allSettled(batchPromises);\n\n\t\t\tbatchResults.forEach((promiseResult, batchItemIndex) => {\n\t\t\t\tconst itemIndex = i + batchItemIndex;\n\t\t\t\tif (promiseResult.status === 'rejected') {\n\t\t\t\t\tconst error = promiseResult.reason as Error;\n\t\t\t\t\t// Handle OpenAI specific rate limit errors\n\t\t\t\t\tif (error instanceof NodeApiError && isOpenAiError(error.cause)) {\n\t\t\t\t\t\tconst openAiErrorCode: string | undefined = (error.cause as any).error?.code;\n\t\t\t\t\t\tif (openAiErrorCode) {\n\t\t\t\t\t\t\tconst customMessage = getCustomOpenAiErrorMessage(openAiErrorCode);\n\t\t\t\t\t\t\tif (customMessage) {\n\t\t\t\t\t\t\t\terror.message = customMessage;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif (this.continueOnFail()) {\n\t\t\t\t\t\treturnData.push({\n\t\t\t\t\t\t\tjson: { error: error.message },\n\t\t\t\t\t\t\tpairedItem: { item: itemIndex },\n\t\t\t\t\t\t});\n\t\t\t\t\t\treturn;\n\t\t\t\t\t}\n\t\t\t\t\tthrow new NodeOperationError(this.getNode(), error);\n\t\t\t\t}\n\n\t\t\t\tconst responses = promiseResult.value;\n\t\t\t\tresponses.forEach((response: object) => {\n\t\t\t\t\treturnData.push({\n\t\t\t\t\t\tjson: formatResponse(response, this.getNode().typeVersion >= 1.6 || !!outputParser),\n\t\t\t\t\t});\n\t\t\t\t});\n\t\t\t});\n\n\t\t\tif (i + batchSize < items.length && delayBetweenBatches > 0) {\n\t\t\t\tawait sleep(delayBetweenBatches);\n\t\t\t}\n\t\t}\n\n\t\treturn 
[returnData];\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAOA,0BAA6E;AAE7E,qBAAqC;AACrC,6BAAwC;AAGxC,qBAMO;AACP,4BAGO;AAMA,MAAM,SAA8B;AAAA,EAApC;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA,MACb,MAAM;AAAA,MACN,MAAM;AAAA,MACN,WAAW;AAAA,MACX,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS,CAAC,GAAG,KAAK,KAAK,KAAK,KAAK,KAAK,GAAG;AAAA,MACzC,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,QACN,OAAO;AAAA,MACR;AAAA,MACA,OAAO;AAAA,QACN,OAAO,CAAC,WAAW;AAAA,QACnB,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,UAAU,YAAY;AAAA,QAC5B;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MACA,QAAQ,yBAAyB,yBAAU,SAAS,CAAC;AAAA,MACrD,SAAS,CAAC,wCAAoB,IAAI;AAAA,MAClC,aAAa,CAAC;AAAA,MACd,YAAY;AAAA,IACb;AAAA;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,UAAkE;AACvE,SAAK,OAAO,MAAM,2BAA2B;AAC7C,UAAM,QAAQ,KAAK,aAAa;AAChC,UAAM,aAAmC,CAAC;AAC1C,UAAM,EAAE,WAAW,oBAAoB,IAAI,KAAK,iBAAiB,YAAY,GAAG;AAAA,MAC/E,WAAW;AAAA,MACX,qBAAqB;AAAA,IACtB,CAAC;AAKD,UAAM,eAAe,UAAM,gDAAwB,IAAI;AAGvD,aAAS,IAAI,GAAG,IAAI,MAAM,QAAQ,KAAK,WAAW;AACjD,YAAM,QAAQ,MAAM,MAAM,GAAG,IAAI,SAAS;AAC1C,YAAM,gBAAgB,MAAM,IAAI,OAAO,OAAO,mBAAmB;AAChE,cAAM,YAAY,IAAI;AAGtB,cAAM,MAAO,MAAM,KAAK;AAAA,UACvB,wCAAoB;AAAA,UACpB;AAAA,QACD;AAGA,YAAI;AAEJ,YAAI,KAAK,QAAQ,EAAE,eAAe,KAAK;AACtC,mBAAS,KAAK,iBAAiB,UAAU,SAAS;AAAA,QACnD,OAAO;AACN,uBAAS,qCAAqB;AAAA,YAC7B,KAAK;AAAA,YACL,GAAG;AAAA,YACH,UAAU;AAAA,YACV,eAAe;AAAA,UAChB,CAAC;AAAA,QACF;AAGA,YAAI,WAAW,QAAW;AACzB,gBAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,kCAAkC;AAAA,QAChF;AAGA,cAAM,WAAW,KAAK;AAAA,UACrB;AAAA,UACA;AAAA,UACA,CAAC;AAAA,QACF;AAEA,eAAQ,UAAM,6BAAa;AAAA,UAC1B,SAAS;AAAA,UACT;AAAA,UACA,OAAO;AAAA,UACP;AAAA,UACA;AAAA,UACA;AAAA,QACD,CAAC;AAAA,MACF,CAAC;AAED,YAAM,eAAe,MAAM,QAAQ,WAAW,aAAa;AAE3D,mBAAa,QAAQ,CAAC,eAAe,mBAAmB;AACvD,cAAM,YAAY,IAAI;AACtB,YAAI,cAAc,WAAW,YAAY;AACxC,gBAAM,QAAQ,cAAc;AAE5B,cAAI,iBAAiB,wCAAgB,qCAAc,MAAM,KAAK,GAAG;AAChE,kBAAM,kBAAuC,MAAM,MAAc,OAAO;AACxE,gBAAI,iBAAiB;AACpB,oBAAM,oBAAgB,sBAAAA,uBAA4B,eAAe;AACjE,kBAAI,eAAe;AAClB,sBAAM,UAAU;AAAA,cACjB;AAAA,YACD;AAAA,UACD;AAEA,cAAI,KAAK,eAAe,GAAG;AAC1B,uBAAW,KAAK;AAAA,cACf,MAAM,EAAE,OAAO,MAAM,QAAQ;AAAA,cAC7B,YAAY,EAAE,MAAM,UAAU;AAAA,YAC/B,CAAC;AACD;AAAA,UACD;AACA,gBAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,KAAK;AAAA,QACnD;AAEA,cAAM,YAAY,cAAc;AAChC,kBAAU,QAAQ,CAAC,aAAqB;AACvC,qBAAW,KAAK;AAAA,YACf,UAAM,+BAAe,UAAU,KAAK,QAAQ,EAAE,eAAe,OAAO,CAAC,CAAC,YAAY;AAAA,UACnF,CAAC;AAAA,QACF,CAAC;AAAA,MACF,CAAC;AAED,UAAI,IAAI,YAAY,MAAM,UAAU,sBAAsB,GAAG;AAC5D,kBAAM,2BAAM,mBAAmB;AAAA,MAChC;AAAA,IACD;AAEA,WAAO,CAAC,UAAU;AAAA,EACnB;AACD;","names":["getCustomOpenAiErrorMessage"]}
@@ -269,6 +269,30 @@ const nodeProperties = [
  hasOutputParser: [true]
  }
  }
+ },
+ {
+ displayName: "Batch Processing",
+ name: "batching",
+ type: "collection",
+ placeholder: "Add Batch Processing Option",
+ description: "Batch processing options for rate limiting",
+ default: {},
+ options: [
+ {
+ displayName: "Batch Size",
+ name: "batchSize",
+ default: 100,
+ type: "number",
+ description: "How many items to process in parallel. This is useful for rate limiting, but will impact the agents log output."
+ },
+ {
+ displayName: "Delay Between Batches",
+ name: "delayBetweenBatches",
+ default: 1e3,
+ type: "number",
+ description: "Delay in milliseconds between batches. This is useful for rate limiting."
+ }
+ ]
  }
  ];
  // Annotate the CommonJS export names for ESM import in node:
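The new "Batch Processing" collection exposes two numeric options, batchSize (option default 100) and delayBetweenBatches (option default 1000 ms), while the execute method falls back to { batchSize: 100, delayBetweenBatches: 0 } when the collection is left empty. A hypothetical illustration of the value the node would read back from this collection; the interface name and sample values are illustrative, not part of the package:

	// Shape of the value returned for the "batching" collection on this node.
	interface BatchingOptions {
		batchSize: number; // items processed in parallel per batch
		delayBetweenBatches: number; // pause between batches, in milliseconds
	}

	const batching: BatchingOptions = {
		batchSize: 20,
		delayBetweenBatches: 1000,
	};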