@n8n/n8n-nodes-langchain 1.93.0 → 1.94.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. package/dist/credentials/AzureEntraCognitiveServicesOAuth2Api.credentials.js +4 -21
  2. package/dist/credentials/AzureEntraCognitiveServicesOAuth2Api.credentials.js.map +1 -1
  3. package/dist/nodes/agents/Agent/Agent.node.js +19 -394
  4. package/dist/nodes/agents/Agent/Agent.node.js.map +1 -1
  5. package/dist/nodes/agents/Agent/V1/AgentV1.node.js +427 -0
  6. package/dist/nodes/agents/Agent/V1/AgentV1.node.js.map +1 -0
  7. package/dist/nodes/agents/Agent/V2/AgentV2.node.js +162 -0
  8. package/dist/nodes/agents/Agent/V2/AgentV2.node.js.map +1 -0
  9. package/dist/nodes/agents/Agent/agents/ToolsAgent/{description.js → V1/description.js} +2 -34
  10. package/dist/nodes/agents/Agent/agents/ToolsAgent/V1/description.js.map +1 -0
  11. package/dist/nodes/agents/Agent/agents/ToolsAgent/V1/execute.js +119 -0
  12. package/dist/nodes/agents/Agent/agents/ToolsAgent/V1/execute.js.map +1 -0
  13. package/dist/nodes/agents/Agent/agents/ToolsAgent/V2/description.js +40 -0
  14. package/dist/nodes/agents/Agent/agents/ToolsAgent/V2/description.js.map +1 -0
  15. package/dist/nodes/agents/Agent/agents/ToolsAgent/V2/execute.js +139 -0
  16. package/dist/nodes/agents/Agent/agents/ToolsAgent/V2/execute.js.map +1 -0
  17. package/dist/nodes/agents/Agent/agents/ToolsAgent/{execute.js → common.js} +7 -97
  18. package/dist/nodes/agents/Agent/agents/ToolsAgent/common.js.map +1 -0
  19. package/dist/nodes/agents/Agent/agents/ToolsAgent/options.js +62 -0
  20. package/dist/nodes/agents/Agent/agents/ToolsAgent/options.js.map +1 -0
  21. package/dist/nodes/chains/ChainLLM/ChainLlm.node.js +69 -51
  22. package/dist/nodes/chains/ChainLLM/ChainLlm.node.js.map +1 -1
  23. package/dist/nodes/chains/ChainLLM/methods/chainExecutor.js +8 -0
  24. package/dist/nodes/chains/ChainLLM/methods/chainExecutor.js.map +1 -1
  25. package/dist/nodes/chains/ChainLLM/methods/config.js +5 -0
  26. package/dist/nodes/chains/ChainLLM/methods/config.js.map +1 -1
  27. package/dist/nodes/chains/ChainLLM/methods/processItem.js +66 -0
  28. package/dist/nodes/chains/ChainLLM/methods/processItem.js.map +1 -0
  29. package/dist/nodes/chains/ChainRetrievalQA/ChainRetrievalQa.node.js +71 -95
  30. package/dist/nodes/chains/ChainRetrievalQA/ChainRetrievalQa.node.js.map +1 -1
  31. package/dist/nodes/chains/ChainRetrievalQA/constants.js +49 -0
  32. package/dist/nodes/chains/ChainRetrievalQA/constants.js.map +1 -0
  33. package/dist/nodes/chains/ChainRetrievalQA/processItem.js +91 -0
  34. package/dist/nodes/chains/ChainRetrievalQA/processItem.js.map +1 -0
  35. package/dist/nodes/chains/ChainSummarization/ChainSummarization.node.js +3 -2
  36. package/dist/nodes/chains/ChainSummarization/ChainSummarization.node.js.map +1 -1
  37. package/dist/nodes/chains/ChainSummarization/V2/ChainSummarizationV2.node.js +55 -78
  38. package/dist/nodes/chains/ChainSummarization/V2/ChainSummarizationV2.node.js.map +1 -1
  39. package/dist/nodes/chains/ChainSummarization/V2/processItem.js +95 -0
  40. package/dist/nodes/chains/ChainSummarization/V2/processItem.js.map +1 -0
  41. package/dist/nodes/chains/InformationExtractor/InformationExtractor.node.js +56 -33
  42. package/dist/nodes/chains/InformationExtractor/InformationExtractor.node.js.map +1 -1
  43. package/dist/nodes/chains/InformationExtractor/constants.js +31 -0
  44. package/dist/nodes/chains/InformationExtractor/constants.js.map +1 -0
  45. package/dist/nodes/chains/InformationExtractor/processItem.js +50 -0
  46. package/dist/nodes/chains/InformationExtractor/processItem.js.map +1 -0
  47. package/dist/nodes/chains/SentimentAnalysis/SentimentAnalysis.node.js +198 -71
  48. package/dist/nodes/chains/SentimentAnalysis/SentimentAnalysis.node.js.map +1 -1
  49. package/dist/nodes/chains/TextClassifier/TextClassifier.node.js +83 -54
  50. package/dist/nodes/chains/TextClassifier/TextClassifier.node.js.map +1 -1
  51. package/dist/nodes/chains/TextClassifier/constants.js +29 -0
  52. package/dist/nodes/chains/TextClassifier/constants.js.map +1 -0
  53. package/dist/nodes/chains/TextClassifier/processItem.js +65 -0
  54. package/dist/nodes/chains/TextClassifier/processItem.js.map +1 -0
  55. package/dist/nodes/llms/LMChatAnthropic/LmChatAnthropic.node.js +5 -1
  56. package/dist/nodes/llms/LMChatAnthropic/LmChatAnthropic.node.js.map +1 -1
  57. package/dist/nodes/llms/LMChatOpenAi/LmChatOpenAi.node.js +4 -1
  58. package/dist/nodes/llms/LMChatOpenAi/LmChatOpenAi.node.js.map +1 -1
  59. package/dist/nodes/llms/LMChatOpenAi/methods/loadModels.js +6 -1
  60. package/dist/nodes/llms/LMChatOpenAi/methods/loadModels.js.map +1 -1
  61. package/dist/nodes/llms/LMOpenAi/LmOpenAi.node.js +4 -1
  62. package/dist/nodes/llms/LMOpenAi/LmOpenAi.node.js.map +1 -1
  63. package/dist/nodes/llms/LmChatAwsBedrock/LmChatAwsBedrock.node.js +4 -0
  64. package/dist/nodes/llms/LmChatAwsBedrock/LmChatAwsBedrock.node.js.map +1 -1
  65. package/dist/nodes/llms/LmChatAzureOpenAi/LmChatAzureOpenAi.node.js +4 -0
  66. package/dist/nodes/llms/LmChatAzureOpenAi/LmChatAzureOpenAi.node.js.map +1 -1
  67. package/dist/nodes/llms/LmChatAzureOpenAi/credentials/N8nOAuth2TokenCredential.js +16 -2
  68. package/dist/nodes/llms/LmChatAzureOpenAi/credentials/N8nOAuth2TokenCredential.js.map +1 -1
  69. package/dist/nodes/llms/LmChatDeepSeek/LmChatDeepSeek.node.js +3 -1
  70. package/dist/nodes/llms/LmChatDeepSeek/LmChatDeepSeek.node.js.map +1 -1
  71. package/dist/nodes/llms/LmChatGroq/LmChatGroq.node.js +2 -0
  72. package/dist/nodes/llms/LmChatGroq/LmChatGroq.node.js.map +1 -1
  73. package/dist/nodes/llms/LmChatOpenRouter/LmChatOpenRouter.node.js +3 -1
  74. package/dist/nodes/llms/LmChatOpenRouter/LmChatOpenRouter.node.js.map +1 -1
  75. package/dist/nodes/llms/LmChatXAiGrok/LmChatXAiGrok.node.js +3 -1
  76. package/dist/nodes/llms/LmChatXAiGrok/LmChatXAiGrok.node.js.map +1 -1
  77. package/dist/nodes/mcp/McpTrigger/McpTrigger.node.js +2 -2
  78. package/dist/nodes/mcp/McpTrigger/McpTrigger.node.js.map +1 -1
  79. package/dist/nodes/tools/ToolWorkflow/v2/ToolWorkflowV2.node.js +1 -0
  80. package/dist/nodes/tools/ToolWorkflow/v2/ToolWorkflowV2.node.js.map +1 -1
  81. package/dist/nodes/tools/ToolWorkflow/v2/utils/WorkflowToolService.js +2 -1
  82. package/dist/nodes/tools/ToolWorkflow/v2/utils/WorkflowToolService.js.map +1 -1
  83. package/dist/nodes/vendors/OpenAi/actions/image/generate.operation.js +6 -1
  84. package/dist/nodes/vendors/OpenAi/actions/image/generate.operation.js.map +1 -1
  85. package/dist/types/credentials.json +1 -1
  86. package/dist/types/nodes.json +11 -10
  87. package/dist/utils/httpProxyAgent.js +33 -0
  88. package/dist/utils/httpProxyAgent.js.map +1 -0
  89. package/dist/utils/sharedFields.js +29 -0
  90. package/dist/utils/sharedFields.js.map +1 -1
  91. package/package.json +7 -6
  92. package/dist/nodes/agents/Agent/agents/ToolsAgent/description.js.map +0 -1
  93. package/dist/nodes/agents/Agent/agents/ToolsAgent/execute.js.map +0 -1
@@ -22,9 +22,9 @@ __export(ChainLlm_node_exports, {
 });
 module.exports = __toCommonJS(ChainLlm_node_exports);
 var import_n8n_workflow = require("n8n-workflow");
-var import_helpers = require("../../../utils/helpers");
 var import_N8nOutputParser = require("../../../utils/output_parsers/N8nOutputParser");
 var import_methods = require("./methods");
+var import_processItem = require("./methods/processItem");
 var import_error_handling = require("../../vendors/OpenAi/helpers/error-handling");
 class ChainLlm {
   constructor() {
@@ -34,7 +34,7 @@ class ChainLlm {
       icon: "fa:link",
       iconColor: "black",
       group: ["transform"],
-      version: [1, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6],
+      version: [1, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
       description: "A simple chain to prompt a large language model",
       defaults: {
         name: "Basic LLM Chain",
@@ -67,61 +67,79 @@ class ChainLlm {
     this.logger.debug("Executing Basic LLM Chain");
     const items = this.getInputData();
     const returnData = [];
-    for (let itemIndex = 0; itemIndex < items.length; itemIndex++) {
-      try {
-        const llm = await this.getInputConnectionData(
-          import_n8n_workflow.NodeConnectionTypes.AiLanguageModel,
-          0
-        );
-        const outputParser = await (0, import_N8nOutputParser.getOptionalOutputParser)(this);
-        let prompt;
-        if (this.getNode().typeVersion <= 1.3) {
-          prompt = this.getNodeParameter("prompt", itemIndex);
-        } else {
-          prompt = (0, import_helpers.getPromptInputByType)({
-            ctx: this,
-            i: itemIndex,
-            inputKey: "text",
-            promptTypeKey: "promptType"
-          });
-        }
-        if (prompt === void 0) {
-          throw new import_n8n_workflow.NodeOperationError(this.getNode(), "The 'prompt' parameter is empty.");
-        }
-        const messages = this.getNodeParameter(
-          "messages.messageValues",
-          itemIndex,
-          []
-        );
-        const responses = await (0, import_methods.executeChain)({
-          context: this,
-          itemIndex,
-          query: prompt,
-          llm,
-          outputParser,
-          messages
+    const outputParser = await (0, import_N8nOutputParser.getOptionalOutputParser)(this);
+    const shouldUnwrapObjects = this.getNode().typeVersion >= 1.6 || !!outputParser;
+    const batchSize = this.getNodeParameter("batching.batchSize", 0, 5);
+    const delayBetweenBatches = this.getNodeParameter(
+      "batching.delayBetweenBatches",
+      0,
+      0
+    );
+    if (this.getNode().typeVersion >= 1.7 && batchSize > 1) {
+      for (let i = 0; i < items.length; i += batchSize) {
+        const batch = items.slice(i, i + batchSize);
+        const batchPromises = batch.map(async (_item, batchItemIndex) => {
+          return await (0, import_processItem.processItem)(this, i + batchItemIndex);
         });
-        const shouldUnwrapObjects = this.getNode().typeVersion >= 1.6 || !!outputParser;
-        responses.forEach((response) => {
-          returnData.push({
-            json: (0, import_methods.formatResponse)(response, shouldUnwrapObjects)
+        const batchResults = await Promise.allSettled(batchPromises);
+        batchResults.forEach((promiseResult, batchItemIndex) => {
+          const itemIndex = i + batchItemIndex;
+          if (promiseResult.status === "rejected") {
+            const error = promiseResult.reason;
+            if (error instanceof import_n8n_workflow.NodeApiError && (0, import_error_handling.isOpenAiError)(error.cause)) {
+              const openAiErrorCode = error.cause.error?.code;
+              if (openAiErrorCode) {
+                const customMessage = (0, import_error_handling.getCustomErrorMessage)(openAiErrorCode);
+                if (customMessage) {
+                  error.message = customMessage;
+                }
+              }
+            }
+            if (this.continueOnFail()) {
+              returnData.push({
+                json: { error: error.message },
+                pairedItem: { item: itemIndex }
+              });
+              return;
+            }
+            throw new import_n8n_workflow.NodeOperationError(this.getNode(), error);
+          }
+          const responses = promiseResult.value;
+          responses.forEach((response) => {
+            returnData.push({
+              json: (0, import_methods.formatResponse)(response, shouldUnwrapObjects)
+            });
           });
         });
-      } catch (error) {
-        if (error instanceof import_n8n_workflow.NodeApiError && (0, import_error_handling.isOpenAiError)(error.cause)) {
-          const openAiErrorCode = error.cause.error?.code;
-          if (openAiErrorCode) {
-            const customMessage = (0, import_error_handling.getCustomErrorMessage)(openAiErrorCode);
-            if (customMessage) {
-              error.message = customMessage;
+        if (i + batchSize < items.length && delayBetweenBatches > 0) {
+          await (0, import_n8n_workflow.sleep)(delayBetweenBatches);
+        }
+      }
+    } else {
+      for (let itemIndex = 0; itemIndex < items.length; itemIndex++) {
+        try {
+          const responses = await (0, import_processItem.processItem)(this, itemIndex);
+          responses.forEach((response) => {
+            returnData.push({
+              json: (0, import_methods.formatResponse)(response, shouldUnwrapObjects)
+            });
+          });
+        } catch (error) {
+          if (error instanceof import_n8n_workflow.NodeApiError && (0, import_error_handling.isOpenAiError)(error.cause)) {
+            const openAiErrorCode = error.cause.error?.code;
+            if (openAiErrorCode) {
+              const customMessage = (0, import_error_handling.getCustomErrorMessage)(openAiErrorCode);
+              if (customMessage) {
+                error.message = customMessage;
+              }
             }
           }
+          if (this.continueOnFail()) {
+            returnData.push({ json: { error: error.message }, pairedItem: { item: itemIndex } });
+            continue;
+          }
+          throw error;
         }
-        if (this.continueOnFail()) {
-          returnData.push({ json: { error: error.message }, pairedItem: { item: itemIndex } });
-          continue;
-        }
-        throw error;
       }
     }
     return [returnData];
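The heart of this hunk: on typeVersion >= 1.7 with batchSize > 1, items are sliced into groups, each group runs concurrently through processItem, Promise.allSettled keeps per-item failures isolated so continueOnFail() can emit per-item errors, and the loop optionally sleeps between groups. A minimal standalone sketch of that pattern — the runInBatches wrapper is illustrative, not part of the package; the defaults of 5 and 0 mirror the batching.batchSize and batching.delayBetweenBatches reads above:

```ts
// Sketch of the batching pattern used by the new execute() path above.
// `runInBatches` is a hypothetical helper for illustration only; the
// batch-size default (5) and delay default (0) come from the diff.
async function runInBatches<T, R>(
  items: T[],
  processOne: (item: T, index: number) => Promise<R>,
  batchSize = 5,
  delayBetweenBatches = 0,
): Promise<Array<PromiseSettledResult<R>>> {
  const results: Array<PromiseSettledResult<R>> = [];
  for (let i = 0; i < items.length; i += batchSize) {
    const batch = items.slice(i, i + batchSize);
    // Items within a batch run concurrently; allSettled isolates failures
    // so one rejection does not abort its sibling promises.
    const settled = await Promise.allSettled(
      batch.map(async (item, j) => await processOne(item, i + j)),
    );
    results.push(...settled);
    // Delay only between batches, never after the last one (as in the diff).
    if (i + batchSize < items.length && delayBetweenBatches > 0) {
      await new Promise((resolve) => setTimeout(resolve, delayBetweenBatches));
    }
  }
  return results;
}
```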
@@ -1 +1 @@
- {"version":3,"sources":["../../../../nodes/chains/ChainLLM/ChainLlm.node.ts"],"sourcesContent":["import type { BaseLanguageModel } from '@langchain/core/language_models/base';\nimport type {\n\tIExecuteFunctions,\n\tINodeExecutionData,\n\tINodeType,\n\tINodeTypeDescription,\n} from 'n8n-workflow';\nimport { NodeApiError, NodeConnectionTypes, NodeOperationError } from 'n8n-workflow';\n\nimport { getPromptInputByType } from '@utils/helpers';\nimport { getOptionalOutputParser } from '@utils/output_parsers/N8nOutputParser';\n\n// Import from centralized module\nimport {\n\texecuteChain,\n\tformatResponse,\n\tgetInputs,\n\tnodeProperties,\n\ttype MessageTemplate,\n} from './methods';\nimport {\n\tgetCustomErrorMessage as getCustomOpenAiErrorMessage,\n\tisOpenAiError,\n} from '../../vendors/OpenAi/helpers/error-handling';\n\n/**\n * Basic LLM Chain Node Implementation\n * Allows connecting to language models with optional structured output parsing\n */\nexport class ChainLlm implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'Basic LLM Chain',\n\t\tname: 'chainLlm',\n\t\ticon: 'fa:link',\n\t\ticonColor: 'black',\n\t\tgroup: ['transform'],\n\t\tversion: [1, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6],\n\t\tdescription: 'A simple chain to prompt a large language model',\n\t\tdefaults: {\n\t\t\tname: 'Basic LLM Chain',\n\t\t\tcolor: '#909298',\n\t\t},\n\t\tcodex: {\n\t\t\talias: ['LangChain'],\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Chains', 'Root Nodes'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.chainllm/',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\t\tinputs: `={{ ((parameter) => { ${getInputs.toString()}; return getInputs(parameter) })($parameter) }}`,\n\t\toutputs: [NodeConnectionTypes.Main],\n\t\tcredentials: [],\n\t\tproperties: nodeProperties,\n\t};\n\n\t/**\n\t * Main execution method for the node\n\t */\n\tasync execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> {\n\t\tthis.logger.debug('Executing Basic LLM Chain');\n\t\tconst items = this.getInputData();\n\t\tconst returnData: INodeExecutionData[] = [];\n\n\t\t// Process each input item\n\t\tfor (let itemIndex = 0; itemIndex < items.length; itemIndex++) {\n\t\t\ttry {\n\t\t\t\t// Get the language model\n\t\t\t\tconst llm = (await this.getInputConnectionData(\n\t\t\t\t\tNodeConnectionTypes.AiLanguageModel,\n\t\t\t\t\t0,\n\t\t\t\t)) as BaseLanguageModel;\n\n\t\t\t\t// Get output parser if configured\n\t\t\t\tconst outputParser = await getOptionalOutputParser(this);\n\n\t\t\t\t// Get user prompt based on node version\n\t\t\t\tlet prompt: string;\n\n\t\t\t\tif (this.getNode().typeVersion <= 1.3) {\n\t\t\t\t\tprompt = this.getNodeParameter('prompt', itemIndex) as string;\n\t\t\t\t} else {\n\t\t\t\t\tprompt = getPromptInputByType({\n\t\t\t\t\t\tctx: this,\n\t\t\t\t\t\ti: itemIndex,\n\t\t\t\t\t\tinputKey: 'text',\n\t\t\t\t\t\tpromptTypeKey: 'promptType',\n\t\t\t\t\t});\n\t\t\t\t}\n\n\t\t\t\t// Validate prompt\n\t\t\t\tif (prompt === undefined) {\n\t\t\t\t\tthrow new NodeOperationError(this.getNode(), \"The 'prompt' parameter is empty.\");\n\t\t\t\t}\n\n\t\t\t\t// Get chat messages if configured\n\t\t\t\tconst messages = this.getNodeParameter(\n\t\t\t\t\t'messages.messageValues',\n\t\t\t\t\titemIndex,\n\t\t\t\t\t[],\n\t\t\t\t) as MessageTemplate[];\n\n\t\t\t\t// Execute the chain\n\t\t\t\tconst responses = await executeChain({\n\t\t\t\t\tcontext: 
this,\n\t\t\t\t\titemIndex,\n\t\t\t\t\tquery: prompt,\n\t\t\t\t\tllm,\n\t\t\t\t\toutputParser,\n\t\t\t\t\tmessages,\n\t\t\t\t});\n\n\t\t\t\t// If the node version is 1.6(and LLM is using `response_format: json_object`) or higher or an output parser is configured,\n\t\t\t\t// we unwrap the response and return the object directly as JSON\n\t\t\t\tconst shouldUnwrapObjects = this.getNode().typeVersion >= 1.6 || !!outputParser;\n\t\t\t\t// Process each response and add to return data\n\t\t\t\tresponses.forEach((response) => {\n\t\t\t\t\treturnData.push({\n\t\t\t\t\t\tjson: formatResponse(response, shouldUnwrapObjects),\n\t\t\t\t\t});\n\t\t\t\t});\n\t\t\t} catch (error) {\n\t\t\t\t// Handle OpenAI specific rate limit errors\n\t\t\t\tif (error instanceof NodeApiError && isOpenAiError(error.cause)) {\n\t\t\t\t\tconst openAiErrorCode: string | undefined = (error.cause as any).error?.code;\n\t\t\t\t\tif (openAiErrorCode) {\n\t\t\t\t\t\tconst customMessage = getCustomOpenAiErrorMessage(openAiErrorCode);\n\t\t\t\t\t\tif (customMessage) {\n\t\t\t\t\t\t\terror.message = customMessage;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Continue on failure if configured\n\t\t\t\tif (this.continueOnFail()) {\n\t\t\t\t\treturnData.push({ json: { error: error.message }, pairedItem: { item: itemIndex } });\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tthrow error;\n\t\t\t}\n\t\t}\n\n\t\treturn [returnData];\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAOA,0BAAsE;AAEtE,qBAAqC;AACrC,6BAAwC;AAGxC,qBAMO;AACP,4BAGO;AAMA,MAAM,SAA8B;AAAA,EAApC;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA,MACb,MAAM;AAAA,MACN,MAAM;AAAA,MACN,WAAW;AAAA,MACX,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS,CAAC,GAAG,KAAK,KAAK,KAAK,KAAK,KAAK,GAAG;AAAA,MACzC,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,QACN,OAAO;AAAA,MACR;AAAA,MACA,OAAO;AAAA,QACN,OAAO,CAAC,WAAW;AAAA,QACnB,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,UAAU,YAAY;AAAA,QAC5B;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MACA,QAAQ,yBAAyB,yBAAU,SAAS,CAAC;AAAA,MACrD,SAAS,CAAC,wCAAoB,IAAI;AAAA,MAClC,aAAa,CAAC;AAAA,MACd,YAAY;AAAA,IACb;AAAA;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,UAAkE;AACvE,SAAK,OAAO,MAAM,2BAA2B;AAC7C,UAAM,QAAQ,KAAK,aAAa;AAChC,UAAM,aAAmC,CAAC;AAG1C,aAAS,YAAY,GAAG,YAAY,MAAM,QAAQ,aAAa;AAC9D,UAAI;AAEH,cAAM,MAAO,MAAM,KAAK;AAAA,UACvB,wCAAoB;AAAA,UACpB;AAAA,QACD;AAGA,cAAM,eAAe,UAAM,gDAAwB,IAAI;AAGvD,YAAI;AAEJ,YAAI,KAAK,QAAQ,EAAE,eAAe,KAAK;AACtC,mBAAS,KAAK,iBAAiB,UAAU,SAAS;AAAA,QACnD,OAAO;AACN,uBAAS,qCAAqB;AAAA,YAC7B,KAAK;AAAA,YACL,GAAG;AAAA,YACH,UAAU;AAAA,YACV,eAAe;AAAA,UAChB,CAAC;AAAA,QACF;AAGA,YAAI,WAAW,QAAW;AACzB,gBAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,kCAAkC;AAAA,QAChF;AAGA,cAAM,WAAW,KAAK;AAAA,UACrB;AAAA,UACA;AAAA,UACA,CAAC;AAAA,QACF;AAGA,cAAM,YAAY,UAAM,6BAAa;AAAA,UACpC,SAAS;AAAA,UACT;AAAA,UACA,OAAO;AAAA,UACP;AAAA,UACA;AAAA,UACA;AAAA,QACD,CAAC;AAID,cAAM,sBAAsB,KAAK,QAAQ,EAAE,eAAe,OAAO,CAAC,CAAC;AAEnE,kBAAU,QAAQ,CAAC,aAAa;AAC/B,qBAAW,KAAK;AAAA,YACf,UAAM,+BAAe,UAAU,mBAAmB;AAAA,UACnD,CAAC;AAAA,QACF,CAAC;AAAA,MACF,SAAS,OAAO;AAEf,YAAI,iBAAiB,wCAAgB,qCAAc,MAAM,KAAK,GAAG;AAChE,gBAAM,kBAAuC,MAAM,MAAc,OAAO;AACxE,cAAI,iBAAiB;AACpB,kBAAM,oBAAgB,sBAAAA,uBAA4B,eAAe;AACjE,gBAAI,eAAe;AAClB,oBAAM,UAAU;AAAA,YACjB;AAAA,UACD;AAAA,QACD;AAGA,YAAI,KAAK,eAAe,GAAG;AAC1B,qBAAW,KAAK,EAAE,MAAM,EAAE,OAAO,MAAM,QAAQ,GAAG,YAAY,EAAE,MAAM,UAAU,EAAE,CAAC;AACnF;AAAA,QACD;AAEA,cAAM;AAAA,MACP;AAAA,IACD;AAEA,WAAO,CAAC,UAAU;AAAA,EACnB;AACD;","names":["getCustomOpenAiErrorMessage"]}
+ {"version":3,"sources":["../../../../nodes/chains/ChainLLM/ChainLlm.node.ts"],"sourcesContent":["import type {\n\tIExecuteFunctions,\n\tINodeExecutionData,\n\tINodeType,\n\tINodeTypeDescription,\n} from 'n8n-workflow';\nimport { NodeApiError, NodeConnectionTypes, NodeOperationError, sleep } from 'n8n-workflow';\n\nimport { getOptionalOutputParser } from '@utils/output_parsers/N8nOutputParser';\n\n// Import from centralized module\nimport { formatResponse, getInputs, nodeProperties } from './methods';\nimport { processItem } from './methods/processItem';\nimport {\n\tgetCustomErrorMessage as getCustomOpenAiErrorMessage,\n\tisOpenAiError,\n} from '../../vendors/OpenAi/helpers/error-handling';\n\n/**\n * Basic LLM Chain Node Implementation\n * Allows connecting to language models with optional structured output parsing\n */\nexport class ChainLlm implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'Basic LLM Chain',\n\t\tname: 'chainLlm',\n\t\ticon: 'fa:link',\n\t\ticonColor: 'black',\n\t\tgroup: ['transform'],\n\t\tversion: [1, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],\n\t\tdescription: 'A simple chain to prompt a large language model',\n\t\tdefaults: {\n\t\t\tname: 'Basic LLM Chain',\n\t\t\tcolor: '#909298',\n\t\t},\n\t\tcodex: {\n\t\t\talias: ['LangChain'],\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Chains', 'Root Nodes'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.chainllm/',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\t\tinputs: `={{ ((parameter) => { ${getInputs.toString()}; return getInputs(parameter) })($parameter) }}`,\n\t\toutputs: [NodeConnectionTypes.Main],\n\t\tcredentials: [],\n\t\tproperties: nodeProperties,\n\t};\n\n\t/**\n\t * Main execution method for the node\n\t */\n\tasync execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> {\n\t\tthis.logger.debug('Executing Basic LLM Chain');\n\t\tconst items = this.getInputData();\n\t\tconst returnData: INodeExecutionData[] = [];\n\t\tconst outputParser = await getOptionalOutputParser(this);\n\t\t// If the node version is 1.6(and LLM is using `response_format: json_object`) or higher or an output parser is configured,\n\t\t// we unwrap the response and return the object directly as JSON\n\t\tconst shouldUnwrapObjects = this.getNode().typeVersion >= 1.6 || !!outputParser;\n\n\t\tconst batchSize = this.getNodeParameter('batching.batchSize', 0, 5) as number;\n\t\tconst delayBetweenBatches = this.getNodeParameter(\n\t\t\t'batching.delayBetweenBatches',\n\t\t\t0,\n\t\t\t0,\n\t\t) as number;\n\n\t\tif (this.getNode().typeVersion >= 1.7 && batchSize > 1) {\n\t\t\t// Process items in batches\n\t\t\tfor (let i = 0; i < items.length; i += batchSize) {\n\t\t\t\tconst batch = items.slice(i, i + batchSize);\n\t\t\t\tconst batchPromises = batch.map(async (_item, batchItemIndex) => {\n\t\t\t\t\treturn await processItem(this, i + batchItemIndex);\n\t\t\t\t});\n\n\t\t\t\tconst batchResults = await Promise.allSettled(batchPromises);\n\n\t\t\t\tbatchResults.forEach((promiseResult, batchItemIndex) => {\n\t\t\t\t\tconst itemIndex = i + batchItemIndex;\n\t\t\t\t\tif (promiseResult.status === 'rejected') {\n\t\t\t\t\t\tconst error = promiseResult.reason as Error;\n\t\t\t\t\t\t// Handle OpenAI specific rate limit errors\n\t\t\t\t\t\tif (error instanceof NodeApiError && isOpenAiError(error.cause)) {\n\t\t\t\t\t\t\tconst openAiErrorCode: string | undefined = 
(error.cause as any).error?.code;\n\t\t\t\t\t\t\tif (openAiErrorCode) {\n\t\t\t\t\t\t\t\tconst customMessage = getCustomOpenAiErrorMessage(openAiErrorCode);\n\t\t\t\t\t\t\t\tif (customMessage) {\n\t\t\t\t\t\t\t\t\terror.message = customMessage;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif (this.continueOnFail()) {\n\t\t\t\t\t\t\treturnData.push({\n\t\t\t\t\t\t\t\tjson: { error: error.message },\n\t\t\t\t\t\t\t\tpairedItem: { item: itemIndex },\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tthrow new NodeOperationError(this.getNode(), error);\n\t\t\t\t\t}\n\n\t\t\t\t\tconst responses = promiseResult.value;\n\t\t\t\t\tresponses.forEach((response: unknown) => {\n\t\t\t\t\t\treturnData.push({\n\t\t\t\t\t\t\tjson: formatResponse(response, shouldUnwrapObjects),\n\t\t\t\t\t\t});\n\t\t\t\t\t});\n\t\t\t\t});\n\n\t\t\t\tif (i + batchSize < items.length && delayBetweenBatches > 0) {\n\t\t\t\t\tawait sleep(delayBetweenBatches);\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t// Process each input item\n\t\t\tfor (let itemIndex = 0; itemIndex < items.length; itemIndex++) {\n\t\t\t\ttry {\n\t\t\t\t\tconst responses = await processItem(this, itemIndex);\n\n\t\t\t\t\t// Process each response and add to return data\n\t\t\t\t\tresponses.forEach((response) => {\n\t\t\t\t\t\treturnData.push({\n\t\t\t\t\t\t\tjson: formatResponse(response, shouldUnwrapObjects),\n\t\t\t\t\t\t});\n\t\t\t\t\t});\n\t\t\t\t} catch (error) {\n\t\t\t\t\t// Handle OpenAI specific rate limit errors\n\t\t\t\t\tif (error instanceof NodeApiError && isOpenAiError(error.cause)) {\n\t\t\t\t\t\tconst openAiErrorCode: string | undefined = (error.cause as any).error?.code;\n\t\t\t\t\t\tif (openAiErrorCode) {\n\t\t\t\t\t\t\tconst customMessage = getCustomOpenAiErrorMessage(openAiErrorCode);\n\t\t\t\t\t\t\tif (customMessage) {\n\t\t\t\t\t\t\t\terror.message = customMessage;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// Continue on failure if configured\n\t\t\t\t\tif (this.continueOnFail()) {\n\t\t\t\t\t\treturnData.push({ json: { error: error.message }, pairedItem: { item: itemIndex } });\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t}\n\n\t\t\t\t\tthrow error;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn 
[returnData];\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAMA,0BAA6E;AAE7E,6BAAwC;AAGxC,qBAA0D;AAC1D,yBAA4B;AAC5B,4BAGO;AAMA,MAAM,SAA8B;AAAA,EAApC;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA,MACb,MAAM;AAAA,MACN,MAAM;AAAA,MACN,WAAW;AAAA,MACX,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS,CAAC,GAAG,KAAK,KAAK,KAAK,KAAK,KAAK,KAAK,GAAG;AAAA,MAC9C,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,QACN,OAAO;AAAA,MACR;AAAA,MACA,OAAO;AAAA,QACN,OAAO,CAAC,WAAW;AAAA,QACnB,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,UAAU,YAAY;AAAA,QAC5B;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MACA,QAAQ,yBAAyB,yBAAU,SAAS,CAAC;AAAA,MACrD,SAAS,CAAC,wCAAoB,IAAI;AAAA,MAClC,aAAa,CAAC;AAAA,MACd,YAAY;AAAA,IACb;AAAA;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,UAAkE;AACvE,SAAK,OAAO,MAAM,2BAA2B;AAC7C,UAAM,QAAQ,KAAK,aAAa;AAChC,UAAM,aAAmC,CAAC;AAC1C,UAAM,eAAe,UAAM,gDAAwB,IAAI;AAGvD,UAAM,sBAAsB,KAAK,QAAQ,EAAE,eAAe,OAAO,CAAC,CAAC;AAEnE,UAAM,YAAY,KAAK,iBAAiB,sBAAsB,GAAG,CAAC;AAClE,UAAM,sBAAsB,KAAK;AAAA,MAChC;AAAA,MACA;AAAA,MACA;AAAA,IACD;AAEA,QAAI,KAAK,QAAQ,EAAE,eAAe,OAAO,YAAY,GAAG;AAEvD,eAAS,IAAI,GAAG,IAAI,MAAM,QAAQ,KAAK,WAAW;AACjD,cAAM,QAAQ,MAAM,MAAM,GAAG,IAAI,SAAS;AAC1C,cAAM,gBAAgB,MAAM,IAAI,OAAO,OAAO,mBAAmB;AAChE,iBAAO,UAAM,gCAAY,MAAM,IAAI,cAAc;AAAA,QAClD,CAAC;AAED,cAAM,eAAe,MAAM,QAAQ,WAAW,aAAa;AAE3D,qBAAa,QAAQ,CAAC,eAAe,mBAAmB;AACvD,gBAAM,YAAY,IAAI;AACtB,cAAI,cAAc,WAAW,YAAY;AACxC,kBAAM,QAAQ,cAAc;AAE5B,gBAAI,iBAAiB,wCAAgB,qCAAc,MAAM,KAAK,GAAG;AAChE,oBAAM,kBAAuC,MAAM,MAAc,OAAO;AACxE,kBAAI,iBAAiB;AACpB,sBAAM,oBAAgB,sBAAAA,uBAA4B,eAAe;AACjE,oBAAI,eAAe;AAClB,wBAAM,UAAU;AAAA,gBACjB;AAAA,cACD;AAAA,YACD;AAEA,gBAAI,KAAK,eAAe,GAAG;AAC1B,yBAAW,KAAK;AAAA,gBACf,MAAM,EAAE,OAAO,MAAM,QAAQ;AAAA,gBAC7B,YAAY,EAAE,MAAM,UAAU;AAAA,cAC/B,CAAC;AACD;AAAA,YACD;AACA,kBAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,KAAK;AAAA,UACnD;AAEA,gBAAM,YAAY,cAAc;AAChC,oBAAU,QAAQ,CAAC,aAAsB;AACxC,uBAAW,KAAK;AAAA,cACf,UAAM,+BAAe,UAAU,mBAAmB;AAAA,YACnD,CAAC;AAAA,UACF,CAAC;AAAA,QACF,CAAC;AAED,YAAI,IAAI,YAAY,MAAM,UAAU,sBAAsB,GAAG;AAC5D,oBAAM,2BAAM,mBAAmB;AAAA,QAChC;AAAA,MACD;AAAA,IACD,OAAO;AAEN,eAAS,YAAY,GAAG,YAAY,MAAM,QAAQ,aAAa;AAC9D,YAAI;AACH,gBAAM,YAAY,UAAM,gCAAY,MAAM,SAAS;AAGnD,oBAAU,QAAQ,CAAC,aAAa;AAC/B,uBAAW,KAAK;AAAA,cACf,UAAM,+BAAe,UAAU,mBAAmB;AAAA,YACnD,CAAC;AAAA,UACF,CAAC;AAAA,QACF,SAAS,OAAO;AAEf,cAAI,iBAAiB,wCAAgB,qCAAc,MAAM,KAAK,GAAG;AAChE,kBAAM,kBAAuC,MAAM,MAAc,OAAO;AACxE,gBAAI,iBAAiB;AACpB,oBAAM,oBAAgB,sBAAAA,uBAA4B,eAAe;AACjE,kBAAI,eAAe;AAClB,sBAAM,UAAU;AAAA,cACjB;AAAA,YACD;AAAA,UACD;AAGA,cAAI,KAAK,eAAe,GAAG;AAC1B,uBAAW,KAAK,EAAE,MAAM,EAAE,OAAO,MAAM,QAAQ,GAAG,YAAY,EAAE,MAAM,UAAU,EAAE,CAAC;AACnF;AAAA,UACD;AAEA,gBAAM;AAAA,QACP;AAAA,MACD;AAAA,IACD;AAEA,WAAO,CAAC,UAAU;AAAA,EACnB;AACD;","names":["getCustomOpenAiErrorMessage"]}
@@ -21,6 +21,7 @@ __export(chainExecutor_exports, {
   NaiveJsonOutputParser: () => NaiveJsonOutputParser,
   executeChain: () => executeChain,
   getOutputParserForLLM: () => getOutputParserForLLM,
+  isModelInThinkingMode: () => isModelInThinkingMode,
   isModelWithFormat: () => isModelWithFormat,
   isModelWithResponseFormat: () => isModelWithResponseFormat
 });
@@ -41,6 +42,9 @@ class NaiveJsonOutputParser extends import_output_parsers.JsonOutputParser {
 function isModelWithResponseFormat(llm) {
   return "modelKwargs" in llm && !!llm.modelKwargs && typeof llm.modelKwargs === "object" && "response_format" in llm.modelKwargs;
 }
+function isModelInThinkingMode(llm) {
+  return "lc_kwargs" in llm && "invocationKwargs" in llm.lc_kwargs && typeof llm.lc_kwargs.invocationKwargs === "object" && "thinking" in llm.lc_kwargs.invocationKwargs && llm.lc_kwargs.invocationKwargs.thinking.type === "enabled";
+}
 function isModelWithFormat(llm) {
   return "format" in llm && typeof llm.format !== "undefined";
 }
@@ -51,6 +55,9 @@ function getOutputParserForLLM(llm) {
   if (isModelWithFormat(llm) && llm.format === "json") {
     return new NaiveJsonOutputParser();
   }
+  if (isModelInThinkingMode(llm)) {
+    return new NaiveJsonOutputParser();
+  }
   return new import_output_parsers.StringOutputParser();
 }
 async function executeSimpleChain({
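The new guard targets models with extended thinking enabled (the lc_kwargs.invocationKwargs.thinking shape above), and getOutputParserForLLM now falls back to the JSON-tolerant NaiveJsonOutputParser for them instead of StringOutputParser. A condensed sketch of that check, using optional chaining in place of the compiled code's chained `in` checks; the ThinkingLlm type is illustrative:

```ts
// Condensed sketch of the new type guard; `ThinkingLlm` is an illustrative
// stand-in for BaseLanguageModel, and optional chaining replaces the
// diff's chained `in` checks.
type ThinkingLlm = {
  lc_kwargs?: { invocationKwargs?: { thinking?: { type?: string } } };
};

function isModelInThinkingMode(llm: ThinkingLlm): boolean {
  return llm.lc_kwargs?.invocationKwargs?.thinking?.type === 'enabled';
}

// A model configured with { thinking: { type: 'enabled' } } now gets the
// JSON-tolerant parser.
const llm: ThinkingLlm = {
  lc_kwargs: { invocationKwargs: { thinking: { type: 'enabled' } } },
};
console.log(isModelInThinkingMode(llm)); // true
```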
@@ -108,6 +115,7 @@ async function executeChain({
   NaiveJsonOutputParser,
   executeChain,
   getOutputParserForLLM,
+  isModelInThinkingMode,
   isModelWithFormat,
   isModelWithResponseFormat
 });
@@ -1 +1 @@
- {"version":3,"sources":["../../../../../nodes/chains/ChainLLM/methods/chainExecutor.ts"],"sourcesContent":["import type { BaseLanguageModel } from '@langchain/core/language_models/base';\nimport type { BaseLLMOutputParser } from '@langchain/core/output_parsers';\nimport { JsonOutputParser, StringOutputParser } from '@langchain/core/output_parsers';\nimport type { ChatPromptTemplate, PromptTemplate } from '@langchain/core/prompts';\nimport type { IExecuteFunctions } from 'n8n-workflow';\n\nimport { getTracingConfig } from '@utils/tracing';\n\nimport { createPromptTemplate } from './promptUtils';\nimport type { ChainExecutionParams } from './types';\n\nexport class NaiveJsonOutputParser<\n\tT extends Record<string, any> = Record<string, any>,\n> extends JsonOutputParser<T> {\n\tasync parse(text: string): Promise<T> {\n\t\t// First try direct JSON parsing\n\t\ttry {\n\t\t\tconst directParsed = JSON.parse(text);\n\t\t\treturn directParsed as T;\n\t\t} catch (e) {\n\t\t\t// If fails, fall back to JsonOutputParser parser\n\t\t\treturn await super.parse(text);\n\t\t}\n\t}\n}\n\n/**\n * Type guard to check if the LLM has a modelKwargs property(OpenAI)\n */\nexport function isModelWithResponseFormat(\n\tllm: BaseLanguageModel,\n): llm is BaseLanguageModel & { modelKwargs: { response_format: { type: string } } } {\n\treturn (\n\t\t'modelKwargs' in llm &&\n\t\t!!llm.modelKwargs &&\n\t\ttypeof llm.modelKwargs === 'object' &&\n\t\t'response_format' in llm.modelKwargs\n\t);\n}\n\n/**\n * Type guard to check if the LLM has a format property(Ollama)\n */\nexport function isModelWithFormat(\n\tllm: BaseLanguageModel,\n): llm is BaseLanguageModel & { format: string } {\n\treturn 'format' in llm && typeof llm.format !== 'undefined';\n}\n\n/**\n * Determines if an LLM is configured to output JSON and returns the appropriate output parser\n */\nexport function getOutputParserForLLM(\n\tllm: BaseLanguageModel,\n): BaseLLMOutputParser<string | Record<string, unknown>> {\n\tif (isModelWithResponseFormat(llm) && llm.modelKwargs?.response_format?.type === 'json_object') {\n\t\treturn new NaiveJsonOutputParser();\n\t}\n\n\tif (isModelWithFormat(llm) && llm.format === 'json') {\n\t\treturn new NaiveJsonOutputParser();\n\t}\n\n\treturn new StringOutputParser();\n}\n\n/**\n * Creates a simple chain for LLMs without output parsers\n */\nasync function executeSimpleChain({\n\tcontext,\n\tllm,\n\tquery,\n\tprompt,\n}: {\n\tcontext: IExecuteFunctions;\n\tllm: BaseLanguageModel;\n\tquery: string;\n\tprompt: ChatPromptTemplate | PromptTemplate;\n}) {\n\tconst outputParser = getOutputParserForLLM(llm);\n\n\tconst chain = prompt.pipe(llm).pipe(outputParser).withConfig(getTracingConfig(context));\n\n\t// Execute the chain\n\tconst response = await chain.invoke({\n\t\tquery,\n\t\tsignal: context.getExecutionCancelSignal(),\n\t});\n\n\t// Ensure response is always returned as an array\n\treturn [response];\n}\n\n/**\n * Creates and executes an LLM chain with the given prompt and optional output parsers\n */\nexport async function executeChain({\n\tcontext,\n\titemIndex,\n\tquery,\n\tllm,\n\toutputParser,\n\tmessages,\n}: ChainExecutionParams): Promise<unknown[]> {\n\t// If no output parsers provided, use a simple chain with basic prompt template\n\tif (!outputParser) {\n\t\tconst promptTemplate = await createPromptTemplate({\n\t\t\tcontext,\n\t\t\titemIndex,\n\t\t\tllm,\n\t\t\tmessages,\n\t\t\tquery,\n\t\t});\n\n\t\treturn await executeSimpleChain({\n\t\t\tcontext,\n\t\t\tllm,\n\t\t\tquery,\n\t\t\tprompt: 
promptTemplate,\n\t\t});\n\t}\n\n\tconst formatInstructions = outputParser.getFormatInstructions();\n\n\t// Create a prompt template with format instructions\n\tconst promptWithInstructions = await createPromptTemplate({\n\t\tcontext,\n\t\titemIndex,\n\t\tllm,\n\t\tmessages,\n\t\tformatInstructions,\n\t\tquery,\n\t});\n\n\tconst chain = promptWithInstructions\n\t\t.pipe(llm)\n\t\t.pipe(outputParser)\n\t\t.withConfig(getTracingConfig(context));\n\tconst response = await chain.invoke({ query }, { signal: context.getExecutionCancelSignal() });\n\n\t// Ensure response is always returned as an array\n\t// eslint-disable-next-line @typescript-eslint/no-unsafe-return\n\treturn Array.isArray(response) ? response : [response];\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAEA,4BAAqD;AAIrD,qBAAiC;AAEjC,yBAAqC;AAG9B,MAAM,8BAEH,uCAAoB;AAAA,EAC7B,MAAM,MAAM,MAA0B;AAErC,QAAI;AACH,YAAM,eAAe,KAAK,MAAM,IAAI;AACpC,aAAO;AAAA,IACR,SAAS,GAAG;AAEX,aAAO,MAAM,MAAM,MAAM,IAAI;AAAA,IAC9B;AAAA,EACD;AACD;AAKO,SAAS,0BACf,KACoF;AACpF,SACC,iBAAiB,OACjB,CAAC,CAAC,IAAI,eACN,OAAO,IAAI,gBAAgB,YAC3B,qBAAqB,IAAI;AAE3B;AAKO,SAAS,kBACf,KACgD;AAChD,SAAO,YAAY,OAAO,OAAO,IAAI,WAAW;AACjD;AAKO,SAAS,sBACf,KACwD;AACxD,MAAI,0BAA0B,GAAG,KAAK,IAAI,aAAa,iBAAiB,SAAS,eAAe;AAC/F,WAAO,IAAI,sBAAsB;AAAA,EAClC;AAEA,MAAI,kBAAkB,GAAG,KAAK,IAAI,WAAW,QAAQ;AACpD,WAAO,IAAI,sBAAsB;AAAA,EAClC;AAEA,SAAO,IAAI,yCAAmB;AAC/B;AAKA,eAAe,mBAAmB;AAAA,EACjC;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACD,GAKG;AACF,QAAM,eAAe,sBAAsB,GAAG;AAE9C,QAAM,QAAQ,OAAO,KAAK,GAAG,EAAE,KAAK,YAAY,EAAE,eAAW,iCAAiB,OAAO,CAAC;AAGtF,QAAM,WAAW,MAAM,MAAM,OAAO;AAAA,IACnC;AAAA,IACA,QAAQ,QAAQ,yBAAyB;AAAA,EAC1C,CAAC;AAGD,SAAO,CAAC,QAAQ;AACjB;AAKA,eAAsB,aAAa;AAAA,EAClC;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACD,GAA6C;AAE5C,MAAI,CAAC,cAAc;AAClB,UAAM,iBAAiB,UAAM,yCAAqB;AAAA,MACjD;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACD,CAAC;AAED,WAAO,MAAM,mBAAmB;AAAA,MAC/B;AAAA,MACA;AAAA,MACA;AAAA,MACA,QAAQ;AAAA,IACT,CAAC;AAAA,EACF;AAEA,QAAM,qBAAqB,aAAa,sBAAsB;AAG9D,QAAM,yBAAyB,UAAM,yCAAqB;AAAA,IACzD;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACD,CAAC;AAED,QAAM,QAAQ,uBACZ,KAAK,GAAG,EACR,KAAK,YAAY,EACjB,eAAW,iCAAiB,OAAO,CAAC;AACtC,QAAM,WAAW,MAAM,MAAM,OAAO,EAAE,MAAM,GAAG,EAAE,QAAQ,QAAQ,yBAAyB,EAAE,CAAC;AAI7F,SAAO,MAAM,QAAQ,QAAQ,IAAI,WAAW,CAAC,QAAQ;AACtD;","names":[]}
+ {"version":3,"sources":["../../../../../nodes/chains/ChainLLM/methods/chainExecutor.ts"],"sourcesContent":["import type { BaseLanguageModel } from '@langchain/core/language_models/base';\nimport type { BaseLLMOutputParser } from '@langchain/core/output_parsers';\nimport { JsonOutputParser, StringOutputParser } from '@langchain/core/output_parsers';\nimport type { ChatPromptTemplate, PromptTemplate } from '@langchain/core/prompts';\nimport type { IExecuteFunctions } from 'n8n-workflow';\n\nimport { getTracingConfig } from '@utils/tracing';\n\nimport { createPromptTemplate } from './promptUtils';\nimport type { ChainExecutionParams } from './types';\n\nexport class NaiveJsonOutputParser<\n\tT extends Record<string, any> = Record<string, any>,\n> extends JsonOutputParser<T> {\n\tasync parse(text: string): Promise<T> {\n\t\t// First try direct JSON parsing\n\t\ttry {\n\t\t\tconst directParsed = JSON.parse(text);\n\t\t\treturn directParsed as T;\n\t\t} catch (e) {\n\t\t\t// If fails, fall back to JsonOutputParser parser\n\t\t\treturn await super.parse(text);\n\t\t}\n\t}\n}\n\n/**\n * Type guard to check if the LLM has a modelKwargs property(OpenAI)\n */\nexport function isModelWithResponseFormat(\n\tllm: BaseLanguageModel,\n): llm is BaseLanguageModel & { modelKwargs: { response_format: { type: string } } } {\n\treturn (\n\t\t'modelKwargs' in llm &&\n\t\t!!llm.modelKwargs &&\n\t\ttypeof llm.modelKwargs === 'object' &&\n\t\t'response_format' in llm.modelKwargs\n\t);\n}\n\nexport function isModelInThinkingMode(\n\tllm: BaseLanguageModel,\n): llm is BaseLanguageModel & { lc_kwargs: { invocationKwargs: { thinking: { type: string } } } } {\n\treturn (\n\t\t'lc_kwargs' in llm &&\n\t\t'invocationKwargs' in llm.lc_kwargs &&\n\t\ttypeof llm.lc_kwargs.invocationKwargs === 'object' &&\n\t\t'thinking' in llm.lc_kwargs.invocationKwargs &&\n\t\tllm.lc_kwargs.invocationKwargs.thinking.type === 'enabled'\n\t);\n}\n\n/**\n * Type guard to check if the LLM has a format property(Ollama)\n */\nexport function isModelWithFormat(\n\tllm: BaseLanguageModel,\n): llm is BaseLanguageModel & { format: string } {\n\treturn 'format' in llm && typeof llm.format !== 'undefined';\n}\n\n/**\n * Determines if an LLM is configured to output JSON and returns the appropriate output parser\n */\nexport function getOutputParserForLLM(\n\tllm: BaseLanguageModel,\n): BaseLLMOutputParser<string | Record<string, unknown>> {\n\tif (isModelWithResponseFormat(llm) && llm.modelKwargs?.response_format?.type === 'json_object') {\n\t\treturn new NaiveJsonOutputParser();\n\t}\n\n\tif (isModelWithFormat(llm) && llm.format === 'json') {\n\t\treturn new NaiveJsonOutputParser();\n\t}\n\n\tif (isModelInThinkingMode(llm)) {\n\t\treturn new NaiveJsonOutputParser();\n\t}\n\n\treturn new StringOutputParser();\n}\n\n/**\n * Creates a simple chain for LLMs without output parsers\n */\nasync function executeSimpleChain({\n\tcontext,\n\tllm,\n\tquery,\n\tprompt,\n}: {\n\tcontext: IExecuteFunctions;\n\tllm: BaseLanguageModel;\n\tquery: string;\n\tprompt: ChatPromptTemplate | PromptTemplate;\n}) {\n\tconst outputParser = getOutputParserForLLM(llm);\n\n\tconst chain = prompt.pipe(llm).pipe(outputParser).withConfig(getTracingConfig(context));\n\n\t// Execute the chain\n\tconst response = await chain.invoke({\n\t\tquery,\n\t\tsignal: context.getExecutionCancelSignal(),\n\t});\n\n\t// Ensure response is always returned as an array\n\treturn [response];\n}\n\n/**\n * Creates and executes an LLM chain with the given prompt and optional output parsers\n */\nexport 
async function executeChain({\n\tcontext,\n\titemIndex,\n\tquery,\n\tllm,\n\toutputParser,\n\tmessages,\n}: ChainExecutionParams): Promise<unknown[]> {\n\t// If no output parsers provided, use a simple chain with basic prompt template\n\tif (!outputParser) {\n\t\tconst promptTemplate = await createPromptTemplate({\n\t\t\tcontext,\n\t\t\titemIndex,\n\t\t\tllm,\n\t\t\tmessages,\n\t\t\tquery,\n\t\t});\n\n\t\treturn await executeSimpleChain({\n\t\t\tcontext,\n\t\t\tllm,\n\t\t\tquery,\n\t\t\tprompt: promptTemplate,\n\t\t});\n\t}\n\n\tconst formatInstructions = outputParser.getFormatInstructions();\n\n\t// Create a prompt template with format instructions\n\tconst promptWithInstructions = await createPromptTemplate({\n\t\tcontext,\n\t\titemIndex,\n\t\tllm,\n\t\tmessages,\n\t\tformatInstructions,\n\t\tquery,\n\t});\n\n\tconst chain = promptWithInstructions\n\t\t.pipe(llm)\n\t\t.pipe(outputParser)\n\t\t.withConfig(getTracingConfig(context));\n\tconst response = await chain.invoke({ query }, { signal: context.getExecutionCancelSignal() });\n\n\t// Ensure response is always returned as an array\n\t// eslint-disable-next-line @typescript-eslint/no-unsafe-return\n\treturn Array.isArray(response) ? response : [response];\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAEA,4BAAqD;AAIrD,qBAAiC;AAEjC,yBAAqC;AAG9B,MAAM,8BAEH,uCAAoB;AAAA,EAC7B,MAAM,MAAM,MAA0B;AAErC,QAAI;AACH,YAAM,eAAe,KAAK,MAAM,IAAI;AACpC,aAAO;AAAA,IACR,SAAS,GAAG;AAEX,aAAO,MAAM,MAAM,MAAM,IAAI;AAAA,IAC9B;AAAA,EACD;AACD;AAKO,SAAS,0BACf,KACoF;AACpF,SACC,iBAAiB,OACjB,CAAC,CAAC,IAAI,eACN,OAAO,IAAI,gBAAgB,YAC3B,qBAAqB,IAAI;AAE3B;AAEO,SAAS,sBACf,KACiG;AACjG,SACC,eAAe,OACf,sBAAsB,IAAI,aAC1B,OAAO,IAAI,UAAU,qBAAqB,YAC1C,cAAc,IAAI,UAAU,oBAC5B,IAAI,UAAU,iBAAiB,SAAS,SAAS;AAEnD;AAKO,SAAS,kBACf,KACgD;AAChD,SAAO,YAAY,OAAO,OAAO,IAAI,WAAW;AACjD;AAKO,SAAS,sBACf,KACwD;AACxD,MAAI,0BAA0B,GAAG,KAAK,IAAI,aAAa,iBAAiB,SAAS,eAAe;AAC/F,WAAO,IAAI,sBAAsB;AAAA,EAClC;AAEA,MAAI,kBAAkB,GAAG,KAAK,IAAI,WAAW,QAAQ;AACpD,WAAO,IAAI,sBAAsB;AAAA,EAClC;AAEA,MAAI,sBAAsB,GAAG,GAAG;AAC/B,WAAO,IAAI,sBAAsB;AAAA,EAClC;AAEA,SAAO,IAAI,yCAAmB;AAC/B;AAKA,eAAe,mBAAmB;AAAA,EACjC;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACD,GAKG;AACF,QAAM,eAAe,sBAAsB,GAAG;AAE9C,QAAM,QAAQ,OAAO,KAAK,GAAG,EAAE,KAAK,YAAY,EAAE,eAAW,iCAAiB,OAAO,CAAC;AAGtF,QAAM,WAAW,MAAM,MAAM,OAAO;AAAA,IACnC;AAAA,IACA,QAAQ,QAAQ,yBAAyB;AAAA,EAC1C,CAAC;AAGD,SAAO,CAAC,QAAQ;AACjB;AAKA,eAAsB,aAAa;AAAA,EAClC;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACD,GAA6C;AAE5C,MAAI,CAAC,cAAc;AAClB,UAAM,iBAAiB,UAAM,yCAAqB;AAAA,MACjD;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACD,CAAC;AAED,WAAO,MAAM,mBAAmB;AAAA,MAC/B;AAAA,MACA;AAAA,MACA;AAAA,MACA,QAAQ;AAAA,IACT,CAAC;AAAA,EACF;AAEA,QAAM,qBAAqB,aAAa,sBAAsB;AAG9D,QAAM,yBAAyB,UAAM,yCAAqB;AAAA,IACzD;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACD,CAAC;AAED,QAAM,QAAQ,uBACZ,KAAK,GAAG,EACR,KAAK,YAAY,EACjB,eAAW,iCAAiB,OAAO,CAAC;AACtC,QAAM,WAAW,MAAM,MAAM,OAAO,EAAE,MAAM,GAAG,EAAE,QAAQ,QAAQ,yBAAyB,EAAE,CAAC;AAI7F,SAAO,MAAM,QAAQ,QAAQ,IAAI,WAAW,CAAC,QAAQ;AACtD;","names":[]}
@@ -259,6 +259,11 @@ const nodeProperties = [
       }
     ]
   },
+  (0, import_sharedFields.getBatchingOptionFields)({
+    show: {
+      "@version": [{ _cnd: { gte: 1.7 } }]
+    }
+  }),
   {
     displayName: `Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='${import_n8n_workflow.NodeConnectionTypes.AiOutputParser}'>output parser</a> on the canvas to specify the output format you require`,
     name: "notice",
@@ -1 +1 @@
- {"version":3,"sources":["../../../../../nodes/chains/ChainLLM/methods/config.ts"],"sourcesContent":["import {\n\tAIMessagePromptTemplate,\n\tHumanMessagePromptTemplate,\n\tSystemMessagePromptTemplate,\n} from '@langchain/core/prompts';\nimport type { IDataObject, INodeInputConfiguration, INodeProperties } from 'n8n-workflow';\nimport { NodeConnectionTypes } from 'n8n-workflow';\n\nimport { promptTypeOptions, textFromPreviousNode } from '@utils/descriptions';\nimport { getTemplateNoticeField } from '@utils/sharedFields';\n\n/**\n * Dynamic input configuration generation based on node parameters\n */\nexport function getInputs(parameters: IDataObject) {\n\tconst inputs: INodeInputConfiguration[] = [\n\t\t{ displayName: '', type: 'main' },\n\t\t{\n\t\t\tdisplayName: 'Model',\n\t\t\tmaxConnections: 1,\n\t\t\ttype: 'ai_languageModel',\n\t\t\trequired: true,\n\t\t},\n\t];\n\n\t// If `hasOutputParser` is undefined it must be version 1.3 or earlier so we\n\t// always add the output parser input\n\tconst hasOutputParser = parameters?.hasOutputParser;\n\tif (hasOutputParser === undefined || hasOutputParser === true) {\n\t\tinputs.push({\n\t\t\tdisplayName: 'Output Parser',\n\t\t\ttype: 'ai_outputParser',\n\t\t\tmaxConnections: 1,\n\t\t\trequired: false,\n\t\t});\n\t}\n\n\treturn inputs;\n}\n\n/**\n * Node properties configuration\n */\nexport const nodeProperties: INodeProperties[] = [\n\tgetTemplateNoticeField(1978),\n\t{\n\t\tdisplayName: 'Prompt',\n\t\tname: 'prompt',\n\t\ttype: 'string',\n\t\trequired: true,\n\t\tdefault: '={{ $json.input }}',\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\t'@version': [1],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName: 'Prompt',\n\t\tname: 'prompt',\n\t\ttype: 'string',\n\t\trequired: true,\n\t\tdefault: '={{ $json.chat_input }}',\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\t'@version': [1.1, 1.2],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName: 'Prompt',\n\t\tname: 'prompt',\n\t\ttype: 'string',\n\t\trequired: true,\n\t\tdefault: '={{ $json.chatInput }}',\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\t'@version': [1.3],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\t...promptTypeOptions,\n\t\tdisplayOptions: {\n\t\t\thide: {\n\t\t\t\t'@version': [1, 1.1, 1.2, 1.3],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\t...textFromPreviousNode,\n\t\tdisplayOptions: { show: { promptType: ['auto'], '@version': [{ _cnd: { gte: 1.5 } }] } },\n\t},\n\t{\n\t\tdisplayName: 'Prompt (User Message)',\n\t\tname: 'text',\n\t\ttype: 'string',\n\t\trequired: true,\n\t\tdefault: '',\n\t\tplaceholder: 'e.g. 
Hello, how can you help me?',\n\t\ttypeOptions: {\n\t\t\trows: 2,\n\t\t},\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\tpromptType: ['define'],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName: 'Require Specific Output Format',\n\t\tname: 'hasOutputParser',\n\t\ttype: 'boolean',\n\t\tdefault: false,\n\t\tnoDataExpression: true,\n\t\tdisplayOptions: {\n\t\t\thide: {\n\t\t\t\t'@version': [1, 1.1, 1.3],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName: 'Chat Messages (if Using a Chat Model)',\n\t\tname: 'messages',\n\t\ttype: 'fixedCollection',\n\t\ttypeOptions: {\n\t\t\tmultipleValues: true,\n\t\t},\n\t\tdefault: {},\n\t\tplaceholder: 'Add prompt',\n\t\toptions: [\n\t\t\t{\n\t\t\t\tname: 'messageValues',\n\t\t\t\tdisplayName: 'Prompt',\n\t\t\t\tvalues: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Type Name or ID',\n\t\t\t\t\t\tname: 'type',\n\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\toptions: [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'AI',\n\t\t\t\t\t\t\t\tvalue: AIMessagePromptTemplate.lc_name(),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'System',\n\t\t\t\t\t\t\t\tvalue: SystemMessagePromptTemplate.lc_name(),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'User',\n\t\t\t\t\t\t\t\tvalue: HumanMessagePromptTemplate.lc_name(),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t],\n\t\t\t\t\t\tdefault: SystemMessagePromptTemplate.lc_name(),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Message Type',\n\t\t\t\t\t\tname: 'messageType',\n\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\t\tshow: {\n\t\t\t\t\t\t\t\ttype: [HumanMessagePromptTemplate.lc_name()],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\toptions: [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Text',\n\t\t\t\t\t\t\t\tvalue: 'text',\n\t\t\t\t\t\t\t\tdescription: 'Simple text message',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Image (Binary)',\n\t\t\t\t\t\t\t\tvalue: 'imageBinary',\n\t\t\t\t\t\t\t\tdescription: 'Process the binary input from the previous node',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Image (URL)',\n\t\t\t\t\t\t\t\tvalue: 'imageUrl',\n\t\t\t\t\t\t\t\tdescription: 'Process the image from the specified URL',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t],\n\t\t\t\t\t\tdefault: 'text',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Image Data Field Name',\n\t\t\t\t\t\tname: 'binaryImageDataKey',\n\t\t\t\t\t\ttype: 'string',\n\t\t\t\t\t\tdefault: 'data',\n\t\t\t\t\t\trequired: true,\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\"The name of the field in the chain's input that contains the binary image file to be processed\",\n\t\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\t\tshow: {\n\t\t\t\t\t\t\t\tmessageType: ['imageBinary'],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Image URL',\n\t\t\t\t\t\tname: 'imageUrl',\n\t\t\t\t\t\ttype: 'string',\n\t\t\t\t\t\tdefault: '',\n\t\t\t\t\t\trequired: true,\n\t\t\t\t\t\tdescription: 'URL to the image to be processed',\n\t\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\t\tshow: {\n\t\t\t\t\t\t\t\tmessageType: ['imageUrl'],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Image Details',\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Control how the model processes the image and generates its textual understanding',\n\t\t\t\t\t\tname: 'imageDetail',\n\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\t\tshow: {\n\t\t\t\t\t\t\t\ttype: [HumanMessagePromptTemplate.lc_name()],\n\t\t\t\t\t\t\t\tmessageType: ['imageBinary', 
'imageUrl'],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\toptions: [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Auto',\n\t\t\t\t\t\t\t\tvalue: 'auto',\n\t\t\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\t\t'Model will use the auto setting which will look at the image input size and decide if it should use the low or high setting',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Low',\n\t\t\t\t\t\t\t\tvalue: 'low',\n\t\t\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\t\t'The model will receive a low-res 512px x 512px version of the image, and represent the image with a budget of 65 tokens. This allows the API to return faster responses and consume fewer input tokens for use cases that do not require high detail.',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'High',\n\t\t\t\t\t\t\t\tvalue: 'high',\n\t\t\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\t\t'Allows the model to see the low res image and then creates detailed crops of input images as 512px squares based on the input image size. Each of the detailed crops uses twice the token budget (65 tokens) for a total of 129 tokens.',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t],\n\t\t\t\t\t\tdefault: 'auto',\n\t\t\t\t\t},\n\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Message',\n\t\t\t\t\t\tname: 'message',\n\t\t\t\t\t\ttype: 'string',\n\t\t\t\t\t\trequired: true,\n\t\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\t\thide: {\n\t\t\t\t\t\t\t\tmessageType: ['imageBinary', 'imageUrl'],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tdefault: '',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t],\n\t},\n\t{\n\t\tdisplayName: `Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='${NodeConnectionTypes.AiOutputParser}'>output parser</a> on the canvas to specify the output format you require`,\n\t\tname: 'notice',\n\t\ttype: 'notice',\n\t\tdefault: '',\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\thasOutputParser: 
[true],\n\t\t\t},\n\t\t},\n\t},\n];\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,qBAIO;AAEP,0BAAoC;AAEpC,0BAAwD;AACxD,0BAAuC;AAKhC,SAAS,UAAU,YAAyB;AAClD,QAAM,SAAoC;AAAA,IACzC,EAAE,aAAa,IAAI,MAAM,OAAO;AAAA,IAChC;AAAA,MACC,aAAa;AAAA,MACb,gBAAgB;AAAA,MAChB,MAAM;AAAA,MACN,UAAU;AAAA,IACX;AAAA,EACD;AAIA,QAAM,kBAAkB,YAAY;AACpC,MAAI,oBAAoB,UAAa,oBAAoB,MAAM;AAC9D,WAAO,KAAK;AAAA,MACX,aAAa;AAAA,MACb,MAAM;AAAA,MACN,gBAAgB;AAAA,MAChB,UAAU;AAAA,IACX,CAAC;AAAA,EACF;AAEA,SAAO;AACR;AAKO,MAAM,iBAAoC;AAAA,MAChD,4CAAuB,IAAI;AAAA,EAC3B;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,UAAU;AAAA,IACV,SAAS;AAAA,IACT,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,YAAY,CAAC,CAAC;AAAA,MACf;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,UAAU;AAAA,IACV,SAAS;AAAA,IACT,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,YAAY,CAAC,KAAK,GAAG;AAAA,MACtB;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,UAAU;AAAA,IACV,SAAS;AAAA,IACT,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,YAAY,CAAC,GAAG;AAAA,MACjB;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,GAAG;AAAA,IACH,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,YAAY,CAAC,GAAG,KAAK,KAAK,GAAG;AAAA,MAC9B;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,GAAG;AAAA,IACH,gBAAgB,EAAE,MAAM,EAAE,YAAY,CAAC,MAAM,GAAG,YAAY,CAAC,EAAE,MAAM,EAAE,KAAK,IAAI,EAAE,CAAC,EAAE,EAAE;AAAA,EACxF;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,UAAU;AAAA,IACV,SAAS;AAAA,IACT,aAAa;AAAA,IACb,aAAa;AAAA,MACZ,MAAM;AAAA,IACP;AAAA,IACA,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,YAAY,CAAC,QAAQ;AAAA,MACtB;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,kBAAkB;AAAA,IAClB,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,YAAY,CAAC,GAAG,KAAK,GAAG;AAAA,MACzB;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,aAAa;AAAA,MACZ,gBAAgB;AAAA,IACjB;AAAA,IACA,SAAS,CAAC;AAAA,IACV,aAAa;AAAA,IACb,SAAS;AAAA,MACR;AAAA,QACC,MAAM;AAAA,QACN,aAAa;AAAA,QACb,QAAQ;AAAA,UACP;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,SAAS;AAAA,cACR;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO,uCAAwB,QAAQ;AAAA,cACxC;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO,2CAA4B,QAAQ;AAAA,cAC5C;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO,0CAA2B,QAAQ;AAAA,cAC3C;AAAA,YACD;AAAA,YACA,SAAS,2CAA4B,QAAQ;AAAA,UAC9C;AAAA,UACA;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,gBAAgB;AAAA,cACf,MAAM;AAAA,gBACL,MAAM,CAAC,0CAA2B,QAAQ,CAAC;AAAA,cAC5C;AAAA,YACD;AAAA,YACA,SAAS;AAAA,cACR;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aAAa;AAAA,cACd;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aAAa;AAAA,cACd;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aAAa;AAAA,cACd;AAAA,YACD;AAAA,YACA,SAAS;AAAA,UACV;AAAA,UACA;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,SAAS;AAAA,YACT,UAAU;AAAA,YACV,aACC;AAAA,YACD,gBAAgB;AAAA,cACf,MAAM;AAAA,gBACL,aAAa,CAAC,aAAa;AAAA,cAC5B;AAAA,YACD;AAAA,UACD;AAAA,UACA;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,SAAS;AAAA,YACT,UAAU;AAAA,YACV,aAAa;AAAA,YACb,gBAAgB;AAAA,cACf,MAAM;AAAA,gBACL,aAAa,CAAC,UAAU;AAAA,cACzB;AAAA,YACD;AAAA,UACD;AAAA,UACA;AAAA,YACC,aAAa;AAAA,YACb,aACC;AAAA,YACD,MAAM;AAAA,YACN,MAAM;AAAA,YACN,gBAAgB;AAAA,cACf,MAAM;AAAA,gBACL,MAAM,CAAC,0CAA2B,QAAQ,CAAC;AAAA,gBAC3C,aAAa,CAAC,eAAe,UAAU;AAAA,cACxC;AAAA,YACD;AAAA,YACA,SAAS;AAAA,cACR;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aACC;AAAA,cACF;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aACC;AAAA,cACF;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aACC;AAAA,cACF;AAAA,YACD;AAAA,YACA,SAAS;AAAA,UACV;AAAA,UAEA;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,UAAU;A
AAA,YACV,gBAAgB;AAAA,cACf,MAAM;AAAA,gBACL,aAAa,CAAC,eAAe,UAAU;AAAA,cACxC;AAAA,YACD;AAAA,YACA,SAAS;AAAA,UACV;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa,8FAA8F,wCAAoB,cAAc;AAAA,IAC7I,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,iBAAiB,CAAC,IAAI;AAAA,MACvB;AAAA,IACD;AAAA,EACD;AACD;","names":[]}
+ {"version":3,"sources":["../../../../../nodes/chains/ChainLLM/methods/config.ts"],"sourcesContent":["import {\n\tAIMessagePromptTemplate,\n\tHumanMessagePromptTemplate,\n\tSystemMessagePromptTemplate,\n} from '@langchain/core/prompts';\nimport type { IDataObject, INodeInputConfiguration, INodeProperties } from 'n8n-workflow';\nimport { NodeConnectionTypes } from 'n8n-workflow';\n\nimport { promptTypeOptions, textFromPreviousNode } from '@utils/descriptions';\nimport { getBatchingOptionFields, getTemplateNoticeField } from '@utils/sharedFields';\n\n/**\n * Dynamic input configuration generation based on node parameters\n */\nexport function getInputs(parameters: IDataObject) {\n\tconst inputs: INodeInputConfiguration[] = [\n\t\t{ displayName: '', type: 'main' },\n\t\t{\n\t\t\tdisplayName: 'Model',\n\t\t\tmaxConnections: 1,\n\t\t\ttype: 'ai_languageModel',\n\t\t\trequired: true,\n\t\t},\n\t];\n\n\t// If `hasOutputParser` is undefined it must be version 1.3 or earlier so we\n\t// always add the output parser input\n\tconst hasOutputParser = parameters?.hasOutputParser;\n\tif (hasOutputParser === undefined || hasOutputParser === true) {\n\t\tinputs.push({\n\t\t\tdisplayName: 'Output Parser',\n\t\t\ttype: 'ai_outputParser',\n\t\t\tmaxConnections: 1,\n\t\t\trequired: false,\n\t\t});\n\t}\n\n\treturn inputs;\n}\n\n/**\n * Node properties configuration\n */\nexport const nodeProperties: INodeProperties[] = [\n\tgetTemplateNoticeField(1978),\n\t{\n\t\tdisplayName: 'Prompt',\n\t\tname: 'prompt',\n\t\ttype: 'string',\n\t\trequired: true,\n\t\tdefault: '={{ $json.input }}',\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\t'@version': [1],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName: 'Prompt',\n\t\tname: 'prompt',\n\t\ttype: 'string',\n\t\trequired: true,\n\t\tdefault: '={{ $json.chat_input }}',\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\t'@version': [1.1, 1.2],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName: 'Prompt',\n\t\tname: 'prompt',\n\t\ttype: 'string',\n\t\trequired: true,\n\t\tdefault: '={{ $json.chatInput }}',\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\t'@version': [1.3],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\t...promptTypeOptions,\n\t\tdisplayOptions: {\n\t\t\thide: {\n\t\t\t\t'@version': [1, 1.1, 1.2, 1.3],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\t...textFromPreviousNode,\n\t\tdisplayOptions: { show: { promptType: ['auto'], '@version': [{ _cnd: { gte: 1.5 } }] } },\n\t},\n\t{\n\t\tdisplayName: 'Prompt (User Message)',\n\t\tname: 'text',\n\t\ttype: 'string',\n\t\trequired: true,\n\t\tdefault: '',\n\t\tplaceholder: 'e.g. 
Hello, how can you help me?',\n\t\ttypeOptions: {\n\t\t\trows: 2,\n\t\t},\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\tpromptType: ['define'],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName: 'Require Specific Output Format',\n\t\tname: 'hasOutputParser',\n\t\ttype: 'boolean',\n\t\tdefault: false,\n\t\tnoDataExpression: true,\n\t\tdisplayOptions: {\n\t\t\thide: {\n\t\t\t\t'@version': [1, 1.1, 1.3],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName: 'Chat Messages (if Using a Chat Model)',\n\t\tname: 'messages',\n\t\ttype: 'fixedCollection',\n\t\ttypeOptions: {\n\t\t\tmultipleValues: true,\n\t\t},\n\t\tdefault: {},\n\t\tplaceholder: 'Add prompt',\n\t\toptions: [\n\t\t\t{\n\t\t\t\tname: 'messageValues',\n\t\t\t\tdisplayName: 'Prompt',\n\t\t\t\tvalues: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Type Name or ID',\n\t\t\t\t\t\tname: 'type',\n\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\toptions: [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'AI',\n\t\t\t\t\t\t\t\tvalue: AIMessagePromptTemplate.lc_name(),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'System',\n\t\t\t\t\t\t\t\tvalue: SystemMessagePromptTemplate.lc_name(),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'User',\n\t\t\t\t\t\t\t\tvalue: HumanMessagePromptTemplate.lc_name(),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t],\n\t\t\t\t\t\tdefault: SystemMessagePromptTemplate.lc_name(),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Message Type',\n\t\t\t\t\t\tname: 'messageType',\n\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\t\tshow: {\n\t\t\t\t\t\t\t\ttype: [HumanMessagePromptTemplate.lc_name()],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\toptions: [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Text',\n\t\t\t\t\t\t\t\tvalue: 'text',\n\t\t\t\t\t\t\t\tdescription: 'Simple text message',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Image (Binary)',\n\t\t\t\t\t\t\t\tvalue: 'imageBinary',\n\t\t\t\t\t\t\t\tdescription: 'Process the binary input from the previous node',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Image (URL)',\n\t\t\t\t\t\t\t\tvalue: 'imageUrl',\n\t\t\t\t\t\t\t\tdescription: 'Process the image from the specified URL',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t],\n\t\t\t\t\t\tdefault: 'text',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Image Data Field Name',\n\t\t\t\t\t\tname: 'binaryImageDataKey',\n\t\t\t\t\t\ttype: 'string',\n\t\t\t\t\t\tdefault: 'data',\n\t\t\t\t\t\trequired: true,\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\"The name of the field in the chain's input that contains the binary image file to be processed\",\n\t\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\t\tshow: {\n\t\t\t\t\t\t\t\tmessageType: ['imageBinary'],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Image URL',\n\t\t\t\t\t\tname: 'imageUrl',\n\t\t\t\t\t\ttype: 'string',\n\t\t\t\t\t\tdefault: '',\n\t\t\t\t\t\trequired: true,\n\t\t\t\t\t\tdescription: 'URL to the image to be processed',\n\t\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\t\tshow: {\n\t\t\t\t\t\t\t\tmessageType: ['imageUrl'],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Image Details',\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Control how the model processes the image and generates its textual understanding',\n\t\t\t\t\t\tname: 'imageDetail',\n\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\t\tshow: {\n\t\t\t\t\t\t\t\ttype: [HumanMessagePromptTemplate.lc_name()],\n\t\t\t\t\t\t\t\tmessageType: ['imageBinary', 
'imageUrl'],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\toptions: [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Auto',\n\t\t\t\t\t\t\t\tvalue: 'auto',\n\t\t\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\t\t'Model will use the auto setting which will look at the image input size and decide if it should use the low or high setting',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Low',\n\t\t\t\t\t\t\t\tvalue: 'low',\n\t\t\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\t\t'The model will receive a low-res 512px x 512px version of the image, and represent the image with a budget of 65 tokens. This allows the API to return faster responses and consume fewer input tokens for use cases that do not require high detail.',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'High',\n\t\t\t\t\t\t\t\tvalue: 'high',\n\t\t\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\t\t'Allows the model to see the low res image and then creates detailed crops of input images as 512px squares based on the input image size. Each of the detailed crops uses twice the token budget (65 tokens) for a total of 129 tokens.',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t],\n\t\t\t\t\t\tdefault: 'auto',\n\t\t\t\t\t},\n\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Message',\n\t\t\t\t\t\tname: 'message',\n\t\t\t\t\t\ttype: 'string',\n\t\t\t\t\t\trequired: true,\n\t\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\t\thide: {\n\t\t\t\t\t\t\t\tmessageType: ['imageBinary', 'imageUrl'],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tdefault: '',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t],\n\t},\n\tgetBatchingOptionFields({\n\t\tshow: {\n\t\t\t'@version': [{ _cnd: { gte: 1.7 } }],\n\t\t},\n\t}),\n\t{\n\t\tdisplayName: `Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='${NodeConnectionTypes.AiOutputParser}'>output parser</a> on the canvas to specify the output format you require`,\n\t\tname: 'notice',\n\t\ttype: 'notice',\n\t\tdefault: '',\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\thasOutputParser: 
[true],\n\t\t\t},\n\t\t},\n\t},\n];\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,qBAIO;AAEP,0BAAoC;AAEpC,0BAAwD;AACxD,0BAAgE;AAKzD,SAAS,UAAU,YAAyB;AAClD,QAAM,SAAoC;AAAA,IACzC,EAAE,aAAa,IAAI,MAAM,OAAO;AAAA,IAChC;AAAA,MACC,aAAa;AAAA,MACb,gBAAgB;AAAA,MAChB,MAAM;AAAA,MACN,UAAU;AAAA,IACX;AAAA,EACD;AAIA,QAAM,kBAAkB,YAAY;AACpC,MAAI,oBAAoB,UAAa,oBAAoB,MAAM;AAC9D,WAAO,KAAK;AAAA,MACX,aAAa;AAAA,MACb,MAAM;AAAA,MACN,gBAAgB;AAAA,MAChB,UAAU;AAAA,IACX,CAAC;AAAA,EACF;AAEA,SAAO;AACR;AAKO,MAAM,iBAAoC;AAAA,MAChD,4CAAuB,IAAI;AAAA,EAC3B;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,UAAU;AAAA,IACV,SAAS;AAAA,IACT,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,YAAY,CAAC,CAAC;AAAA,MACf;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,UAAU;AAAA,IACV,SAAS;AAAA,IACT,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,YAAY,CAAC,KAAK,GAAG;AAAA,MACtB;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,UAAU;AAAA,IACV,SAAS;AAAA,IACT,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,YAAY,CAAC,GAAG;AAAA,MACjB;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,GAAG;AAAA,IACH,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,YAAY,CAAC,GAAG,KAAK,KAAK,GAAG;AAAA,MAC9B;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,GAAG;AAAA,IACH,gBAAgB,EAAE,MAAM,EAAE,YAAY,CAAC,MAAM,GAAG,YAAY,CAAC,EAAE,MAAM,EAAE,KAAK,IAAI,EAAE,CAAC,EAAE,EAAE;AAAA,EACxF;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,UAAU;AAAA,IACV,SAAS;AAAA,IACT,aAAa;AAAA,IACb,aAAa;AAAA,MACZ,MAAM;AAAA,IACP;AAAA,IACA,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,YAAY,CAAC,QAAQ;AAAA,MACtB;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,kBAAkB;AAAA,IAClB,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,YAAY,CAAC,GAAG,KAAK,GAAG;AAAA,MACzB;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,aAAa;AAAA,MACZ,gBAAgB;AAAA,IACjB;AAAA,IACA,SAAS,CAAC;AAAA,IACV,aAAa;AAAA,IACb,SAAS;AAAA,MACR;AAAA,QACC,MAAM;AAAA,QACN,aAAa;AAAA,QACb,QAAQ;AAAA,UACP;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,SAAS;AAAA,cACR;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO,uCAAwB,QAAQ;AAAA,cACxC;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO,2CAA4B,QAAQ;AAAA,cAC5C;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO,0CAA2B,QAAQ;AAAA,cAC3C;AAAA,YACD;AAAA,YACA,SAAS,2CAA4B,QAAQ;AAAA,UAC9C;AAAA,UACA;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,gBAAgB;AAAA,cACf,MAAM;AAAA,gBACL,MAAM,CAAC,0CAA2B,QAAQ,CAAC;AAAA,cAC5C;AAAA,YACD;AAAA,YACA,SAAS;AAAA,cACR;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aAAa;AAAA,cACd;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aAAa;AAAA,cACd;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aAAa;AAAA,cACd;AAAA,YACD;AAAA,YACA,SAAS;AAAA,UACV;AAAA,UACA;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,SAAS;AAAA,YACT,UAAU;AAAA,YACV,aACC;AAAA,YACD,gBAAgB;AAAA,cACf,MAAM;AAAA,gBACL,aAAa,CAAC,aAAa;AAAA,cAC5B;AAAA,YACD;AAAA,UACD;AAAA,UACA;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,SAAS;AAAA,YACT,UAAU;AAAA,YACV,aAAa;AAAA,YACb,gBAAgB;AAAA,cACf,MAAM;AAAA,gBACL,aAAa,CAAC,UAAU;AAAA,cACzB;AAAA,YACD;AAAA,UACD;AAAA,UACA;AAAA,YACC,aAAa;AAAA,YACb,aACC;AAAA,YACD,MAAM;AAAA,YACN,MAAM;AAAA,YACN,gBAAgB;AAAA,cACf,MAAM;AAAA,gBACL,MAAM,CAAC,0CAA2B,QAAQ,CAAC;AAAA,gBAC3C,aAAa,CAAC,eAAe,UAAU;AAAA,cACxC;AAAA,YACD;AAAA,YACA,SAAS;AAAA,cACR;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aACC;AAAA,cACF;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aACC;AAAA,cACF;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aACC;AAAA,cACF;AAAA,YACD;AAAA,YACA,SAAS;AAAA,UACV;AAAA,UAEA;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,UAAU;A
AAA,YACV,gBAAgB;AAAA,cACf,MAAM;AAAA,gBACL,aAAa,CAAC,eAAe,UAAU;AAAA,cACxC;AAAA,YACD;AAAA,YACA,SAAS;AAAA,UACV;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA,EACD;AAAA,MACA,6CAAwB;AAAA,IACvB,MAAM;AAAA,MACL,YAAY,CAAC,EAAE,MAAM,EAAE,KAAK,IAAI,EAAE,CAAC;AAAA,IACpC;AAAA,EACD,CAAC;AAAA,EACD;AAAA,IACC,aAAa,8FAA8F,wCAAoB,cAAc;AAAA,IAC7I,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,iBAAiB,CAAC,IAAI;AAAA,MACvB;AAAA,IACD;AAAA,EACD;AACD;","names":[]}
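The raw mappings above are machine output; the useful part of this map diff is the embedded config.ts source. Its getInputs helper (reproduced verbatim below, with the relevant imports) builds the node's connectors dynamically: a main input and a required Model input are always present, and an Output Parser input is added unless hasOutputParser is explicitly false, since an undefined value means the node predates the toggle.

import type { IDataObject, INodeInputConfiguration } from 'n8n-workflow';

export function getInputs(parameters: IDataObject) {
	const inputs: INodeInputConfiguration[] = [
		{ displayName: '', type: 'main' },
		{
			displayName: 'Model',
			maxConnections: 1,
			type: 'ai_languageModel',
			required: true,
		},
	];

	// If `hasOutputParser` is undefined it must be version 1.3 or earlier so we
	// always add the output parser input
	const hasOutputParser = parameters?.hasOutputParser;
	if (hasOutputParser === undefined || hasOutputParser === true) {
		inputs.push({
			displayName: 'Output Parser',
			type: 'ai_outputParser',
			maxConnections: 1,
			required: false,
		});
	}

	return inputs;
}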
@@ -0,0 +1,66 @@
+ "use strict";
+ var __defProp = Object.defineProperty;
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+ var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __export = (target, all) => {
+   for (var name in all)
+     __defProp(target, name, { get: all[name], enumerable: true });
+ };
+ var __copyProps = (to, from, except, desc) => {
+   if (from && typeof from === "object" || typeof from === "function") {
+     for (let key of __getOwnPropNames(from))
+       if (!__hasOwnProp.call(to, key) && key !== except)
+         __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+   }
+   return to;
+ };
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+ var processItem_exports = {};
+ __export(processItem_exports, {
+   processItem: () => processItem
+ });
+ module.exports = __toCommonJS(processItem_exports);
+ var import_n8n_workflow = require("n8n-workflow");
+ var import_helpers = require("../../../../utils/helpers");
+ var import_N8nOutputParser = require("../../../../utils/output_parsers/N8nOutputParser");
+ var import_chainExecutor = require("./chainExecutor");
+ const processItem = async (ctx, itemIndex) => {
+   const llm = await ctx.getInputConnectionData(
+     import_n8n_workflow.NodeConnectionTypes.AiLanguageModel,
+     0
+   );
+   const outputParser = await (0, import_N8nOutputParser.getOptionalOutputParser)(ctx);
+   let prompt;
+   if (ctx.getNode().typeVersion <= 1.3) {
+     prompt = ctx.getNodeParameter("prompt", itemIndex);
+   } else {
+     prompt = (0, import_helpers.getPromptInputByType)({
+       ctx,
+       i: itemIndex,
+       inputKey: "text",
+       promptTypeKey: "promptType"
+     });
+   }
+   if (prompt === void 0) {
+     throw new import_n8n_workflow.NodeOperationError(ctx.getNode(), "The 'prompt' parameter is empty.");
+   }
+   const messages = ctx.getNodeParameter(
+     "messages.messageValues",
+     itemIndex,
+     []
+   );
+   return await (0, import_chainExecutor.executeChain)({
+     context: ctx,
+     itemIndex,
+     query: prompt,
+     llm,
+     outputParser,
+     messages
+   });
+ };
+ // Annotate the CommonJS export names for ESM import in node:
+ 0 && (module.exports = {
+   processItem
+ });
+ //# sourceMappingURL=processItem.js.map
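The new processItem module above isolates all per-item work for the ChainLLM node: it resolves the connected language model, an optional output parser, and the version-appropriate prompt (the 'prompt' parameter up to typeVersion 1.3, getPromptInputByType from 1.4 on), then delegates to executeChain. A hedged sketch of a driver loop, assuming only the signature visible above (the runAllItems wrapper is illustrative and not part of this package; the real node adds batching and continueOnFail handling around the same call):

import type { IDataObject, IExecuteFunctions, INodeExecutionData } from 'n8n-workflow';
import { processItem } from './processItem';

async function runAllItems(ctx: IExecuteFunctions): Promise<INodeExecutionData[][]> {
	const items = ctx.getInputData();
	const returnData: INodeExecutionData[] = [];
	for (let itemIndex = 0; itemIndex < items.length; itemIndex++) {
		// processItem resolves the model, optional parser, and prompt for this item
		const response = (await processItem(ctx, itemIndex)) as IDataObject;
		returnData.push({ json: { response } });
	}
	return [returnData];
}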
@@ -0,0 +1 @@
+ [… source map for the new processItem.js omitted; it maps back to nodes/chains/ChainLLM/methods/processItem.ts, whose content matches the compiled module above …]
@@ -21,29 +21,11 @@ __export(ChainRetrievalQa_node_exports, {
    ChainRetrievalQa: () => ChainRetrievalQa
  });
  module.exports = __toCommonJS(ChainRetrievalQa_node_exports);
- var import_prompts = require("@langchain/core/prompts");
- var import_combine_documents = require("langchain/chains/combine_documents");
- var import_retrieval = require("langchain/chains/retrieval");
  var import_n8n_workflow = require("n8n-workflow");
  var import_descriptions = require("../../../utils/descriptions");
- var import_helpers = require("../../../utils/helpers");
  var import_sharedFields = require("../../../utils/sharedFields");
- var import_tracing = require("../../../utils/tracing");
- const SYSTEM_PROMPT_TEMPLATE = `You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question.
- If you don't know the answer, just say that you don't know, don't try to make up an answer.
- ----------------
- Context: {context}`;
- const LEGACY_INPUT_TEMPLATE_KEY = "question";
- const INPUT_TEMPLATE_KEY = "input";
- const systemPromptOption = {
-   displayName: "System Prompt Template",
-   name: "systemPromptTemplate",
-   type: "string",
-   default: SYSTEM_PROMPT_TEMPLATE,
-   typeOptions: {
-     rows: 6
-   }
- };
+ var import_constants = require("./constants");
+ var import_processItem = require("./processItem");
  class ChainRetrievalQa {
    constructor() {
      this.description = {
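The constants deleted above were not dropped from the package: they moved into the new ./constants module required in their place. Reconstructed from the removed lines, constants.ts plausibly exports the following (the exact file layout is an assumption; the names are confirmed by the import_constants references further down):

import type { INodeProperties } from 'n8n-workflow';

export const SYSTEM_PROMPT_TEMPLATE = `You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
----------------
Context: {context}`;

export const LEGACY_INPUT_TEMPLATE_KEY = 'question';
export const INPUT_TEMPLATE_KEY = 'input';

// Shared base for the two version-gated "System Prompt Template" options
export const systemPromptOption: INodeProperties = {
	displayName: 'System Prompt Template',
	name: 'systemPromptTemplate',
	type: 'string',
	default: SYSTEM_PROMPT_TEMPLATE,
	typeOptions: {
		rows: 6,
	},
};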
@@ -52,7 +34,7 @@ class ChainRetrievalQa {
        icon: "fa:link",
        iconColor: "black",
        group: ["transform"],
-       version: [1, 1.1, 1.2, 1.3, 1.4, 1.5],
+       version: [1, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6],
        description: "Answer questions about retrieved documents",
        defaults: {
          name: "Question and Answer Chain",
@@ -164,8 +146,8 @@ class ChainRetrievalQa {
            placeholder: "Add Option",
            options: [
              {
-               ...systemPromptOption,
-               description: `Template string used for the system prompt. This should include the variable \`{context}\` for the provided context. For text completion models, you should also include the variable \`{${LEGACY_INPUT_TEMPLATE_KEY}}\` for the user\u2019s query.`,
+               ...import_constants.systemPromptOption,
+               description: `Template string used for the system prompt. This should include the variable \`{context}\` for the provided context. For text completion models, you should also include the variable \`{${import_constants.LEGACY_INPUT_TEMPLATE_KEY}}\` for the user\u2019s query.`,
                displayOptions: {
                  show: {
                    "@version": [{ _cnd: { lt: 1.5 } }]
@@ -173,14 +155,19 @@ class ChainRetrievalQa {
                }
              },
              {
-               ...systemPromptOption,
-               description: `Template string used for the system prompt. This should include the variable \`{context}\` for the provided context. For text completion models, you should also include the variable \`{${INPUT_TEMPLATE_KEY}}\` for the user\u2019s query.`,
+               ...import_constants.systemPromptOption,
+               description: `Template string used for the system prompt. This should include the variable \`{context}\` for the provided context. For text completion models, you should also include the variable \`{${import_constants.INPUT_TEMPLATE_KEY}}\` for the user\u2019s query.`,
                displayOptions: {
                  show: {
                    "@version": [{ _cnd: { gte: 1.5 } }]
                  }
                }
-             }
+             },
+             (0, import_sharedFields.getBatchingOptionFields)({
+               show: {
+                 "@version": [{ _cnd: { gte: 1.6 } }]
+               }
+             })
            ]
          }
        ]
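getBatchingOptionFields is the same shared helper the ChainLLM config above uses (there gated to node versions >= 1.7, here to >= 1.6). Its implementation is outside this diff; judging from the parameter paths and fallbacks read in execute() below ('options.batching.batchSize' defaulting to 5, 'options.batching.delayBetweenBatches' defaulting to 0), it plausibly contributes a collection shaped like this sketch (display names and the exact structure are assumptions):

import type { IDisplayOptions, INodeProperties } from 'n8n-workflow';

// Hypothetical reconstruction of the shared batching option field
function batchingOptionFieldsSketch(displayOptions?: IDisplayOptions): INodeProperties {
	return {
		displayName: 'Batch Processing',
		name: 'batching',
		type: 'collection',
		default: {},
		displayOptions,
		options: [
			{ displayName: 'Batch Size', name: 'batchSize', type: 'number', default: 5 },
			{ displayName: 'Delay Between Batches (ms)', name: 'delayBetweenBatches', type: 'number', default: 0 },
		],
	};
}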
@@ -190,79 +177,68 @@ class ChainRetrievalQa {
      this.logger.debug("Executing Retrieval QA Chain");
      const items = this.getInputData();
      const returnData = [];
-     for (let itemIndex = 0; itemIndex < items.length; itemIndex++) {
-       try {
-         const model = await this.getInputConnectionData(
-           import_n8n_workflow.NodeConnectionTypes.AiLanguageModel,
-           0
-         );
-         const retriever = await this.getInputConnectionData(
-           import_n8n_workflow.NodeConnectionTypes.AiRetriever,
-           0
-         );
-         let query;
-         if (this.getNode().typeVersion <= 1.2) {
-           query = this.getNodeParameter("query", itemIndex);
-         } else {
-           query = (0, import_helpers.getPromptInputByType)({
-             ctx: this,
-             i: itemIndex,
-             inputKey: "text",
-             promptTypeKey: "promptType"
-           });
-         }
-         if (query === void 0) {
-           throw new import_n8n_workflow.NodeOperationError(this.getNode(), "The \u2018query\u2018 parameter is empty.");
-         }
-         const options = this.getNodeParameter("options", itemIndex, {});
-         let templateText = options.systemPromptTemplate ?? SYSTEM_PROMPT_TEMPLATE;
-         if (this.getNode().typeVersion < 1.5) {
-           templateText = templateText.replace(
-             `{${LEGACY_INPUT_TEMPLATE_KEY}}`,
-             `{${INPUT_TEMPLATE_KEY}}`
-           );
-         }
-         let promptTemplate;
-         if ((0, import_helpers.isChatInstance)(model)) {
-           const messages = [
-             import_prompts.SystemMessagePromptTemplate.fromTemplate(templateText),
-             import_prompts.HumanMessagePromptTemplate.fromTemplate("{input}")
-           ];
-           promptTemplate = import_prompts.ChatPromptTemplate.fromMessages(messages);
-         } else {
-           const questionSuffix = options.systemPromptTemplate === void 0 ? "\n\nQuestion: {input}\nAnswer:" : "";
-           promptTemplate = new import_prompts.PromptTemplate({
-             template: templateText + questionSuffix,
-             inputVariables: ["context", "input"]
-           });
-         }
-         const combineDocsChain = await (0, import_combine_documents.createStuffDocumentsChain)({
-           llm: model,
-           prompt: promptTemplate
+     const batchSize = this.getNodeParameter("options.batching.batchSize", 0, 5);
+     const delayBetweenBatches = this.getNodeParameter(
+       "options.batching.delayBetweenBatches",
+       0,
+       0
+     );
+     if (this.getNode().typeVersion >= 1.6 && batchSize >= 1) {
+       for (let i = 0; i < items.length; i += batchSize) {
+         const batch = items.slice(i, i + batchSize);
+         const batchPromises = batch.map(async (_item, batchItemIndex) => {
+           return await (0, import_processItem.processItem)(this, i + batchItemIndex);
          });
-         const retrievalChain = await (0, import_retrieval.createRetrievalChain)({
-           combineDocsChain,
-           retriever
+         const batchResults = await Promise.allSettled(batchPromises);
+         batchResults.forEach((response, index) => {
+           if (response.status === "rejected") {
+             const error = response.reason;
+             if (this.continueOnFail()) {
+               const metadata = (0, import_n8n_workflow.parseErrorMetadata)(error);
+               returnData.push({
+                 json: { error: error.message },
+                 pairedItem: { item: index },
+                 metadata
+               });
+               return;
+             } else {
+               throw error;
+             }
+           }
+           const output = response.value;
+           const answer = output.answer;
+           if (this.getNode().typeVersion >= 1.5) {
+             returnData.push({ json: { response: answer } });
+           } else {
+             returnData.push({ json: { response: { text: answer } } });
+           }
          });
-         const tracingConfig = (0, import_tracing.getTracingConfig)(this);
-         const response = await retrievalChain.withConfig(tracingConfig).invoke({ input: query }, { signal: this.getExecutionCancelSignal() });
-         const answer = response.answer;
-         if (this.getNode().typeVersion >= 1.5) {
-           returnData.push({ json: { response: answer } });
-         } else {
-           returnData.push({ json: { response: { text: answer } } });
+         if (i + batchSize < items.length && delayBetweenBatches > 0) {
+           await (0, import_n8n_workflow.sleep)(delayBetweenBatches);
          }
-       } catch (error) {
-         if (this.continueOnFail()) {
-           const metadata = (0, import_n8n_workflow.parseErrorMetadata)(error);
-           returnData.push({
-             json: { error: error.message },
-             pairedItem: { item: itemIndex },
-             metadata
-           });
-           continue;
+       }
+     } else {
+       for (let itemIndex = 0; itemIndex < items.length; itemIndex++) {
+         try {
+           const response = await (0, import_processItem.processItem)(this, itemIndex);
+           const answer = response.answer;
+           if (this.getNode().typeVersion >= 1.5) {
+             returnData.push({ json: { response: answer } });
+           } else {
+             returnData.push({ json: { response: { text: answer } } });
+           }
+         } catch (error) {
+           if (this.continueOnFail()) {
+             const metadata = (0, import_n8n_workflow.parseErrorMetadata)(error);
+             returnData.push({
+               json: { error: error.message },
+               pairedItem: { item: itemIndex },
+               metadata
+             });
+             continue;
+           }
+           throw error;
          }
-         throw error;
        }
      }
      return [returnData];
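The rewritten execute() now has two paths: node versions >= 1.6 process items in concurrent batches via Promise.allSettled, pausing delayBetweenBatches milliseconds between batches, while older versions keep the sequential per-item loop; both delegate the per-item work to processItem. One caveat visible in the new code: the rejected branch records pairedItem: { item: index } with the batch-local index rather than i + index, so error rows from the second batch onward may point at the wrong input item. A minimal, self-contained sketch of the same batching pattern (generic names, not the package's API):

// Runs `handler` over `items` in batches of `batchSize`, isolating per-item
// failures with Promise.allSettled and sleeping `delayMs` between batches.
async function processInBatches<T, R>(
	items: T[],
	batchSize: number,
	delayMs: number,
	handler: (item: T, index: number) => Promise<R>,
): Promise<Array<PromiseSettledResult<R>>> {
	const results: Array<PromiseSettledResult<R>> = [];
	for (let i = 0; i < items.length; i += batchSize) {
		const batch = items.slice(i, i + batchSize);
		// i + j is the absolute item index, avoiding the batch-local pitfall noted above
		const settled = await Promise.allSettled(batch.map(async (item, j) => await handler(item, i + j)));
		results.push(...settled);
		if (i + batchSize < items.length && delayMs > 0) {
			await new Promise((resolve) => setTimeout(resolve, delayMs));
		}
	}
	return results;
}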