@n8n/n8n-nodes-langchain 1.99.1 → 1.100.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. package/dist/nodes/agents/Agent/V2/AgentV2.node.js +51 -34
  2. package/dist/nodes/agents/Agent/V2/AgentV2.node.js.map +1 -1
  3. package/dist/nodes/agents/Agent/agents/ToolsAgent/V1/execute.js.map +1 -1
  4. package/dist/nodes/agents/Agent/agents/ToolsAgent/V2/execute.js +57 -28
  5. package/dist/nodes/agents/Agent/agents/ToolsAgent/V2/execute.js.map +1 -1
  6. package/dist/nodes/agents/Agent/agents/ToolsAgent/common.js +12 -2
  7. package/dist/nodes/agents/Agent/agents/ToolsAgent/common.js.map +1 -1
  8. package/dist/nodes/chains/ChainLLM/methods/chainExecutor.js +13 -4
  9. package/dist/nodes/chains/ChainLLM/methods/chainExecutor.js.map +1 -1
  10. package/dist/nodes/chains/ChainLLM/methods/config.js +32 -0
  11. package/dist/nodes/chains/ChainLLM/methods/config.js.map +1 -1
  12. package/dist/nodes/chains/ChainLLM/methods/processItem.js +38 -6
  13. package/dist/nodes/chains/ChainLLM/methods/processItem.js.map +1 -1
  14. package/dist/nodes/chains/ChainLLM/methods/types.js.map +1 -1
  15. package/dist/nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.js +1 -1
  16. package/dist/nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.js.map +1 -1
  17. package/dist/nodes/llms/N8nLlmTracing.js +2 -6
  18. package/dist/nodes/llms/N8nLlmTracing.js.map +1 -1
  19. package/dist/nodes/text_splitters/TextSplitterTokenSplitter/TokenTextSplitter.js +43 -14
  20. package/dist/nodes/text_splitters/TextSplitterTokenSplitter/TokenTextSplitter.js.map +1 -1
  21. package/dist/nodes/tools/ToolWorkflow/v2/utils/WorkflowToolService.js +74 -41
  22. package/dist/nodes/tools/ToolWorkflow/v2/utils/WorkflowToolService.js.map +1 -1
  23. package/dist/types/nodes.json +7 -7
  24. package/dist/utils/descriptions.js +1 -1
  25. package/dist/utils/descriptions.js.map +1 -1
  26. package/dist/utils/helpers.js +29 -0
  27. package/dist/utils/helpers.js.map +1 -1
  28. package/dist/utils/output_parsers/N8nOutputParser.js +2 -2
  29. package/dist/utils/output_parsers/N8nOutputParser.js.map +1 -1
  30. package/dist/utils/tokenizer/token-estimator.js +119 -0
  31. package/dist/utils/tokenizer/token-estimator.js.map +1 -0
  32. package/package.json +12 -10

package/dist/nodes/chains/ChainLLM/methods/config.js
@@ -36,6 +36,15 @@ function getInputs(parameters) {
       required: true
     }
   ];
+  const needsFallback = parameters?.needsFallback;
+  if (needsFallback === void 0 || needsFallback === true) {
+    inputs.push({
+      displayName: "Fallback Model",
+      maxConnections: 1,
+      type: "ai_languageModel",
+      required: true
+    });
+  }
   const hasOutputParser = parameters?.hasOutputParser;
   if (hasOutputParser === void 0 || hasOutputParser === true) {
     inputs.push({
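
The toggle's effect on the node's connector list can be seen by exercising getInputs directly. Below is a self-contained restatement of the rule with simplified stand-in types (the real code uses n8n's INodeInputConfiguration); the expected outputs are derived from the diff above, not from running the package:

// Stand-in for n8n's INodeInputConfiguration, reduced to the fields used here
type NodeInput = { displayName: string; type: string; maxConnections?: number; required?: boolean };

function getInputs(parameters: { needsFallback?: boolean; hasOutputParser?: boolean }): NodeInput[] {
  const inputs: NodeInput[] = [
    { displayName: '', type: 'main' },
    { displayName: 'Model', maxConnections: 1, type: 'ai_languageModel', required: true },
  ];
  // undefined is treated like true, matching the `=== void 0 || === true` check above
  if (parameters.needsFallback === undefined || parameters.needsFallback === true) {
    inputs.push({ displayName: 'Fallback Model', maxConnections: 1, type: 'ai_languageModel', required: true });
  }
  if (parameters.hasOutputParser === undefined || parameters.hasOutputParser === true) {
    inputs.push({ displayName: 'Output Parser', type: 'ai_outputParser', maxConnections: 1, required: false });
  }
  return inputs;
}

console.log(getInputs({}).map((i) => i.displayName));
// [ '', 'Model', 'Fallback Model', 'Output Parser' ]
console.log(getInputs({ needsFallback: false, hasOutputParser: false }).map((i) => i.displayName));
// [ '', 'Model' ]
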
@@ -125,6 +134,18 @@ const nodeProperties = [
       }
     }
   },
+  {
+    displayName: "Enable Fallback Model",
+    name: "needsFallback",
+    type: "boolean",
+    default: false,
+    noDataExpression: true,
+    displayOptions: {
+      hide: {
+        "@version": [1, 1.1, 1.3]
+      }
+    }
+  },
   {
     displayName: "Chat Messages (if Using a Chat Model)",
     name: "messages",
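
The hide rule keeps the new toggle off ChainLLM versions that predate it, so old workflows render unchanged. A deliberately simplified sketch of the semantics (not n8n's actual displayOptions resolver): a property is hidden when the node's version appears in the hide list.

function isToggleVisible(nodeTypeVersion: number, hiddenVersions: number[]): boolean {
  return !hiddenVersions.includes(nodeTypeVersion);
}

isToggleVisible(1.1, [1, 1.1, 1.3]); // false - hidden on legacy node versions
isToggleVisible(1.7, [1, 1.1, 1.3]); // true  - shown on current versions
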
@@ -274,6 +295,17 @@ const nodeProperties = [
         hasOutputParser: [true]
       }
     }
+  },
+  {
+    displayName: "Connect an additional language model on the canvas to use it as a fallback if the main model fails",
+    name: "fallbackNotice",
+    type: "notice",
+    default: "",
+    displayOptions: {
+      show: {
+        needsFallback: [true]
+      }
+    }
   }
 ];
 // Annotate the CommonJS export names for ESM import in node:

package/dist/nodes/chains/ChainLLM/methods/config.js.map
@@ -1 +1 @@
- {"version":3,"sources":["../../../../../nodes/chains/ChainLLM/methods/config.ts"],"sourcesContent":["import {\n\tAIMessagePromptTemplate,\n\tHumanMessagePromptTemplate,\n\tSystemMessagePromptTemplate,\n} from '@langchain/core/prompts';\nimport type { IDataObject, INodeInputConfiguration, INodeProperties } from 'n8n-workflow';\nimport { NodeConnectionTypes } from 'n8n-workflow';\n\nimport { promptTypeOptions, textFromPreviousNode } from '@utils/descriptions';\nimport { getBatchingOptionFields, getTemplateNoticeField } from '@utils/sharedFields';\n\n/**\n * Dynamic input configuration generation based on node parameters\n */\nexport function getInputs(parameters: IDataObject) {\n\tconst inputs: INodeInputConfiguration[] = [\n\t\t{ displayName: '', type: 'main' },\n\t\t{\n\t\t\tdisplayName: 'Model',\n\t\t\tmaxConnections: 1,\n\t\t\ttype: 'ai_languageModel',\n\t\t\trequired: true,\n\t\t},\n\t];\n\n\t// If `hasOutputParser` is undefined it must be version 1.3 or earlier so we\n\t// always add the output parser input\n\tconst hasOutputParser = parameters?.hasOutputParser;\n\tif (hasOutputParser === undefined || hasOutputParser === true) {\n\t\tinputs.push({\n\t\t\tdisplayName: 'Output Parser',\n\t\t\ttype: 'ai_outputParser',\n\t\t\tmaxConnections: 1,\n\t\t\trequired: false,\n\t\t});\n\t}\n\n\treturn inputs;\n}\n\n/**\n * Node properties configuration\n */\nexport const nodeProperties: INodeProperties[] = [\n\tgetTemplateNoticeField(1978),\n\t{\n\t\tdisplayName: 'Prompt',\n\t\tname: 'prompt',\n\t\ttype: 'string',\n\t\trequired: true,\n\t\tdefault: '={{ $json.input }}',\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\t'@version': [1],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName: 'Prompt',\n\t\tname: 'prompt',\n\t\ttype: 'string',\n\t\trequired: true,\n\t\tdefault: '={{ $json.chat_input }}',\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\t'@version': [1.1, 1.2],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName: 'Prompt',\n\t\tname: 'prompt',\n\t\ttype: 'string',\n\t\trequired: true,\n\t\tdefault: '={{ $json.chatInput }}',\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\t'@version': [1.3],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\t...promptTypeOptions,\n\t\tdisplayOptions: {\n\t\t\thide: {\n\t\t\t\t'@version': [1, 1.1, 1.2, 1.3],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\t...textFromPreviousNode,\n\t\tdisplayOptions: { show: { promptType: ['auto'], '@version': [{ _cnd: { gte: 1.5 } }] } },\n\t},\n\t{\n\t\tdisplayName: 'Prompt (User Message)',\n\t\tname: 'text',\n\t\ttype: 'string',\n\t\trequired: true,\n\t\tdefault: '',\n\t\tplaceholder: 'e.g. 
Hello, how can you help me?',\n\t\ttypeOptions: {\n\t\t\trows: 2,\n\t\t},\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\tpromptType: ['define'],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName: 'Require Specific Output Format',\n\t\tname: 'hasOutputParser',\n\t\ttype: 'boolean',\n\t\tdefault: false,\n\t\tnoDataExpression: true,\n\t\tdisplayOptions: {\n\t\t\thide: {\n\t\t\t\t'@version': [1, 1.1, 1.3],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName: 'Chat Messages (if Using a Chat Model)',\n\t\tname: 'messages',\n\t\ttype: 'fixedCollection',\n\t\ttypeOptions: {\n\t\t\tmultipleValues: true,\n\t\t},\n\t\tdefault: {},\n\t\tplaceholder: 'Add prompt',\n\t\toptions: [\n\t\t\t{\n\t\t\t\tname: 'messageValues',\n\t\t\t\tdisplayName: 'Prompt',\n\t\t\t\tvalues: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Type Name or ID',\n\t\t\t\t\t\tname: 'type',\n\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\toptions: [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'AI',\n\t\t\t\t\t\t\t\tvalue: AIMessagePromptTemplate.lc_name(),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'System',\n\t\t\t\t\t\t\t\tvalue: SystemMessagePromptTemplate.lc_name(),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'User',\n\t\t\t\t\t\t\t\tvalue: HumanMessagePromptTemplate.lc_name(),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t],\n\t\t\t\t\t\tdefault: SystemMessagePromptTemplate.lc_name(),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Message Type',\n\t\t\t\t\t\tname: 'messageType',\n\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\t\tshow: {\n\t\t\t\t\t\t\t\ttype: [HumanMessagePromptTemplate.lc_name()],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\toptions: [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Text',\n\t\t\t\t\t\t\t\tvalue: 'text',\n\t\t\t\t\t\t\t\tdescription: 'Simple text message',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Image (Binary)',\n\t\t\t\t\t\t\t\tvalue: 'imageBinary',\n\t\t\t\t\t\t\t\tdescription: 'Process the binary input from the previous node',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Image (URL)',\n\t\t\t\t\t\t\t\tvalue: 'imageUrl',\n\t\t\t\t\t\t\t\tdescription: 'Process the image from the specified URL',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t],\n\t\t\t\t\t\tdefault: 'text',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Image Data Field Name',\n\t\t\t\t\t\tname: 'binaryImageDataKey',\n\t\t\t\t\t\ttype: 'string',\n\t\t\t\t\t\tdefault: 'data',\n\t\t\t\t\t\trequired: true,\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\"The name of the field in the chain's input that contains the binary image file to be processed\",\n\t\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\t\tshow: {\n\t\t\t\t\t\t\t\tmessageType: ['imageBinary'],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Image URL',\n\t\t\t\t\t\tname: 'imageUrl',\n\t\t\t\t\t\ttype: 'string',\n\t\t\t\t\t\tdefault: '',\n\t\t\t\t\t\trequired: true,\n\t\t\t\t\t\tdescription: 'URL to the image to be processed',\n\t\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\t\tshow: {\n\t\t\t\t\t\t\t\tmessageType: ['imageUrl'],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Image Details',\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Control how the model processes the image and generates its textual understanding',\n\t\t\t\t\t\tname: 'imageDetail',\n\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\t\tshow: {\n\t\t\t\t\t\t\t\ttype: [HumanMessagePromptTemplate.lc_name()],\n\t\t\t\t\t\t\t\tmessageType: ['imageBinary', 
'imageUrl'],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\toptions: [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Auto',\n\t\t\t\t\t\t\t\tvalue: 'auto',\n\t\t\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\t\t'Model will use the auto setting which will look at the image input size and decide if it should use the low or high setting',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Low',\n\t\t\t\t\t\t\t\tvalue: 'low',\n\t\t\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\t\t'The model will receive a low-res 512px x 512px version of the image, and represent the image with a budget of 65 tokens. This allows the API to return faster responses and consume fewer input tokens for use cases that do not require high detail.',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'High',\n\t\t\t\t\t\t\t\tvalue: 'high',\n\t\t\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\t\t'Allows the model to see the low res image and then creates detailed crops of input images as 512px squares based on the input image size. Each of the detailed crops uses twice the token budget (65 tokens) for a total of 129 tokens.',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t],\n\t\t\t\t\t\tdefault: 'auto',\n\t\t\t\t\t},\n\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Message',\n\t\t\t\t\t\tname: 'message',\n\t\t\t\t\t\ttype: 'string',\n\t\t\t\t\t\trequired: true,\n\t\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\t\thide: {\n\t\t\t\t\t\t\t\tmessageType: ['imageBinary', 'imageUrl'],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tdefault: '',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t],\n\t},\n\tgetBatchingOptionFields({\n\t\tshow: {\n\t\t\t'@version': [{ _cnd: { gte: 1.7 } }],\n\t\t},\n\t}),\n\t{\n\t\tdisplayName: `Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='${NodeConnectionTypes.AiOutputParser}'>output parser</a> on the canvas to specify the output format you require`,\n\t\tname: 'notice',\n\t\ttype: 'notice',\n\t\tdefault: '',\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\thasOutputParser: 
[true],\n\t\t\t},\n\t\t},\n\t},\n];\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,qBAIO;AAEP,0BAAoC;AAEpC,0BAAwD;AACxD,0BAAgE;AAKzD,SAAS,UAAU,YAAyB;AAClD,QAAM,SAAoC;AAAA,IACzC,EAAE,aAAa,IAAI,MAAM,OAAO;AAAA,IAChC;AAAA,MACC,aAAa;AAAA,MACb,gBAAgB;AAAA,MAChB,MAAM;AAAA,MACN,UAAU;AAAA,IACX;AAAA,EACD;AAIA,QAAM,kBAAkB,YAAY;AACpC,MAAI,oBAAoB,UAAa,oBAAoB,MAAM;AAC9D,WAAO,KAAK;AAAA,MACX,aAAa;AAAA,MACb,MAAM;AAAA,MACN,gBAAgB;AAAA,MAChB,UAAU;AAAA,IACX,CAAC;AAAA,EACF;AAEA,SAAO;AACR;AAKO,MAAM,iBAAoC;AAAA,MAChD,4CAAuB,IAAI;AAAA,EAC3B;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,UAAU;AAAA,IACV,SAAS;AAAA,IACT,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,YAAY,CAAC,CAAC;AAAA,MACf;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,UAAU;AAAA,IACV,SAAS;AAAA,IACT,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,YAAY,CAAC,KAAK,GAAG;AAAA,MACtB;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,UAAU;AAAA,IACV,SAAS;AAAA,IACT,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,YAAY,CAAC,GAAG;AAAA,MACjB;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,GAAG;AAAA,IACH,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,YAAY,CAAC,GAAG,KAAK,KAAK,GAAG;AAAA,MAC9B;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,GAAG;AAAA,IACH,gBAAgB,EAAE,MAAM,EAAE,YAAY,CAAC,MAAM,GAAG,YAAY,CAAC,EAAE,MAAM,EAAE,KAAK,IAAI,EAAE,CAAC,EAAE,EAAE;AAAA,EACxF;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,UAAU;AAAA,IACV,SAAS;AAAA,IACT,aAAa;AAAA,IACb,aAAa;AAAA,MACZ,MAAM;AAAA,IACP;AAAA,IACA,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,YAAY,CAAC,QAAQ;AAAA,MACtB;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,kBAAkB;AAAA,IAClB,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,YAAY,CAAC,GAAG,KAAK,GAAG;AAAA,MACzB;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,aAAa;AAAA,MACZ,gBAAgB;AAAA,IACjB;AAAA,IACA,SAAS,CAAC;AAAA,IACV,aAAa;AAAA,IACb,SAAS;AAAA,MACR;AAAA,QACC,MAAM;AAAA,QACN,aAAa;AAAA,QACb,QAAQ;AAAA,UACP;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,SAAS;AAAA,cACR;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO,uCAAwB,QAAQ;AAAA,cACxC;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO,2CAA4B,QAAQ;AAAA,cAC5C;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO,0CAA2B,QAAQ;AAAA,cAC3C;AAAA,YACD;AAAA,YACA,SAAS,2CAA4B,QAAQ;AAAA,UAC9C;AAAA,UACA;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,gBAAgB;AAAA,cACf,MAAM;AAAA,gBACL,MAAM,CAAC,0CAA2B,QAAQ,CAAC;AAAA,cAC5C;AAAA,YACD;AAAA,YACA,SAAS;AAAA,cACR;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aAAa;AAAA,cACd;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aAAa;AAAA,cACd;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aAAa;AAAA,cACd;AAAA,YACD;AAAA,YACA,SAAS;AAAA,UACV;AAAA,UACA;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,SAAS;AAAA,YACT,UAAU;AAAA,YACV,aACC;AAAA,YACD,gBAAgB;AAAA,cACf,MAAM;AAAA,gBACL,aAAa,CAAC,aAAa;AAAA,cAC5B;AAAA,YACD;AAAA,UACD;AAAA,UACA;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,SAAS;AAAA,YACT,UAAU;AAAA,YACV,aAAa;AAAA,YACb,gBAAgB;AAAA,cACf,MAAM;AAAA,gBACL,aAAa,CAAC,UAAU;AAAA,cACzB;AAAA,YACD;AAAA,UACD;AAAA,UACA;AAAA,YACC,aAAa;AAAA,YACb,aACC;AAAA,YACD,MAAM;AAAA,YACN,MAAM;AAAA,YACN,gBAAgB;AAAA,cACf,MAAM;AAAA,gBACL,MAAM,CAAC,0CAA2B,QAAQ,CAAC;AAAA,gBAC3C,aAAa,CAAC,eAAe,UAAU;AAAA,cACxC;AAAA,YACD;AAAA,YACA,SAAS;AAAA,cACR;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aACC;AAAA,cACF;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aACC;AAAA,cACF;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aACC;AAAA,cACF;AAAA,YACD;AAAA,YACA,SAAS;AAAA,UACV;AAAA,UAEA;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,UAAU;A
AAA,YACV,gBAAgB;AAAA,cACf,MAAM;AAAA,gBACL,aAAa,CAAC,eAAe,UAAU;AAAA,cACxC;AAAA,YACD;AAAA,YACA,SAAS;AAAA,UACV;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA,EACD;AAAA,MACA,6CAAwB;AAAA,IACvB,MAAM;AAAA,MACL,YAAY,CAAC,EAAE,MAAM,EAAE,KAAK,IAAI,EAAE,CAAC;AAAA,IACpC;AAAA,EACD,CAAC;AAAA,EACD;AAAA,IACC,aAAa,8FAA8F,wCAAoB,cAAc;AAAA,IAC7I,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,iBAAiB,CAAC,IAAI;AAAA,MACvB;AAAA,IACD;AAAA,EACD;AACD;","names":[]}
+ {"version":3,"sources":["../../../../../nodes/chains/ChainLLM/methods/config.ts"],"sourcesContent":["import {\n\tAIMessagePromptTemplate,\n\tHumanMessagePromptTemplate,\n\tSystemMessagePromptTemplate,\n} from '@langchain/core/prompts';\nimport type { IDataObject, INodeInputConfiguration, INodeProperties } from 'n8n-workflow';\nimport { NodeConnectionTypes } from 'n8n-workflow';\n\nimport { promptTypeOptions, textFromPreviousNode } from '@utils/descriptions';\nimport { getBatchingOptionFields, getTemplateNoticeField } from '@utils/sharedFields';\n\n/**\n * Dynamic input configuration generation based on node parameters\n */\nexport function getInputs(parameters: IDataObject) {\n\tconst inputs: INodeInputConfiguration[] = [\n\t\t{ displayName: '', type: 'main' },\n\t\t{\n\t\t\tdisplayName: 'Model',\n\t\t\tmaxConnections: 1,\n\t\t\ttype: 'ai_languageModel',\n\t\t\trequired: true,\n\t\t},\n\t];\n\n\tconst needsFallback = parameters?.needsFallback;\n\n\tif (needsFallback === undefined || needsFallback === true) {\n\t\tinputs.push({\n\t\t\tdisplayName: 'Fallback Model',\n\t\t\tmaxConnections: 1,\n\t\t\ttype: 'ai_languageModel',\n\t\t\trequired: true,\n\t\t});\n\t}\n\n\t// If `hasOutputParser` is undefined it must be version 1.3 or earlier so we\n\t// always add the output parser input\n\tconst hasOutputParser = parameters?.hasOutputParser;\n\tif (hasOutputParser === undefined || hasOutputParser === true) {\n\t\tinputs.push({\n\t\t\tdisplayName: 'Output Parser',\n\t\t\ttype: 'ai_outputParser',\n\t\t\tmaxConnections: 1,\n\t\t\trequired: false,\n\t\t});\n\t}\n\n\treturn inputs;\n}\n\n/**\n * Node properties configuration\n */\nexport const nodeProperties: INodeProperties[] = [\n\tgetTemplateNoticeField(1978),\n\t{\n\t\tdisplayName: 'Prompt',\n\t\tname: 'prompt',\n\t\ttype: 'string',\n\t\trequired: true,\n\t\tdefault: '={{ $json.input }}',\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\t'@version': [1],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName: 'Prompt',\n\t\tname: 'prompt',\n\t\ttype: 'string',\n\t\trequired: true,\n\t\tdefault: '={{ $json.chat_input }}',\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\t'@version': [1.1, 1.2],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName: 'Prompt',\n\t\tname: 'prompt',\n\t\ttype: 'string',\n\t\trequired: true,\n\t\tdefault: '={{ $json.chatInput }}',\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\t'@version': [1.3],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\t...promptTypeOptions,\n\t\tdisplayOptions: {\n\t\t\thide: {\n\t\t\t\t'@version': [1, 1.1, 1.2, 1.3],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\t...textFromPreviousNode,\n\t\tdisplayOptions: { show: { promptType: ['auto'], '@version': [{ _cnd: { gte: 1.5 } }] } },\n\t},\n\t{\n\t\tdisplayName: 'Prompt (User Message)',\n\t\tname: 'text',\n\t\ttype: 'string',\n\t\trequired: true,\n\t\tdefault: '',\n\t\tplaceholder: 'e.g. 
Hello, how can you help me?',\n\t\ttypeOptions: {\n\t\t\trows: 2,\n\t\t},\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\tpromptType: ['define'],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName: 'Require Specific Output Format',\n\t\tname: 'hasOutputParser',\n\t\ttype: 'boolean',\n\t\tdefault: false,\n\t\tnoDataExpression: true,\n\t\tdisplayOptions: {\n\t\t\thide: {\n\t\t\t\t'@version': [1, 1.1, 1.3],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName: 'Enable Fallback Model',\n\t\tname: 'needsFallback',\n\t\ttype: 'boolean',\n\t\tdefault: false,\n\t\tnoDataExpression: true,\n\t\tdisplayOptions: {\n\t\t\thide: {\n\t\t\t\t'@version': [1, 1.1, 1.3],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName: 'Chat Messages (if Using a Chat Model)',\n\t\tname: 'messages',\n\t\ttype: 'fixedCollection',\n\t\ttypeOptions: {\n\t\t\tmultipleValues: true,\n\t\t},\n\t\tdefault: {},\n\t\tplaceholder: 'Add prompt',\n\t\toptions: [\n\t\t\t{\n\t\t\t\tname: 'messageValues',\n\t\t\t\tdisplayName: 'Prompt',\n\t\t\t\tvalues: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Type Name or ID',\n\t\t\t\t\t\tname: 'type',\n\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\toptions: [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'AI',\n\t\t\t\t\t\t\t\tvalue: AIMessagePromptTemplate.lc_name(),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'System',\n\t\t\t\t\t\t\t\tvalue: SystemMessagePromptTemplate.lc_name(),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'User',\n\t\t\t\t\t\t\t\tvalue: HumanMessagePromptTemplate.lc_name(),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t],\n\t\t\t\t\t\tdefault: SystemMessagePromptTemplate.lc_name(),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Message Type',\n\t\t\t\t\t\tname: 'messageType',\n\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\t\tshow: {\n\t\t\t\t\t\t\t\ttype: [HumanMessagePromptTemplate.lc_name()],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\toptions: [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Text',\n\t\t\t\t\t\t\t\tvalue: 'text',\n\t\t\t\t\t\t\t\tdescription: 'Simple text message',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Image (Binary)',\n\t\t\t\t\t\t\t\tvalue: 'imageBinary',\n\t\t\t\t\t\t\t\tdescription: 'Process the binary input from the previous node',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Image (URL)',\n\t\t\t\t\t\t\t\tvalue: 'imageUrl',\n\t\t\t\t\t\t\t\tdescription: 'Process the image from the specified URL',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t],\n\t\t\t\t\t\tdefault: 'text',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Image Data Field Name',\n\t\t\t\t\t\tname: 'binaryImageDataKey',\n\t\t\t\t\t\ttype: 'string',\n\t\t\t\t\t\tdefault: 'data',\n\t\t\t\t\t\trequired: true,\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\"The name of the field in the chain's input that contains the binary image file to be processed\",\n\t\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\t\tshow: {\n\t\t\t\t\t\t\t\tmessageType: ['imageBinary'],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Image URL',\n\t\t\t\t\t\tname: 'imageUrl',\n\t\t\t\t\t\ttype: 'string',\n\t\t\t\t\t\tdefault: '',\n\t\t\t\t\t\trequired: true,\n\t\t\t\t\t\tdescription: 'URL to the image to be processed',\n\t\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\t\tshow: {\n\t\t\t\t\t\t\t\tmessageType: ['imageUrl'],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Image Details',\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Control how the model processes the image and generates its textual 
understanding',\n\t\t\t\t\t\tname: 'imageDetail',\n\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\t\tshow: {\n\t\t\t\t\t\t\t\ttype: [HumanMessagePromptTemplate.lc_name()],\n\t\t\t\t\t\t\t\tmessageType: ['imageBinary', 'imageUrl'],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\toptions: [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Auto',\n\t\t\t\t\t\t\t\tvalue: 'auto',\n\t\t\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\t\t'Model will use the auto setting which will look at the image input size and decide if it should use the low or high setting',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Low',\n\t\t\t\t\t\t\t\tvalue: 'low',\n\t\t\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\t\t'The model will receive a low-res 512px x 512px version of the image, and represent the image with a budget of 65 tokens. This allows the API to return faster responses and consume fewer input tokens for use cases that do not require high detail.',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'High',\n\t\t\t\t\t\t\t\tvalue: 'high',\n\t\t\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\t\t'Allows the model to see the low res image and then creates detailed crops of input images as 512px squares based on the input image size. Each of the detailed crops uses twice the token budget (65 tokens) for a total of 129 tokens.',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t],\n\t\t\t\t\t\tdefault: 'auto',\n\t\t\t\t\t},\n\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Message',\n\t\t\t\t\t\tname: 'message',\n\t\t\t\t\t\ttype: 'string',\n\t\t\t\t\t\trequired: true,\n\t\t\t\t\t\tdisplayOptions: {\n\t\t\t\t\t\t\thide: {\n\t\t\t\t\t\t\t\tmessageType: ['imageBinary', 'imageUrl'],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tdefault: '',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t],\n\t},\n\tgetBatchingOptionFields({\n\t\tshow: {\n\t\t\t'@version': [{ _cnd: { gte: 1.7 } }],\n\t\t},\n\t}),\n\t{\n\t\tdisplayName: `Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='${NodeConnectionTypes.AiOutputParser}'>output parser</a> on the canvas to specify the output format you require`,\n\t\tname: 'notice',\n\t\ttype: 'notice',\n\t\tdefault: '',\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\thasOutputParser: [true],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName:\n\t\t\t'Connect an additional language model on the canvas to use it as a fallback if the main model fails',\n\t\tname: 'fallbackNotice',\n\t\ttype: 'notice',\n\t\tdefault: '',\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\tneedsFallback: 
[true],\n\t\t\t},\n\t\t},\n\t},\n];\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,qBAIO;AAEP,0BAAoC;AAEpC,0BAAwD;AACxD,0BAAgE;AAKzD,SAAS,UAAU,YAAyB;AAClD,QAAM,SAAoC;AAAA,IACzC,EAAE,aAAa,IAAI,MAAM,OAAO;AAAA,IAChC;AAAA,MACC,aAAa;AAAA,MACb,gBAAgB;AAAA,MAChB,MAAM;AAAA,MACN,UAAU;AAAA,IACX;AAAA,EACD;AAEA,QAAM,gBAAgB,YAAY;AAElC,MAAI,kBAAkB,UAAa,kBAAkB,MAAM;AAC1D,WAAO,KAAK;AAAA,MACX,aAAa;AAAA,MACb,gBAAgB;AAAA,MAChB,MAAM;AAAA,MACN,UAAU;AAAA,IACX,CAAC;AAAA,EACF;AAIA,QAAM,kBAAkB,YAAY;AACpC,MAAI,oBAAoB,UAAa,oBAAoB,MAAM;AAC9D,WAAO,KAAK;AAAA,MACX,aAAa;AAAA,MACb,MAAM;AAAA,MACN,gBAAgB;AAAA,MAChB,UAAU;AAAA,IACX,CAAC;AAAA,EACF;AAEA,SAAO;AACR;AAKO,MAAM,iBAAoC;AAAA,MAChD,4CAAuB,IAAI;AAAA,EAC3B;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,UAAU;AAAA,IACV,SAAS;AAAA,IACT,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,YAAY,CAAC,CAAC;AAAA,MACf;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,UAAU;AAAA,IACV,SAAS;AAAA,IACT,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,YAAY,CAAC,KAAK,GAAG;AAAA,MACtB;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,UAAU;AAAA,IACV,SAAS;AAAA,IACT,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,YAAY,CAAC,GAAG;AAAA,MACjB;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,GAAG;AAAA,IACH,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,YAAY,CAAC,GAAG,KAAK,KAAK,GAAG;AAAA,MAC9B;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,GAAG;AAAA,IACH,gBAAgB,EAAE,MAAM,EAAE,YAAY,CAAC,MAAM,GAAG,YAAY,CAAC,EAAE,MAAM,EAAE,KAAK,IAAI,EAAE,CAAC,EAAE,EAAE;AAAA,EACxF;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,UAAU;AAAA,IACV,SAAS;AAAA,IACT,aAAa;AAAA,IACb,aAAa;AAAA,MACZ,MAAM;AAAA,IACP;AAAA,IACA,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,YAAY,CAAC,QAAQ;AAAA,MACtB;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,kBAAkB;AAAA,IAClB,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,YAAY,CAAC,GAAG,KAAK,GAAG;AAAA,MACzB;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,kBAAkB;AAAA,IAClB,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,YAAY,CAAC,GAAG,KAAK,GAAG;AAAA,MACzB;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,aAAa;AAAA,MACZ,gBAAgB;AAAA,IACjB;AAAA,IACA,SAAS,CAAC;AAAA,IACV,aAAa;AAAA,IACb,SAAS;AAAA,MACR;AAAA,QACC,MAAM;AAAA,QACN,aAAa;AAAA,QACb,QAAQ;AAAA,UACP;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,SAAS;AAAA,cACR;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO,uCAAwB,QAAQ;AAAA,cACxC;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO,2CAA4B,QAAQ;AAAA,cAC5C;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO,0CAA2B,QAAQ;AAAA,cAC3C;AAAA,YACD;AAAA,YACA,SAAS,2CAA4B,QAAQ;AAAA,UAC9C;AAAA,UACA;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,gBAAgB;AAAA,cACf,MAAM;AAAA,gBACL,MAAM,CAAC,0CAA2B,QAAQ,CAAC;AAAA,cAC5C;AAAA,YACD;AAAA,YACA,SAAS;AAAA,cACR;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aAAa;AAAA,cACd;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aAAa;AAAA,cACd;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aAAa;AAAA,cACd;AAAA,YACD;AAAA,YACA,SAAS;AAAA,UACV;AAAA,UACA;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,SAAS;AAAA,YACT,UAAU;AAAA,YACV,aACC;AAAA,YACD,gBAAgB;AAAA,cACf,MAAM;AAAA,gBACL,aAAa,CAAC,aAAa;AAAA,cAC5B;AAAA,YACD;AAAA,UACD;AAAA,UACA;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,SAAS;AAAA,YACT,UAAU;AAAA,YACV,aAAa;AAAA,YACb,gBAAgB;AAAA,cACf,MAAM;AAAA,gBACL,aAAa,CAAC,UAAU;AAAA,cACzB;AAAA,YACD;AAAA,UACD;AAAA,UACA;AAAA,YACC,aAAa;AAAA,YACb,aACC;AAAA,YACD,MAAM;AAAA,YACN,MAAM;AAAA,YACN,gBAAgB;AAAA,cACf,MAAM;AAAA,gBACL,MAAM,CAAC,0CAA2B,QAAQ,CAAC;AAAA,gBAC3C,aAAa,CAAC,eAAe,UAAU
;AAAA,cACxC;AAAA,YACD;AAAA,YACA,SAAS;AAAA,cACR;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aACC;AAAA,cACF;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aACC;AAAA,cACF;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aACC;AAAA,cACF;AAAA,YACD;AAAA,YACA,SAAS;AAAA,UACV;AAAA,UAEA;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,UAAU;AAAA,YACV,gBAAgB;AAAA,cACf,MAAM;AAAA,gBACL,aAAa,CAAC,eAAe,UAAU;AAAA,cACxC;AAAA,YACD;AAAA,YACA,SAAS;AAAA,UACV;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA,EACD;AAAA,MACA,6CAAwB;AAAA,IACvB,MAAM;AAAA,MACL,YAAY,CAAC,EAAE,MAAM,EAAE,KAAK,IAAI,EAAE,CAAC;AAAA,IACpC;AAAA,EACD,CAAC;AAAA,EACD;AAAA,IACC,aAAa,8FAA8F,wCAAoB,cAAc;AAAA,IAC7I,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,iBAAiB,CAAC,IAAI;AAAA,MACvB;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aACC;AAAA,IACD,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,eAAe,CAAC,IAAI;AAAA,MACrB;AAAA,IACD;AAAA,EACD;AACD;","names":[]}

package/dist/nodes/chains/ChainLLM/methods/processItem.js
@@ -1,7 +1,9 @@
 "use strict";
+var __create = Object.create;
 var __defProp = Object.defineProperty;
 var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
 var __getOwnPropNames = Object.getOwnPropertyNames;
+var __getProtoOf = Object.getPrototypeOf;
 var __hasOwnProp = Object.prototype.hasOwnProperty;
 var __export = (target, all) => {
   for (var name in all)
@@ -15,6 +17,14 @@ var __copyProps = (to, from, except, desc) => {
   }
   return to;
 };
+var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
+  // If the importer is in node compatibility mode or this is not an ESM
+  // file that has been converted to a CommonJS file using a Babel-
+  // compatible transform (i.e. "__esModule" has not been set), then set
+  // "default" to the CommonJS "module.exports" for node compatibility.
+  isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
+  mod
+));
 var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
 var processItem_exports = {};
 __export(processItem_exports, {
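
The new __toESM helper is esbuild's standard CJS/ESM interop shim; it appears because processItem.js now imports node:assert (see the next hunk). For a plain CommonJS module like node:assert, which has no __esModule flag, the helper wraps module.exports under a "default" key, which is why the compiled code calls import_node_assert.default. A rough illustration (simplified; the real helper also copies named properties onto the wrapper):

const mod = require('node:assert');           // CJS export: the assert function itself
const wrapped = { default: mod };             // roughly what __toESM(require("node:assert")) yields
wrapped.default(1 + 1 === 2, 'never thrown'); // same shape as the import_node_assert.default(...) call below
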
@@ -22,15 +32,36 @@ __export(processItem_exports, {
 });
 module.exports = __toCommonJS(processItem_exports);
 var import_n8n_workflow = require("n8n-workflow");
+var import_node_assert = __toESM(require("node:assert"));
 var import_helpers = require("../../../../utils/helpers");
 var import_N8nOutputParser = require("../../../../utils/output_parsers/N8nOutputParser");
 var import_chainExecutor = require("./chainExecutor");
+async function getChatModel(ctx, index = 0) {
+  const connectedModels = await ctx.getInputConnectionData(import_n8n_workflow.NodeConnectionTypes.AiLanguageModel, 0);
+  let model;
+  if (Array.isArray(connectedModels) && index !== void 0) {
+    if (connectedModels.length <= index) {
+      return void 0;
+    }
+    const reversedModels = [...connectedModels].reverse();
+    model = reversedModels[index];
+  } else {
+    model = connectedModels;
+  }
+  return model;
+}
 const processItem = async (ctx, itemIndex) => {
-  const llm = await ctx.getInputConnectionData(
-    import_n8n_workflow.NodeConnectionTypes.AiLanguageModel,
-    0
-  );
-  const outputParser = await (0, import_N8nOutputParser.getOptionalOutputParser)(ctx);
+  const needsFallback = ctx.getNodeParameter("needsFallback", 0, false);
+  const llm = await getChatModel(ctx, 0);
+  (0, import_node_assert.default)(llm, "Please connect a model to the Chat Model input");
+  const fallbackLlm = needsFallback ? await getChatModel(ctx, 1) : null;
+  if (needsFallback && !fallbackLlm) {
+    throw new import_n8n_workflow.NodeOperationError(
+      ctx.getNode(),
+      "Please connect a model to the Fallback Model input or disable the fallback option"
+    );
+  }
+  const outputParser = await (0, import_N8nOutputParser.getOptionalOutputParser)(ctx, itemIndex);
   let prompt;
   if (ctx.getNode().typeVersion <= 1.3) {
     prompt = ctx.getNodeParameter("prompt", itemIndex);
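
The reverse-then-index step in getChatModel exists because, per the comment preserved in the source map below, the editor hands over models connected to the same ai_languageModel port in reverse canvas order; reversing restores index 0 as the main model and index 1 as the fallback. A minimal sketch of the selection rule under that assumption:

function pickModel<T>(connected: T | T[], index: number): T | undefined {
  if (Array.isArray(connected)) {
    if (connected.length <= index) return undefined; // e.g. fallback requested but never wired up
    return [...connected].reverse()[index];          // spread first so the input array isn't mutated
  }
  return connected; // single connection: the editor returns the model directly, not an array
}

pickModel(['fallbackModel', 'mainModel'], 0); // 'mainModel'
pickModel(['fallbackModel', 'mainModel'], 1); // 'fallbackModel'
pickModel('onlyModel', 0);                    // 'onlyModel'
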
@@ -56,7 +87,8 @@ const processItem = async (ctx, itemIndex) => {
     query: prompt,
     llm,
     outputParser,
-    messages
+    messages,
+    fallbackLlm
   });
 };
 // Annotate the CommonJS export names for ESM import in node:
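
This hunk only threads fallbackLlm into executeChain; the matching consumer changes live in chainExecutor.js (+13 -4), which this excerpt does not show. One plausible wiring, offered purely as an assumption, is LangChain's Runnable.withFallbacks, which retries the same input against the fallback model when the primary throws:

import type { BaseLanguageModel } from '@langchain/core/language_models/base';

// Hypothetical sketch - not the actual chainExecutor implementation
function resolveModel(llm: BaseLanguageModel, fallbackLlm?: BaseLanguageModel | null) {
  return fallbackLlm ? llm.withFallbacks({ fallbacks: [fallbackLlm] }) : llm;
}
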

package/dist/nodes/chains/ChainLLM/methods/processItem.js.map
@@ -1 +1 @@
- {"version":3,"sources":["../../../../../nodes/chains/ChainLLM/methods/processItem.ts"],"sourcesContent":["import type { BaseLanguageModel } from '@langchain/core/language_models/base';\nimport { type IExecuteFunctions, NodeConnectionTypes, NodeOperationError } from 'n8n-workflow';\n\nimport { getPromptInputByType } from '@utils/helpers';\nimport { getOptionalOutputParser } from '@utils/output_parsers/N8nOutputParser';\n\nimport { executeChain } from './chainExecutor';\nimport { type MessageTemplate } from './types';\n\nexport const processItem = async (ctx: IExecuteFunctions, itemIndex: number) => {\n\tconst llm = (await ctx.getInputConnectionData(\n\t\tNodeConnectionTypes.AiLanguageModel,\n\t\t0,\n\t)) as BaseLanguageModel;\n\n\t// Get output parser if configured\n\tconst outputParser = await getOptionalOutputParser(ctx);\n\n\t// Get user prompt based on node version\n\tlet prompt: string;\n\n\tif (ctx.getNode().typeVersion <= 1.3) {\n\t\tprompt = ctx.getNodeParameter('prompt', itemIndex) as string;\n\t} else {\n\t\tprompt = getPromptInputByType({\n\t\t\tctx,\n\t\t\ti: itemIndex,\n\t\t\tinputKey: 'text',\n\t\t\tpromptTypeKey: 'promptType',\n\t\t});\n\t}\n\n\t// Validate prompt\n\tif (prompt === undefined) {\n\t\tthrow new NodeOperationError(ctx.getNode(), \"The 'prompt' parameter is empty.\");\n\t}\n\n\t// Get chat messages if configured\n\tconst messages = ctx.getNodeParameter(\n\t\t'messages.messageValues',\n\t\titemIndex,\n\t\t[],\n\t) as MessageTemplate[];\n\n\t// Execute the chain\n\treturn await executeChain({\n\t\tcontext: ctx,\n\t\titemIndex,\n\t\tquery: prompt,\n\t\tllm,\n\t\toutputParser,\n\t\tmessages,\n\t});\n};\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AACA,0BAAgF;AAEhF,qBAAqC;AACrC,6BAAwC;AAExC,2BAA6B;AAGtB,MAAM,cAAc,OAAO,KAAwB,cAAsB;AAC/E,QAAM,MAAO,MAAM,IAAI;AAAA,IACtB,wCAAoB;AAAA,IACpB;AAAA,EACD;AAGA,QAAM,eAAe,UAAM,gDAAwB,GAAG;AAGtD,MAAI;AAEJ,MAAI,IAAI,QAAQ,EAAE,eAAe,KAAK;AACrC,aAAS,IAAI,iBAAiB,UAAU,SAAS;AAAA,EAClD,OAAO;AACN,iBAAS,qCAAqB;AAAA,MAC7B;AAAA,MACA,GAAG;AAAA,MACH,UAAU;AAAA,MACV,eAAe;AAAA,IAChB,CAAC;AAAA,EACF;AAGA,MAAI,WAAW,QAAW;AACzB,UAAM,IAAI,uCAAmB,IAAI,QAAQ,GAAG,kCAAkC;AAAA,EAC/E;AAGA,QAAM,WAAW,IAAI;AAAA,IACpB;AAAA,IACA;AAAA,IACA,CAAC;AAAA,EACF;AAGA,SAAO,UAAM,mCAAa;AAAA,IACzB,SAAS;AAAA,IACT;AAAA,IACA,OAAO;AAAA,IACP;AAAA,IACA;AAAA,IACA;AAAA,EACD,CAAC;AACF;","names":[]}
+ {"version":3,"sources":["../../../../../nodes/chains/ChainLLM/methods/processItem.ts"],"sourcesContent":["import type { BaseLanguageModel } from '@langchain/core/language_models/base';\nimport { type IExecuteFunctions, NodeConnectionTypes, NodeOperationError } from 'n8n-workflow';\nimport assert from 'node:assert';\n\nimport { getPromptInputByType } from '@utils/helpers';\nimport { getOptionalOutputParser } from '@utils/output_parsers/N8nOutputParser';\n\nimport { executeChain } from './chainExecutor';\nimport { type MessageTemplate } from './types';\n\nasync function getChatModel(\n\tctx: IExecuteFunctions,\n\tindex: number = 0,\n): Promise<BaseLanguageModel | undefined> {\n\tconst connectedModels = await ctx.getInputConnectionData(NodeConnectionTypes.AiLanguageModel, 0);\n\n\tlet model;\n\n\tif (Array.isArray(connectedModels) && index !== undefined) {\n\t\tif (connectedModels.length <= index) {\n\t\t\treturn undefined;\n\t\t}\n\t\t// We get the models in reversed order from the workflow so we need to reverse them again to match the right index\n\t\tconst reversedModels = [...connectedModels].reverse();\n\t\tmodel = reversedModels[index] as BaseLanguageModel;\n\t} else {\n\t\tmodel = connectedModels as BaseLanguageModel;\n\t}\n\n\treturn model;\n}\n\nexport const processItem = async (ctx: IExecuteFunctions, itemIndex: number) => {\n\tconst needsFallback = ctx.getNodeParameter('needsFallback', 0, false) as boolean;\n\tconst llm = await getChatModel(ctx, 0);\n\tassert(llm, 'Please connect a model to the Chat Model input');\n\n\tconst fallbackLlm = needsFallback ? await getChatModel(ctx, 1) : null;\n\tif (needsFallback && !fallbackLlm) {\n\t\tthrow new NodeOperationError(\n\t\t\tctx.getNode(),\n\t\t\t'Please connect a model to the Fallback Model input or disable the fallback option',\n\t\t);\n\t}\n\n\t// Get output parser if configured\n\tconst outputParser = await getOptionalOutputParser(ctx, itemIndex);\n\n\t// Get user prompt based on node version\n\tlet prompt: string;\n\n\tif (ctx.getNode().typeVersion <= 1.3) {\n\t\tprompt = ctx.getNodeParameter('prompt', itemIndex) as string;\n\t} else {\n\t\tprompt = getPromptInputByType({\n\t\t\tctx,\n\t\t\ti: itemIndex,\n\t\t\tinputKey: 'text',\n\t\t\tpromptTypeKey: 'promptType',\n\t\t});\n\t}\n\n\t// Validate prompt\n\tif (prompt === undefined) {\n\t\tthrow new NodeOperationError(ctx.getNode(), \"The 'prompt' parameter is empty.\");\n\t}\n\n\t// Get chat messages if configured\n\tconst messages = ctx.getNodeParameter(\n\t\t'messages.messageValues',\n\t\titemIndex,\n\t\t[],\n\t) as MessageTemplate[];\n\n\t// Execute the chain\n\treturn await executeChain({\n\t\tcontext: ctx,\n\t\titemIndex,\n\t\tquery: 
prompt,\n\t\tllm,\n\t\toutputParser,\n\t\tmessages,\n\t\tfallbackLlm,\n\t});\n};\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AACA,0BAAgF;AAChF,yBAAmB;AAEnB,qBAAqC;AACrC,6BAAwC;AAExC,2BAA6B;AAG7B,eAAe,aACd,KACA,QAAgB,GACyB;AACzC,QAAM,kBAAkB,MAAM,IAAI,uBAAuB,wCAAoB,iBAAiB,CAAC;AAE/F,MAAI;AAEJ,MAAI,MAAM,QAAQ,eAAe,KAAK,UAAU,QAAW;AAC1D,QAAI,gBAAgB,UAAU,OAAO;AACpC,aAAO;AAAA,IACR;AAEA,UAAM,iBAAiB,CAAC,GAAG,eAAe,EAAE,QAAQ;AACpD,YAAQ,eAAe,KAAK;AAAA,EAC7B,OAAO;AACN,YAAQ;AAAA,EACT;AAEA,SAAO;AACR;AAEO,MAAM,cAAc,OAAO,KAAwB,cAAsB;AAC/E,QAAM,gBAAgB,IAAI,iBAAiB,iBAAiB,GAAG,KAAK;AACpE,QAAM,MAAM,MAAM,aAAa,KAAK,CAAC;AACrC,yBAAAA,SAAO,KAAK,gDAAgD;AAE5D,QAAM,cAAc,gBAAgB,MAAM,aAAa,KAAK,CAAC,IAAI;AACjE,MAAI,iBAAiB,CAAC,aAAa;AAClC,UAAM,IAAI;AAAA,MACT,IAAI,QAAQ;AAAA,MACZ;AAAA,IACD;AAAA,EACD;AAGA,QAAM,eAAe,UAAM,gDAAwB,KAAK,SAAS;AAGjE,MAAI;AAEJ,MAAI,IAAI,QAAQ,EAAE,eAAe,KAAK;AACrC,aAAS,IAAI,iBAAiB,UAAU,SAAS;AAAA,EAClD,OAAO;AACN,iBAAS,qCAAqB;AAAA,MAC7B;AAAA,MACA,GAAG;AAAA,MACH,UAAU;AAAA,MACV,eAAe;AAAA,IAChB,CAAC;AAAA,EACF;AAGA,MAAI,WAAW,QAAW;AACzB,UAAM,IAAI,uCAAmB,IAAI,QAAQ,GAAG,kCAAkC;AAAA,EAC/E;AAGA,QAAM,WAAW,IAAI;AAAA,IACpB;AAAA,IACA;AAAA,IACA,CAAC;AAAA,EACF;AAGA,SAAO,UAAM,mCAAa;AAAA,IACzB,SAAS;AAAA,IACT;AAAA,IACA,OAAO;AAAA,IACP;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACD,CAAC;AACF;","names":["assert"]}

package/dist/nodes/chains/ChainLLM/methods/types.js.map
@@ -1 +1 @@
- {"version":3,"sources":["../../../../../nodes/chains/ChainLLM/methods/types.ts"],"sourcesContent":["import type { BaseLanguageModel } from '@langchain/core/language_models/base';\nimport type { BaseChatModel } from '@langchain/core/language_models/chat_models';\nimport type { IExecuteFunctions } from 'n8n-workflow';\n\nimport type { N8nOutputParser } from '@utils/output_parsers/N8nOutputParser';\n\n/**\n * Interface for describing a message template in the UI\n */\nexport interface MessageTemplate {\n\ttype: string;\n\tmessage: string;\n\tmessageType: 'text' | 'imageBinary' | 'imageUrl';\n\tbinaryImageDataKey?: string;\n\timageUrl?: string;\n\timageDetail?: 'auto' | 'low' | 'high';\n}\n\n/**\n * Parameters for prompt creation\n */\nexport interface PromptParams {\n\tcontext: IExecuteFunctions;\n\titemIndex: number;\n\tllm: BaseLanguageModel | BaseChatModel;\n\tmessages?: MessageTemplate[];\n\tformatInstructions?: string;\n\tquery?: string;\n}\n\n/**\n * Parameters for chain execution\n */\nexport interface ChainExecutionParams {\n\tcontext: IExecuteFunctions;\n\titemIndex: number;\n\tquery: string;\n\tllm: BaseLanguageModel;\n\toutputParser?: N8nOutputParser;\n\tmessages?: MessageTemplate[];\n}\n"],"mappings":";;;;;;;;;;;;;;AAAA;AAAA;","names":[]}
+ {"version":3,"sources":["../../../../../nodes/chains/ChainLLM/methods/types.ts"],"sourcesContent":["import type { BaseLanguageModel } from '@langchain/core/language_models/base';\nimport type { BaseChatModel } from '@langchain/core/language_models/chat_models';\nimport type { IExecuteFunctions } from 'n8n-workflow';\n\nimport type { N8nOutputParser } from '@utils/output_parsers/N8nOutputParser';\n\n/**\n * Interface for describing a message template in the UI\n */\nexport interface MessageTemplate {\n\ttype: string;\n\tmessage: string;\n\tmessageType: 'text' | 'imageBinary' | 'imageUrl';\n\tbinaryImageDataKey?: string;\n\timageUrl?: string;\n\timageDetail?: 'auto' | 'low' | 'high';\n}\n\n/**\n * Parameters for prompt creation\n */\nexport interface PromptParams {\n\tcontext: IExecuteFunctions;\n\titemIndex: number;\n\tllm: BaseLanguageModel | BaseChatModel;\n\tmessages?: MessageTemplate[];\n\tformatInstructions?: string;\n\tquery?: string;\n}\n\n/**\n * Parameters for chain execution\n */\nexport interface ChainExecutionParams {\n\tcontext: IExecuteFunctions;\n\titemIndex: number;\n\tquery: string;\n\tllm: BaseLanguageModel;\n\toutputParser?: N8nOutputParser;\n\tmessages?: MessageTemplate[];\n\tfallbackLlm?: BaseLanguageModel | null;\n}\n"],"mappings":";;;;;;;;;;;;;;AAAA;AAAA;","names":[]}

package/dist/nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.js
@@ -128,7 +128,7 @@ class LmChatGoogleGemini {
             property: "model"
           }
         },
-        default: "models/gemini-1.0-pro"
+        default: "models/gemini-2.5-flash"
       },
       import_additional_options.additionalOptions
     ]
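
The only change to the Gemini node is the seed value for new nodes: the default model moves from the older models/gemini-1.0-pro to models/gemini-2.5-flash. Existing workflows keep whatever modelName they have saved. For reference, the supplyData body visible in the source map constructs the model like this (the env-var lookup here is a stand-in; the node actually reads the googlePalmApi credential):

import { ChatGoogleGenerativeAI } from '@langchain/google-genai';

const model = new ChatGoogleGenerativeAI({
  apiKey: process.env.GOOGLE_API_KEY!, // stand-in for credentials.apiKey
  model: 'models/gemini-2.5-flash',    // the new default
  temperature: 0.7,
  topK: 40,
  topP: 0.9,
  maxOutputTokens: 1024,
});
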

package/dist/nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.js.map
@@ -1 +1 @@
- {"version":3,"sources":["../../../../nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.ts"],"sourcesContent":["/* eslint-disable n8n-nodes-base/node-dirname-against-convention */\nimport type { SafetySetting } from '@google/generative-ai';\nimport { ChatGoogleGenerativeAI } from '@langchain/google-genai';\nimport { NodeConnectionTypes } from 'n8n-workflow';\nimport type {\n\tNodeError,\n\tINodeType,\n\tINodeTypeDescription,\n\tISupplyDataFunctions,\n\tSupplyData,\n} from 'n8n-workflow';\n\nimport { getConnectionHintNoticeField } from '@utils/sharedFields';\n\nimport { additionalOptions } from '../gemini-common/additional-options';\nimport { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';\nimport { N8nLlmTracing } from '../N8nLlmTracing';\n\nfunction errorDescriptionMapper(error: NodeError) {\n\tif (error.description?.includes('properties: should be non-empty for OBJECT type')) {\n\t\treturn 'Google Gemini requires at least one <a href=\"https://docs.n8n.io/advanced-ai/examples/using-the-fromai-function/\" target=\"_blank\">dynamic parameter</a> when using tools';\n\t}\n\n\treturn error.description ?? 'Unknown error';\n}\nexport class LmChatGoogleGemini implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'Google Gemini Chat Model',\n\t\t// eslint-disable-next-line n8n-nodes-base/node-class-description-name-miscased\n\t\tname: 'lmChatGoogleGemini',\n\t\ticon: 'file:google.svg',\n\t\tgroup: ['transform'],\n\t\tversion: 1,\n\t\tdescription: 'Chat Model Google Gemini',\n\t\tdefaults: {\n\t\t\tname: 'Google Gemini Chat Model',\n\t\t},\n\t\tcodex: {\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Language Models', 'Root Nodes'],\n\t\t\t\t'Language Models': ['Chat Models (Recommended)'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatgooglegemini/',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\t\t// eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node\n\t\tinputs: [],\n\t\t// eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong\n\t\toutputs: [NodeConnectionTypes.AiLanguageModel],\n\t\toutputNames: ['Model'],\n\t\tcredentials: [\n\t\t\t{\n\t\t\t\tname: 'googlePalmApi',\n\t\t\t\trequired: true,\n\t\t\t},\n\t\t],\n\t\trequestDefaults: {\n\t\t\tignoreHttpStatusErrors: true,\n\t\t\tbaseURL: '={{ $credentials.host }}',\n\t\t},\n\t\tproperties: [\n\t\t\tgetConnectionHintNoticeField([NodeConnectionTypes.AiChain, NodeConnectionTypes.AiAgent]),\n\t\t\t{\n\t\t\t\tdisplayName: 'Model',\n\t\t\t\tname: 'modelName',\n\t\t\t\ttype: 'options',\n\t\t\t\tdescription:\n\t\t\t\t\t'The model which will generate the completion. 
<a href=\"https://developers.generativeai.google/api/rest/generativelanguage/models/list\">Learn more</a>.',\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tloadOptions: {\n\t\t\t\t\t\trouting: {\n\t\t\t\t\t\t\trequest: {\n\t\t\t\t\t\t\t\tmethod: 'GET',\n\t\t\t\t\t\t\t\turl: '/v1beta/models',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\toutput: {\n\t\t\t\t\t\t\t\tpostReceive: [\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'rootProperty',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tproperty: 'models',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'filter',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tpass: \"={{ !$responseItem.name.includes('embedding') }}\",\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'setKeyValue',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tname: '={{$responseItem.name}}',\n\t\t\t\t\t\t\t\t\t\t\tvalue: '={{$responseItem.name}}',\n\t\t\t\t\t\t\t\t\t\t\tdescription: '={{$responseItem.description}}',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'sort',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tkey: 'name',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trouting: {\n\t\t\t\t\tsend: {\n\t\t\t\t\t\ttype: 'body',\n\t\t\t\t\t\tproperty: 'model',\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tdefault: 'models/gemini-1.0-pro',\n\t\t\t},\n\t\t\tadditionalOptions,\n\t\t],\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tconst credentials = await this.getCredentials('googlePalmApi');\n\n\t\tconst modelName = this.getNodeParameter('modelName', itemIndex) as string;\n\t\tconst options = this.getNodeParameter('options', itemIndex, {\n\t\t\tmaxOutputTokens: 1024,\n\t\t\ttemperature: 0.7,\n\t\t\ttopK: 40,\n\t\t\ttopP: 0.9,\n\t\t}) as {\n\t\t\tmaxOutputTokens: number;\n\t\t\ttemperature: number;\n\t\t\ttopK: number;\n\t\t\ttopP: number;\n\t\t};\n\n\t\tconst safetySettings = this.getNodeParameter(\n\t\t\t'options.safetySettings.values',\n\t\t\titemIndex,\n\t\t\tnull,\n\t\t) as SafetySetting[];\n\n\t\tconst model = new ChatGoogleGenerativeAI({\n\t\t\tapiKey: credentials.apiKey as string,\n\t\t\tbaseUrl: credentials.host as string,\n\t\t\tmodel: modelName,\n\t\t\ttopK: options.topK,\n\t\t\ttopP: options.topP,\n\t\t\ttemperature: options.temperature,\n\t\t\tmaxOutputTokens: options.maxOutputTokens,\n\t\t\tsafetySettings,\n\t\t\tcallbacks: [new N8nLlmTracing(this, { errorDescriptionMapper })],\n\t\t\tonFailedAttempt: makeN8nLlmFailedAttemptHandler(this),\n\t\t});\n\n\t\treturn {\n\t\t\tresponse: 
model,\n\t\t};\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAEA,0BAAuC;AACvC,0BAAoC;AASpC,0BAA6C;AAE7C,gCAAkC;AAClC,wCAA+C;AAC/C,2BAA8B;AAE9B,SAAS,uBAAuB,OAAkB;AACjD,MAAI,MAAM,aAAa,SAAS,iDAAiD,GAAG;AACnF,WAAO;AAAA,EACR;AAEA,SAAO,MAAM,eAAe;AAC7B;AACO,MAAM,mBAAwC;AAAA,EAA9C;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA;AAAA,MAEb,MAAM;AAAA,MACN,MAAM;AAAA,MACN,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS;AAAA,MACT,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,mBAAmB,YAAY;AAAA,UACpC,mBAAmB,CAAC,2BAA2B;AAAA,QAChD;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA;AAAA,MAEA,QAAQ,CAAC;AAAA;AAAA,MAET,SAAS,CAAC,wCAAoB,eAAe;AAAA,MAC7C,aAAa,CAAC,OAAO;AAAA,MACrB,aAAa;AAAA,QACZ;AAAA,UACC,MAAM;AAAA,UACN,UAAU;AAAA,QACX;AAAA,MACD;AAAA,MACA,iBAAiB;AAAA,QAChB,wBAAwB;AAAA,QACxB,SAAS;AAAA,MACV;AAAA,MACA,YAAY;AAAA,YACX,kDAA6B,CAAC,wCAAoB,SAAS,wCAAoB,OAAO,CAAC;AAAA,QACvF;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,aACC;AAAA,UACD,aAAa;AAAA,YACZ,aAAa;AAAA,cACZ,SAAS;AAAA,gBACR,SAAS;AAAA,kBACR,QAAQ;AAAA,kBACR,KAAK;AAAA,gBACN;AAAA,gBACA,QAAQ;AAAA,kBACP,aAAa;AAAA,oBACZ;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,UAAU;AAAA,sBACX;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,MAAM;AAAA,sBACP;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,MAAM;AAAA,wBACN,OAAO;AAAA,wBACP,aAAa;AAAA,sBACd;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,KAAK;AAAA,sBACN;AAAA,oBACD;AAAA,kBACD;AAAA,gBACD;AAAA,cACD;AAAA,YACD;AAAA,UACD;AAAA,UACA,SAAS;AAAA,YACR,MAAM;AAAA,cACL,MAAM;AAAA,cACN,UAAU;AAAA,YACX;AAAA,UACD;AAAA,UACA,SAAS;AAAA,QACV;AAAA,QACA;AAAA,MACD;AAAA,IACD;AAAA;AAAA,EAEA,MAAM,WAAuC,WAAwC;AACpF,UAAM,cAAc,MAAM,KAAK,eAAe,eAAe;AAE7D,UAAM,YAAY,KAAK,iBAAiB,aAAa,SAAS;AAC9D,UAAM,UAAU,KAAK,iBAAiB,WAAW,WAAW;AAAA,MAC3D,iBAAiB;AAAA,MACjB,aAAa;AAAA,MACb,MAAM;AAAA,MACN,MAAM;AAAA,IACP,CAAC;AAOD,UAAM,iBAAiB,KAAK;AAAA,MAC3B;AAAA,MACA;AAAA,MACA;AAAA,IACD;AAEA,UAAM,QAAQ,IAAI,2CAAuB;AAAA,MACxC,QAAQ,YAAY;AAAA,MACpB,SAAS,YAAY;AAAA,MACrB,OAAO;AAAA,MACP,MAAM,QAAQ;AAAA,MACd,MAAM,QAAQ;AAAA,MACd,aAAa,QAAQ;AAAA,MACrB,iBAAiB,QAAQ;AAAA,MACzB;AAAA,MACA,WAAW,CAAC,IAAI,mCAAc,MAAM,EAAE,uBAAuB,CAAC,CAAC;AAAA,MAC/D,qBAAiB,kEAA+B,IAAI;AAAA,IACrD,CAAC;AAED,WAAO;AAAA,MACN,UAAU;AAAA,IACX;AAAA,EACD;AACD;","names":[]}
+ {"version":3,"sources":["../../../../nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.ts"],"sourcesContent":["/* eslint-disable n8n-nodes-base/node-dirname-against-convention */\nimport type { SafetySetting } from '@google/generative-ai';\nimport { ChatGoogleGenerativeAI } from '@langchain/google-genai';\nimport { NodeConnectionTypes } from 'n8n-workflow';\nimport type {\n\tNodeError,\n\tINodeType,\n\tINodeTypeDescription,\n\tISupplyDataFunctions,\n\tSupplyData,\n} from 'n8n-workflow';\n\nimport { getConnectionHintNoticeField } from '@utils/sharedFields';\n\nimport { additionalOptions } from '../gemini-common/additional-options';\nimport { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';\nimport { N8nLlmTracing } from '../N8nLlmTracing';\n\nfunction errorDescriptionMapper(error: NodeError) {\n\tif (error.description?.includes('properties: should be non-empty for OBJECT type')) {\n\t\treturn 'Google Gemini requires at least one <a href=\"https://docs.n8n.io/advanced-ai/examples/using-the-fromai-function/\" target=\"_blank\">dynamic parameter</a> when using tools';\n\t}\n\n\treturn error.description ?? 'Unknown error';\n}\nexport class LmChatGoogleGemini implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'Google Gemini Chat Model',\n\t\t// eslint-disable-next-line n8n-nodes-base/node-class-description-name-miscased\n\t\tname: 'lmChatGoogleGemini',\n\t\ticon: 'file:google.svg',\n\t\tgroup: ['transform'],\n\t\tversion: 1,\n\t\tdescription: 'Chat Model Google Gemini',\n\t\tdefaults: {\n\t\t\tname: 'Google Gemini Chat Model',\n\t\t},\n\t\tcodex: {\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Language Models', 'Root Nodes'],\n\t\t\t\t'Language Models': ['Chat Models (Recommended)'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatgooglegemini/',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\t\t// eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node\n\t\tinputs: [],\n\t\t// eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong\n\t\toutputs: [NodeConnectionTypes.AiLanguageModel],\n\t\toutputNames: ['Model'],\n\t\tcredentials: [\n\t\t\t{\n\t\t\t\tname: 'googlePalmApi',\n\t\t\t\trequired: true,\n\t\t\t},\n\t\t],\n\t\trequestDefaults: {\n\t\t\tignoreHttpStatusErrors: true,\n\t\t\tbaseURL: '={{ $credentials.host }}',\n\t\t},\n\t\tproperties: [\n\t\t\tgetConnectionHintNoticeField([NodeConnectionTypes.AiChain, NodeConnectionTypes.AiAgent]),\n\t\t\t{\n\t\t\t\tdisplayName: 'Model',\n\t\t\t\tname: 'modelName',\n\t\t\t\ttype: 'options',\n\t\t\t\tdescription:\n\t\t\t\t\t'The model which will generate the completion. 
<a href=\"https://developers.generativeai.google/api/rest/generativelanguage/models/list\">Learn more</a>.',\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tloadOptions: {\n\t\t\t\t\t\trouting: {\n\t\t\t\t\t\t\trequest: {\n\t\t\t\t\t\t\t\tmethod: 'GET',\n\t\t\t\t\t\t\t\turl: '/v1beta/models',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\toutput: {\n\t\t\t\t\t\t\t\tpostReceive: [\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'rootProperty',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tproperty: 'models',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'filter',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tpass: \"={{ !$responseItem.name.includes('embedding') }}\",\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'setKeyValue',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tname: '={{$responseItem.name}}',\n\t\t\t\t\t\t\t\t\t\t\tvalue: '={{$responseItem.name}}',\n\t\t\t\t\t\t\t\t\t\t\tdescription: '={{$responseItem.description}}',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'sort',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tkey: 'name',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trouting: {\n\t\t\t\t\tsend: {\n\t\t\t\t\t\ttype: 'body',\n\t\t\t\t\t\tproperty: 'model',\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tdefault: 'models/gemini-2.5-flash',\n\t\t\t},\n\t\t\tadditionalOptions,\n\t\t],\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tconst credentials = await this.getCredentials('googlePalmApi');\n\n\t\tconst modelName = this.getNodeParameter('modelName', itemIndex) as string;\n\t\tconst options = this.getNodeParameter('options', itemIndex, {\n\t\t\tmaxOutputTokens: 1024,\n\t\t\ttemperature: 0.7,\n\t\t\ttopK: 40,\n\t\t\ttopP: 0.9,\n\t\t}) as {\n\t\t\tmaxOutputTokens: number;\n\t\t\ttemperature: number;\n\t\t\ttopK: number;\n\t\t\ttopP: number;\n\t\t};\n\n\t\tconst safetySettings = this.getNodeParameter(\n\t\t\t'options.safetySettings.values',\n\t\t\titemIndex,\n\t\t\tnull,\n\t\t) as SafetySetting[];\n\n\t\tconst model = new ChatGoogleGenerativeAI({\n\t\t\tapiKey: credentials.apiKey as string,\n\t\t\tbaseUrl: credentials.host as string,\n\t\t\tmodel: modelName,\n\t\t\ttopK: options.topK,\n\t\t\ttopP: options.topP,\n\t\t\ttemperature: options.temperature,\n\t\t\tmaxOutputTokens: options.maxOutputTokens,\n\t\t\tsafetySettings,\n\t\t\tcallbacks: [new N8nLlmTracing(this, { errorDescriptionMapper })],\n\t\t\tonFailedAttempt: makeN8nLlmFailedAttemptHandler(this),\n\t\t});\n\n\t\treturn {\n\t\t\tresponse: 
model,\n\t\t};\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAEA,0BAAuC;AACvC,0BAAoC;AASpC,0BAA6C;AAE7C,gCAAkC;AAClC,wCAA+C;AAC/C,2BAA8B;AAE9B,SAAS,uBAAuB,OAAkB;AACjD,MAAI,MAAM,aAAa,SAAS,iDAAiD,GAAG;AACnF,WAAO;AAAA,EACR;AAEA,SAAO,MAAM,eAAe;AAC7B;AACO,MAAM,mBAAwC;AAAA,EAA9C;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA;AAAA,MAEb,MAAM;AAAA,MACN,MAAM;AAAA,MACN,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS;AAAA,MACT,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,mBAAmB,YAAY;AAAA,UACpC,mBAAmB,CAAC,2BAA2B;AAAA,QAChD;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA;AAAA,MAEA,QAAQ,CAAC;AAAA;AAAA,MAET,SAAS,CAAC,wCAAoB,eAAe;AAAA,MAC7C,aAAa,CAAC,OAAO;AAAA,MACrB,aAAa;AAAA,QACZ;AAAA,UACC,MAAM;AAAA,UACN,UAAU;AAAA,QACX;AAAA,MACD;AAAA,MACA,iBAAiB;AAAA,QAChB,wBAAwB;AAAA,QACxB,SAAS;AAAA,MACV;AAAA,MACA,YAAY;AAAA,YACX,kDAA6B,CAAC,wCAAoB,SAAS,wCAAoB,OAAO,CAAC;AAAA,QACvF;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,aACC;AAAA,UACD,aAAa;AAAA,YACZ,aAAa;AAAA,cACZ,SAAS;AAAA,gBACR,SAAS;AAAA,kBACR,QAAQ;AAAA,kBACR,KAAK;AAAA,gBACN;AAAA,gBACA,QAAQ;AAAA,kBACP,aAAa;AAAA,oBACZ;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,UAAU;AAAA,sBACX;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,MAAM;AAAA,sBACP;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,MAAM;AAAA,wBACN,OAAO;AAAA,wBACP,aAAa;AAAA,sBACd;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,KAAK;AAAA,sBACN;AAAA,oBACD;AAAA,kBACD;AAAA,gBACD;AAAA,cACD;AAAA,YACD;AAAA,UACD;AAAA,UACA,SAAS;AAAA,YACR,MAAM;AAAA,cACL,MAAM;AAAA,cACN,UAAU;AAAA,YACX;AAAA,UACD;AAAA,UACA,SAAS;AAAA,QACV;AAAA,QACA;AAAA,MACD;AAAA,IACD;AAAA;AAAA,EAEA,MAAM,WAAuC,WAAwC;AACpF,UAAM,cAAc,MAAM,KAAK,eAAe,eAAe;AAE7D,UAAM,YAAY,KAAK,iBAAiB,aAAa,SAAS;AAC9D,UAAM,UAAU,KAAK,iBAAiB,WAAW,WAAW;AAAA,MAC3D,iBAAiB;AAAA,MACjB,aAAa;AAAA,MACb,MAAM;AAAA,MACN,MAAM;AAAA,IACP,CAAC;AAOD,UAAM,iBAAiB,KAAK;AAAA,MAC3B;AAAA,MACA;AAAA,MACA;AAAA,IACD;AAEA,UAAM,QAAQ,IAAI,2CAAuB;AAAA,MACxC,QAAQ,YAAY;AAAA,MACpB,SAAS,YAAY;AAAA,MACrB,OAAO;AAAA,MACP,MAAM,QAAQ;AAAA,MACd,MAAM,QAAQ;AAAA,MACd,aAAa,QAAQ;AAAA,MACrB,iBAAiB,QAAQ;AAAA,MACzB;AAAA,MACA,WAAW,CAAC,IAAI,mCAAc,MAAM,EAAE,uBAAuB,CAAC,CAAC;AAAA,MAC/D,qBAAiB,kEAA+B,IAAI;AAAA,IACrD,CAAC;AAED,WAAO;AAAA,MACN,UAAU;AAAA,IACX;AAAA,EACD;AACD;","names":[]}
@@ -43,7 +43,7 @@ var import_base2 = require("@langchain/core/language_models/base");
  var import_pick = __toESM(require("lodash/pick"));
  var import_n8n_workflow = require("n8n-workflow");
  var import_helpers = require("../../utils/helpers");
- var import_tiktoken = require("../../utils/tokenizer/tiktoken");
+ var import_token_estimator = require("../../utils/tokenizer/token-estimator");
  var _parentRunIndex;
  const TIKTOKEN_ESTIMATE_MODEL = "gpt-4o";
  class N8nLlmTracing extends import_base.BaseCallbackHandler {
@@ -86,11 +86,7 @@ class N8nLlmTracing extends import_base.BaseCallbackHandler {
  }
  async estimateTokensFromStringList(list) {
  const embeddingModel = (0, import_base2.getModelNameForTiktoken)(TIKTOKEN_ESTIMATE_MODEL);
- const encoder = await (0, import_tiktoken.encodingForModel)(embeddingModel);
- const encodedListLength = await Promise.all(
- list.map(async (text) => encoder.encode(text).length)
- );
- return encodedListLength.reduce((acc, curr) => acc + curr, 0);
+ return await (0, import_token_estimator.estimateTokensFromStringList)(list, embeddingModel);
  }
  async handleLLMEnd(output, runId) {
  const runDetails = this.runsMap[runId] ?? { index: Object.keys(this.runsMap).length };
@@ -1 +1 @@
- {"version":3,"sources":["../../../nodes/llms/N8nLlmTracing.ts"],"sourcesContent":["import { BaseCallbackHandler } from '@langchain/core/callbacks/base';\nimport type { SerializedFields } from '@langchain/core/dist/load/map_keys';\nimport { getModelNameForTiktoken } from '@langchain/core/language_models/base';\nimport type {\n\tSerialized,\n\tSerializedNotImplemented,\n\tSerializedSecret,\n} from '@langchain/core/load/serializable';\nimport type { BaseMessage } from '@langchain/core/messages';\nimport type { LLMResult } from '@langchain/core/outputs';\nimport pick from 'lodash/pick';\nimport type { IDataObject, ISupplyDataFunctions, JsonObject } from 'n8n-workflow';\nimport { NodeConnectionTypes, NodeError, NodeOperationError } from 'n8n-workflow';\n\nimport { logAiEvent } from '@utils/helpers';\nimport { encodingForModel } from '@utils/tokenizer/tiktoken';\n\ntype TokensUsageParser = (llmOutput: LLMResult['llmOutput']) => {\n\tcompletionTokens: number;\n\tpromptTokens: number;\n\ttotalTokens: number;\n};\n\ntype RunDetail = {\n\tindex: number;\n\tmessages: BaseMessage[] | string[] | string;\n\toptions: SerializedSecret | SerializedNotImplemented | SerializedFields;\n};\n\nconst TIKTOKEN_ESTIMATE_MODEL = 'gpt-4o';\nexport class N8nLlmTracing extends BaseCallbackHandler {\n\tname = 'N8nLlmTracing';\n\n\t// This flag makes sure that LangChain will wait for the handlers to finish before continuing\n\t// This is crucial for the handleLLMError handler to work correctly (it should be called before the error is propagated to the root node)\n\tawaitHandlers = true;\n\n\tconnectionType = NodeConnectionTypes.AiLanguageModel;\n\n\tpromptTokensEstimate = 0;\n\n\tcompletionTokensEstimate = 0;\n\n\t#parentRunIndex?: number;\n\n\t/**\n\t * A map to associate LLM run IDs to run details.\n\t * Key: Unique identifier for each LLM run (run ID)\n\t * Value: RunDetails object\n\t *\n\t */\n\trunsMap: Record<string, RunDetail> = {};\n\n\toptions = {\n\t\t// Default(OpenAI format) parser\n\t\ttokensUsageParser: (llmOutput: LLMResult['llmOutput']) => {\n\t\t\tconst completionTokens = (llmOutput?.tokenUsage?.completionTokens as number) ?? 0;\n\t\t\tconst promptTokens = (llmOutput?.tokenUsage?.promptTokens as number) ?? 
0;\n\n\t\t\treturn {\n\t\t\t\tcompletionTokens,\n\t\t\t\tpromptTokens,\n\t\t\t\ttotalTokens: completionTokens + promptTokens,\n\t\t\t};\n\t\t},\n\t\terrorDescriptionMapper: (error: NodeError) => error.description,\n\t};\n\n\tconstructor(\n\t\tprivate executionFunctions: ISupplyDataFunctions,\n\t\toptions?: {\n\t\t\ttokensUsageParser?: TokensUsageParser;\n\t\t\terrorDescriptionMapper?: (error: NodeError) => string;\n\t\t},\n\t) {\n\t\tsuper();\n\t\tthis.options = { ...this.options, ...options };\n\t}\n\n\tasync estimateTokensFromGeneration(generations: LLMResult['generations']) {\n\t\tconst messages = generations.flatMap((gen) => gen.map((g) => g.text));\n\t\treturn await this.estimateTokensFromStringList(messages);\n\t}\n\n\tasync estimateTokensFromStringList(list: string[]) {\n\t\tconst embeddingModel = getModelNameForTiktoken(TIKTOKEN_ESTIMATE_MODEL);\n\t\tconst encoder = await encodingForModel(embeddingModel);\n\n\t\tconst encodedListLength = await Promise.all(\n\t\t\tlist.map(async (text) => encoder.encode(text).length),\n\t\t);\n\n\t\treturn encodedListLength.reduce((acc, curr) => acc + curr, 0);\n\t}\n\n\tasync handleLLMEnd(output: LLMResult, runId: string) {\n\t\t// The fallback should never happen since handleLLMStart should always set the run details\n\t\t// but just in case, we set the index to the length of the runsMap\n\t\tconst runDetails = this.runsMap[runId] ?? { index: Object.keys(this.runsMap).length };\n\n\t\toutput.generations = output.generations.map((gen) =>\n\t\t\tgen.map((g) => pick(g, ['text', 'generationInfo'])),\n\t\t);\n\n\t\tconst tokenUsageEstimate = {\n\t\t\tcompletionTokens: 0,\n\t\t\tpromptTokens: 0,\n\t\t\ttotalTokens: 0,\n\t\t};\n\t\tconst tokenUsage = this.options.tokensUsageParser(output.llmOutput);\n\n\t\tif (output.generations.length > 0) {\n\t\t\ttokenUsageEstimate.completionTokens = await this.estimateTokensFromGeneration(\n\t\t\t\toutput.generations,\n\t\t\t);\n\n\t\t\ttokenUsageEstimate.promptTokens = this.promptTokensEstimate;\n\t\t\ttokenUsageEstimate.totalTokens =\n\t\t\t\ttokenUsageEstimate.completionTokens + this.promptTokensEstimate;\n\t\t}\n\t\tconst response: {\n\t\t\tresponse: { generations: LLMResult['generations'] };\n\t\t\ttokenUsageEstimate?: typeof tokenUsageEstimate;\n\t\t\ttokenUsage?: typeof tokenUsage;\n\t\t} = {\n\t\t\tresponse: { generations: output.generations },\n\t\t};\n\n\t\t// If the LLM response contains actual tokens usage, otherwise fallback to the estimate\n\t\tif (tokenUsage.completionTokens > 0) {\n\t\t\tresponse.tokenUsage = tokenUsage;\n\t\t} else {\n\t\t\tresponse.tokenUsageEstimate = tokenUsageEstimate;\n\t\t}\n\n\t\tconst parsedMessages =\n\t\t\ttypeof runDetails.messages === 'string'\n\t\t\t\t? runDetails.messages\n\t\t\t\t: runDetails.messages.map((message) => {\n\t\t\t\t\t\tif (typeof message === 'string') return message;\n\t\t\t\t\t\tif (typeof message?.toJSON === 'function') return message.toJSON();\n\n\t\t\t\t\t\treturn message;\n\t\t\t\t\t});\n\n\t\tconst sourceNodeRunIndex =\n\t\t\tthis.#parentRunIndex !== undefined ? 
this.#parentRunIndex + runDetails.index : undefined;\n\n\t\tthis.executionFunctions.addOutputData(\n\t\t\tthis.connectionType,\n\t\t\trunDetails.index,\n\t\t\t[[{ json: { ...response } }]],\n\t\t\tundefined,\n\t\t\tsourceNodeRunIndex,\n\t\t);\n\n\t\tlogAiEvent(this.executionFunctions, 'ai-llm-generated-output', {\n\t\t\tmessages: parsedMessages,\n\t\t\toptions: runDetails.options,\n\t\t\tresponse,\n\t\t});\n\t}\n\n\tasync handleLLMStart(llm: Serialized, prompts: string[], runId: string) {\n\t\tconst estimatedTokens = await this.estimateTokensFromStringList(prompts);\n\t\tconst sourceNodeRunIndex =\n\t\t\tthis.#parentRunIndex !== undefined\n\t\t\t\t? this.#parentRunIndex + this.executionFunctions.getNextRunIndex()\n\t\t\t\t: undefined;\n\n\t\tconst options = llm.type === 'constructor' ? llm.kwargs : llm;\n\t\tconst { index } = this.executionFunctions.addInputData(\n\t\t\tthis.connectionType,\n\t\t\t[\n\t\t\t\t[\n\t\t\t\t\t{\n\t\t\t\t\t\tjson: {\n\t\t\t\t\t\t\tmessages: prompts,\n\t\t\t\t\t\t\testimatedTokens,\n\t\t\t\t\t\t\toptions,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t],\n\t\t\tsourceNodeRunIndex,\n\t\t);\n\n\t\t// Save the run details for later use when processing `handleLLMEnd` event\n\t\tthis.runsMap[runId] = {\n\t\t\tindex,\n\t\t\toptions,\n\t\t\tmessages: prompts,\n\t\t};\n\t\tthis.promptTokensEstimate = estimatedTokens;\n\t}\n\n\tasync handleLLMError(\n\t\terror: IDataObject | Error,\n\t\trunId: string,\n\t\tparentRunId?: string | undefined,\n\t) {\n\t\tconst runDetails = this.runsMap[runId] ?? { index: Object.keys(this.runsMap).length };\n\n\t\t// Filter out non-x- headers to avoid leaking sensitive information in logs\n\t\tif (typeof error === 'object' && error?.hasOwnProperty('headers')) {\n\t\t\tconst errorWithHeaders = error as { headers: Record<string, unknown> };\n\n\t\t\tObject.keys(errorWithHeaders.headers).forEach((key) => {\n\t\t\t\tif (!key.startsWith('x-')) {\n\t\t\t\t\tdelete errorWithHeaders.headers[key];\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\n\t\tif (error instanceof NodeError) {\n\t\t\tif (this.options.errorDescriptionMapper) {\n\t\t\t\terror.description = this.options.errorDescriptionMapper(error);\n\t\t\t}\n\n\t\t\tthis.executionFunctions.addOutputData(this.connectionType, runDetails.index, error);\n\t\t} else {\n\t\t\t// If the error is not a NodeError, we wrap it in a NodeOperationError\n\t\t\tthis.executionFunctions.addOutputData(\n\t\t\t\tthis.connectionType,\n\t\t\t\trunDetails.index,\n\t\t\t\tnew NodeOperationError(this.executionFunctions.getNode(), error as JsonObject, {\n\t\t\t\t\tfunctionality: 'configuration-node',\n\t\t\t\t}),\n\t\t\t);\n\t\t}\n\n\t\tlogAiEvent(this.executionFunctions, 'ai-llm-errored', {\n\t\t\terror: Object.keys(error).length === 0 ? 
error.toString() : error,\n\t\t\trunId,\n\t\t\tparentRunId,\n\t\t});\n\t}\n\n\t// Used to associate subsequent runs with the correct parent run in subnodes of subnodes\n\tsetParentRunIndex(runIndex: number) {\n\t\tthis.#parentRunIndex = runIndex;\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,kBAAoC;AAEpC,IAAAA,eAAwC;AAQxC,kBAAiB;AAEjB,0BAAmE;AAEnE,qBAA2B;AAC3B,sBAAiC;AAfjC;AA6BA,MAAM,0BAA0B;AACzB,MAAM,sBAAsB,gCAAoB;AAAA,EAsCtD,YACS,oBACR,SAIC;AACD,UAAM;AANE;AAtCT,gBAAO;AAIP;AAAA;AAAA,yBAAgB;AAEhB,0BAAiB,wCAAoB;AAErC,gCAAuB;AAEvB,oCAA2B;AAE3B;AAQA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,mBAAqC,CAAC;AAEtC,mBAAU;AAAA;AAAA,MAET,mBAAmB,CAAC,cAAsC;AACzD,cAAM,mBAAoB,WAAW,YAAY,oBAA+B;AAChF,cAAM,eAAgB,WAAW,YAAY,gBAA2B;AAExE,eAAO;AAAA,UACN;AAAA,UACA;AAAA,UACA,aAAa,mBAAmB;AAAA,QACjC;AAAA,MACD;AAAA,MACA,wBAAwB,CAAC,UAAqB,MAAM;AAAA,IACrD;AAUC,SAAK,UAAU,EAAE,GAAG,KAAK,SAAS,GAAG,QAAQ;AAAA,EAC9C;AAAA,EAEA,MAAM,6BAA6B,aAAuC;AACzE,UAAM,WAAW,YAAY,QAAQ,CAAC,QAAQ,IAAI,IAAI,CAAC,MAAM,EAAE,IAAI,CAAC;AACpE,WAAO,MAAM,KAAK,6BAA6B,QAAQ;AAAA,EACxD;AAAA,EAEA,MAAM,6BAA6B,MAAgB;AAClD,UAAM,qBAAiB,sCAAwB,uBAAuB;AACtE,UAAM,UAAU,UAAM,kCAAiB,cAAc;AAErD,UAAM,oBAAoB,MAAM,QAAQ;AAAA,MACvC,KAAK,IAAI,OAAO,SAAS,QAAQ,OAAO,IAAI,EAAE,MAAM;AAAA,IACrD;AAEA,WAAO,kBAAkB,OAAO,CAAC,KAAK,SAAS,MAAM,MAAM,CAAC;AAAA,EAC7D;AAAA,EAEA,MAAM,aAAa,QAAmB,OAAe;AAGpD,UAAM,aAAa,KAAK,QAAQ,KAAK,KAAK,EAAE,OAAO,OAAO,KAAK,KAAK,OAAO,EAAE,OAAO;AAEpF,WAAO,cAAc,OAAO,YAAY;AAAA,MAAI,CAAC,QAC5C,IAAI,IAAI,CAAC,UAAM,YAAAC,SAAK,GAAG,CAAC,QAAQ,gBAAgB,CAAC,CAAC;AAAA,IACnD;AAEA,UAAM,qBAAqB;AAAA,MAC1B,kBAAkB;AAAA,MAClB,cAAc;AAAA,MACd,aAAa;AAAA,IACd;AACA,UAAM,aAAa,KAAK,QAAQ,kBAAkB,OAAO,SAAS;AAElE,QAAI,OAAO,YAAY,SAAS,GAAG;AAClC,yBAAmB,mBAAmB,MAAM,KAAK;AAAA,QAChD,OAAO;AAAA,MACR;AAEA,yBAAmB,eAAe,KAAK;AACvC,yBAAmB,cAClB,mBAAmB,mBAAmB,KAAK;AAAA,IAC7C;AACA,UAAM,WAIF;AAAA,MACH,UAAU,EAAE,aAAa,OAAO,YAAY;AAAA,IAC7C;AAGA,QAAI,WAAW,mBAAmB,GAAG;AACpC,eAAS,aAAa;AAAA,IACvB,OAAO;AACN,eAAS,qBAAqB;AAAA,IAC/B;AAEA,UAAM,iBACL,OAAO,WAAW,aAAa,WAC5B,WAAW,WACX,WAAW,SAAS,IAAI,CAAC,YAAY;AACrC,UAAI,OAAO,YAAY,SAAU,QAAO;AACxC,UAAI,OAAO,SAAS,WAAW,WAAY,QAAO,QAAQ,OAAO;AAEjE,aAAO;AAAA,IACR,CAAC;AAEJ,UAAM,qBACL,mBAAK,qBAAoB,SAAY,mBAAK,mBAAkB,WAAW,QAAQ;AAEhF,SAAK,mBAAmB;AAAA,MACvB,KAAK;AAAA,MACL,WAAW;AAAA,MACX,CAAC,CAAC,EAAE,MAAM,EAAE,GAAG,SAAS,EAAE,CAAC,CAAC;AAAA,MAC5B;AAAA,MACA;AAAA,IACD;AAEA,mCAAW,KAAK,oBAAoB,2BAA2B;AAAA,MAC9D,UAAU;AAAA,MACV,SAAS,WAAW;AAAA,MACpB;AAAA,IACD,CAAC;AAAA,EACF;AAAA,EAEA,MAAM,eAAe,KAAiB,SAAmB,OAAe;AACvE,UAAM,kBAAkB,MAAM,KAAK,6BAA6B,OAAO;AACvE,UAAM,qBACL,mBAAK,qBAAoB,SACtB,mBAAK,mBAAkB,KAAK,mBAAmB,gBAAgB,IAC/D;AAEJ,UAAM,UAAU,IAAI,SAAS,gBAAgB,IAAI,SAAS;AAC1D,UAAM,EAAE,MAAM,IAAI,KAAK,mBAAmB;AAAA,MACzC,KAAK;AAAA,MACL;AAAA,QACC;AAAA,UACC;AAAA,YACC,MAAM;AAAA,cACL,UAAU;AAAA,cACV;AAAA,cACA;AAAA,YACD;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MACA;AAAA,IACD;AAGA,SAAK,QAAQ,KAAK,IAAI;AAAA,MACrB;AAAA,MACA;AAAA,MACA,UAAU;AAAA,IACX;AACA,SAAK,uBAAuB;AAAA,EAC7B;AAAA,EAEA,MAAM,eACL,OACA,OACA,aACC;AACD,UAAM,aAAa,KAAK,QAAQ,KAAK,KAAK,EAAE,OAAO,OAAO,KAAK,KAAK,OAAO,EAAE,OAAO;AAGpF,QAAI,OAAO,UAAU,YAAY,OAAO,eAAe,SAAS,GAAG;AAClE,YAAM,mBAAmB;AAEzB,aAAO,KAAK,iBAAiB,OAAO,EAAE,QAAQ,CAAC,QAAQ;AACtD,YAAI,CAAC,IAAI,WAAW,IAAI,GAAG;AAC1B,iBAAO,iBAAiB,QAAQ,GAAG;AAAA,QACpC;AAAA,MACD,CAAC;AAAA,IACF;AAEA,QAAI,iBAAiB,+BAAW;AAC/B,UAAI,KAAK,QAAQ,wBAAwB;AACxC,cAAM,cAAc,KAAK,QAAQ,uBAAuB,KAAK;AAAA,MAC9D;AAEA,WAAK,mBAAmB,cAAc,KAAK,gBAAgB,WAAW,OAAO,KAAK;AAAA,IACnF,OAAO;AAEN,WAAK,mBAAmB;AAAA,QACvB,KAAK;AAAA,QACL,WAAW;AAAA,QACX,IAAI,uCAAmB,KAAK,mBAAmB,QAAQ,GAAG,OAAqB;AAAA,UAC9E,eAAe;AAAA,QAChB,CAAC;AAAA,
MACF;AAAA,IACD;AAEA,mCAAW,KAAK,oBAAoB,kBAAkB;AAAA,MACrD,OAAO,OAAO,KAAK,KAAK,EAAE,WAAW,IAAI,MAAM,SAAS,IAAI;AAAA,MAC5D;AAAA,MACA;AAAA,IACD,CAAC;AAAA,EACF;AAAA;AAAA,EAGA,kBAAkB,UAAkB;AACnC,uBAAK,iBAAkB;AAAA,EACxB;AACD;AAvMC;","names":["import_base","pick"]}
+ {"version":3,"sources":["../../../nodes/llms/N8nLlmTracing.ts"],"sourcesContent":["import { BaseCallbackHandler } from '@langchain/core/callbacks/base';\nimport type { SerializedFields } from '@langchain/core/dist/load/map_keys';\nimport { getModelNameForTiktoken } from '@langchain/core/language_models/base';\nimport type {\n\tSerialized,\n\tSerializedNotImplemented,\n\tSerializedSecret,\n} from '@langchain/core/load/serializable';\nimport type { BaseMessage } from '@langchain/core/messages';\nimport type { LLMResult } from '@langchain/core/outputs';\nimport pick from 'lodash/pick';\nimport type { IDataObject, ISupplyDataFunctions, JsonObject } from 'n8n-workflow';\nimport { NodeConnectionTypes, NodeError, NodeOperationError } from 'n8n-workflow';\n\nimport { logAiEvent } from '@utils/helpers';\nimport { estimateTokensFromStringList } from '@utils/tokenizer/token-estimator';\n\ntype TokensUsageParser = (llmOutput: LLMResult['llmOutput']) => {\n\tcompletionTokens: number;\n\tpromptTokens: number;\n\ttotalTokens: number;\n};\n\ntype RunDetail = {\n\tindex: number;\n\tmessages: BaseMessage[] | string[] | string;\n\toptions: SerializedSecret | SerializedNotImplemented | SerializedFields;\n};\n\nconst TIKTOKEN_ESTIMATE_MODEL = 'gpt-4o';\nexport class N8nLlmTracing extends BaseCallbackHandler {\n\tname = 'N8nLlmTracing';\n\n\t// This flag makes sure that LangChain will wait for the handlers to finish before continuing\n\t// This is crucial for the handleLLMError handler to work correctly (it should be called before the error is propagated to the root node)\n\tawaitHandlers = true;\n\n\tconnectionType = NodeConnectionTypes.AiLanguageModel;\n\n\tpromptTokensEstimate = 0;\n\n\tcompletionTokensEstimate = 0;\n\n\t#parentRunIndex?: number;\n\n\t/**\n\t * A map to associate LLM run IDs to run details.\n\t * Key: Unique identifier for each LLM run (run ID)\n\t * Value: RunDetails object\n\t *\n\t */\n\trunsMap: Record<string, RunDetail> = {};\n\n\toptions = {\n\t\t// Default(OpenAI format) parser\n\t\ttokensUsageParser: (llmOutput: LLMResult['llmOutput']) => {\n\t\t\tconst completionTokens = (llmOutput?.tokenUsage?.completionTokens as number) ?? 0;\n\t\t\tconst promptTokens = (llmOutput?.tokenUsage?.promptTokens as number) ?? 0;\n\n\t\t\treturn {\n\t\t\t\tcompletionTokens,\n\t\t\t\tpromptTokens,\n\t\t\t\ttotalTokens: completionTokens + promptTokens,\n\t\t\t};\n\t\t},\n\t\terrorDescriptionMapper: (error: NodeError) => error.description,\n\t};\n\n\tconstructor(\n\t\tprivate executionFunctions: ISupplyDataFunctions,\n\t\toptions?: {\n\t\t\ttokensUsageParser?: TokensUsageParser;\n\t\t\terrorDescriptionMapper?: (error: NodeError) => string;\n\t\t},\n\t) {\n\t\tsuper();\n\t\tthis.options = { ...this.options, ...options };\n\t}\n\n\tasync estimateTokensFromGeneration(generations: LLMResult['generations']) {\n\t\tconst messages = generations.flatMap((gen) => gen.map((g) => g.text));\n\t\treturn await this.estimateTokensFromStringList(messages);\n\t}\n\n\tasync estimateTokensFromStringList(list: string[]) {\n\t\tconst embeddingModel = getModelNameForTiktoken(TIKTOKEN_ESTIMATE_MODEL);\n\t\treturn await estimateTokensFromStringList(list, embeddingModel);\n\t}\n\n\tasync handleLLMEnd(output: LLMResult, runId: string) {\n\t\t// The fallback should never happen since handleLLMStart should always set the run details\n\t\t// but just in case, we set the index to the length of the runsMap\n\t\tconst runDetails = this.runsMap[runId] ?? 
{ index: Object.keys(this.runsMap).length };\n\n\t\toutput.generations = output.generations.map((gen) =>\n\t\t\tgen.map((g) => pick(g, ['text', 'generationInfo'])),\n\t\t);\n\n\t\tconst tokenUsageEstimate = {\n\t\t\tcompletionTokens: 0,\n\t\t\tpromptTokens: 0,\n\t\t\ttotalTokens: 0,\n\t\t};\n\t\tconst tokenUsage = this.options.tokensUsageParser(output.llmOutput);\n\n\t\tif (output.generations.length > 0) {\n\t\t\ttokenUsageEstimate.completionTokens = await this.estimateTokensFromGeneration(\n\t\t\t\toutput.generations,\n\t\t\t);\n\n\t\t\ttokenUsageEstimate.promptTokens = this.promptTokensEstimate;\n\t\t\ttokenUsageEstimate.totalTokens =\n\t\t\t\ttokenUsageEstimate.completionTokens + this.promptTokensEstimate;\n\t\t}\n\t\tconst response: {\n\t\t\tresponse: { generations: LLMResult['generations'] };\n\t\t\ttokenUsageEstimate?: typeof tokenUsageEstimate;\n\t\t\ttokenUsage?: typeof tokenUsage;\n\t\t} = {\n\t\t\tresponse: { generations: output.generations },\n\t\t};\n\n\t\t// If the LLM response contains actual tokens usage, otherwise fallback to the estimate\n\t\tif (tokenUsage.completionTokens > 0) {\n\t\t\tresponse.tokenUsage = tokenUsage;\n\t\t} else {\n\t\t\tresponse.tokenUsageEstimate = tokenUsageEstimate;\n\t\t}\n\n\t\tconst parsedMessages =\n\t\t\ttypeof runDetails.messages === 'string'\n\t\t\t\t? runDetails.messages\n\t\t\t\t: runDetails.messages.map((message) => {\n\t\t\t\t\t\tif (typeof message === 'string') return message;\n\t\t\t\t\t\tif (typeof message?.toJSON === 'function') return message.toJSON();\n\n\t\t\t\t\t\treturn message;\n\t\t\t\t\t});\n\n\t\tconst sourceNodeRunIndex =\n\t\t\tthis.#parentRunIndex !== undefined ? this.#parentRunIndex + runDetails.index : undefined;\n\n\t\tthis.executionFunctions.addOutputData(\n\t\t\tthis.connectionType,\n\t\t\trunDetails.index,\n\t\t\t[[{ json: { ...response } }]],\n\t\t\tundefined,\n\t\t\tsourceNodeRunIndex,\n\t\t);\n\n\t\tlogAiEvent(this.executionFunctions, 'ai-llm-generated-output', {\n\t\t\tmessages: parsedMessages,\n\t\t\toptions: runDetails.options,\n\t\t\tresponse,\n\t\t});\n\t}\n\n\tasync handleLLMStart(llm: Serialized, prompts: string[], runId: string) {\n\t\tconst estimatedTokens = await this.estimateTokensFromStringList(prompts);\n\t\tconst sourceNodeRunIndex =\n\t\t\tthis.#parentRunIndex !== undefined\n\t\t\t\t? this.#parentRunIndex + this.executionFunctions.getNextRunIndex()\n\t\t\t\t: undefined;\n\n\t\tconst options = llm.type === 'constructor' ? llm.kwargs : llm;\n\t\tconst { index } = this.executionFunctions.addInputData(\n\t\t\tthis.connectionType,\n\t\t\t[\n\t\t\t\t[\n\t\t\t\t\t{\n\t\t\t\t\t\tjson: {\n\t\t\t\t\t\t\tmessages: prompts,\n\t\t\t\t\t\t\testimatedTokens,\n\t\t\t\t\t\t\toptions,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t],\n\t\t\tsourceNodeRunIndex,\n\t\t);\n\n\t\t// Save the run details for later use when processing `handleLLMEnd` event\n\t\tthis.runsMap[runId] = {\n\t\t\tindex,\n\t\t\toptions,\n\t\t\tmessages: prompts,\n\t\t};\n\t\tthis.promptTokensEstimate = estimatedTokens;\n\t}\n\n\tasync handleLLMError(\n\t\terror: IDataObject | Error,\n\t\trunId: string,\n\t\tparentRunId?: string | undefined,\n\t) {\n\t\tconst runDetails = this.runsMap[runId] ?? 
{ index: Object.keys(this.runsMap).length };\n\n\t\t// Filter out non-x- headers to avoid leaking sensitive information in logs\n\t\tif (typeof error === 'object' && error?.hasOwnProperty('headers')) {\n\t\t\tconst errorWithHeaders = error as { headers: Record<string, unknown> };\n\n\t\t\tObject.keys(errorWithHeaders.headers).forEach((key) => {\n\t\t\t\tif (!key.startsWith('x-')) {\n\t\t\t\t\tdelete errorWithHeaders.headers[key];\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\n\t\tif (error instanceof NodeError) {\n\t\t\tif (this.options.errorDescriptionMapper) {\n\t\t\t\terror.description = this.options.errorDescriptionMapper(error);\n\t\t\t}\n\n\t\t\tthis.executionFunctions.addOutputData(this.connectionType, runDetails.index, error);\n\t\t} else {\n\t\t\t// If the error is not a NodeError, we wrap it in a NodeOperationError\n\t\t\tthis.executionFunctions.addOutputData(\n\t\t\t\tthis.connectionType,\n\t\t\t\trunDetails.index,\n\t\t\t\tnew NodeOperationError(this.executionFunctions.getNode(), error as JsonObject, {\n\t\t\t\t\tfunctionality: 'configuration-node',\n\t\t\t\t}),\n\t\t\t);\n\t\t}\n\n\t\tlogAiEvent(this.executionFunctions, 'ai-llm-errored', {\n\t\t\terror: Object.keys(error).length === 0 ? error.toString() : error,\n\t\t\trunId,\n\t\t\tparentRunId,\n\t\t});\n\t}\n\n\t// Used to associate subsequent runs with the correct parent run in subnodes of subnodes\n\tsetParentRunIndex(runIndex: number) {\n\t\tthis.#parentRunIndex = runIndex;\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,kBAAoC;AAEpC,IAAAA,eAAwC;AAQxC,kBAAiB;AAEjB,0BAAmE;AAEnE,qBAA2B;AAC3B,6BAA6C;AAf7C;AA6BA,MAAM,0BAA0B;AACzB,MAAM,sBAAsB,gCAAoB;AAAA,EAsCtD,YACS,oBACR,SAIC;AACD,UAAM;AANE;AAtCT,gBAAO;AAIP;AAAA;AAAA,yBAAgB;AAEhB,0BAAiB,wCAAoB;AAErC,gCAAuB;AAEvB,oCAA2B;AAE3B;AAQA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,mBAAqC,CAAC;AAEtC,mBAAU;AAAA;AAAA,MAET,mBAAmB,CAAC,cAAsC;AACzD,cAAM,mBAAoB,WAAW,YAAY,oBAA+B;AAChF,cAAM,eAAgB,WAAW,YAAY,gBAA2B;AAExE,eAAO;AAAA,UACN;AAAA,UACA;AAAA,UACA,aAAa,mBAAmB;AAAA,QACjC;AAAA,MACD;AAAA,MACA,wBAAwB,CAAC,UAAqB,MAAM;AAAA,IACrD;AAUC,SAAK,UAAU,EAAE,GAAG,KAAK,SAAS,GAAG,QAAQ;AAAA,EAC9C;AAAA,EAEA,MAAM,6BAA6B,aAAuC;AACzE,UAAM,WAAW,YAAY,QAAQ,CAAC,QAAQ,IAAI,IAAI,CAAC,MAAM,EAAE,IAAI,CAAC;AACpE,WAAO,MAAM,KAAK,6BAA6B,QAAQ;AAAA,EACxD;AAAA,EAEA,MAAM,6BAA6B,MAAgB;AAClD,UAAM,qBAAiB,sCAAwB,uBAAuB;AACtE,WAAO,UAAM,qDAA6B,MAAM,cAAc;AAAA,EAC/D;AAAA,EAEA,MAAM,aAAa,QAAmB,OAAe;AAGpD,UAAM,aAAa,KAAK,QAAQ,KAAK,KAAK,EAAE,OAAO,OAAO,KAAK,KAAK,OAAO,EAAE,OAAO;AAEpF,WAAO,cAAc,OAAO,YAAY;AAAA,MAAI,CAAC,QAC5C,IAAI,IAAI,CAAC,UAAM,YAAAC,SAAK,GAAG,CAAC,QAAQ,gBAAgB,CAAC,CAAC;AAAA,IACnD;AAEA,UAAM,qBAAqB;AAAA,MAC1B,kBAAkB;AAAA,MAClB,cAAc;AAAA,MACd,aAAa;AAAA,IACd;AACA,UAAM,aAAa,KAAK,QAAQ,kBAAkB,OAAO,SAAS;AAElE,QAAI,OAAO,YAAY,SAAS,GAAG;AAClC,yBAAmB,mBAAmB,MAAM,KAAK;AAAA,QAChD,OAAO;AAAA,MACR;AAEA,yBAAmB,eAAe,KAAK;AACvC,yBAAmB,cAClB,mBAAmB,mBAAmB,KAAK;AAAA,IAC7C;AACA,UAAM,WAIF;AAAA,MACH,UAAU,EAAE,aAAa,OAAO,YAAY;AAAA,IAC7C;AAGA,QAAI,WAAW,mBAAmB,GAAG;AACpC,eAAS,aAAa;AAAA,IACvB,OAAO;AACN,eAAS,qBAAqB;AAAA,IAC/B;AAEA,UAAM,iBACL,OAAO,WAAW,aAAa,WAC5B,WAAW,WACX,WAAW,SAAS,IAAI,CAAC,YAAY;AACrC,UAAI,OAAO,YAAY,SAAU,QAAO;AACxC,UAAI,OAAO,SAAS,WAAW,WAAY,QAAO,QAAQ,OAAO;AAEjE,aAAO;AAAA,IACR,CAAC;AAEJ,UAAM,qBACL,mBAAK,qBAAoB,SAAY,mBAAK,mBAAkB,WAAW,QAAQ;AAEhF,SAAK,mBAAmB;AAAA,MACvB,KAAK;AAAA,MACL,WAAW;AAAA,MACX,CAAC,CAAC,EAAE,MAAM,EAAE,GAAG,SAAS,EAAE,CAAC,CAAC;AAAA,MAC5B;AAAA,MACA;AAAA,IACD;AAEA,mCAAW,KAAK,oBAAoB,2BAA2B;AAAA,MAC9D,UAAU;AAAA,MACV,SAAS,WAAW;AAAA,MACpB;AAAA,IACD,CAAC;AAAA,EACF;AAAA,EAEA,MAAM,eAAe,KAAiB,SAAmB,OAAe;AACvE,UAAM,kBAA
kB,MAAM,KAAK,6BAA6B,OAAO;AACvE,UAAM,qBACL,mBAAK,qBAAoB,SACtB,mBAAK,mBAAkB,KAAK,mBAAmB,gBAAgB,IAC/D;AAEJ,UAAM,UAAU,IAAI,SAAS,gBAAgB,IAAI,SAAS;AAC1D,UAAM,EAAE,MAAM,IAAI,KAAK,mBAAmB;AAAA,MACzC,KAAK;AAAA,MACL;AAAA,QACC;AAAA,UACC;AAAA,YACC,MAAM;AAAA,cACL,UAAU;AAAA,cACV;AAAA,cACA;AAAA,YACD;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MACA;AAAA,IACD;AAGA,SAAK,QAAQ,KAAK,IAAI;AAAA,MACrB;AAAA,MACA;AAAA,MACA,UAAU;AAAA,IACX;AACA,SAAK,uBAAuB;AAAA,EAC7B;AAAA,EAEA,MAAM,eACL,OACA,OACA,aACC;AACD,UAAM,aAAa,KAAK,QAAQ,KAAK,KAAK,EAAE,OAAO,OAAO,KAAK,KAAK,OAAO,EAAE,OAAO;AAGpF,QAAI,OAAO,UAAU,YAAY,OAAO,eAAe,SAAS,GAAG;AAClE,YAAM,mBAAmB;AAEzB,aAAO,KAAK,iBAAiB,OAAO,EAAE,QAAQ,CAAC,QAAQ;AACtD,YAAI,CAAC,IAAI,WAAW,IAAI,GAAG;AAC1B,iBAAO,iBAAiB,QAAQ,GAAG;AAAA,QACpC;AAAA,MACD,CAAC;AAAA,IACF;AAEA,QAAI,iBAAiB,+BAAW;AAC/B,UAAI,KAAK,QAAQ,wBAAwB;AACxC,cAAM,cAAc,KAAK,QAAQ,uBAAuB,KAAK;AAAA,MAC9D;AAEA,WAAK,mBAAmB,cAAc,KAAK,gBAAgB,WAAW,OAAO,KAAK;AAAA,IACnF,OAAO;AAEN,WAAK,mBAAmB;AAAA,QACvB,KAAK;AAAA,QACL,WAAW;AAAA,QACX,IAAI,uCAAmB,KAAK,mBAAmB,QAAQ,GAAG,OAAqB;AAAA,UAC9E,eAAe;AAAA,QAChB,CAAC;AAAA,MACF;AAAA,IACD;AAEA,mCAAW,KAAK,oBAAoB,kBAAkB;AAAA,MACrD,OAAO,OAAO,KAAK,KAAK,EAAE,WAAW,IAAI,MAAM,SAAS,IAAI;AAAA,MAC5D;AAAA,MACA;AAAA,IACD,CAAC;AAAA,EACF;AAAA;AAAA,EAGA,kBAAkB,UAAkB;AACnC,uBAAK,iBAAkB;AAAA,EACxB;AACD;AAjMC;","names":["import_base","pick"]}
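Note: the `token-estimator` utility that replaces the eager tiktoken encode above ships as a new file in this release (dist/utils/tokenizer/token-estimator.js), but its body is not reproduced in this excerpt. A minimal sketch of a function with the call signature used above, assuming a simple characters-per-token heuristic, is shown below; the name and signature come from the diff, the implementation is a guess:

```ts
// Hypothetical sketch only: the real token-estimator.js is not shown in this diff.
// Assumes a rough characters-per-token ratio instead of a full BPE encode.
const AVG_CHARS_PER_TOKEN: Record<string, number> = {
	'gpt-4o': 4, // assumption: ~4 characters per token for English text
};

export async function estimateTokensFromStringList(
	list: string[],
	model: string,
): Promise<number> {
	const charsPerToken = AVG_CHARS_PER_TOKEN[model] ?? 4;
	// Sum a cheap O(n) per-string estimate rather than encoding every string
	return list.reduce((acc, text) => acc + Math.ceil(text.length / charsPerToken), 0);
}
```

Trading exact BPE counts for a length-based estimate avoids loading encoding tables on every run and sidesteps tokenizer worst-case inputs; the counts feed only the `tokenUsageEstimate` fallback, so approximate values suffice.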
@@ -22,7 +22,9 @@ __export(TokenTextSplitter_exports, {
  });
  module.exports = __toCommonJS(TokenTextSplitter_exports);
  var import_textsplitters = require("@langchain/textsplitters");
+ var import_helpers = require("../../../utils/helpers");
  var import_tiktoken = require("../../../utils/tokenizer/tiktoken");
+ var import_token_estimator = require("../../../utils/tokenizer/token-estimator");
  class TokenTextSplitter extends import_textsplitters.TextSplitter {
  static lc_name() {
  return "TokenTextSplitter";
@@ -34,22 +36,49 @@ class TokenTextSplitter extends import_textsplitters.TextSplitter {
  this.disallowedSpecial = fields?.disallowedSpecial ?? "all";
  }
  async splitText(text) {
- if (!this.tokenizer) {
- this.tokenizer = await (0, import_tiktoken.getEncoding)(this.encodingName);
- }
- const splits = [];
- const input_ids = this.tokenizer.encode(text, this.allowedSpecial, this.disallowedSpecial);
- let start_idx = 0;
- while (start_idx < input_ids.length) {
- if (start_idx > 0) {
- start_idx -= this.chunkOverlap;
+ try {
+ if (!text || typeof text !== "string") {
+ return [];
+ }
+ if ((0, import_helpers.hasLongSequentialRepeat)(text)) {
+ const splits = (0, import_token_estimator.estimateTextSplitsByTokens)(
+ text,
+ this.chunkSize,
+ this.chunkOverlap,
+ this.encodingName
+ );
+ return splits;
+ }
+ try {
+ if (!this.tokenizer) {
+ this.tokenizer = await (0, import_tiktoken.getEncoding)(this.encodingName);
+ }
+ const splits = [];
+ const input_ids = this.tokenizer.encode(text, this.allowedSpecial, this.disallowedSpecial);
+ let start_idx = 0;
+ let chunkCount = 0;
+ while (start_idx < input_ids.length) {
+ if (start_idx > 0) {
+ start_idx = Math.max(0, start_idx - this.chunkOverlap);
+ }
+ const end_idx = Math.min(start_idx + this.chunkSize, input_ids.length);
+ const chunk_ids = input_ids.slice(start_idx, end_idx);
+ splits.push(this.tokenizer.decode(chunk_ids));
+ chunkCount++;
+ start_idx = end_idx;
+ }
+ return splits;
+ } catch (tiktokenError) {
+ return (0, import_token_estimator.estimateTextSplitsByTokens)(
+ text,
+ this.chunkSize,
+ this.chunkOverlap,
+ this.encodingName
+ );
  }
- const end_idx = Math.min(start_idx + this.chunkSize, input_ids.length);
- const chunk_ids = input_ids.slice(start_idx, end_idx);
- splits.push(this.tokenizer.decode(chunk_ids));
- start_idx = end_idx;
+ } catch (error) {
+ return [];
  }
- return splits;
  }
  }
  // Annotate the CommonJS export names for ESM import in node:
@@ -1 +1 @@
- {"version":3,"sources":["../../../../nodes/text_splitters/TextSplitterTokenSplitter/TokenTextSplitter.ts"],"sourcesContent":["/* eslint-disable n8n-nodes-base/node-dirname-against-convention */\nimport type { TokenTextSplitterParams } from '@langchain/textsplitters';\nimport { TextSplitter } from '@langchain/textsplitters';\nimport type * as tiktoken from 'js-tiktoken';\n\nimport { getEncoding } from '@utils/tokenizer/tiktoken';\n\n/**\n * Implementation of splitter which looks at tokens.\n * This is override of the LangChain TokenTextSplitter\n * to use the n8n tokenizer utility which uses local JSON encodings\n */\nexport class TokenTextSplitter extends TextSplitter implements TokenTextSplitterParams {\n\tstatic lc_name() {\n\t\treturn 'TokenTextSplitter';\n\t}\n\n\tencodingName: tiktoken.TiktokenEncoding;\n\n\tallowedSpecial: 'all' | string[];\n\n\tdisallowedSpecial: 'all' | string[];\n\n\tprivate tokenizer: tiktoken.Tiktoken | undefined;\n\n\tconstructor(fields?: Partial<TokenTextSplitterParams>) {\n\t\tsuper(fields);\n\n\t\tthis.encodingName = fields?.encodingName ?? 'cl100k_base';\n\t\tthis.allowedSpecial = fields?.allowedSpecial ?? [];\n\t\tthis.disallowedSpecial = fields?.disallowedSpecial ?? 'all';\n\t}\n\n\tasync splitText(text: string): Promise<string[]> {\n\t\tif (!this.tokenizer) {\n\t\t\tthis.tokenizer = await getEncoding(this.encodingName);\n\t\t}\n\n\t\tconst splits: string[] = [];\n\n\t\tconst input_ids = this.tokenizer.encode(text, this.allowedSpecial, this.disallowedSpecial);\n\n\t\tlet start_idx = 0;\n\n\t\twhile (start_idx < input_ids.length) {\n\t\t\tif (start_idx > 0) {\n\t\t\t\tstart_idx -= this.chunkOverlap;\n\t\t\t}\n\t\t\tconst end_idx = Math.min(start_idx + this.chunkSize, input_ids.length);\n\t\t\tconst chunk_ids = input_ids.slice(start_idx, end_idx);\n\t\t\tsplits.push(this.tokenizer.decode(chunk_ids));\n\t\t\tstart_idx = end_idx;\n\t\t}\n\n\t\treturn splits;\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAEA,2BAA6B;AAG7B,sBAA4B;AAOrB,MAAM,0BAA0B,kCAAgD;AAAA,EACtF,OAAO,UAAU;AAChB,WAAO;AAAA,EACR;AAAA,EAUA,YAAY,QAA2C;AACtD,UAAM,MAAM;AAEZ,SAAK,eAAe,QAAQ,gBAAgB;AAC5C,SAAK,iBAAiB,QAAQ,kBAAkB,CAAC;AACjD,SAAK,oBAAoB,QAAQ,qBAAqB;AAAA,EACvD;AAAA,EAEA,MAAM,UAAU,MAAiC;AAChD,QAAI,CAAC,KAAK,WAAW;AACpB,WAAK,YAAY,UAAM,6BAAY,KAAK,YAAY;AAAA,IACrD;AAEA,UAAM,SAAmB,CAAC;AAE1B,UAAM,YAAY,KAAK,UAAU,OAAO,MAAM,KAAK,gBAAgB,KAAK,iBAAiB;AAEzF,QAAI,YAAY;AAEhB,WAAO,YAAY,UAAU,QAAQ;AACpC,UAAI,YAAY,GAAG;AAClB,qBAAa,KAAK;AAAA,MACnB;AACA,YAAM,UAAU,KAAK,IAAI,YAAY,KAAK,WAAW,UAAU,MAAM;AACrE,YAAM,YAAY,UAAU,MAAM,WAAW,OAAO;AACpD,aAAO,KAAK,KAAK,UAAU,OAAO,SAAS,CAAC;AAC5C,kBAAY;AAAA,IACb;AAEA,WAAO;AAAA,EACR;AACD;","names":[]}
+ {"version":3,"sources":["../../../../nodes/text_splitters/TextSplitterTokenSplitter/TokenTextSplitter.ts"],"sourcesContent":["/* eslint-disable n8n-nodes-base/node-dirname-against-convention */\nimport type { TokenTextSplitterParams } from '@langchain/textsplitters';\nimport { TextSplitter } from '@langchain/textsplitters';\nimport type * as tiktoken from 'js-tiktoken';\n\nimport { hasLongSequentialRepeat } from '@utils/helpers';\nimport { getEncoding } from '@utils/tokenizer/tiktoken';\nimport { estimateTextSplitsByTokens } from '@utils/tokenizer/token-estimator';\n\n/**\n * Implementation of splitter which looks at tokens.\n * This is override of the LangChain TokenTextSplitter\n * to use the n8n tokenizer utility which uses local JSON encodings\n */\nexport class TokenTextSplitter extends TextSplitter implements TokenTextSplitterParams {\n\tstatic lc_name() {\n\t\treturn 'TokenTextSplitter';\n\t}\n\n\tencodingName: tiktoken.TiktokenEncoding;\n\n\tallowedSpecial: 'all' | string[];\n\n\tdisallowedSpecial: 'all' | string[];\n\n\tprivate tokenizer: tiktoken.Tiktoken | undefined;\n\n\tconstructor(fields?: Partial<TokenTextSplitterParams>) {\n\t\tsuper(fields);\n\n\t\tthis.encodingName = fields?.encodingName ?? 'cl100k_base';\n\t\tthis.allowedSpecial = fields?.allowedSpecial ?? [];\n\t\tthis.disallowedSpecial = fields?.disallowedSpecial ?? 'all';\n\t}\n\n\tasync splitText(text: string): Promise<string[]> {\n\t\ttry {\n\t\t\t// Validate input\n\t\t\tif (!text || typeof text !== 'string') {\n\t\t\t\treturn [];\n\t\t\t}\n\n\t\t\t// Check for repetitive content\n\t\t\tif (hasLongSequentialRepeat(text)) {\n\t\t\t\tconst splits = estimateTextSplitsByTokens(\n\t\t\t\t\ttext,\n\t\t\t\t\tthis.chunkSize,\n\t\t\t\t\tthis.chunkOverlap,\n\t\t\t\t\tthis.encodingName,\n\t\t\t\t);\n\t\t\t\treturn splits;\n\t\t\t}\n\n\t\t\t// Use tiktoken for normal text\n\t\t\ttry {\n\t\t\t\tif (!this.tokenizer) {\n\t\t\t\t\tthis.tokenizer = await getEncoding(this.encodingName);\n\t\t\t\t}\n\n\t\t\t\tconst splits: string[] = [];\n\t\t\t\tconst input_ids = this.tokenizer.encode(text, this.allowedSpecial, this.disallowedSpecial);\n\n\t\t\t\tlet start_idx = 0;\n\t\t\t\tlet chunkCount = 0;\n\n\t\t\t\twhile (start_idx < input_ids.length) {\n\t\t\t\t\tif (start_idx > 0) {\n\t\t\t\t\t\tstart_idx = Math.max(0, start_idx - this.chunkOverlap);\n\t\t\t\t\t}\n\t\t\t\t\tconst end_idx = Math.min(start_idx + this.chunkSize, input_ids.length);\n\t\t\t\t\tconst chunk_ids = input_ids.slice(start_idx, end_idx);\n\n\t\t\t\t\tsplits.push(this.tokenizer.decode(chunk_ids));\n\n\t\t\t\t\tchunkCount++;\n\t\t\t\t\tstart_idx = end_idx;\n\t\t\t\t}\n\n\t\t\t\treturn splits;\n\t\t\t} catch (tiktokenError) {\n\t\t\t\t// Fall back to character-based splitting if tiktoken fails\n\t\t\t\treturn estimateTextSplitsByTokens(\n\t\t\t\t\ttext,\n\t\t\t\t\tthis.chunkSize,\n\t\t\t\t\tthis.chunkOverlap,\n\t\t\t\t\tthis.encodingName,\n\t\t\t\t);\n\t\t\t}\n\t\t} catch (error) {\n\t\t\t// Return empty array on complete failure\n\t\t\treturn 
[];\n\t\t}\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAEA,2BAA6B;AAG7B,qBAAwC;AACxC,sBAA4B;AAC5B,6BAA2C;AAOpC,MAAM,0BAA0B,kCAAgD;AAAA,EACtF,OAAO,UAAU;AAChB,WAAO;AAAA,EACR;AAAA,EAUA,YAAY,QAA2C;AACtD,UAAM,MAAM;AAEZ,SAAK,eAAe,QAAQ,gBAAgB;AAC5C,SAAK,iBAAiB,QAAQ,kBAAkB,CAAC;AACjD,SAAK,oBAAoB,QAAQ,qBAAqB;AAAA,EACvD;AAAA,EAEA,MAAM,UAAU,MAAiC;AAChD,QAAI;AAEH,UAAI,CAAC,QAAQ,OAAO,SAAS,UAAU;AACtC,eAAO,CAAC;AAAA,MACT;AAGA,cAAI,wCAAwB,IAAI,GAAG;AAClC,cAAM,aAAS;AAAA,UACd;AAAA,UACA,KAAK;AAAA,UACL,KAAK;AAAA,UACL,KAAK;AAAA,QACN;AACA,eAAO;AAAA,MACR;AAGA,UAAI;AACH,YAAI,CAAC,KAAK,WAAW;AACpB,eAAK,YAAY,UAAM,6BAAY,KAAK,YAAY;AAAA,QACrD;AAEA,cAAM,SAAmB,CAAC;AAC1B,cAAM,YAAY,KAAK,UAAU,OAAO,MAAM,KAAK,gBAAgB,KAAK,iBAAiB;AAEzF,YAAI,YAAY;AAChB,YAAI,aAAa;AAEjB,eAAO,YAAY,UAAU,QAAQ;AACpC,cAAI,YAAY,GAAG;AAClB,wBAAY,KAAK,IAAI,GAAG,YAAY,KAAK,YAAY;AAAA,UACtD;AACA,gBAAM,UAAU,KAAK,IAAI,YAAY,KAAK,WAAW,UAAU,MAAM;AACrE,gBAAM,YAAY,UAAU,MAAM,WAAW,OAAO;AAEpD,iBAAO,KAAK,KAAK,UAAU,OAAO,SAAS,CAAC;AAE5C;AACA,sBAAY;AAAA,QACb;AAEA,eAAO;AAAA,MACR,SAAS,eAAe;AAEvB,mBAAO;AAAA,UACN;AAAA,UACA,KAAK;AAAA,UACL,KAAK;AAAA,UACL,KAAK;AAAA,QACN;AAAA,MACD;AAAA,IACD,SAAS,OAAO;AAEf,aAAO,CAAC;AAAA,IACT;AAAA,EACD;AACD;","names":[]}
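The two helpers introduced above, hasLongSequentialRepeat (from ../../../utils/helpers) and estimateTextSplitsByTokens (from the new token-estimator module), are likewise not shown in this excerpt. A sketch of plausible implementations, assuming a run-length check (the 1000-character threshold is an assumption) and the same characters-per-token approximation as above:

```ts
// Hypothetical sketches only: the shipped helper bodies are not part of this diff.

// Long runs of a repeated character are pathological inputs for BPE tokenizers,
// which is presumably why the splitter routes them to an estimate instead.
export function hasLongSequentialRepeat(text: string, threshold = 1000): boolean {
	let runLength = 1;
	for (let i = 1; i < text.length; i++) {
		runLength = text[i] === text[i - 1] ? runLength + 1 : 1;
		if (runLength >= threshold) return true;
	}
	return false;
}

export function estimateTextSplitsByTokens(
	text: string,
	chunkSize: number,
	chunkOverlap: number,
	_encodingName: string, // kept for signature parity; the heuristic ignores the encoding
): string[] {
	const charsPerToken = 4; // assumption: rough average for cl100k-style encodings
	const chunkChars = chunkSize * charsPerToken;
	const overlapChars = chunkOverlap * charsPerToken;
	const splits: string[] = [];
	// Step forward by (chunk - overlap) characters; guard against non-positive steps
	for (let start = 0; start < text.length; start += Math.max(1, chunkChars - overlapChars)) {
		splits.push(text.slice(start, start + chunkChars));
	}
	return splits;
}
```

Character-based splitting gives approximate token boundaries, but it runs in linear time on exactly the inputs where encoding would stall, and it is also the catch-all fallback when getEncoding itself fails.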
@@ -56,53 +56,86 @@ class WorkflowToolService {
  description,
  itemIndex
  }) {
+ const node = ctx.getNode();
  let runIndex = ctx.getNextRunIndex();
  const toolHandler = async (query, runManager) => {
- const localRunIndex = runIndex++;
- const context = this.baseContext.cloneWith({
- runIndex: localRunIndex,
- inputData: [[{ json: { query } }]]
- });
- try {
- const response = await this.runFunction(context, query, itemIndex, runManager);
- const processedResponse = this.handleToolResponse(response);
- let responseData;
- if (isNodeExecutionData(response)) {
- responseData = response;
- } else {
- const reParsedData = (0, import_n8n_workflow.jsonParse)(processedResponse, {
- fallbackValue: { response: processedResponse }
- });
- responseData = [{ json: reParsedData }];
+ let maxTries = 1;
+ if (node.retryOnFail === true) {
+ maxTries = Math.min(5, Math.max(2, node.maxTries ?? 3));
+ }
+ let waitBetweenTries = 0;
+ if (node.retryOnFail === true) {
+ waitBetweenTries = Math.min(5e3, Math.max(0, node.waitBetweenTries ?? 1e3));
+ }
+ let lastError;
+ for (let tryIndex = 0; tryIndex < maxTries; tryIndex++) {
+ const localRunIndex = runIndex++;
+ const context = this.baseContext.cloneWith({
+ runIndex: localRunIndex,
+ inputData: [[{ json: { query } }]]
+ });
+ const abortSignal = context.getExecutionCancelSignal?.();
+ if (abortSignal?.aborted) {
+ return 'There was an error: "Execution was cancelled"';
  }
- let metadata;
- if (this.subExecutionId && this.subWorkflowId) {
- metadata = {
- subExecution: {
- executionId: this.subExecutionId,
- workflowId: this.subWorkflowId
+ if (tryIndex !== 0) {
+ lastError = void 0;
+ if (waitBetweenTries !== 0) {
+ try {
+ await (0, import_n8n_workflow.sleepWithAbort)(waitBetweenTries, abortSignal);
+ } catch (abortError) {
+ return 'There was an error: "Execution was cancelled"';
  }
- };
+ }
+ }
+ try {
+ const response = await this.runFunction(context, query, itemIndex, runManager);
+ const processedResponse = this.handleToolResponse(response);
+ let responseData;
+ if (isNodeExecutionData(response)) {
+ responseData = response;
+ } else {
+ const reParsedData = (0, import_n8n_workflow.jsonParse)(processedResponse, {
+ fallbackValue: { response: processedResponse }
+ });
+ responseData = [{ json: reParsedData }];
+ }
+ let metadata;
+ if (this.subExecutionId && this.subWorkflowId) {
+ metadata = {
+ subExecution: {
+ executionId: this.subExecutionId,
+ workflowId: this.subWorkflowId
+ }
+ };
+ }
+ void context.addOutputData(
+ import_n8n_workflow.NodeConnectionTypes.AiTool,
+ localRunIndex,
+ [responseData],
+ metadata
+ );
+ return processedResponse;
+ } catch (error) {
+ if (abortSignal?.aborted) {
+ return 'There was an error: "Execution was cancelled"';
+ }
+ const executionError = error;
+ lastError = executionError;
+ const errorResponse = `There was an error: "${executionError.message}"`;
+ const metadata = (0, import_n8n_workflow.parseErrorMetadata)(error);
+ void context.addOutputData(
+ import_n8n_workflow.NodeConnectionTypes.AiTool,
+ localRunIndex,
+ executionError,
+ metadata
+ );
+ if (tryIndex === maxTries - 1) {
+ return errorResponse;
+ }
  }
- void context.addOutputData(
- import_n8n_workflow.NodeConnectionTypes.AiTool,
- localRunIndex,
- [responseData],
- metadata
- );
- return processedResponse;
- } catch (error) {
- const executionError = error;
- const errorResponse = `There was an error: "${executionError.message}"`;
- const metadata = (0, import_n8n_workflow.parseErrorMetadata)(error);
- void context.addOutputData(
- import_n8n_workflow.NodeConnectionTypes.AiTool,
- localRunIndex,
- executionError,
- metadata
- );
- return errorResponse;
  }
+ return `There was an error: ${lastError?.message ?? "Unknown error"}`;
  };
  return this.useSchema ? await this.createStructuredTool(name, description, toolHandler) : new import_tools.DynamicTool({ name, description, func: toolHandler });
  }
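The retry handling added above follows a reusable pattern: clamp the node's retry settings (2 to 5 tries, 0 to 5000 ms wait), sleep abort-aware between attempts, and return the last error as a plain string the agent can consume. A standalone distillation is sketched below; `runOnce` and `sleep` are illustrative stand-ins for this.runFunction(...) and n8n-workflow's sleepWithAbort, not names from the package:

```ts
// Distilled from the diff above, not the shipped implementation.
type NodeRetryOptions = { retryOnFail?: boolean; maxTries?: number; waitBetweenTries?: number };

// Abort-aware sleep: resolves after ms, rejects if the signal fires first
async function sleep(ms: number, signal?: AbortSignal): Promise<void> {
	return await new Promise((resolve, reject) => {
		if (signal?.aborted) return reject(new Error('aborted'));
		const timer = setTimeout(resolve, ms);
		signal?.addEventListener('abort', () => {
			clearTimeout(timer);
			reject(new Error('aborted'));
		});
	});
}

async function callWithRetries(
	node: NodeRetryOptions,
	runOnce: () => Promise<string>,
	signal?: AbortSignal,
): Promise<string> {
	// Same clamps as the diff: 2-5 tries when retryOnFail is set, otherwise a single try
	const maxTries = node.retryOnFail ? Math.min(5, Math.max(2, node.maxTries ?? 3)) : 1;
	const waitBetweenTries = node.retryOnFail
		? Math.min(5000, Math.max(0, node.waitBetweenTries ?? 1000))
		: 0;
	let lastError: Error | undefined;
	for (let tryIndex = 0; tryIndex < maxTries; tryIndex++) {
		if (signal?.aborted) return 'There was an error: "Execution was cancelled"';
		if (tryIndex !== 0 && waitBetweenTries !== 0) {
			try {
				await sleep(waitBetweenTries, signal);
			} catch {
				return 'There was an error: "Execution was cancelled"';
			}
		}
		try {
			return await runOnce();
		} catch (error) {
			lastError = error as Error;
			if (tryIndex === maxTries - 1) return `There was an error: "${lastError.message}"`;
		}
	}
	return `There was an error: ${lastError?.message ?? 'Unknown error'}`;
}
```

Returning error strings instead of throwing keeps failures inside the tool's contract, so the calling agent sees a readable message and can decide whether to retry with a different query.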