@n8n/n8n-nodes-langchain 2.0.0 → 2.1.0

This diff covers the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
Files changed (69)
  1. package/dist/credentials/McpOAuth2Api.credentials.js +1 -2
  2. package/dist/credentials/McpOAuth2Api.credentials.js.map +1 -1
  3. package/dist/nodes/agents/Agent/Agent.node.js +4 -2
  4. package/dist/nodes/agents/Agent/Agent.node.js.map +1 -1
  5. package/dist/nodes/agents/Agent/AgentTool.node.js +4 -2
  6. package/dist/nodes/agents/Agent/AgentTool.node.js.map +1 -1
  7. package/dist/nodes/agents/Agent/V1/AgentV1.node.js +1 -1
  8. package/dist/nodes/agents/Agent/V1/AgentV1.node.js.map +1 -1
  9. package/dist/nodes/agents/Agent/V2/AgentV2.node.js +1 -1
  10. package/dist/nodes/agents/Agent/V2/AgentV2.node.js.map +1 -1
  11. package/dist/nodes/agents/Agent/V3/AgentToolV3.node.js +99 -0
  12. package/dist/nodes/agents/Agent/V3/AgentToolV3.node.js.map +1 -0
  13. package/dist/nodes/agents/Agent/V3/AgentV3.node.js +9 -2
  14. package/dist/nodes/agents/Agent/V3/AgentV3.node.js.map +1 -1
  15. package/dist/nodes/agents/Agent/agents/SqlAgent/description.js +1 -1
  16. package/dist/nodes/agents/Agent/agents/SqlAgent/description.js.map +1 -1
  17. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/execute.js +1 -3
  18. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/execute.js.map +1 -1
  19. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/executeBatch.js +4 -0
  20. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/executeBatch.js.map +1 -1
  21. package/dist/nodes/chains/ChainLLM/ChainLlm.node.js +1 -1
  22. package/dist/nodes/chains/ChainLLM/ChainLlm.node.js.map +1 -1
  23. package/dist/nodes/chains/ChainLLM/methods/config.js +10 -2
  24. package/dist/nodes/chains/ChainLLM/methods/config.js.map +1 -1
  25. package/dist/nodes/chains/ChainRetrievalQA/ChainRetrievalQa.node.js +11 -3
  26. package/dist/nodes/chains/ChainRetrievalQA/ChainRetrievalQa.node.js.map +1 -1
  27. package/dist/nodes/tools/ToolSerpApi/ToolSerpApi.node.js +9 -3
  28. package/dist/nodes/tools/ToolSerpApi/ToolSerpApi.node.js.map +1 -1
  29. package/dist/nodes/tools/ToolWorkflow/v2/utils/WorkflowToolService.js +3 -0
  30. package/dist/nodes/tools/ToolWorkflow/v2/utils/WorkflowToolService.js.map +1 -1
  31. package/dist/nodes/trigger/ChatTrigger/ChatTrigger.node.js +66 -15
  32. package/dist/nodes/trigger/ChatTrigger/ChatTrigger.node.js.map +1 -1
  33. package/dist/nodes/trigger/ChatTrigger/templates.js +1 -2
  34. package/dist/nodes/trigger/ChatTrigger/templates.js.map +1 -1
  35. package/dist/nodes/vendors/GoogleGemini/actions/file/index.js +2 -2
  36. package/dist/nodes/vendors/GoogleGemini/actions/file/index.js.map +1 -1
  37. package/dist/nodes/vendors/GoogleGemini/actions/fileSearch/createStore.operation.js +64 -0
  38. package/dist/nodes/vendors/GoogleGemini/actions/fileSearch/createStore.operation.js.map +1 -0
  39. package/dist/nodes/vendors/GoogleGemini/actions/fileSearch/deleteStore.operation.js +72 -0
  40. package/dist/nodes/vendors/GoogleGemini/actions/fileSearch/deleteStore.operation.js.map +1 -0
  41. package/dist/nodes/vendors/GoogleGemini/actions/fileSearch/index.js +94 -0
  42. package/dist/nodes/vendors/GoogleGemini/actions/fileSearch/index.js.map +1 -0
  43. package/dist/nodes/vendors/GoogleGemini/actions/fileSearch/listStores.operation.js +75 -0
  44. package/dist/nodes/vendors/GoogleGemini/actions/fileSearch/listStores.operation.js.map +1 -0
  45. package/dist/nodes/vendors/GoogleGemini/actions/fileSearch/uploadToStore.operation.js +129 -0
  46. package/dist/nodes/vendors/GoogleGemini/actions/fileSearch/uploadToStore.operation.js.map +1 -0
  47. package/dist/nodes/vendors/GoogleGemini/actions/node.type.js.map +1 -1
  48. package/dist/nodes/vendors/GoogleGemini/actions/router.js +4 -0
  49. package/dist/nodes/vendors/GoogleGemini/actions/router.js.map +1 -1
  50. package/dist/nodes/vendors/GoogleGemini/actions/text/message.operation.js +184 -11
  51. package/dist/nodes/vendors/GoogleGemini/actions/text/message.operation.js.map +1 -1
  52. package/dist/nodes/vendors/GoogleGemini/actions/versionDescription.js +10 -3
  53. package/dist/nodes/vendors/GoogleGemini/actions/versionDescription.js.map +1 -1
  54. package/dist/nodes/vendors/GoogleGemini/helpers/interfaces.js.map +1 -1
  55. package/dist/nodes/vendors/GoogleGemini/helpers/utils.js +131 -26
  56. package/dist/nodes/vendors/GoogleGemini/helpers/utils.js.map +1 -1
  57. package/dist/nodes/vendors/OpenAi/v1/actions/assistant/message.operation.js +1 -1
  58. package/dist/nodes/vendors/OpenAi/v1/actions/assistant/message.operation.js.map +1 -1
  59. package/dist/types/credentials.json +1 -1
  60. package/dist/types/nodes.json +9 -8
  61. package/dist/utils/agent-execution/buildSteps.js +67 -8
  62. package/dist/utils/agent-execution/buildSteps.js.map +1 -1
  63. package/dist/utils/agent-execution/createEngineRequests.js +27 -5
  64. package/dist/utils/agent-execution/createEngineRequests.js.map +1 -1
  65. package/dist/utils/agent-execution/types.js +24 -0
  66. package/dist/utils/agent-execution/types.js.map +1 -1
  67. package/dist/utils/descriptions.js +21 -1
  68. package/dist/utils/descriptions.js.map +1 -1
  69. package/package.json +8 -8
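
The new fileSearch files listed above (items 37–46) implement the "File Search" resource that the nodes.json diff below adds to the Google Gemini node (version 1.1). As a rough, hypothetical sketch only — not the package's actual code — the following TypeScript shows how the createStore, listStores, and deleteStore operations could map onto the Gemini API's fileSearchStores endpoints. The endpoint paths and the geminiRequest helper are assumptions; only the parameter names (displayName, fileSearchStoreName, force, pageSize, pageToken) and the API host come from the node description in this diff. The multipart uploadToStore operation is omitted.

```typescript
// Hypothetical sketch of three of the File Search store operations added in 2.1.0.
// Endpoint paths are assumptions inferred from the node description below
// (fileSearchStores/... resource names, force, pageSize, pageToken); the
// published operation files may call the API differently.
const BASE = 'https://generativelanguage.googleapis.com/v1beta';

async function geminiRequest(apiKey: string, path: string, method = 'GET', body?: object) {
	const res = await fetch(`${BASE}${path}`, {
		method,
		headers: { 'x-goog-api-key': apiKey, 'Content-Type': 'application/json' },
		body: body ? JSON.stringify(body) : undefined,
	});
	if (!res.ok) throw new Error(`Gemini API error ${res.status}: ${await res.text()}`);
	return res.json();
}

// createStore: the node's required "Display Name" parameter becomes the store's displayName
export const createStore = (apiKey: string, displayName: string) =>
	geminiRequest(apiKey, '/fileSearchStores', 'POST', { displayName });

// listStores: "Page Size" (1-20, default 10) and optional "Page Token" map to query parameters
export const listStores = (apiKey: string, pageSize = 10, pageToken?: string) =>
	geminiRequest(
		apiKey,
		`/fileSearchStores?pageSize=${pageSize}` +
			(pageToken ? `&pageToken=${encodeURIComponent(pageToken)}` : ''),
	);

// deleteStore: "File Search Store Name" is the full resource name (fileSearchStores/...);
// "Force Delete" also removes contained Documents, otherwise deletion fails if any exist
export const deleteStore = (apiKey: string, fileSearchStoreName: string, force = false) =>
	geminiRequest(apiKey, `/${fileSearchStoreName}?force=${force}`, 'DELETE');
```

In the published operation files these requests presumably go through n8n's credential-aware request helpers rather than raw fetch.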
@@ -1,18 +1,19 @@
  [
  {"displayName":"Anthropic","name":"anthropic","group":["transform"],"version":1,"subtitle":"={{ $parameter[\"operation\"] + \": \" + $parameter[\"resource\"] }}","description":"Interact with Anthropic AI models","defaults":{"name":"Anthropic"},"usableAsTool":true,"codex":{"alias":["LangChain","document","image","assistant","claude"],"categories":["AI"],"subcategories":{"AI":["Agents","Miscellaneous","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/app-nodes/n8n-nodes-langchain.anthropic/"}]}},"inputs":"={{\n\t\t(() => {\n\t\t\tconst resource = $parameter.resource;\n\t \tconst operation = $parameter.operation;\n\t\t\tif (resource === 'text' && operation === 'message') {\n\t\t\t\treturn [{ type: 'main' }, { type: 'ai_tool', displayName: 'Tools' }];\n\t\t\t}\n\n\t\t\treturn ['main'];\n\t\t})()\n\t}}","outputs":["main"],"credentials":[{"name":"anthropicApi","required":true}],"properties":[{"displayName":"Resource","name":"resource","type":"options","noDataExpression":true,"options":[{"name":"Document","value":"document"},{"name":"File","value":"file"},{"name":"Image","value":"image"},{"name":"Prompt","value":"prompt"},{"name":"Text","value":"text"}],"default":"text"},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Analyze Document","value":"analyze","action":"Analyze document","description":"Take in documents and answer questions about them"}],"default":"analyze","displayOptions":{"show":{"resource":["document"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"modelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. claude-sonnet-4-5-20250929"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["document"]}}},{"displayName":"Text Input","name":"text","type":"string","placeholder":"e.g. What's in this document?","default":"What's in this document?","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["analyze"],"resource":["document"]}}},{"displayName":"Input Type","name":"inputType","type":"options","default":"url","options":[{"name":"Document URL(s)","value":"url"},{"name":"Binary File(s)","value":"binary"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["document"]}}},{"displayName":"URL(s)","name":"documentUrls","type":"string","placeholder":"e.g. https://example.com/document.pdf","description":"URL(s) of the document(s) to analyze, multiple URLs can be added separated by comma","default":"","displayOptions":{"show":{"inputType":["url"],"operation":["analyze"],"resource":["document"]}}},{"displayName":"Input Data Field Name(s)","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. 
data","hint":"The name of the input field containing the binary file data to be processed","description":"Name of the binary field(s) which contains the document(s), seperate multiple field names with commas","displayOptions":{"show":{"inputType":["binary"],"operation":["analyze"],"resource":["document"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":true,"description":"Whether to simplify the response or not","displayOptions":{"show":{"operation":["analyze"],"resource":["document"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Length of Description (Max Tokens)","description":"Fewer tokens will result in shorter, less detailed image description","name":"maxTokens","type":"number","default":1024,"typeOptions":{"minValue":1}}],"displayOptions":{"show":{"operation":["analyze"],"resource":["document"]}}},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Upload File","value":"upload","action":"Upload a file","description":"Upload a file to the Anthropic API for later use"},{"name":"Get File Metadata","value":"get","action":"Get file metadata","description":"Get metadata for a file from the Anthropic API"},{"name":"List Files","value":"list","action":"List files","description":"List files from the Anthropic API"},{"name":"Delete File","value":"deleteFile","action":"Delete a file","description":"Delete a file from the Anthropic API"}],"default":"upload","displayOptions":{"show":{"resource":["file"]}}},{"displayName":"File ID","name":"fileId","type":"string","placeholder":"e.g. file_123","description":"ID of the file to delete","default":"","displayOptions":{"show":{"operation":["deleteFile"],"resource":["file"]}}},{"displayName":"File ID","name":"fileId","type":"string","placeholder":"e.g. file_123","description":"ID of the file to get metadata for","default":"","displayOptions":{"show":{"operation":["get"],"resource":["file"]}}},{"displayName":"Return All","name":"returnAll","type":"boolean","default":false,"description":"Whether to return all results or only up to a given limit","displayOptions":{"show":{"operation":["list"],"resource":["file"]}}},{"displayName":"Limit","name":"limit","type":"number","typeOptions":{"minValue":1,"maxValue":1000},"default":50,"description":"Max number of results to return","displayOptions":{"show":{"returnAll":[false],"operation":["list"],"resource":["file"]}}},{"displayName":"Input Type","name":"inputType","type":"options","default":"url","options":[{"name":"File URL","value":"url"},{"name":"Binary File","value":"binary"}],"displayOptions":{"show":{"operation":["upload"],"resource":["file"]}}},{"displayName":"URL","name":"fileUrl","type":"string","placeholder":"e.g. https://example.com/file.pdf","description":"URL of the file to upload","default":"","displayOptions":{"show":{"inputType":["url"],"operation":["upload"],"resource":["file"]}}},{"displayName":"Input Data Field Name","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. 
data","hint":"The name of the input field containing the binary file data to be processed","description":"Name of the binary field which contains the file","displayOptions":{"show":{"inputType":["binary"],"operation":["upload"],"resource":["file"]}}},{"displayName":"Options","name":"options","type":"collection","placeholder":"Add Option","default":{},"options":[{"displayName":"File Name","name":"fileName","type":"string","description":"The file name to use for the uploaded file","default":""}],"displayOptions":{"show":{"operation":["upload"],"resource":["file"]}}},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Analyze Image","value":"analyze","action":"Analyze image","description":"Take in images and answer questions about them"}],"default":"analyze","displayOptions":{"show":{"resource":["image"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"modelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. claude-sonnet-4-5-20250929"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"Text Input","name":"text","type":"string","placeholder":"e.g. What's in this image?","default":"What's in this image?","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"Input Type","name":"inputType","type":"options","default":"url","options":[{"name":"Image URL(s)","value":"url"},{"name":"Binary File(s)","value":"binary"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"URL(s)","name":"imageUrls","type":"string","placeholder":"e.g. https://example.com/image.png","description":"URL(s) of the image(s) to analyze, multiple URLs can be added separated by comma","default":"","displayOptions":{"show":{"inputType":["url"],"operation":["analyze"],"resource":["image"]}}},{"displayName":"Input Data Field Name(s)","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. 
data","hint":"The name of the input field containing the binary file data to be processed","description":"Name of the binary field(s) which contains the image(s), seperate multiple field names with commas","displayOptions":{"show":{"inputType":["binary"],"operation":["analyze"],"resource":["image"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":true,"description":"Whether to simplify the response or not","displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Length of Description (Max Tokens)","description":"Fewer tokens will result in shorter, less detailed image description","name":"maxTokens","type":"number","default":1024,"typeOptions":{"minValue":1}}],"displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Generate Prompt","value":"generate","action":"Generate a prompt","description":"Generate a prompt for a model"},{"name":"Improve Prompt","value":"improve","action":"Improve a prompt","description":"Improve a prompt for a model"},{"name":"Templatize Prompt","value":"templatize","action":"Templatize a prompt","description":"Templatize a prompt for a model"}],"default":"generate","displayOptions":{"show":{"resource":["prompt"]}}},{"displayName":"The <a href=\"https://docs.anthropic.com/en/api/prompt-tools-generate\">prompt tools APIs</a> are in a closed research preview. Your organization must request access to use them.","name":"experimentalNotice","type":"notice","default":"","displayOptions":{"show":{"resource":["prompt"]}}},{"displayName":"Task","name":"task","type":"string","description":"Description of the prompt's purpose","placeholder":"e.g. A chef for a meal prep planning service","default":"","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["generate"],"resource":["prompt"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":true,"description":"Whether to return a simplified version of the response instead of the raw data","displayOptions":{"show":{"operation":["generate"],"resource":["prompt"]}}},{"displayName":"Messages","name":"messages","type":"fixedCollection","typeOptions":{"sortable":true,"multipleValues":true},"description":"Messages that constitute the prompt to be improved","placeholder":"Add Message","default":{"values":[{"content":"","role":"user"}]},"options":[{"displayName":"Values","name":"values","values":[{"displayName":"Prompt","name":"content","type":"string","description":"The content of the message to be sent","default":"","placeholder":"e.g. 
Concise instructions for a meal prep service","typeOptions":{"rows":2}},{"displayName":"Role","name":"role","type":"options","description":"Role in shaping the model's response, it tells the model how it should behave and interact with the user","options":[{"name":"User","value":"user","description":"Send a message as a user and get a response from the model"},{"name":"Assistant","value":"assistant","description":"Tell the model to adopt a specific tone or personality"}],"default":"user"}]}],"displayOptions":{"show":{"operation":["improve"],"resource":["prompt"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":true,"description":"Whether to return a simplified version of the response instead of the raw data","displayOptions":{"show":{"operation":["improve"],"resource":["prompt"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"System Message","name":"system","type":"string","description":"The existing system prompt to incorporate, if any","default":"","placeholder":"e.g. You are a professional meal prep chef"},{"displayName":"Feedback","name":"feedback","type":"string","description":"Feedback for improving the prompt","default":"","placeholder":"e.g. Make it more detailed and include cooking times"}],"displayOptions":{"show":{"operation":["improve"],"resource":["prompt"]}}},{"displayName":"Messages","name":"messages","type":"fixedCollection","typeOptions":{"sortable":true,"multipleValues":true},"description":"Messages that constitute the prompt to be templatized","placeholder":"Add Message","default":{"values":[{"content":"","role":"user"}]},"options":[{"displayName":"Values","name":"values","values":[{"displayName":"Prompt","name":"content","type":"string","description":"The content of the message to be sent","default":"","placeholder":"e.g. Translate hello to German","typeOptions":{"rows":2}},{"displayName":"Role","name":"role","type":"options","description":"Role in shaping the model's response, it tells the model how it should behave and interact with the user","options":[{"name":"User","value":"user","description":"Send a message as a user and get a response from the model"},{"name":"Assistant","value":"assistant","description":"Tell the model to adopt a specific tone or personality"}],"default":"user"}]}],"displayOptions":{"show":{"operation":["templatize"],"resource":["prompt"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":true,"description":"Whether to return a simplified version of the response instead of the raw data","displayOptions":{"show":{"operation":["templatize"],"resource":["prompt"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"System Message","name":"system","type":"string","description":"The existing system prompt to templatize","default":"","placeholder":"e.g. 
You are a professional English to German translator"}],"displayOptions":{"show":{"operation":["templatize"],"resource":["prompt"]}}},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Message a Model","value":"message","action":"Message a model","description":"Create a completion with Anthropic model"}],"default":"message","displayOptions":{"show":{"resource":["text"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"modelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. claude-sonnet-4-5-20250929"}],"displayOptions":{"show":{"operation":["message"],"resource":["text"]}}},{"displayName":"Messages","name":"messages","type":"fixedCollection","typeOptions":{"sortable":true,"multipleValues":true},"placeholder":"Add Message","default":{"values":[{"content":"","role":"user"}]},"options":[{"displayName":"Values","name":"values","values":[{"displayName":"Prompt","name":"content","type":"string","description":"The content of the message to be sent","default":"","placeholder":"e.g. Hello, how can you help me?","typeOptions":{"rows":2}},{"displayName":"Role","name":"role","type":"options","description":"Role in shaping the model's response, it tells the model how it should behave and interact with the user","options":[{"name":"User","value":"user","description":"Send a message as a user and get a response from the model"},{"name":"Assistant","value":"assistant","description":"Tell the model to adopt a specific tone or personality"}],"default":"user"}]}],"displayOptions":{"show":{"operation":["message"],"resource":["text"]}}},{"displayName":"Add Attachments","name":"addAttachments","type":"boolean","default":false,"description":"Whether to add attachments to the message","displayOptions":{"show":{"operation":["message"],"resource":["text"]}}},{"displayName":"Attachments Input Type","name":"attachmentsInputType","type":"options","default":"url","description":"The type of input to use for the attachments","options":[{"name":"URL(s)","value":"url"},{"name":"Binary File(s)","value":"binary"}],"displayOptions":{"show":{"addAttachments":[true],"operation":["message"],"resource":["text"]}}},{"displayName":"Attachment URL(s)","name":"attachmentsUrls","type":"string","default":"","placeholder":"e.g. https://example.com/image.png","description":"URL(s) of the file(s) to attach, multiple URLs can be added separated by comma","displayOptions":{"show":{"addAttachments":[true],"attachmentsInputType":["url"],"operation":["message"],"resource":["text"]}}},{"displayName":"Attachment Input Data Field Name(s)","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. 
data","description":"Name of the binary field(s) which contains the file(s) to attach, multiple field names can be added separated by comma","displayOptions":{"show":{"addAttachments":[true],"attachmentsInputType":["binary"],"operation":["message"],"resource":["text"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":true,"description":"Whether to return a simplified version of the response instead of the raw data","displayOptions":{"show":{"operation":["message"],"resource":["text"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Include Merged Response","name":"includeMergedResponse","type":"boolean","default":false,"description":"Whether to include a single output string merging all text parts of the response"},{"displayName":"System Message","name":"system","type":"string","default":"","placeholder":"e.g. You are a helpful assistant"},{"displayName":"Code Execution","name":"codeExecution","type":"boolean","default":false,"description":"Whether to enable code execution. Not supported by all models."},{"displayName":"Web Search","name":"webSearch","type":"boolean","default":false,"description":"Whether to enable web search"},{"displayName":"Web Search Max Uses","name":"maxUses","type":"number","default":5,"description":"The maximum number of web search uses per request","typeOptions":{"minValue":0,"numberPrecision":0}},{"displayName":"Web Search Allowed Domains","name":"allowedDomains","type":"string","default":"","description":"Comma-separated list of domains to search. Only domains in this list will be searched. Conflicts with \"Web Search Blocked Domains\".","placeholder":"e.g. google.com, wikipedia.org"},{"displayName":"Web Search Blocked Domains","name":"blockedDomains","type":"string","default":"","description":"Comma-separated list of domains to block from search. Conflicts with \"Web Search Allowed Domains\".","placeholder":"e.g. google.com, wikipedia.org"},{"displayName":"Maximum Number of Tokens","name":"maxTokens","default":1024,"description":"The maximum number of tokens to generate in the completion","type":"number","typeOptions":{"minValue":1,"numberPrecision":0}},{"displayName":"Output Randomness (Temperature)","name":"temperature","default":1,"description":"Controls the randomness of the output. Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive","type":"number","typeOptions":{"minValue":0,"maxValue":1,"numberPrecision":1}},{"displayName":"Output Randomness (Top P)","name":"topP","default":0.7,"description":"The maximum cumulative probability of tokens to consider when sampling","type":"number","typeOptions":{"minValue":0,"maxValue":1,"numberPrecision":1}},{"displayName":"Output Randomness (Top K)","name":"topK","default":5,"description":"The maximum number of tokens to consider when sampling","type":"number","typeOptions":{"minValue":0,"numberPrecision":0}},{"displayName":"Max Tool Calls Iterations","name":"maxToolsIterations","type":"number","default":15,"description":"The maximum number of tool iteration cycles the LLM will run before stopping. A single iteration can contain multiple tool calls. Set to 0 for no limit","typeOptions":{"minValue":0,"numberPrecision":0}}],"displayOptions":{"show":{"operation":["message"],"resource":["text"]}}}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/vendors/Anthropic/anthropic.svg"},
- {"displayName":"Google Gemini","name":"googleGemini","group":["transform"],"version":1,"subtitle":"={{ $parameter[\"operation\"] + \": \" + $parameter[\"resource\"] }}","description":"Interact with Google Gemini AI models","defaults":{"name":"Google Gemini"},"usableAsTool":true,"codex":{"alias":["LangChain","video","document","audio","transcribe","assistant"],"categories":["AI"],"subcategories":{"AI":["Agents","Miscellaneous","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/app-nodes/n8n-nodes-langchain.googlegemini/"}]}},"inputs":"={{\n\t\t(() => {\n\t\t\tconst resource = $parameter.resource;\n\t \tconst operation = $parameter.operation;\n\t\t\tif (resource === 'text' && operation === 'message') {\n\t\t\t\treturn [{ type: 'main' }, { type: 'ai_tool', displayName: 'Tools' }];\n\t\t\t}\n\n\t\t\treturn ['main'];\n\t\t})()\n\t}}","outputs":["main"],"credentials":[{"name":"googlePalmApi","required":true}],"properties":[{"displayName":"Resource","name":"resource","type":"options","noDataExpression":true,"options":[{"name":"Audio","value":"audio"},{"name":"Document","value":"document"},{"name":"File","value":"file"},{"name":"Image","value":"image"},{"name":"Text","value":"text"},{"name":"Video","value":"video"}],"default":"text"},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Analyze Audio","value":"analyze","action":"Analyze audio","description":"Take in audio and answer questions about it"},{"name":"Transcribe a Recording","value":"transcribe","action":"Transcribe a recording","description":"Transcribes audio into the text"}],"default":"transcribe","displayOptions":{"show":{"resource":["audio"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"audioModelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. models/gemini-2.5-flash"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["audio"]}}},{"displayName":"Text Input","name":"text","type":"string","placeholder":"e.g. What's in this audio?","default":"What's in this audio?","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["analyze"],"resource":["audio"]}}},{"displayName":"Input Type","name":"inputType","type":"options","default":"url","options":[{"name":"Audio URL(s)","value":"url"},{"name":"Binary File(s)","value":"binary"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["audio"]}}},{"displayName":"URL(s)","name":"audioUrls","type":"string","placeholder":"e.g. https://example.com/audio.mp3","description":"URL(s) of the audio(s) to analyze, multiple URLs can be added separated by comma","default":"","displayOptions":{"show":{"inputType":["url"],"operation":["analyze"],"resource":["audio"]}}},{"displayName":"Input Data Field Name(s)","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. 
data","hint":"The name of the input field containing the binary file data to be processed","description":"Name of the binary field(s) which contains the audio(s), seperate multiple field names with commas","displayOptions":{"show":{"inputType":["binary"],"operation":["analyze"],"resource":["audio"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":true,"description":"Whether to simplify the response or not","displayOptions":{"show":{"operation":["analyze"],"resource":["audio"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Length of Description (Max Tokens)","description":"Fewer tokens will result in shorter, less detailed audio description","name":"maxOutputTokens","type":"number","default":300,"typeOptions":{"minValue":1}}],"displayOptions":{"show":{"operation":["analyze"],"resource":["audio"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"audioModelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. models/gemini-2.5-flash"}],"displayOptions":{"show":{"operation":["transcribe"],"resource":["audio"]}}},{"displayName":"Input Type","name":"inputType","type":"options","default":"url","options":[{"name":"Audio URL(s)","value":"url"},{"name":"Binary File(s)","value":"binary"}],"displayOptions":{"show":{"operation":["transcribe"],"resource":["audio"]}}},{"displayName":"URL(s)","name":"audioUrls","type":"string","placeholder":"e.g. https://example.com/audio.mp3","description":"URL(s) of the audio(s) to transcribe, multiple URLs can be added separated by comma","default":"","displayOptions":{"show":{"inputType":["url"],"operation":["transcribe"],"resource":["audio"]}}},{"displayName":"Input Data Field Name(s)","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. data","hint":"The name of the input field containing the binary file data to be processed","description":"Name of the binary field(s) which contains the audio(s), seperate multiple field names with commas","displayOptions":{"show":{"inputType":["binary"],"operation":["transcribe"],"resource":["audio"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":true,"description":"Whether to simplify the response or not","displayOptions":{"show":{"operation":["transcribe"],"resource":["audio"]}}},{"displayName":"Options","name":"options","type":"collection","default":{},"options":[{"displayName":"Start Time","name":"startTime","type":"string","default":"","description":"The start time of the audio in MM:SS or HH:MM:SS format","placeholder":"e.g. 00:15"},{"displayName":"End Time","name":"endTime","type":"string","default":"","description":"The end time of the audio in MM:SS or HH:MM:SS format","placeholder":"e.g. 
02:15"}],"displayOptions":{"show":{"operation":["transcribe"],"resource":["audio"]}}},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Analyze Document","value":"analyze","action":"Analyze document","description":"Take in documents and answer questions about them"}],"default":"analyze","displayOptions":{"show":{"resource":["document"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"modelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. models/gemini-2.5-flash"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["document"]}}},{"displayName":"Text Input","name":"text","type":"string","placeholder":"e.g. What's in this document?","default":"What's in this document?","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["analyze"],"resource":["document"]}}},{"displayName":"Input Type","name":"inputType","type":"options","default":"url","options":[{"name":"Document URL(s)","value":"url"},{"name":"Binary File(s)","value":"binary"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["document"]}}},{"displayName":"URL(s)","name":"documentUrls","type":"string","placeholder":"e.g. https://example.com/document.pdf","description":"URL(s) of the document(s) to analyze, multiple URLs can be added separated by comma","default":"","displayOptions":{"show":{"inputType":["url"],"operation":["analyze"],"resource":["document"]}}},{"displayName":"Input Data Field Name(s)","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. data","hint":"The name of the input field containing the binary file data to be processed","description":"Name of the binary field(s) which contains the document(s), seperate multiple field names with commas","displayOptions":{"show":{"inputType":["binary"],"operation":["analyze"],"resource":["document"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":true,"description":"Whether to simplify the response or not","displayOptions":{"show":{"operation":["analyze"],"resource":["document"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Length of Description (Max Tokens)","description":"Fewer tokens will result in shorter, less detailed document description","name":"maxOutputTokens","type":"number","default":300,"typeOptions":{"minValue":1}}],"displayOptions":{"show":{"operation":["analyze"],"resource":["document"]}}},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Upload File","value":"upload","action":"Upload a file","description":"Upload a file to the Google Gemini API for later use"}],"default":"upload","displayOptions":{"show":{"resource":["file"]}}},{"displayName":"Input Type","name":"inputType","type":"options","default":"url","options":[{"name":"File URL","value":"url"},{"name":"Binary File","value":"binary"}],"displayOptions":{"show":{"operation":["upload"],"resource":["file"]}}},{"displayName":"URL","name":"fileUrl","type":"string","placeholder":"e.g. 
https://example.com/file.pdf","description":"URL of the file to upload","default":"","displayOptions":{"show":{"inputType":["url"],"operation":["upload"],"resource":["file"]}}},{"displayName":"Input Data Field Name","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. data","hint":"The name of the input field containing the binary file data to be processed","description":"Name of the binary property which contains the file","displayOptions":{"show":{"inputType":["binary"],"operation":["upload"],"resource":["file"]}}},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Analyze Image","value":"analyze","action":"Analyze an image","description":"Take in images and answer questions about them"},{"name":"Generate an Image","value":"generate","action":"Generate an image","description":"Creates an image from a text prompt"},{"name":"Edit Image","value":"edit","action":"Edit an image","description":"Upload one or more images and apply edits based on a prompt"}],"default":"generate","displayOptions":{"show":{"resource":["image"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"modelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. models/gemini-2.5-flash"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"Text Input","name":"text","type":"string","placeholder":"e.g. What's in this image?","default":"What's in this image?","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"Input Type","name":"inputType","type":"options","default":"url","options":[{"name":"Image URL(s)","value":"url"},{"name":"Binary File(s)","value":"binary"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"URL(s)","name":"imageUrls","type":"string","placeholder":"e.g. https://example.com/image.png","description":"URL(s) of the image(s) to analyze, multiple URLs can be added separated by comma","default":"","displayOptions":{"show":{"inputType":["url"],"operation":["analyze"],"resource":["image"]}}},{"displayName":"Input Data Field Name(s)","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. 
data","hint":"The name of the input field containing the binary file data to be processed","description":"Name of the binary field(s) which contains the image(s), separate multiple field names with commas","displayOptions":{"show":{"inputType":["binary"],"operation":["analyze"],"resource":["image"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":true,"description":"Whether to simplify the response or not","displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Length of Description (Max Tokens)","description":"Fewer tokens will result in shorter, less detailed image description","name":"maxOutputTokens","type":"number","default":300,"typeOptions":{"minValue":1}}],"displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"imageEditModelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. models/gemini-2.5-flash"}],"displayOptions":{"show":{"operation":["edit"],"resource":["image"]}}},{"displayName":"Prompt","name":"prompt","type":"string","placeholder":"e.g. combine the first image with the second image","description":"Instruction describing how to edit the image","default":"","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["edit"],"resource":["image"]}}},{"displayName":"Images","name":"images","type":"fixedCollection","placeholder":"Add Image","typeOptions":{"multipleValues":true,"multipleValueButtonText":"Add Image"},"default":{"values":[{"binaryPropertyName":"data"}]},"description":"Add one or more binary fields to include images with your prompt","options":[{"displayName":"Image","name":"values","values":[{"displayName":"Binary Field Name","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. data","description":"The name of the binary field containing the image data"}]}],"displayOptions":{"show":{"operation":["edit"],"resource":["image"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Put Output in Field","name":"binaryPropertyOutput","type":"string","default":"edited","hint":"The name of the output field to put the binary file data in"}],"displayOptions":{"show":{"operation":["edit"],"resource":["image"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"imageGenerationModelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. models/gemini-2.5-flash"}],"displayOptions":{"show":{"operation":["generate"],"resource":["image"]}}},{"displayName":"Prompt","name":"prompt","type":"string","placeholder":"e.g. 
A cute cat eating a dinosaur","description":"A text description of the desired image(s)","default":"","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["generate"],"resource":["image"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Number of Images","name":"sampleCount","default":1,"description":"Number of images to generate. Not supported by Gemini models, supported by Imagen models.","type":"number","typeOptions":{"minValue":1}},{"displayName":"Put Output in Field","name":"binaryPropertyOutput","type":"string","default":"data","hint":"The name of the output field to put the binary file data in"}],"displayOptions":{"show":{"operation":["generate"],"resource":["image"]}}},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Message a Model","value":"message","action":"Message a model","description":"Create a completion with Google Gemini model"}],"default":"message","displayOptions":{"show":{"resource":["text"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"modelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. models/gemini-2.5-flash"}],"displayOptions":{"show":{"operation":["message"],"resource":["text"]}}},{"displayName":"Messages","name":"messages","type":"fixedCollection","typeOptions":{"sortable":true,"multipleValues":true},"placeholder":"Add Message","default":{"values":[{"content":""}]},"options":[{"displayName":"Values","name":"values","values":[{"displayName":"Prompt","name":"content","type":"string","description":"The content of the message to be send","default":"","placeholder":"e.g. Hello, how can you help me?","typeOptions":{"rows":2}},{"displayName":"Role","name":"role","type":"options","description":"Role in shaping the model's response, it tells the model how it should behave and interact with the user","options":[{"name":"User","value":"user","description":"Send a message as a user and get a response from the model"},{"name":"Model","value":"model","description":"Tell the model to adopt a specific tone or personality"}],"default":"user"}]}],"displayOptions":{"show":{"operation":["message"],"resource":["text"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":true,"description":"Whether to return a simplified version of the response instead of the raw data","displayOptions":{"show":{"operation":["message"],"resource":["text"]}}},{"displayName":"Output Content as JSON","name":"jsonOutput","type":"boolean","description":"Whether to attempt to return the response in JSON format","default":false,"displayOptions":{"show":{"operation":["message"],"resource":["text"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"System Message","name":"systemMessage","type":"string","default":"","placeholder":"e.g. You are a helpful assistant"},{"displayName":"Code Execution","name":"codeExecution","type":"boolean","default":false,"description":"Whether to allow the model to execute code it generates to produce a response. 
Supported only by certain models."},{"displayName":"Frequency Penalty","name":"frequencyPenalty","default":0,"description":"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim","type":"number","typeOptions":{"minValue":-2,"maxValue":2,"numberPrecision":1}},{"displayName":"Maximum Number of Tokens","name":"maxOutputTokens","default":16,"description":"The maximum number of tokens to generate in the completion","type":"number","typeOptions":{"minValue":1,"numberPrecision":0}},{"displayName":"Number of Completions","name":"candidateCount","default":1,"description":"How many completions to generate for each prompt","type":"number","typeOptions":{"minValue":1,"maxValue":8,"numberPrecision":0}},{"displayName":"Presence Penalty","name":"presencePenalty","default":0,"description":"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics","type":"number","typeOptions":{"minValue":-2,"maxValue":2,"numberPrecision":1}},{"displayName":"Output Randomness (Temperature)","name":"temperature","default":1,"description":"Controls the randomness of the output. Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive","type":"number","typeOptions":{"minValue":0,"maxValue":2,"numberPrecision":1}},{"displayName":"Output Randomness (Top P)","name":"topP","default":1,"description":"The maximum cumulative probability of tokens to consider when sampling","type":"number","typeOptions":{"minValue":0,"maxValue":1,"numberPrecision":1}},{"displayName":"Output Randomness (Top K)","name":"topK","default":1,"description":"The maximum number of tokens to consider when sampling","type":"number","typeOptions":{"minValue":1,"numberPrecision":0}},{"displayName":"Thinking Budget","name":"thinkingBudget","type":"number","description":"Controls reasoning tokens for thinking models. Set to 0 to disable automatic thinking. Set to -1 for dynamic thinking. Leave empty for auto mode.","typeOptions":{"minValue":-1,"numberPrecision":0}},{"displayName":"Max Tool Calls Iterations","name":"maxToolsIterations","type":"number","default":15,"description":"The maximum number of tool iteration cycles the LLM will run before stopping. A single iteration can contain multiple tool calls. Set to 0 for no limit","typeOptions":{"minValue":0,"numberPrecision":0}}],"displayOptions":{"show":{"operation":["message"],"resource":["text"]}}},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Analyze Video","value":"analyze","action":"Analyze video","description":"Take in videos and answer questions about them"},{"name":"Generate a Video","value":"generate","action":"Generate a video","description":"Creates a video from a text prompt"},{"name":"Download Video","value":"download","action":"Download a video","description":"Download a generated video from the Google Gemini API using a URL"}],"default":"generate","displayOptions":{"show":{"resource":["video"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"modelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. 
models/gemini-2.5-flash"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["video"]}}},{"displayName":"Text Input","name":"text","type":"string","placeholder":"e.g. What's in this video?","default":"What's in this video?","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["analyze"],"resource":["video"]}}},{"displayName":"Input Type","name":"inputType","type":"options","default":"url","options":[{"name":"Video URL(s)","value":"url"},{"name":"Binary File(s)","value":"binary"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["video"]}}},{"displayName":"URL(s)","name":"videoUrls","type":"string","placeholder":"e.g. https://example.com/video.mp4","description":"URL(s) of the video(s) to analyze, multiple URLs can be added separated by comma","default":"","displayOptions":{"show":{"inputType":["url"],"operation":["analyze"],"resource":["video"]}}},{"displayName":"Input Data Field Name(s)","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. data","hint":"The name of the input field containing the binary file data to be processed","description":"Name of the binary field(s) which contains the video(s), seperate multiple field names with commas","displayOptions":{"show":{"inputType":["binary"],"operation":["analyze"],"resource":["video"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":true,"description":"Whether to simplify the response or not","displayOptions":{"show":{"operation":["analyze"],"resource":["video"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Length of Description (Max Tokens)","description":"Fewer tokens will result in shorter, less detailed video description","name":"maxOutputTokens","type":"number","default":300,"typeOptions":{"minValue":1}}],"displayOptions":{"show":{"operation":["analyze"],"resource":["video"]}}},{"displayName":"URL","name":"url","type":"string","placeholder":"e.g. https://generativelanguage.googleapis.com/v1beta/files/abcdefg:download","description":"The URL from Google Gemini API to download the video from","default":"","displayOptions":{"show":{"operation":["download"],"resource":["video"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Put Output in Field","name":"binaryPropertyOutput","type":"string","default":"data","hint":"The name of the output field to put the binary file data in"}],"displayOptions":{"show":{"operation":["download"],"resource":["video"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"videoGenerationModelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. models/gemini-2.5-flash"}],"displayOptions":{"show":{"operation":["generate"],"resource":["video"]}}},{"displayName":"Prompt","name":"prompt","type":"string","placeholder":"e.g. 
Panning wide shot of a calico kitten sleeping in the sunshine","description":"A text description of the desired video","default":"","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["generate"],"resource":["video"]}}},{"displayName":"Return As","name":"returnAs","type":"options","options":[{"name":"Video","value":"video"},{"name":"URL","value":"url"}],"description":"Whether to return the video as a binary file or a URL that can be used to download the video later","default":"video","displayOptions":{"show":{"operation":["generate"],"resource":["video"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Number of Videos","name":"sampleCount","type":"number","default":1,"description":"How many videos to generate","typeOptions":{"minValue":1,"maxValue":4}},{"displayName":"Duration (Seconds)","name":"durationSeconds","type":"number","default":8,"description":"Length of the generated video in seconds. Supported only by certain models.","typeOptions":{"minValue":5,"maxValue":8}},{"displayName":"Aspect Ratio","name":"aspectRatio","type":"options","options":[{"name":"Widescreen (16:9)","value":"16:9","description":"Most common aspect ratio for televisions and monitors"},{"name":"Portrait (9:16)","value":"9:16","description":"Popular for short-form videos like YouTube Shorts"}],"default":"16:9"},{"displayName":"Person Generation","name":"personGeneration","type":"options","options":[{"name":"Don't Allow","value":"dont_allow","description":"Prevent generation of people in the video"},{"name":"Allow Adult","value":"allow_adult","description":"Allow generation of adult people in the video"},{"name":"Allow All","value":"allow_all","description":"Allow generation of all people in the video"}],"default":"dont_allow"},{"displayName":"Put Output in Field","name":"binaryPropertyOutput","type":"string","default":"data","hint":"The name of the output field to put the binary file data in"}],"displayOptions":{"show":{"operation":["generate"],"resource":["video"]}}}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/vendors/GoogleGemini/gemini.svg"},
+ {"displayName":"Google Gemini","name":"googleGemini","group":["transform"],"version":[1,1.1],"defaultVersion":1.1,"subtitle":"={{ $parameter[\"operation\"] + \": \" + $parameter[\"resource\"] }}","description":"Interact with Google Gemini AI models","defaults":{"name":"Google Gemini"},"usableAsTool":true,"codex":{"alias":["LangChain","video","document","audio","transcribe","assistant"],"categories":["AI"],"subcategories":{"AI":["Agents","Miscellaneous","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/app-nodes/n8n-nodes-langchain.googlegemini/"}]}},"inputs":"={{\n\t\t(() => {\n\t\t\tconst resource = $parameter.resource;\n\t \tconst operation = $parameter.operation;\n\t\t\tif (resource === 'text' && operation === 'message') {\n\t\t\t\treturn [{ type: 'main' }, { type: 'ai_tool', displayName: 'Tools' }];\n\t\t\t}\n\n\t\t\treturn ['main'];\n\t\t})()\n\t}}","outputs":["main"],"credentials":[{"name":"googlePalmApi","required":true}],"properties":[{"displayName":"Resource","name":"resource","type":"options","noDataExpression":true,"options":[{"name":"Audio","value":"audio"},{"name":"Document","value":"document"},{"name":"File Search","value":"fileSearch"},{"name":"Image","value":"image"},{"name":"Media File","value":"file"},{"name":"Text","value":"text"},{"name":"Video","value":"video"}],"default":"text"},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Analyze Audio","value":"analyze","action":"Analyze audio","description":"Take in audio and answer questions about it"},{"name":"Transcribe a Recording","value":"transcribe","action":"Transcribe a recording","description":"Transcribes audio into the text"}],"default":"transcribe","displayOptions":{"show":{"resource":["audio"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"audioModelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. models/gemini-2.5-flash"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["audio"]}}},{"displayName":"Text Input","name":"text","type":"string","placeholder":"e.g. What's in this audio?","default":"What's in this audio?","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["analyze"],"resource":["audio"]}}},{"displayName":"Input Type","name":"inputType","type":"options","default":"url","options":[{"name":"Audio URL(s)","value":"url"},{"name":"Binary File(s)","value":"binary"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["audio"]}}},{"displayName":"URL(s)","name":"audioUrls","type":"string","placeholder":"e.g. https://example.com/audio.mp3","description":"URL(s) of the audio(s) to analyze, multiple URLs can be added separated by comma","default":"","displayOptions":{"show":{"inputType":["url"],"operation":["analyze"],"resource":["audio"]}}},{"displayName":"Input Data Field Name(s)","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. 
data","hint":"The name of the input field containing the binary file data to be processed","description":"Name of the binary field(s) which contains the audio(s), seperate multiple field names with commas","displayOptions":{"show":{"inputType":["binary"],"operation":["analyze"],"resource":["audio"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":true,"description":"Whether to simplify the response or not","displayOptions":{"show":{"operation":["analyze"],"resource":["audio"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Length of Description (Max Tokens)","description":"Fewer tokens will result in shorter, less detailed audio description","name":"maxOutputTokens","type":"number","default":300,"typeOptions":{"minValue":1}}],"displayOptions":{"show":{"operation":["analyze"],"resource":["audio"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"audioModelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. models/gemini-2.5-flash"}],"displayOptions":{"show":{"operation":["transcribe"],"resource":["audio"]}}},{"displayName":"Input Type","name":"inputType","type":"options","default":"url","options":[{"name":"Audio URL(s)","value":"url"},{"name":"Binary File(s)","value":"binary"}],"displayOptions":{"show":{"operation":["transcribe"],"resource":["audio"]}}},{"displayName":"URL(s)","name":"audioUrls","type":"string","placeholder":"e.g. https://example.com/audio.mp3","description":"URL(s) of the audio(s) to transcribe, multiple URLs can be added separated by comma","default":"","displayOptions":{"show":{"inputType":["url"],"operation":["transcribe"],"resource":["audio"]}}},{"displayName":"Input Data Field Name(s)","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. data","hint":"The name of the input field containing the binary file data to be processed","description":"Name of the binary field(s) which contains the audio(s), seperate multiple field names with commas","displayOptions":{"show":{"inputType":["binary"],"operation":["transcribe"],"resource":["audio"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":true,"description":"Whether to simplify the response or not","displayOptions":{"show":{"operation":["transcribe"],"resource":["audio"]}}},{"displayName":"Options","name":"options","type":"collection","default":{},"options":[{"displayName":"Start Time","name":"startTime","type":"string","default":"","description":"The start time of the audio in MM:SS or HH:MM:SS format","placeholder":"e.g. 00:15"},{"displayName":"End Time","name":"endTime","type":"string","default":"","description":"The end time of the audio in MM:SS or HH:MM:SS format","placeholder":"e.g. 
02:15"}],"displayOptions":{"show":{"operation":["transcribe"],"resource":["audio"]}}},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Analyze Document","value":"analyze","action":"Analyze document","description":"Take in documents and answer questions about them"}],"default":"analyze","displayOptions":{"show":{"resource":["document"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"modelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. models/gemini-2.5-flash"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["document"]}}},{"displayName":"Text Input","name":"text","type":"string","placeholder":"e.g. What's in this document?","default":"What's in this document?","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["analyze"],"resource":["document"]}}},{"displayName":"Input Type","name":"inputType","type":"options","default":"url","options":[{"name":"Document URL(s)","value":"url"},{"name":"Binary File(s)","value":"binary"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["document"]}}},{"displayName":"URL(s)","name":"documentUrls","type":"string","placeholder":"e.g. https://example.com/document.pdf","description":"URL(s) of the document(s) to analyze, multiple URLs can be added separated by comma","default":"","displayOptions":{"show":{"inputType":["url"],"operation":["analyze"],"resource":["document"]}}},{"displayName":"Input Data Field Name(s)","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. data","hint":"The name of the input field containing the binary file data to be processed","description":"Name of the binary field(s) which contains the document(s), seperate multiple field names with commas","displayOptions":{"show":{"inputType":["binary"],"operation":["analyze"],"resource":["document"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":true,"description":"Whether to simplify the response or not","displayOptions":{"show":{"operation":["analyze"],"resource":["document"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Length of Description (Max Tokens)","description":"Fewer tokens will result in shorter, less detailed document description","name":"maxOutputTokens","type":"number","default":300,"typeOptions":{"minValue":1}}],"displayOptions":{"show":{"operation":["analyze"],"resource":["document"]}}},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Upload Media File","value":"upload","action":"Upload a media file","description":"Upload a file to the Google Gemini API for later use"}],"default":"upload","displayOptions":{"show":{"resource":["file"]}}},{"displayName":"Input Type","name":"inputType","type":"options","default":"url","options":[{"name":"File URL","value":"url"},{"name":"Binary File","value":"binary"}],"displayOptions":{"show":{"operation":["upload"],"resource":["file"]}}},{"displayName":"URL","name":"fileUrl","type":"string","placeholder":"e.g. 
https://example.com/file.pdf","description":"URL of the file to upload","default":"","displayOptions":{"show":{"inputType":["url"],"operation":["upload"],"resource":["file"]}}},{"displayName":"Input Data Field Name","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. data","hint":"The name of the input field containing the binary file data to be processed","description":"Name of the binary property which contains the file","displayOptions":{"show":{"inputType":["binary"],"operation":["upload"],"resource":["file"]}}},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Create File Search Store","value":"createStore","action":"Create a File Search store","description":"Create a new File Search store for RAG (Retrieval Augmented Generation)"},{"name":"Delete File Search Store","value":"deleteStore","action":"Delete a File Search store","description":"Delete a File Search store"},{"name":"List File Search Stores","value":"listStores","action":"List all File Search stores","description":"List all File Search stores owned by the user"},{"name":"Upload to File Search Store","value":"uploadToStore","action":"Upload a file to a File Search store","description":"Upload a file to a File Search store for RAG (Retrieval Augmented Generation)"}],"default":"createStore","displayOptions":{"show":{"resource":["fileSearch"]}}},{"displayName":"Display Name","name":"displayName","type":"string","placeholder":"e.g. My File Search Store","description":"A human-readable name for the File Search store","default":"","required":true,"displayOptions":{"show":{"operation":["createStore"],"resource":["fileSearch"]}}},{"displayName":"File Search Store Name","name":"fileSearchStoreName","type":"string","placeholder":"e.g. fileSearchStores/abc123","description":"The full name of the File Search store to delete (format: fileSearchStores/...)","default":"","required":true,"displayOptions":{"show":{"operation":["deleteStore"],"resource":["fileSearch"]}}},{"displayName":"Force Delete","name":"force","type":"boolean","description":"Whether to delete related Documents and objects. If false, deletion will fail if the store contains any Documents.","default":false,"displayOptions":{"show":{"operation":["deleteStore"],"resource":["fileSearch"]}}},{"displayName":"Page Size","name":"pageSize","type":"number","description":"Maximum number of File Search stores to return per page (max 20)","default":10,"typeOptions":{"minValue":1,"maxValue":20},"displayOptions":{"show":{"operation":["listStores"],"resource":["fileSearch"]}}},{"displayName":"Page Token","name":"pageToken","type":"string","description":"Token from a previous page to retrieve the next page of results","default":"","displayOptions":{"show":{"operation":["listStores"],"resource":["fileSearch"]}}},{"displayName":"File Search Store Name","name":"fileSearchStoreName","type":"string","placeholder":"e.g. fileSearchStores/abc123","description":"The full name of the File Search store to upload to (format: fileSearchStores/...)","default":"","required":true,"displayOptions":{"show":{"operation":["uploadToStore"],"resource":["fileSearch"]}}},{"displayName":"File Display Name","name":"displayName","type":"string","placeholder":"e.g. 
My Document","description":"A human-readable name for the file (will be visible in citations)","default":"","required":true,"displayOptions":{"show":{"operation":["uploadToStore"],"resource":["fileSearch"]}}},{"displayName":"Input Type","name":"inputType","type":"options","default":"url","options":[{"name":"File URL","value":"url"},{"name":"Binary File","value":"binary"}],"displayOptions":{"show":{"operation":["uploadToStore"],"resource":["fileSearch"]}}},{"displayName":"URL","name":"fileUrl","type":"string","placeholder":"e.g. https://example.com/file.pdf","description":"URL of the file to upload","default":"","displayOptions":{"show":{"inputType":["url"],"operation":["uploadToStore"],"resource":["fileSearch"]}}},{"displayName":"Input Data Field Name","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. data","hint":"The name of the input field containing the binary file data to be processed","description":"Name of the binary property which contains the file","displayOptions":{"show":{"inputType":["binary"],"operation":["uploadToStore"],"resource":["fileSearch"]}}},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Analyze Image","value":"analyze","action":"Analyze an image","description":"Take in images and answer questions about them"},{"name":"Generate an Image","value":"generate","action":"Generate an image","description":"Creates an image from a text prompt"},{"name":"Edit Image","value":"edit","action":"Edit an image","description":"Upload one or more images and apply edits based on a prompt"}],"default":"generate","displayOptions":{"show":{"resource":["image"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"modelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. models/gemini-2.5-flash"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"Text Input","name":"text","type":"string","placeholder":"e.g. What's in this image?","default":"What's in this image?","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"Input Type","name":"inputType","type":"options","default":"url","options":[{"name":"Image URL(s)","value":"url"},{"name":"Binary File(s)","value":"binary"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"URL(s)","name":"imageUrls","type":"string","placeholder":"e.g. https://example.com/image.png","description":"URL(s) of the image(s) to analyze, multiple URLs can be added separated by comma","default":"","displayOptions":{"show":{"inputType":["url"],"operation":["analyze"],"resource":["image"]}}},{"displayName":"Input Data Field Name(s)","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. 
data","hint":"The name of the input field containing the binary file data to be processed","description":"Name of the binary field(s) which contains the image(s), separate multiple field names with commas","displayOptions":{"show":{"inputType":["binary"],"operation":["analyze"],"resource":["image"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":true,"description":"Whether to simplify the response or not","displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Length of Description (Max Tokens)","description":"Fewer tokens will result in shorter, less detailed image description","name":"maxOutputTokens","type":"number","default":300,"typeOptions":{"minValue":1}}],"displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"imageEditModelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. models/gemini-2.5-flash"}],"displayOptions":{"show":{"operation":["edit"],"resource":["image"]}}},{"displayName":"Prompt","name":"prompt","type":"string","placeholder":"e.g. combine the first image with the second image","description":"Instruction describing how to edit the image","default":"","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["edit"],"resource":["image"]}}},{"displayName":"Images","name":"images","type":"fixedCollection","placeholder":"Add Image","typeOptions":{"multipleValues":true,"multipleValueButtonText":"Add Image"},"default":{"values":[{"binaryPropertyName":"data"}]},"description":"Add one or more binary fields to include images with your prompt","options":[{"displayName":"Image","name":"values","values":[{"displayName":"Binary Field Name","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. data","description":"The name of the binary field containing the image data"}]}],"displayOptions":{"show":{"operation":["edit"],"resource":["image"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Put Output in Field","name":"binaryPropertyOutput","type":"string","default":"edited","hint":"The name of the output field to put the binary file data in"}],"displayOptions":{"show":{"operation":["edit"],"resource":["image"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"imageGenerationModelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. models/gemini-2.5-flash"}],"displayOptions":{"show":{"operation":["generate"],"resource":["image"]}}},{"displayName":"Prompt","name":"prompt","type":"string","placeholder":"e.g. 
A cute cat eating a dinosaur","description":"A text description of the desired image(s)","default":"","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["generate"],"resource":["image"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Number of Images","name":"sampleCount","default":1,"description":"Number of images to generate. Not supported by Gemini models, supported by Imagen models.","type":"number","typeOptions":{"minValue":1}},{"displayName":"Put Output in Field","name":"binaryPropertyOutput","type":"string","default":"data","hint":"The name of the output field to put the binary file data in"}],"displayOptions":{"show":{"operation":["generate"],"resource":["image"]}}},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Message a Model","value":"message","action":"Message a model","description":"Create a completion with Google Gemini model"}],"default":"message","displayOptions":{"show":{"resource":["text"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"modelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. models/gemini-2.5-flash"}],"displayOptions":{"show":{"operation":["message"],"resource":["text"]}}},{"displayName":"Messages","name":"messages","type":"fixedCollection","typeOptions":{"sortable":true,"multipleValues":true},"placeholder":"Add Message","default":{"values":[{"content":""}]},"options":[{"displayName":"Values","name":"values","values":[{"displayName":"Prompt","name":"content","type":"string","description":"The content of the message to be send","default":"","placeholder":"e.g. 
Hello, how can you help me?","typeOptions":{"rows":2}},{"displayName":"Role","name":"role","type":"options","description":"Role in shaping the model's response, it tells the model how it should behave and interact with the user","options":[{"name":"User","value":"user","description":"Send a message as a user and get a response from the model"},{"name":"Model","value":"model","description":"Tell the model to adopt a specific tone or personality"}],"default":"user"}]}],"displayOptions":{"show":{"operation":["message"],"resource":["text"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":true,"description":"Whether to return a simplified version of the response instead of the raw data","displayOptions":{"show":{"operation":["message"],"resource":["text"]}}},{"displayName":"Output Content as JSON","name":"jsonOutput","type":"boolean","description":"Whether to attempt to return the response in JSON format","default":false,"displayOptions":{"show":{"operation":["message"],"resource":["text"]}}},{"displayName":"Built-in Tools","name":"builtInTools","placeholder":"Add Built-in Tool","type":"collection","default":{},"displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.1}}],"operation":["message"],"resource":["text"]}},"options":[{"displayName":"Google Search","name":"googleSearch","type":"boolean","default":true,"description":"Whether to allow the model to search the web using Google Search to get real-time information"},{"displayName":"Google Maps","name":"googleMaps","type":"collection","default":{"latitude":"","longitude":""},"options":[{"displayName":"Latitude","name":"latitude","type":"number","default":"","description":"The latitude coordinate for location-based queries","typeOptions":{"numberPrecision":6}},{"displayName":"Longitude","name":"longitude","type":"number","default":"","description":"The longitude coordinate for location-based queries","typeOptions":{"numberPrecision":6}}]},{"displayName":"URL Context","name":"urlContext","type":"boolean","default":true,"description":"Whether to allow the model to read and analyze content from specific URLs"},{"displayName":"File Search","name":"fileSearch","type":"collection","default":{"fileSearchStoreNames":"[]"},"options":[{"displayName":"File Search Store Names","name":"fileSearchStoreNames","description":"The file search store names to use for the file search. File search stores are managed via Google AI Studio.","type":"json","default":"[]","required":true},{"displayName":"Metadata Filter","name":"metadataFilter","type":"string","default":"","description":"Use metadata filter to search within a subset of documents. Example: author=\"Robert Graves\".","placeholder":"e.g. author=\"John Doe\""}]},{"displayName":"Code Execution","name":"codeExecution","type":"boolean","default":true,"description":"Whether to allow the model to execute code it generates to produce a response. Supported only by certain models."}]},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Include Merged Response","name":"includeMergedResponse","type":"boolean","default":false,"description":"Whether to include a single output string merging all text parts of the response","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.1}}]}}},{"displayName":"System Message","name":"systemMessage","type":"string","default":"","placeholder":"e.g. 
You are a helpful assistant"},{"displayName":"Code Execution","name":"codeExecution","type":"boolean","default":false,"description":"Whether to allow the model to execute code it generates to produce a response. Supported only by certain models.","displayOptions":{"show":{"@version":[{"_cnd":{"eq":1}}]}}},{"displayName":"Frequency Penalty","name":"frequencyPenalty","default":0,"description":"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim","type":"number","typeOptions":{"minValue":-2,"maxValue":2,"numberPrecision":1}},{"displayName":"Maximum Number of Tokens","name":"maxOutputTokens","default":16,"description":"The maximum number of tokens to generate in the completion","type":"number","typeOptions":{"minValue":1,"numberPrecision":0}},{"displayName":"Number of Completions","name":"candidateCount","default":1,"description":"How many completions to generate for each prompt","type":"number","typeOptions":{"minValue":1,"maxValue":8,"numberPrecision":0}},{"displayName":"Presence Penalty","name":"presencePenalty","default":0,"description":"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics","type":"number","typeOptions":{"minValue":-2,"maxValue":2,"numberPrecision":1}},{"displayName":"Output Randomness (Temperature)","name":"temperature","default":1,"description":"Controls the randomness of the output. Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive","type":"number","typeOptions":{"minValue":0,"maxValue":2,"numberPrecision":1}},{"displayName":"Output Randomness (Top P)","name":"topP","default":1,"description":"The maximum cumulative probability of tokens to consider when sampling","type":"number","typeOptions":{"minValue":0,"maxValue":1,"numberPrecision":1}},{"displayName":"Output Randomness (Top K)","name":"topK","default":1,"description":"The maximum number of tokens to consider when sampling","type":"number","typeOptions":{"minValue":1,"numberPrecision":0}},{"displayName":"Thinking Budget","name":"thinkingBudget","type":"number","description":"Controls reasoning tokens for thinking models. Set to 0 to disable automatic thinking. Set to -1 for dynamic thinking. Leave empty for auto mode.","typeOptions":{"minValue":-1,"numberPrecision":0}},{"displayName":"Max Tool Calls Iterations","name":"maxToolsIterations","type":"number","default":15,"description":"The maximum number of tool iteration cycles the LLM will run before stopping. A single iteration can contain multiple tool calls. 
Set to 0 for no limit","typeOptions":{"minValue":0,"numberPrecision":0}}],"displayOptions":{"show":{"operation":["message"],"resource":["text"]}}},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Analyze Video","value":"analyze","action":"Analyze video","description":"Take in videos and answer questions about them"},{"name":"Generate a Video","value":"generate","action":"Generate a video","description":"Creates a video from a text prompt"},{"name":"Download Video","value":"download","action":"Download a video","description":"Download a generated video from the Google Gemini API using a URL"}],"default":"generate","displayOptions":{"show":{"resource":["video"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"modelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. models/gemini-2.5-flash"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["video"]}}},{"displayName":"Text Input","name":"text","type":"string","placeholder":"e.g. What's in this video?","default":"What's in this video?","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["analyze"],"resource":["video"]}}},{"displayName":"Input Type","name":"inputType","type":"options","default":"url","options":[{"name":"Video URL(s)","value":"url"},{"name":"Binary File(s)","value":"binary"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["video"]}}},{"displayName":"URL(s)","name":"videoUrls","type":"string","placeholder":"e.g. https://example.com/video.mp4","description":"URL(s) of the video(s) to analyze, multiple URLs can be added separated by comma","default":"","displayOptions":{"show":{"inputType":["url"],"operation":["analyze"],"resource":["video"]}}},{"displayName":"Input Data Field Name(s)","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. data","hint":"The name of the input field containing the binary file data to be processed","description":"Name of the binary field(s) which contains the video(s), seperate multiple field names with commas","displayOptions":{"show":{"inputType":["binary"],"operation":["analyze"],"resource":["video"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":true,"description":"Whether to simplify the response or not","displayOptions":{"show":{"operation":["analyze"],"resource":["video"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Length of Description (Max Tokens)","description":"Fewer tokens will result in shorter, less detailed video description","name":"maxOutputTokens","type":"number","default":300,"typeOptions":{"minValue":1}}],"displayOptions":{"show":{"operation":["analyze"],"resource":["video"]}}},{"displayName":"URL","name":"url","type":"string","placeholder":"e.g. 
https://generativelanguage.googleapis.com/v1beta/files/abcdefg:download","description":"The URL from Google Gemini API to download the video from","default":"","displayOptions":{"show":{"operation":["download"],"resource":["video"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Put Output in Field","name":"binaryPropertyOutput","type":"string","default":"data","hint":"The name of the output field to put the binary file data in"}],"displayOptions":{"show":{"operation":["download"],"resource":["video"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"videoGenerationModelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. models/gemini-2.5-flash"}],"displayOptions":{"show":{"operation":["generate"],"resource":["video"]}}},{"displayName":"Prompt","name":"prompt","type":"string","placeholder":"e.g. Panning wide shot of a calico kitten sleeping in the sunshine","description":"A text description of the desired video","default":"","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["generate"],"resource":["video"]}}},{"displayName":"Return As","name":"returnAs","type":"options","options":[{"name":"Video","value":"video"},{"name":"URL","value":"url"}],"description":"Whether to return the video as a binary file or a URL that can be used to download the video later","default":"video","displayOptions":{"show":{"operation":["generate"],"resource":["video"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Number of Videos","name":"sampleCount","type":"number","default":1,"description":"How many videos to generate","typeOptions":{"minValue":1,"maxValue":4}},{"displayName":"Duration (Seconds)","name":"durationSeconds","type":"number","default":8,"description":"Length of the generated video in seconds. Supported only by certain models.","typeOptions":{"minValue":5,"maxValue":8}},{"displayName":"Aspect Ratio","name":"aspectRatio","type":"options","options":[{"name":"Widescreen (16:9)","value":"16:9","description":"Most common aspect ratio for televisions and monitors"},{"name":"Portrait (9:16)","value":"9:16","description":"Popular for short-form videos like YouTube Shorts"}],"default":"16:9"},{"displayName":"Person Generation","name":"personGeneration","type":"options","options":[{"name":"Don't Allow","value":"dont_allow","description":"Prevent generation of people in the video"},{"name":"Allow Adult","value":"allow_adult","description":"Allow generation of adult people in the video"},{"name":"Allow All","value":"allow_all","description":"Allow generation of all people in the video"}],"default":"dont_allow"},{"displayName":"Put Output in Field","name":"binaryPropertyOutput","type":"string","default":"data","hint":"The name of the output field to put the binary file data in"}],"displayOptions":{"show":{"operation":["generate"],"resource":["video"]}}}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/vendors/GoogleGemini/gemini.svg"},
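The new File Search resource above adds four store-management operations (Create, Delete, List, Upload to store). For orientation, below is a minimal TypeScript sketch of the store lifecycle those operations map onto, assuming the Generative Language v1beta fileSearchStores REST endpoints; the endpoint paths and query-parameter names are assumptions, and only the displayName, force, pageSize and pageToken semantics come from the node properties shown above.

// Hedged sketch of the File Search store lifecycle exposed by the "fileSearch" resource.
// Assumptions: the v1beta paths and query parameter names below are inferred, not read
// from this package; only the parameter semantics come from the node description above.
const BASE = 'https://generativelanguage.googleapis.com/v1beta';

async function createStore(apiKey: string, displayName: string) {
  const res = await fetch(`${BASE}/fileSearchStores?key=${apiKey}`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ displayName }), // human-readable name, as in the "Display Name" field
  });
  return res.json(); // expected to include a resource name like "fileSearchStores/abc123"
}

async function listStores(apiKey: string, pageSize = 10, pageToken?: string) {
  const params = new URLSearchParams({ key: apiKey, pageSize: String(pageSize) }); // max 20 per the node
  if (pageToken) params.set('pageToken', pageToken); // token from a previous page of results
  const res = await fetch(`${BASE}/fileSearchStores?${params}`);
  return res.json();
}

async function deleteStore(apiKey: string, storeName: string, force = false) {
  // storeName must be the full resource name, e.g. "fileSearchStores/abc123".
  // With force=false, deletion is expected to fail if the store still contains Documents.
  const res = await fetch(`${BASE}/${storeName}?key=${apiKey}&force=${force}`, { method: 'DELETE' });
  return res.status;
}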
  {"displayName":"Ollama","name":"ollama","group":["transform"],"version":1,"subtitle":"={{ $parameter[\"operation\"] + \": \" + $parameter[\"resource\"] }}","description":"Interact with Ollama AI models","defaults":{"name":"Ollama"},"usableAsTool":true,"codex":{"alias":["LangChain","image","vision","AI","local"],"categories":["AI"],"subcategories":{"AI":["Agents","Miscellaneous","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/app-nodes/n8n-nodes-langchain.ollama/"}]}},"inputs":"={{\n\t\t(() => {\n\t\t\tconst resource = $parameter.resource;\n\t \tconst operation = $parameter.operation;\n\t\t\tif (resource === 'text' && operation === 'message') {\n\t\t\t\treturn [{ type: 'main' }, { type: 'ai_tool', displayName: 'Tools' }];\n\t\t\t}\n\n\t\t\treturn ['main'];\n\t\t})()\n\t}}","outputs":["main"],"credentials":[{"name":"ollamaApi","required":true}],"properties":[{"displayName":"Resource","name":"resource","type":"options","noDataExpression":true,"options":[{"name":"Image","value":"image"},{"name":"Text","value":"text"}],"default":"text"},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Analyze Image","value":"analyze","action":"Analyze image","description":"Take in images and answer questions about them"}],"default":"analyze","displayOptions":{"show":{"resource":["image"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"modelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. llava, llama3.2-vision"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"Text Input","name":"text","type":"string","placeholder":"e.g. What's in this image?","default":"What's in this image?","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"Input Type","name":"inputType","type":"options","default":"binary","options":[{"name":"Binary File(s)","value":"binary"},{"name":"Image URL(s)","value":"url"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"Input Data Field Name(s)","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. data","hint":"The name of the input field containing the binary file data to be processed","description":"Name of the binary field(s) which contains the image(s), separate multiple field names with commas","displayOptions":{"show":{"inputType":["binary"],"operation":["analyze"],"resource":["image"]}}},{"displayName":"URL(s)","name":"imageUrls","type":"string","placeholder":"e.g. https://example.com/image.png","description":"URL(s) of the image(s) to analyze, multiple URLs can be added separated by comma","default":"","displayOptions":{"show":{"inputType":["url"],"operation":["analyze"],"resource":["image"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":true,"description":"Whether to simplify the response or not","displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"System Message","name":"system","type":"string","default":"","placeholder":"e.g. 
You are a helpful assistant.","description":"System message to set the context for the conversation","typeOptions":{"rows":2}},{"displayName":"Temperature","name":"temperature","type":"number","default":0.8,"typeOptions":{"minValue":0,"maxValue":2,"numberPrecision":2},"description":"Controls randomness in responses. Lower values make output more focused."},{"displayName":"Output Randomness (Top P)","name":"top_p","default":0.7,"description":"The maximum cumulative probability of tokens to consider when sampling","type":"number","typeOptions":{"minValue":0,"maxValue":1,"numberPrecision":1}},{"displayName":"Top K","name":"top_k","type":"number","default":40,"typeOptions":{"minValue":1},"description":"Controls diversity by limiting the number of top tokens to consider"},{"displayName":"Max Tokens","name":"num_predict","type":"number","default":1024,"typeOptions":{"minValue":1,"numberPrecision":0},"description":"Maximum number of tokens to generate in the completion"},{"displayName":"Frequency Penalty","name":"frequency_penalty","type":"number","default":0,"typeOptions":{"minValue":0,"numberPrecision":2},"description":"Adjusts the penalty for tokens that have already appeared in the generated text. Higher values discourage repetition."},{"displayName":"Presence Penalty","name":"presence_penalty","type":"number","default":0,"typeOptions":{"numberPrecision":2},"description":"Adjusts the penalty for tokens based on their presence in the generated text so far. Positive values penalize tokens that have already appeared, encouraging diversity."},{"displayName":"Repetition Penalty","name":"repeat_penalty","type":"number","default":1.1,"typeOptions":{"minValue":0,"numberPrecision":2},"description":"Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient."},{"displayName":"Context Length","name":"num_ctx","type":"number","default":4096,"typeOptions":{"minValue":1,"numberPrecision":0},"description":"Sets the size of the context window used to generate the next token"},{"displayName":"Repeat Last N","name":"repeat_last_n","type":"number","default":64,"typeOptions":{"minValue":-1,"numberPrecision":0},"description":"Sets how far back for the model to look back to prevent repetition. (0 = disabled, -1 = num_ctx)."},{"displayName":"Min P","name":"min_p","type":"number","default":0,"typeOptions":{"minValue":0,"maxValue":1,"numberPrecision":3},"description":"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token."},{"displayName":"Seed","name":"seed","type":"number","default":0,"typeOptions":{"minValue":0,"numberPrecision":0},"description":"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt."},{"displayName":"Stop Sequences","name":"stop","type":"string","default":"","description":"Sets the stop sequences to use. When this pattern is encountered the LLM will stop generating text and return. Separate multiple patterns with commas"},{"displayName":"Keep Alive","name":"keep_alive","type":"string","default":"5m","description":"Specifies the duration to keep the loaded model in memory after use. 
Format: 1h30m (1 hour 30 minutes)."},{"displayName":"Low VRAM Mode","name":"low_vram","type":"boolean","default":false,"description":"Whether to activate low VRAM mode, which reduces memory usage at the cost of slower generation speed. Useful for GPUs with limited memory."},{"displayName":"Main GPU ID","name":"main_gpu","type":"number","default":0,"typeOptions":{"minValue":0,"numberPrecision":0},"description":"Specifies the ID of the GPU to use for the main computation. Only change this if you have multiple GPUs."},{"displayName":"Context Batch Size","name":"num_batch","type":"number","default":512,"typeOptions":{"minValue":1,"numberPrecision":0},"description":"Sets the batch size for prompt processing. Larger batch sizes may improve generation speed but increase memory usage."},{"displayName":"Number of GPUs","name":"num_gpu","type":"number","default":-1,"typeOptions":{"minValue":-1,"numberPrecision":0},"description":"Specifies the number of GPUs to use for parallel processing. Set to -1 for auto-detection."},{"displayName":"Number of CPU Threads","name":"num_thread","type":"number","default":0,"typeOptions":{"minValue":0,"numberPrecision":0},"description":"Specifies the number of CPU threads to use for processing. Set to 0 for auto-detection."},{"displayName":"Penalize Newlines","name":"penalize_newline","type":"boolean","default":true,"description":"Whether the model will be less likely to generate newline characters, encouraging longer continuous sequences of text"},{"displayName":"Use Memory Locking","name":"use_mlock","type":"boolean","default":false,"description":"Whether to lock the model in memory to prevent swapping. This can improve performance but requires sufficient available memory."},{"displayName":"Use Memory Mapping","name":"use_mmap","type":"boolean","default":true,"description":"Whether to use memory mapping for loading the model. This can reduce memory usage but may impact performance."},{"displayName":"Load Vocabulary Only","name":"vocab_only","type":"boolean","default":false,"description":"Whether to only load the model vocabulary without the weights. Useful for quickly testing tokenization."},{"displayName":"Output Format","name":"format","type":"options","options":[{"name":"Default","value":""},{"name":"JSON","value":"json"}],"default":"","description":"Specifies the format of the API response"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Message a Model","value":"message","action":"Message a model","description":"Send a message to Ollama model"}],"default":"message","displayOptions":{"show":{"resource":["text"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"modelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. llava, llama3.2-vision"}],"displayOptions":{"show":{"operation":["message"],"resource":["text"]}}},{"displayName":"Messages","name":"messages","type":"fixedCollection","typeOptions":{"sortable":true,"multipleValues":true},"placeholder":"Add Message","default":{"values":[{"content":"","role":"user"}]},"options":[{"displayName":"Values","name":"values","values":[{"displayName":"Content","name":"content","type":"string","description":"The content of the message to be sent","default":"","placeholder":"e.g. 
Hello, how can you help me?","typeOptions":{"rows":2}},{"displayName":"Role","name":"role","type":"options","description":"The role of this message in the conversation","options":[{"name":"User","value":"user","description":"Message from the user"},{"name":"Assistant","value":"assistant","description":"Response from the assistant (for conversation history)"}],"default":"user"}]}],"displayOptions":{"show":{"operation":["message"],"resource":["text"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":true,"description":"Whether to simplify the response or not","displayOptions":{"show":{"operation":["message"],"resource":["text"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"System Message","name":"system","type":"string","default":"","placeholder":"e.g. You are a helpful assistant.","description":"System message to set the context for the conversation","typeOptions":{"rows":2}},{"displayName":"Temperature","name":"temperature","type":"number","default":0.8,"typeOptions":{"minValue":0,"maxValue":2,"numberPrecision":2},"description":"Controls randomness in responses. Lower values make output more focused."},{"displayName":"Output Randomness (Top P)","name":"top_p","default":0.7,"description":"The maximum cumulative probability of tokens to consider when sampling","type":"number","typeOptions":{"minValue":0,"maxValue":1,"numberPrecision":1}},{"displayName":"Top K","name":"top_k","type":"number","default":40,"typeOptions":{"minValue":1},"description":"Controls diversity by limiting the number of top tokens to consider"},{"displayName":"Max Tokens","name":"num_predict","type":"number","default":1024,"typeOptions":{"minValue":1,"numberPrecision":0},"description":"Maximum number of tokens to generate in the completion"},{"displayName":"Frequency Penalty","name":"frequency_penalty","type":"number","default":0,"typeOptions":{"minValue":0,"numberPrecision":2},"description":"Adjusts the penalty for tokens that have already appeared in the generated text. Higher values discourage repetition."},{"displayName":"Presence Penalty","name":"presence_penalty","type":"number","default":0,"typeOptions":{"numberPrecision":2},"description":"Adjusts the penalty for tokens based on their presence in the generated text so far. Positive values penalize tokens that have already appeared, encouraging diversity."},{"displayName":"Repetition Penalty","name":"repeat_penalty","type":"number","default":1.1,"typeOptions":{"minValue":0,"numberPrecision":2},"description":"Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient."},{"displayName":"Context Length","name":"num_ctx","type":"number","default":4096,"typeOptions":{"minValue":1,"numberPrecision":0},"description":"Sets the size of the context window used to generate the next token"},{"displayName":"Repeat Last N","name":"repeat_last_n","type":"number","default":64,"typeOptions":{"minValue":-1,"numberPrecision":0},"description":"Sets how far back for the model to look back to prevent repetition. (0 = disabled, -1 = num_ctx)."},{"displayName":"Min P","name":"min_p","type":"number","default":0,"typeOptions":{"minValue":0,"maxValue":1,"numberPrecision":3},"description":"Alternative to the top_p, and aims to ensure a balance of quality and variety. 
The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token."},{"displayName":"Seed","name":"seed","type":"number","default":0,"typeOptions":{"minValue":0,"numberPrecision":0},"description":"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt."},{"displayName":"Stop Sequences","name":"stop","type":"string","default":"","description":"Sets the stop sequences to use. When this pattern is encountered the LLM will stop generating text and return. Separate multiple patterns with commas"},{"displayName":"Keep Alive","name":"keep_alive","type":"string","default":"5m","description":"Specifies the duration to keep the loaded model in memory after use. Format: 1h30m (1 hour 30 minutes)."},{"displayName":"Low VRAM Mode","name":"low_vram","type":"boolean","default":false,"description":"Whether to activate low VRAM mode, which reduces memory usage at the cost of slower generation speed. Useful for GPUs with limited memory."},{"displayName":"Main GPU ID","name":"main_gpu","type":"number","default":0,"typeOptions":{"minValue":0,"numberPrecision":0},"description":"Specifies the ID of the GPU to use for the main computation. Only change this if you have multiple GPUs."},{"displayName":"Context Batch Size","name":"num_batch","type":"number","default":512,"typeOptions":{"minValue":1,"numberPrecision":0},"description":"Sets the batch size for prompt processing. Larger batch sizes may improve generation speed but increase memory usage."},{"displayName":"Number of GPUs","name":"num_gpu","type":"number","default":-1,"typeOptions":{"minValue":-1,"numberPrecision":0},"description":"Specifies the number of GPUs to use for parallel processing. Set to -1 for auto-detection."},{"displayName":"Number of CPU Threads","name":"num_thread","type":"number","default":0,"typeOptions":{"minValue":0,"numberPrecision":0},"description":"Specifies the number of CPU threads to use for processing. Set to 0 for auto-detection."},{"displayName":"Penalize Newlines","name":"penalize_newline","type":"boolean","default":true,"description":"Whether the model will be less likely to generate newline characters, encouraging longer continuous sequences of text"},{"displayName":"Use Memory Locking","name":"use_mlock","type":"boolean","default":false,"description":"Whether to lock the model in memory to prevent swapping. This can improve performance but requires sufficient available memory."},{"displayName":"Use Memory Mapping","name":"use_mmap","type":"boolean","default":true,"description":"Whether to use memory mapping for loading the model. This can reduce memory usage but may impact performance."},{"displayName":"Load Vocabulary Only","name":"vocab_only","type":"boolean","default":false,"description":"Whether to only load the model vocabulary without the weights. Useful for quickly testing tokenization."},{"displayName":"Output Format","name":"format","type":"options","options":[{"name":"Default","value":""},{"name":"JSON","value":"json"}],"default":"","description":"Specifies the format of the API response"}],"displayOptions":{"show":{"operation":["message"],"resource":["text"]}}}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/vendors/Ollama/ollama.svg"},
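The Ollama node's option names (num_predict, num_ctx, repeat_penalty, keep_alive, format, and so on) mirror Ollama's own options object, which suggests the node forwards them to the /api/chat endpoint. A minimal TypeScript sketch under that assumption follows; the endpoint, payload shape and defaults shown are standard Ollama API usage plus the node defaults above, not code taken from this package.

// Hedged sketch of the request the "Message a Model" operation likely builds.
// Assumption: option fields are passed through unchanged to Ollama's /api/chat endpoint.
interface OllamaMessage { role: 'user' | 'assistant' | 'system'; content: string }

async function messageModel(baseUrl: string, model: string, messages: OllamaMessage[]) {
  const body = {
    model,              // e.g. "llava" or "llama3.2-vision", as in the Model field
    messages,           // built from the Messages fixedCollection (content + role)
    stream: false,
    format: 'json',     // "Output Format" option; the node default is '' (omit for default)
    keep_alive: '5m',   // how long to keep the loaded model in memory after use
    options: {
      temperature: 0.8, // node defaults shown in the properties above
      top_p: 0.7,
      top_k: 40,
      num_predict: 1024,
      num_ctx: 4096,
      repeat_penalty: 1.1,
    },
  };
  const res = await fetch(`${baseUrl}/api/chat`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(body),
  });
  return res.json(); // raw response; "Simplify Output" would reduce this to the message text
}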
  {"displayName":"OpenAI","name":"openAi","group":["transform"],"defaultVersion":2.1,"subtitle":"={{((resource, operation) => {\n if (operation === \"deleteAssistant\") {\n return \"Delete Assistant\";\n }\n if (operation === \"deleteFile\") {\n return \"Delete File\";\n }\n if (operation === \"classify\") {\n return \"Classify Text\";\n }\n if (operation === \"message\" && resource === \"text\") {\n return \"Message Model\";\n }\n const capitalize = (str) => {\n const chars = str.split(\"\");\n chars[0] = chars[0].toUpperCase();\n return chars.join(\"\");\n };\n if ([\"transcribe\", \"translate\"].includes(operation)) {\n resource = \"recording\";\n }\n if (operation === \"list\") {\n resource = resource + \"s\";\n }\n return `${capitalize(operation)} ${capitalize(resource)}`;\n})($parameter.resource, $parameter.operation)}}","description":"Message an assistant or GPT, analyze images, generate audio, etc.","codex":{"alias":["LangChain","ChatGPT","Sora","DallE","whisper","audio","transcribe","tts","assistant"],"categories":["AI"],"subcategories":{"AI":["Agents","Miscellaneous","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/app-nodes/n8n-nodes-langchain.openai/"}]}},"version":[2,2.1],"defaults":{"name":"OpenAI"},"inputs":"={{((resource, operation, hideTools, memory) => {\n if (resource === \"assistant\" && operation === \"message\") {\n const inputs = [\n { type: \"main\" },\n { type: \"ai_tool\", displayName: \"Tools\" }\n ];\n if (memory !== \"threadId\") {\n inputs.push({ type: \"ai_memory\", displayName: \"Memory\", maxConnections: 1 });\n }\n return inputs;\n }\n if (resource === \"text\" && (operation === \"message\" || operation === \"response\")) {\n if (hideTools === \"hide\") {\n return [\"main\"];\n }\n return [{ type: \"main\" }, { type: \"ai_tool\", displayName: \"Tools\" }];\n }\n return [\"main\"];\n})($parameter.resource, $parameter.operation, $parameter.hideTools, $parameter.memory ?? undefined)}}","outputs":["main"],"credentials":[{"name":"openAiApi","required":true}],"properties":[{"displayName":"Resource","name":"resource","type":"options","noDataExpression":true,"options":[{"name":"Text","value":"text"},{"name":"Image","value":"image"},{"name":"Audio","value":"audio"},{"name":"File","value":"file"},{"name":"Conversation","value":"conversation"},{"name":"Video","value":"video"}],"default":"text"},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Generate Audio","value":"generate","action":"Generate audio","description":"Creates audio from a text prompt"},{"name":"Transcribe a Recording","value":"transcribe","action":"Transcribe a recording","description":"Transcribes audio into text"},{"name":"Translate a Recording","value":"translate","action":"Translate a recording","description":"Translates audio into text in English"}],"default":"generate","displayOptions":{"show":{"resource":["audio"]}}},{"displayName":"OpenAI API limits the size of the audio file to 25 MB","name":"fileSizeLimitNotice","type":"notice","default":" ","displayOptions":{"show":{"resource":["audio"],"operation":["translate","transcribe"]}}},{"displayName":"Model","name":"model","type":"options","default":"tts-1","options":[{"name":"TTS-1","value":"tts-1"},{"name":"TTS-1-HD","value":"tts-1-hd"}],"displayOptions":{"show":{"operation":["generate"],"resource":["audio"]}}},{"displayName":"Text Input","name":"input","type":"string","placeholder":"e.g. 
The quick brown fox jumped over the lazy dog","description":"The text to generate audio for. The maximum length is 4096 characters.","default":"","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["generate"],"resource":["audio"]}}},{"displayName":"Voice","name":"voice","type":"options","default":"alloy","description":"The voice to use when generating the audio","options":[{"name":"Alloy","value":"alloy"},{"name":"Echo","value":"echo"},{"name":"Fable","value":"fable"},{"name":"Nova","value":"nova"},{"name":"Onyx","value":"onyx"},{"name":"Shimmer","value":"shimmer"}],"displayOptions":{"show":{"operation":["generate"],"resource":["audio"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Response Format","name":"response_format","type":"options","default":"mp3","options":[{"name":"MP3","value":"mp3"},{"name":"OPUS","value":"opus"},{"name":"AAC","value":"aac"},{"name":"FLAC","value":"flac"}]},{"displayName":"Audio Speed","name":"speed","type":"number","default":1,"typeOptions":{"minValue":0.25,"maxValue":4,"numberPrecision":1}},{"displayName":"Put Output in Field","name":"binaryPropertyOutput","type":"string","default":"data","hint":"The name of the output field to put the binary file data in"}],"displayOptions":{"show":{"operation":["generate"],"resource":["audio"]}}},{"displayName":"Input Data Field Name","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. data","hint":"The name of the input field containing the binary file data to be processed","description":"Name of the binary property which contains the audio file in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm","displayOptions":{"show":{"operation":["transcribe"],"resource":["audio"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Language of the Audio File","name":"language","type":"string","description":"The language of the input audio. Supplying the input language in <a href=\"https://en.wikipedia.org/wiki/List_of_ISO_639_language_codes\" target=\"_blank\">ISO-639-1</a> format will improve accuracy and latency.","default":""},{"displayName":"Output Randomness (Temperature)","name":"temperature","type":"number","default":0,"typeOptions":{"minValue":0,"maxValue":1,"numberPrecision":1}}],"displayOptions":{"show":{"operation":["transcribe"],"resource":["audio"]}}},{"displayName":"Input Data Field Name","name":"binaryPropertyName","type":"string","default":"data","hint":"The name of the input field containing the binary file data to be processed","placeholder":"e.g. 
data","description":"Name of the binary property which contains the audio file in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm","displayOptions":{"show":{"operation":["translate"],"resource":["audio"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Output Randomness (Temperature)","name":"temperature","type":"number","default":0,"typeOptions":{"minValue":0,"maxValue":1,"numberPrecision":1}}],"displayOptions":{"show":{"operation":["translate"],"resource":["audio"]}}},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Delete a File","value":"deleteFile","action":"Delete a file","description":"Delete a file from the server"},{"name":"List Files","value":"list","action":"List files","description":"Returns a list of files that belong to the user's organization"},{"name":"Upload a File","value":"upload","action":"Upload a file","description":"Upload a file that can be used across various endpoints"}],"default":"upload","displayOptions":{"show":{"resource":["file"]}}},{"displayName":"Input Data Field Name","name":"binaryPropertyName","type":"string","default":"data","hint":"The name of the input field containing the binary file data to be processed","placeholder":"e.g. data","description":"Name of the binary property which contains the file. The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants.","displayOptions":{"show":{"operation":["upload"],"resource":["file"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Purpose","name":"purpose","type":"options","default":"user_data","description":"The intended purpose of the uploaded file, the 'Fine-tuning' only supports .jsonl files","options":[{"name":"Assistants","value":"assistants"},{"name":"Fine-Tune","value":"fine-tune"},{"name":"Vision","value":"vision"},{"name":"User Data","value":"user_data"}]}],"displayOptions":{"show":{"operation":["upload"],"resource":["file"]}}},{"displayName":"File","name":"fileId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"fileSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","validation":[{"type":"regex","properties":{"regex":"file-[a-zA-Z0-9]","errorMessage":"Not a valid File ID"}}],"placeholder":"e.g. 
file-1234567890"}],"displayOptions":{"show":{"operation":["deleteFile"],"resource":["file"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Purpose","name":"purpose","type":"options","default":"any","description":"Only return files with the given purpose","options":[{"name":"Any [Default]","value":"any"},{"name":"Assistants","value":"assistants"},{"name":"Fine-Tune","value":"fine-tune"},{"name":"Vision","value":"vision"},{"name":"User Data","value":"user_data"}]}],"displayOptions":{"show":{"operation":["list"],"resource":["file"]}}},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Analyze Image","value":"analyze","action":"Analyze image","description":"Take in images and answer questions about them"},{"name":"Generate an Image","value":"generate","action":"Generate an image","description":"Creates an image from a text prompt"},{"name":"Edit Image","value":"edit","action":"Edit image","description":"Edit an image"}],"default":"generate","displayOptions":{"show":{"resource":["image"]}}},{"displayName":"Model","name":"model","type":"options","default":"dall-e-3","description":"The model to use for image generation","options":[{"name":"DALL·E 2","value":"dall-e-2"},{"name":"DALL·E 3","value":"dall-e-3"},{"name":"GPT Image 1","value":"gpt-image-1"}],"displayOptions":{"show":{"operation":["generate"],"resource":["image"]}}},{"displayName":"Prompt","name":"prompt","type":"string","placeholder":"e.g. A cute cat eating a dinosaur","description":"A text description of the desired image(s). The maximum length is 1000 characters for dall-e-2 and 4000 characters for dall-e-3.","default":"","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["generate"],"resource":["image"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Number of Images","name":"n","default":1,"description":"Number of images to generate","type":"number","typeOptions":{"minValue":1,"maxValue":10},"displayOptions":{"show":{"/model":["dall-e-2"]}}},{"displayName":"Quality","name":"dalleQuality","type":"options","description":"The quality of the image that will be generated, HD creates images with finer details and greater consistency across the image","options":[{"name":"HD","value":"hd"},{"name":"Standard","value":"standard"}],"displayOptions":{"show":{"/model":["dall-e-3"]}},"default":"standard"},{"displayName":"Quality","name":"quality","type":"options","description":"The quality of the image that will be generated, High creates images with finer details and greater consistency across the 
image","options":[{"name":"High","value":"high"},{"name":"Medium","value":"medium"},{"name":"Low","value":"low"}],"displayOptions":{"show":{"/model":["gpt-image-1"]}},"default":"medium"},{"displayName":"Resolution","name":"size","type":"options","options":[{"name":"256x256","value":"256x256"},{"name":"512x512","value":"512x512"},{"name":"1024x1024","value":"1024x1024"}],"displayOptions":{"show":{"/model":["dall-e-2"]}},"default":"1024x1024"},{"displayName":"Resolution","name":"size","type":"options","options":[{"name":"1024x1024","value":"1024x1024"},{"name":"1792x1024","value":"1792x1024"},{"name":"1024x1792","value":"1024x1792"}],"displayOptions":{"show":{"/model":["dall-e-3"]}},"default":"1024x1024"},{"displayName":"Resolution","name":"size","type":"options","options":[{"name":"1024x1024","value":"1024x1024"},{"name":"1024x1536","value":"1024x1536"},{"name":"1536x1024","value":"1536x1024"}],"displayOptions":{"show":{"/model":["gpt-image-1"]}},"default":"1024x1024"},{"displayName":"Style","name":"style","type":"options","options":[{"name":"Natural","value":"natural","description":"Produce more natural looking images"},{"name":"Vivid","value":"vivid","description":"Lean towards generating hyper-real and dramatic images"}],"displayOptions":{"show":{"/model":["dall-e-3"]}},"default":"vivid"},{"displayName":"Respond with Image URL(s)","name":"returnImageUrls","type":"boolean","default":false,"description":"Whether to return image URL(s) instead of binary file(s)","displayOptions":{"hide":{"/model":["gpt-image-1"]}}},{"displayName":"Put Output in Field","name":"binaryPropertyOutput","type":"string","default":"data","hint":"The name of the output field to put the binary file data in","displayOptions":{"show":{"returnImageUrls":[false]}}}],"displayOptions":{"show":{"operation":["generate"],"resource":["image"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"imageModelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. gpt-4"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"Text Input","name":"text","type":"string","placeholder":"e.g. What's in this image?","default":"What's in this image?","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"Input Type","name":"inputType","type":"options","default":"url","options":[{"name":"Image URL(s)","value":"url"},{"name":"Binary File(s)","value":"base64"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"URL(s)","name":"imageUrls","type":"string","placeholder":"e.g. https://example.com/image.jpeg","description":"URL(s) of the image(s) to analyze, multiple URLs can be added separated by comma","default":"","displayOptions":{"show":{"inputType":["url"],"operation":["analyze"],"resource":["image"]}}},{"displayName":"Input Data Field Name","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. 
data","hint":"The name of the input field containing the binary file data to be processed","description":"Name of the binary property which contains the image(s)","displayOptions":{"show":{"inputType":["base64"],"operation":["analyze"],"resource":["image"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":true,"description":"Whether to simplify the response or not","displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Detail","name":"detail","type":"options","default":"auto","options":[{"name":"Auto","value":"auto","description":"Model will look at the image input size and decide if it should use the low or high setting"},{"name":"Low","value":"low","description":"Return faster responses and consume fewer tokens"},{"name":"High","value":"high","description":"Return more detailed responses, consumes more tokens"}]},{"displayName":"Length of Description (Max Tokens)","description":"Fewer tokens will result in shorter, less detailed image description","name":"maxTokens","type":"number","default":300,"typeOptions":{"minValue":1}}],"displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"Model","name":"model","type":"options","default":"gpt-image-1","description":"The model to use for image generation","options":[{"name":"DALL·E 2","value":"dall-e-2"},{"name":"GPT Image 1","value":"gpt-image-1"}],"displayOptions":{"show":{"operation":["edit"],"resource":["image"]}}},{"displayName":"Prompt","name":"prompt","type":"string","required":true,"default":"","description":"A text description of the desired image(s). Maximum 1000 characters for dall-e-2, 32000 characters for gpt-image-1.","placeholder":"A beautiful sunset over mountains","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["edit"],"resource":["image"]}}},{"displayName":"Images","name":"images","type":"fixedCollection","placeholder":"Add Image","typeOptions":{"multipleValues":true,"multipleValueButtonText":"Add Image"},"default":{"values":[{"binaryPropertyName":"data"}]},"description":"Add one or more binary fields to include images with your prompt. Each image should be a png, webp, or jpg file less than 50MB. You can provide up to 16 images.","displayOptions":{"show":{"/model":["gpt-image-1"],"operation":["edit"],"resource":["image"]}},"options":[{"displayName":"Image","name":"values","values":[{"displayName":"Binary Field Name","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. data","description":"The name of the binary field containing the image data"}]}]},{"displayName":"Binary Field Name","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. data","hint":"The name of the input field containing the binary file data to be processed","description":"Name of the binary property which contains the image. It should be a square png file less than 4MB.","displayOptions":{"show":{"/model":["dall-e-2"],"operation":["edit"],"resource":["image"]}}},{"displayName":"Number of Images","name":"n","type":"number","default":1,"description":"The number of images to generate. 
Must be between 1 and 10.","typeOptions":{"minValue":1,"maxValue":10},"displayOptions":{"show":{"operation":["edit"],"resource":["image"]}}},{"displayName":"Size","name":"size","type":"options","default":"1024x1024","description":"The size of the generated images","options":[{"name":"256x256","value":"256x256"},{"name":"512x512","value":"512x512"},{"name":"1024x1024","value":"1024x1024"},{"name":"1024x1536 (Portrait)","value":"1024x1536"},{"name":"1536x1024 (Landscape)","value":"1536x1024"},{"name":"Auto","value":"auto"}],"displayOptions":{"show":{"operation":["edit"],"resource":["image"]}}},{"displayName":"Quality","name":"quality","type":"options","default":"auto","description":"The quality of the image that will be generated","options":[{"name":"Auto","value":"auto"},{"name":"High","value":"high"},{"name":"Medium","value":"medium"},{"name":"Low","value":"low"},{"name":"Standard","value":"standard"}],"displayOptions":{"show":{"/model":["gpt-image-1"],"operation":["edit"],"resource":["image"]}}},{"displayName":"Response Format","name":"responseFormat","type":"options","default":"url","description":"The format in which the generated images are returned. URLs are only valid for 60 minutes after generation.","options":[{"name":"URL","value":"url"},{"name":"Base64 JSON","value":"b64_json"}],"displayOptions":{"show":{"/model":["dall-e-2"],"operation":["edit"],"resource":["image"]}}},{"displayName":"Output Format","name":"outputFormat","type":"options","default":"png","description":"The format in which the generated images are returned. Only supported for gpt-image-1.","options":[{"name":"PNG","value":"png"},{"name":"JPEG","value":"jpeg"},{"name":"WebP","value":"webp"}],"displayOptions":{"show":{"/model":["gpt-image-1"],"operation":["edit"],"resource":["image"]}}},{"displayName":"Output Compression","name":"outputCompression","type":"number","default":100,"description":"The compression level (0-100%) for the generated images. Only supported for gpt-image-1 with webp or jpeg output formats.","typeOptions":{"minValue":0,"maxValue":100},"displayOptions":{"show":{"/model":["gpt-image-1"],"outputFormat":["webp","jpeg"],"operation":["edit"],"resource":["image"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"User","name":"user","type":"string","default":"","description":"A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse","placeholder":"user-12345"},{"displayName":"Background","name":"background","type":"options","default":"auto","description":"Allows to set transparency for the background of the generated image(s). Only supported for gpt-image-1.","options":[{"name":"Auto","value":"auto"},{"name":"Transparent","value":"transparent"},{"name":"Opaque","value":"opaque"}],"displayOptions":{"show":{"/model":["gpt-image-1"]}}},{"displayName":"Input Fidelity","name":"inputFidelity","type":"options","default":"low","description":"Control how much effort the model will exert to match the style and features of input images. Only supported for gpt-image-1.","options":[{"name":"Low","value":"low"},{"name":"High","value":"high"}],"displayOptions":{"show":{"/model":["gpt-image-1"]}}},{"displayName":"Image Mask","name":"imageMask","type":"string","default":"data","hint":"The name of the input field containing the binary file data to be processed","description":"Name of the binary property which contains the image. An additional image whose fully transparent areas (e.g. 
where alpha is zero) indicate where image should be edited. If there are multiple images provided, the mask will be applied on the first image. Must be a valid PNG file, less than 4MB, and have the same dimensions as image."}],"displayOptions":{"show":{"operation":["edit"],"resource":["image"]}}},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Message a Model","value":"response","action":"Message a model","description":"Generate a model response with GPT 3, 4, 5, etc. using Responses API"},{"name":"Classify Text for Violations","value":"classify","action":"Classify text for violations","description":"Check whether content complies with usage policies"}],"default":"response","displayOptions":{"show":{"resource":["text"]}}},{"displayName":"Text Input","name":"input","type":"string","placeholder":"e.g. Sample text goes here","description":"The input text to classify if it is violates the moderation policy","default":"","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["classify"],"resource":["text"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":false,"description":"Whether to return a simplified version of the response instead of the raw data","displayOptions":{"show":{"operation":["classify"],"resource":["text"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Use Stable Model","name":"useStableModel","type":"boolean","default":false,"description":"Whether to use the stable version of the model instead of the latest version, accuracy may be slightly lower"}],"displayOptions":{"show":{"@version":[{"_cnd":{"lt":2.1}}],"operation":["classify"],"resource":["text"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"modelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. gpt-4"}],"displayOptions":{"show":{"operation":["response"],"resource":["text"]}}},{"displayName":"Messages","name":"responses","type":"fixedCollection","typeOptions":{"sortable":true,"multipleValues":true},"placeholder":"Add Message","default":{"values":[{"type":"text"}]},"options":[{"displayName":"Values","name":"values","values":[{"displayName":"Type","name":"type","type":"options","default":"text","options":[{"name":"Text","value":"text"},{"name":"Image","value":"image"},{"name":"File","value":"file"}]},{"displayName":"Role","name":"role","type":"options","description":"Role in shaping the model's response, it tells the model how it should behave and interact with the user","options":[{"name":"User","value":"user","description":"Send a message as a user and get a response from the model"},{"name":"Assistant","value":"assistant","description":"Tell the model to adopt a specific tone or personality"},{"name":"System","value":"system","description":"Usually used to set the model's behavior or context for the next user message"}],"default":"user"},{"displayName":"Prompt","name":"content","type":"string","description":"The content of the message to be send","default":"","placeholder":"e.g. 
Hello, how can you help me?","typeOptions":{"rows":2},"displayOptions":{"show":{"type":["text"]}}},{"displayName":"Image Type","name":"imageType","type":"options","default":"url","options":[{"name":"Image URL","value":"url"},{"name":"File ID","value":"fileId"},{"name":"File Data","value":"base64"}],"displayOptions":{"show":{"type":["image"]}}},{"displayName":"Image URL","name":"imageUrl","type":"string","default":"","placeholder":"e.g. https://example.com/image.jpeg","description":"URL of the image to be sent","displayOptions":{"show":{"type":["image"],"imageType":["url"]}}},{"displayName":"Image Data","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. data","hint":"The name of the input field containing the binary file data to be processed","description":"Name of the binary property which contains the image(s)","displayOptions":{"show":{"type":["image"],"imageType":["base64"]}}},{"displayName":"File ID","name":"fileId","type":"string","default":"","description":"ID of the file to be sent","displayOptions":{"show":{"type":["image"],"imageType":["fileId"]}}},{"displayName":"Detail","name":"imageDetail","type":"options","default":"auto","description":"The detail level of the image to be sent to the model","options":[{"name":"Auto","value":"auto"},{"name":"Low","value":"low"},{"name":"High","value":"high"}],"displayOptions":{"show":{"type":["image"]}}},{"displayName":"File Type","name":"fileType","type":"options","default":"url","options":[{"name":"File URL","value":"url"},{"name":"File ID","value":"fileId"},{"name":"File Data","value":"base64"}],"displayOptions":{"show":{"type":["file"]}}},{"displayName":"File URL","name":"fileUrl","type":"string","default":"","placeholder":"e.g. https://example.com/file.pdf","description":"URL of the file to be sent. Accepts base64 encoded files as well.","displayOptions":{"show":{"type":["file"],"fileType":["url"]}}},{"displayName":"File ID","name":"fileId","type":"string","default":"","description":"ID of the file to be sent","displayOptions":{"show":{"type":["file"],"fileType":["fileId"]}}},{"displayName":"File Data","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. 
data","hint":"The name of the input field containing the binary file data to be processed","description":"Name of the binary property which contains the file","displayOptions":{"show":{"type":["file"],"fileType":["base64"]}}},{"displayName":"File Name","name":"fileName","type":"string","default":"","required":true,"displayOptions":{"show":{"type":["file"],"fileType":["base64"]}}}]}],"displayOptions":{"show":{"operation":["response"],"resource":["text"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":true,"description":"Whether to return a simplified version of the response instead of the raw data","displayOptions":{"show":{"operation":["response"],"resource":["text"]}}},{"displayName":"Hide Tools","name":"hideTools","type":"hidden","default":"hide","displayOptions":{"show":{"modelId":["gpt-3.5-turbo-16k-0613","dall-e-3","text-embedding-3-large","dall-e-2","whisper-1","tts-1-hd-1106","tts-1-hd","gpt-4-0314","text-embedding-3-small","gpt-4-32k-0314","gpt-3.5-turbo-0301","gpt-4-vision-preview","gpt-3.5-turbo-16k","gpt-3.5-turbo-instruct-0914","tts-1","davinci-002","gpt-3.5-turbo-instruct","babbage-002","tts-1-1106","text-embedding-ada-002"],"operation":["response"],"resource":["text"]}}},{"displayName":"Connect your own custom n8n tools to this node on the canvas","name":"noticeTools","type":"notice","default":"","displayOptions":{"hide":{"hideTools":["hide"]},"show":{"operation":["response"],"resource":["text"]}}},{"displayName":"Built-in Tools","name":"builtInTools","placeholder":"Add Built-in Tool","type":"collection","default":{},"options":[{"displayName":"Web Search","name":"webSearch","type":"collection","default":{"searchContextSize":"medium"},"options":[{"displayName":"Search Context Size","description":"High level guidance for the amount of context window space to use for the search","name":"searchContextSize","type":"options","default":"medium","options":[{"name":"Low","value":"low"},{"name":"Medium","value":"medium"},{"name":"High","value":"high"}]},{"displayName":"Web Search Allowed Domains","name":"allowedDomains","type":"string","default":"","description":"Comma-separated list of domains to search. Only domains in this list will be searched.","placeholder":"e.g. google.com, wikipedia.org"},{"displayName":"Country","name":"country","type":"string","default":"","placeholder":"e.g. US, GB"},{"displayName":"City","name":"city","type":"string","default":"","placeholder":"e.g. New York, London"},{"displayName":"Region","name":"region","type":"string","default":"","placeholder":"e.g. New York, London"}]},{"displayName":"File Search","name":"fileSearch","type":"collection","default":{"vectorStoreIds":"[]"},"options":[{"displayName":"Vector Store IDs","name":"vectorStoreIds","description":"The vector store IDs to use for the file search. 
Vector stores are managed via OpenAI Dashboard.","type":"json","default":"[]","required":true},{"displayName":"Filters","name":"filters","type":"json","default":"{}"},{"displayName":"Max Results","name":"maxResults","type":"number","default":1,"typeOptions":{"minValue":1,"maxValue":50}}]},{"displayName":"Code Interpreter","name":"codeInterpreter","type":"boolean","default":true,"description":"Whether to allow the model to execute code in a sandboxed environment"}],"displayOptions":{"show":{"operation":["response"],"resource":["text"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Conversation ID","name":"conversationId","default":"","description":"The conversation that this response belongs to. Input items and output items from this response are automatically added to this conversation after this response completes.","type":"string"},{"displayName":"Include Additional Data","name":"include","default":[],"type":"multiOptions","description":"Specify additional output data to include in the model response","options":[{"name":"Code Interpreter Call Outputs","value":"code_interpreter_call.outputs"},{"name":"Computer Call Output Image URL","value":"computer_call_output.output.image_url"},{"name":"File Search Call Results","value":"file_search_call.results"},{"name":"Message Input Image URL","value":"message.input_image.image_url"},{"name":"Message Output Text Logprobs","value":"message.output_text.logprobs"},{"name":"Reasoning Encrypted Content","value":"reasoning.encrypted_content"},{"name":"Web Search Tool Call Sources","value":"web_search_call.action.sources"}]},{"displayName":"Instructions","name":"instructions","type":"string","default":"","description":"Instructions for the model to follow","typeOptions":{"rows":2}},{"displayName":"Maximum Number of Tokens","name":"maxTokens","default":16,"description":"The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).","type":"number","typeOptions":{"maxValue":32768}},{"displayName":"Max Tool Calls Iterations","name":"maxToolsIterations","type":"number","default":15,"description":"The maximum number of tool iteration cycles the LLM will run before stopping. A single iteration can contain multiple tool calls. Set to 0 for no limit."},{"displayName":"Max Built-in Tool Calls","name":"maxToolCalls","type":"number","default":15,"description":"The maximum number of total calls to built-in tools that can be processed in a response. This maximum number applies across all built-in tool calls, not per individual tool. Any further attempts to call a tool by the model will be ignored."},{"displayName":"Metadata","name":"metadata","type":"json","description":"Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.","default":"{}"},{"displayName":"Parallel Tool Calls","name":"parallelToolCalls","type":"boolean","default":false,"description":"Whether to allow parallel tool calls. If true, the model can call multiple tools at once."},{"displayName":"Previous Response ID","name":"previousResponseId","type":"string","default":"","description":"The ID of the previous response to continue from. 
Cannot be used in conjunction with Conversation ID."},{"displayName":"Prompt","name":"promptConfig","type":"fixedCollection","description":"Configure the reusable prompt template configured via OpenAI Dashboard. <a href=\"https://platform.openai.com/docs/guides/prompt-engineering#reusable-prompts\">Learn more</a>.","default":{"promptOptions":[{"promptId":""}]},"options":[{"displayName":"Prompt","name":"promptOptions","values":[{"displayName":"Prompt ID","name":"promptId","type":"string","default":"","description":"The unique identifier of the prompt template to use"},{"displayName":"Version","name":"version","type":"string","default":"","description":"Optional version of the prompt template"},{"displayName":"Variables","name":"variables","type":"json","default":"{}","description":"Variables to be substituted into the prompt template"}]}]},{"displayName":"Prompt Cache Key","name":"promptCacheKey","type":"string","default":"","description":"Used by OpenAI to cache responses for similar requests to optimize your cache hit rates"},{"displayName":"Reasoning","name":"reasoning","type":"fixedCollection","default":{"reasoningOptions":[{"effort":"medium","summary":"none"}]},"options":[{"displayName":"Reasoning","name":"reasoningOptions","values":[{"displayName":"Effort","name":"effort","type":"options","default":"medium","options":[{"name":"Low","value":"low"},{"name":"Medium","value":"medium"},{"name":"High","value":"high"}]},{"displayName":"Summary","name":"summary","type":"options","default":"auto","description":"A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process.","options":[{"name":"None","value":"none"},{"name":"Auto","value":"auto"},{"name":"Concise","value":"concise"},{"name":"Detailed","value":"detailed"}]}]}]},{"displayName":"Safety Identifier","name":"safetyIdentifier","type":"string","default":"","description":"A stable identifier used to help detect users of your application that may be violating OpenAI's usage policies. The IDs should be a string that uniquely identifies each user."},{"displayName":"Service Tier","name":"serviceTier","type":"options","default":"auto","description":"The service tier to use for the request","options":[{"name":"Auto","value":"auto"},{"name":"Flex","value":"flex"},{"name":"Default","value":"default"},{"name":"Priority","value":"priority"}]},{"displayName":"Store","name":"store","type":"boolean","default":true,"description":"Whether to store the generated model response for later retrieval via API"},{"displayName":"Output Format","name":"textFormat","type":"fixedCollection","default":{"textOptions":[{"type":"text"}]},"options":[{"displayName":"Text","name":"textOptions","values":[{"displayName":"Type","name":"type","type":"options","default":"","options":[{"name":"Text","value":"text"},{"name":"JSON Schema (recommended)","value":"json_schema"},{"name":"JSON Object","value":"json_object"}]},{"displayName":"Verbosity","name":"verbosity","type":"options","default":"medium","options":[{"name":"Low","value":"low"},{"name":"Medium","value":"medium"},{"name":"High","value":"high"}]},{"displayName":"Name","name":"name","type":"string","default":"my_schema","description":"The name of the response format. 
Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.","displayOptions":{"show":{"type":["json_schema"]}}},{"displayName":"All properties in the schema must be set to \"required\", when using \"strict\" mode.","name":"requiredNotice","type":"notice","default":"","displayOptions":{"show":{"strict":[true]}}},{"displayName":"Schema","name":"schema","type":"json","default":"{\n \"type\": \"object\",\n \"properties\": {\n \"message\": {\n \"type\": \"string\"\n }\n },\n \"additionalProperties\": false,\n \"required\": [\"message\"]\n}","description":"The schema of the response format","displayOptions":{"show":{"type":["json_schema"]}}},{"displayName":"Description","name":"description","type":"string","default":"","description":"The description of the response format","displayOptions":{"show":{"type":["json_schema"]}}},{"displayName":"Strict","name":"strict","type":"boolean","default":false,"description":"Whether to require that the AI will always generate responses that match the provided JSON Schema","displayOptions":{"show":{"type":["json_schema"]}}}]}]},{"displayName":"Top Logprobs","name":"topLogprobs","type":"number","default":0,"description":"An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability","typeOptions":{"minValue":0,"maxValue":20}},{"displayName":"Output Randomness (Temperature)","name":"temperature","type":"number","default":1,"description":"What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both","typeOptions":{"minValue":0,"maxValue":2,"numberPrecision":1}},{"displayName":"Output Randomness (Top P)","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"An alternative to sampling with temperature, controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"},{"displayName":"Truncation","name":"truncation","type":"boolean","default":false,"description":"Whether to truncate the input to the model's context window size. When disabled will throw a 400 error instead."},{"displayName":"Background Mode","name":"backgroundMode","type":"fixedCollection","default":{"values":[{"backgroundMode":true}]},"options":[{"displayName":"Bakground","name":"values","values":[{"displayName":"Background Mode","name":"enabled","type":"boolean","default":false,"description":"Whether to run the model in background mode. If true, the model will run in background mode."},{"displayName":"Timeout","name":"timeout","type":"number","default":300,"description":"The timeout for the background mode in seconds. 
If 0, the timeout is infinite.","typeOptions":{"minValue":0,"maxValue":3600}}]}]}],"displayOptions":{"show":{"operation":["response"],"resource":["text"]}}},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Create","value":"create","action":"Create a conversation","description":"Create a conversation"},{"name":"Get","value":"get","action":"Get a conversation","description":"Get a conversation"},{"name":"Remove","value":"remove","action":"Remove a conversation","description":"Remove a conversation"},{"name":"Update","value":"update","action":"Update a conversation","description":"Update a conversation"}],"default":"create","displayOptions":{"show":{"resource":["conversation"]}}},{"displayName":"Messages","name":"messages","type":"fixedCollection","typeOptions":{"sortable":true,"multipleValues":true},"placeholder":"Add Message","default":{"values":[{"type":"text"}]},"options":[{"displayName":"Values","name":"values","values":[{"displayName":"Role","name":"role","type":"options","description":"Role in shaping the model's response, it tells the model how it should behave and interact with the user","options":[{"name":"User","value":"user","description":"Send a message as a user and get a response from the model"},{"name":"Assistant","value":"assistant","description":"Tell the model to adopt a specific tone or personality"},{"name":"System","value":"system","description":"Usually used to set the model's behavior or context for the next user message"}],"default":"user"},{"displayName":"Prompt","name":"content","type":"string","description":"The content of the message to be send","default":"","placeholder":"e.g. Hello, how can you help me?","typeOptions":{"rows":2},"displayOptions":{}}]}],"displayOptions":{"show":{"operation":["create"],"resource":["conversation"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Metadata","name":"metadata","type":"json","description":"Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.","default":"{}"}],"displayOptions":{"show":{"operation":["create"],"resource":["conversation"]}}},{"displayName":"Conversation ID","name":"conversationId","type":"string","default":"","placeholder":"conv_1234567890","description":"The ID of the conversation to delete","required":true,"displayOptions":{"show":{"operation":["remove"],"resource":["conversation"]}}},{"displayName":"Conversation ID","name":"conversationId","type":"string","default":"","placeholder":"conv_1234567890","description":"The ID of the conversation to update","required":true,"displayOptions":{"show":{"operation":["update"],"resource":["conversation"]}}},{"displayName":"Metadata","name":"metadata","type":"json","description":"Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. 
Values are strings with a maximum length of 512 characters.","default":"{}","required":true,"displayOptions":{"show":{"operation":["update"],"resource":["conversation"]}}},{"displayName":"Conversation ID","name":"conversationId","type":"string","default":"","placeholder":"conv_1234567890","description":"The ID of the conversation to retrieve","required":true,"displayOptions":{"show":{"operation":["get"],"resource":["conversation"]}}},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Generate","value":"generate","action":"Generate a video","description":"Creates a video from a text prompt"}],"default":"generate","displayOptions":{"show":{"resource":["video"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"videoModelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. gpt-4"}],"displayOptions":{"show":{"operation":["generate"],"resource":["video"]}}},{"displayName":"Prompt","name":"prompt","type":"string","default":"A video of a cat playing with a ball","description":"The prompt to generate a video from","required":true,"typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["generate"],"resource":["video"]}}},{"displayName":"Seconds","name":"seconds","type":"number","default":4,"description":"Clip duration in seconds","required":true,"displayOptions":{"show":{"operation":["generate"],"resource":["video"]}}},{"displayName":"Size","name":"size","type":"options","default":"1280x720","description":"Output resolution formatted as width x height. 1024x1792 and 1792x1024 are only supported by Sora 2 Pro.","options":[{"name":"720x1280","value":"720x1280"},{"name":"1280x720","value":"1280x720"},{"name":"1024x1792","value":"1024x1792"},{"name":"1792x1024","value":"1792x1024"}],"displayOptions":{"show":{"operation":["generate"],"resource":["video"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Reference","description":"Optional image reference that guides generation","name":"binaryPropertyNameReference","type":"string","default":"data","placeholder":"e.g. data"},{"displayName":"Wait Timeout","name":"waitTime","type":"number","default":300,"description":"Time to wait for the video to be generated in seconds","typeOptions":{"minValue":5,"maxValue":7200}},{"displayName":"Output Field Name","name":"fileName","type":"string","default":"data","hint":"The name of the output field to put the binary file data in"}],"displayOptions":{"show":{"operation":["generate"],"resource":["video"]}}}],"iconUrl":{"light":"icons/@n8n/n8n-nodes-langchain/dist/nodes/vendors/OpenAi/openAi.svg","dark":"icons/@n8n/n8n-nodes-langchain/dist/nodes/vendors/OpenAi/openAi.dark.svg"}},
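Most of the property definitions above are gated by displayOptions.show rules such as "/model":["gpt-image-1"], "returnImageUrls":[false], or "@version":[{"_cnd":{"gte":1.4}}]. The TypeScript sketch below is illustrative only, not code from this package or from n8n core: it shows how such a rule block could be evaluated, under the assumption (inferred from the JSON in this diff) that a leading "/" names a root-level parameter, "@version" names the node type version, and "_cnd" carries a numeric comparison (gte/lt).

// Illustrative sketch, NOT n8n's actual implementation: evaluating a
// displayOptions.show block against current parameter values and node version.
// Assumptions: "/" prefix = root-level parameter, "@version" = node type
// version, {_cnd:{gte|lt}} = numeric comparison - all inferred from the JSON above.
type Cnd = { _cnd: { gte?: number; lt?: number } };
type ShowValue = string | number | boolean | Cnd;
type ShowBlock = Record<string, ShowValue[]>;

interface EvalContext {
  version: number;                       // node type version, e.g. 2.1
  rootParams: Record<string, unknown>;   // top-level parameters, e.g. { model: 'gpt-image-1' }
  localParams: Record<string, unknown>;  // parameters local to the current collection
}

function matchesValue(actual: unknown, expected: ShowValue): boolean {
  // {_cnd: ...} entries are treated as numeric range checks on the actual value.
  if (typeof expected === 'object' && expected !== null && '_cnd' in expected) {
    const { gte, lt } = expected._cnd;
    const n = Number(actual);
    if (gte !== undefined && !(n >= gte)) return false;
    if (lt !== undefined && !(n < lt)) return false;
    return true;
  }
  return actual === expected;
}

function isShown(show: ShowBlock, ctx: EvalContext): boolean {
  // Every key in "show" must match at least one of its listed values.
  return Object.entries(show).every(([key, allowed]) => {
    let actual: unknown;
    if (key === '@version') actual = ctx.version;
    else if (key.startsWith('/')) actual = ctx.rootParams[key.slice(1)];
    else actual = ctx.localParams[key];
    return allowed.some((expected) => matchesValue(actual, expected));
  });
}

// Example: the gpt-image-1 "Quality" option from the image > generate options above
const show: ShowBlock = { '/model': ['gpt-image-1'] };
console.log(
  isShown(show, { version: 2.1, rootParams: { model: 'gpt-image-1' }, localParams: {} }),
); // -> true

Read this way, the keys of a show block are ANDed while the values listed for each key are ORed, which is consistent with the same Resolution/size property appearing three times above under different "/model" conditions instead of once with a merged option list.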
  {"displayName":"OpenAI","name":"openAi","group":["transform"],"defaultVersion":2.1,"subtitle":"={{((resource, operation) => {\n if (operation === \"deleteAssistant\") {\n return \"Delete Assistant\";\n }\n if (operation === \"deleteFile\") {\n return \"Delete File\";\n }\n if (operation === \"classify\") {\n return \"Classify Text\";\n }\n if (operation === \"message\" && resource === \"text\") {\n return \"Message Model\";\n }\n const capitalize = (str) => {\n const chars = str.split(\"\");\n chars[0] = chars[0].toUpperCase();\n return chars.join(\"\");\n };\n if ([\"transcribe\", \"translate\"].includes(operation)) {\n resource = \"recording\";\n }\n if (operation === \"list\") {\n resource = resource + \"s\";\n }\n return `${capitalize(operation)} ${capitalize(resource)}`;\n})($parameter.resource, $parameter.operation)}}","description":"Message an assistant or GPT, analyze images, generate audio, etc.","codex":{"alias":["LangChain","ChatGPT","Sora","DallE","whisper","audio","transcribe","tts","assistant"],"categories":["AI"],"subcategories":{"AI":["Agents","Miscellaneous","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/app-nodes/n8n-nodes-langchain.openai/"}]}},"version":[1,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8],"defaults":{"name":"OpenAI"},"inputs":"={{((resource, operation, hideTools, memory) => {\n if (resource === \"assistant\" && operation === \"message\") {\n const inputs = [\n { type: \"main\" },\n { type: \"ai_tool\", displayName: \"Tools\" }\n ];\n if (memory !== \"threadId\") {\n inputs.push({ type: \"ai_memory\", displayName: \"Memory\", maxConnections: 1 });\n }\n return inputs;\n }\n if (resource === \"text\" && (operation === \"message\" || operation === \"response\")) {\n if (hideTools === \"hide\") {\n return [\"main\"];\n }\n return [{ type: \"main\" }, { type: \"ai_tool\", displayName: \"Tools\" }];\n }\n return [\"main\"];\n})($parameter.resource, $parameter.operation, $parameter.hideTools, $parameter.memory ?? undefined)}}","outputs":["main"],"credentials":[{"name":"openAiApi","required":true}],"properties":[{"displayName":"Resource","name":"resource","type":"options","noDataExpression":true,"options":[{"name":"Assistant","value":"assistant"},{"name":"Text","value":"text"},{"name":"Image","value":"image"},{"name":"Audio","value":"audio"},{"name":"File","value":"file"}],"default":"text"},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Create an Assistant","value":"create","action":"Create an assistant","description":"Create a new assistant"},{"name":"Delete an Assistant","value":"deleteAssistant","action":"Delete an assistant","description":"Delete an assistant from the account"},{"name":"List Assistants","value":"list","action":"List assistants","description":"List assistants in the organization"},{"name":"Message an Assistant","value":"message","action":"Message an assistant","description":"Send messages to an assistant"},{"name":"Update an Assistant","value":"update","action":"Update an assistant","description":"Update an existing assistant"}],"default":"message","displayOptions":{"show":{"resource":["assistant"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"modelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. 
gpt-4"}],"displayOptions":{"show":{"operation":["create"],"resource":["assistant"]}}},{"displayName":"Name","name":"name","type":"string","default":"","description":"The name of the assistant. The maximum length is 256 characters.","placeholder":"e.g. My Assistant","required":true,"displayOptions":{"show":{"operation":["create"],"resource":["assistant"]}}},{"displayName":"Description","name":"description","type":"string","default":"","description":"The description of the assistant. The maximum length is 512 characters.","placeholder":"e.g. My personal assistant","displayOptions":{"show":{"operation":["create"],"resource":["assistant"]}}},{"displayName":"Instructions","name":"instructions","type":"string","description":"The system instructions that the assistant uses. The maximum length is 32768 characters.","default":"","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["create"],"resource":["assistant"]}}},{"displayName":"Code Interpreter","name":"codeInterpreter","type":"boolean","default":false,"description":"Whether to enable the code interpreter that allows the assistants to write and run Python code in a sandboxed execution environment, find more <a href=\"https://platform.openai.com/docs/assistants/tools/code-interpreter\" target=\"_blank\">here</a>","displayOptions":{"show":{"operation":["create"],"resource":["assistant"]}}},{"displayName":"Knowledge Retrieval","name":"knowledgeRetrieval","type":"boolean","default":false,"description":"Whether to augments the assistant with knowledge from outside its model, such as proprietary product information or documents, find more <a href=\"https://platform.openai.com/docs/assistants/tools/knowledge-retrieval\" target=\"_blank\">here</a>","displayOptions":{"show":{"operation":["create"],"resource":["assistant"]}}},{"displayName":"Files","name":"file_ids","type":"multiOptions","description":"The files to be used by the assistant, there can be a maximum of 20 files attached to the assistant. 
You can use expression to pass file IDs as an array or comma-separated string.","typeOptions":{"loadOptionsMethod":"getFiles"},"default":[],"hint":"Add more files by using the 'Upload a File' operation","displayOptions":{"show":{"codeInterpreter":[true],"operation":["create"],"resource":["assistant"]},"hide":{"knowledgeRetrieval":[true]}}},{"displayName":"Files","name":"file_ids","type":"multiOptions","description":"The files to be used by the assistant, there can be a maximum of 20 files attached to the assistant","typeOptions":{"loadOptionsMethod":"getFiles"},"default":[],"hint":"Add more files by using the 'Upload a File' operation","displayOptions":{"show":{"knowledgeRetrieval":[true],"operation":["create"],"resource":["assistant"]},"hide":{"codeInterpreter":[true]}}},{"displayName":"Files","name":"file_ids","type":"multiOptions","description":"The files to be used by the assistant, there can be a maximum of 20 files attached to the assistant","typeOptions":{"loadOptionsMethod":"getFiles"},"default":[],"hint":"Add more files by using the 'Upload a File' operation","displayOptions":{"show":{"knowledgeRetrieval":[true],"codeInterpreter":[true],"operation":["create"],"resource":["assistant"]}}},{"displayName":"Add custom n8n tools when you <i>message</i> your assistant (rather than when creating it)","name":"noticeTools","type":"notice","default":"","displayOptions":{"show":{"operation":["create"],"resource":["assistant"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Output Randomness (Temperature)","name":"temperature","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive. We generally recommend altering this or temperature but not both.","type":"number"},{"displayName":"Output Randomness (Top P)","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"An alternative to sampling with temperature, controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"},{"displayName":"Fail if Assistant Already Exists","name":"failIfExists","type":"boolean","default":false,"description":"Whether to fail an operation if the assistant with the same name already exists"}],"displayOptions":{"show":{"operation":["create"],"resource":["assistant"]}}},{"displayName":"Assistant","name":"assistantId","type":"resourceLocator","description":"Assistant to respond to the message. You can add, modify or remove assistants in the <a href=\"https://platform.openai.com/playground?mode=assistant\" target=\"_blank\">playground</a>.","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"assistantSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. asst_abc123"}],"displayOptions":{"show":{"operation":["deleteAssistant"],"resource":["assistant"]}}},{"displayName":"Assistant","name":"assistantId","type":"resourceLocator","description":"Assistant to respond to the message. 
You can add, modify or remove assistants in the <a href=\"https://platform.openai.com/playground?mode=assistant\" target=\"_blank\">playground</a>.","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"assistantSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. asst_abc123"}],"displayOptions":{"show":{"operation":["message"],"resource":["assistant"]}}},{"displayName":"Source for Prompt (User Message)","name":"prompt","type":"options","options":[{"name":"Connected Chat Trigger Node","value":"auto","description":"Looks for an input field called 'chatInput' that is coming from a directly connected Chat Trigger"},{"name":"Connected Guardrails Node","value":"guardrails","description":"Looks for an input field called 'guardrailsInput' that is coming from a directly connected Guardrails Node"},{"name":"Define below","value":"define","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"auto","displayOptions":{"show":{"operation":["message"],"resource":["assistant"]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","default":"","placeholder":"e.g. Hello, how can you help me?","typeOptions":{"rows":2},"displayOptions":{"show":{"prompt":["define"],"operation":["message"],"resource":["assistant"]}}},{"displayName":"Memory","name":"memory","type":"options","options":[{"name":"Use memory connector","value":"connector","description":"Connect one of the supported memory nodes"},{"name":"Use thread ID","value":"threadId","description":"Specify the ID of the thread to continue"}],"displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.6}}],"operation":["message"],"resource":["assistant"]}},"default":"connector"},{"displayName":"Thread ID","name":"threadId","type":"string","default":"","placeholder":"","description":"The ID of the thread to continue, a new thread will be created if not specified","hint":"If the thread ID is empty or undefined a new thread will be created and included in the response","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.6}}],"memory":["threadId"],"operation":["message"],"resource":["assistant"]}}},{"displayName":"Connect your own custom n8n tools to this node on the canvas","name":"noticeTools","type":"notice","default":"","displayOptions":{"show":{"operation":["message"],"resource":["assistant"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Base URL","name":"baseURL","default":"https://api.openai.com/v1","description":"Override the default base URL for the API","type":"string","displayOptions":{"hide":{"@version":[{"_cnd":{"gte":1.8}}]}}},{"displayName":"Max Retries","name":"maxRetries","default":2,"description":"Maximum number of retries to attempt","type":"number"},{"displayName":"Timeout","name":"timeout","default":10000,"description":"Maximum amount of time a request is allowed to take in milliseconds","type":"number"},{"displayName":"Preserve Original Tools","name":"preserveOriginalTools","type":"boolean","default":true,"description":"Whether to preserve the original tools of the assistant after the execution of this node, otherwise the tools will be replaced with the connected tools, if any, default is 
true","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.3}}]}}}],"displayOptions":{"show":{"operation":["message"],"resource":["assistant"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":true,"description":"Whether to return a simplified version of the response instead of the raw data","displayOptions":{"show":{"operation":["list"],"resource":["assistant"]}}},{"displayName":"Assistant","name":"assistantId","type":"resourceLocator","description":"Assistant to respond to the message. You can add, modify or remove assistants in the <a href=\"https://platform.openai.com/playground?mode=assistant\" target=\"_blank\">playground</a>.","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"assistantSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. asst_abc123"}],"displayOptions":{"show":{"operation":["update"],"resource":["assistant"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Code Interpreter","name":"codeInterpreter","type":"boolean","default":false,"description":"Whether to enable the code interpreter that allows the assistants to write and run Python code in a sandboxed execution environment, find more <a href=\"https://platform.openai.com/docs/assistants/tools/code-interpreter\" target=\"_blank\">here</a>"},{"displayName":"Description","name":"description","type":"string","default":"","description":"The description of the assistant. The maximum length is 512 characters.","placeholder":"e.g. My personal assistant"},{"displayName":"Files","name":"file_ids","type":"multiOptions","description":"The files to be used by the assistant, there can be a maximum of 20 files attached to the assistant. You can use expression to pass file IDs as an array or comma-separated string.","typeOptions":{"loadOptionsMethod":"getFiles"},"default":[],"hint":"Add more files by using the 'Upload a File' operation, any existing files not selected here will be removed."},{"displayName":"Instructions","name":"instructions","type":"string","description":"The system instructions that the assistant uses. The maximum length is 32768 characters.","default":"","typeOptions":{"rows":2}},{"displayName":"Knowledge Retrieval","name":"knowledgeRetrieval","type":"boolean","default":false,"description":"Whether to augments the assistant with knowledge from outside its model, such as proprietary product information or documents, find more <a href=\"https://platform.openai.com/docs/assistants/tools/knowledge-retrieval\" target=\"_blank\">here</a>"},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":false,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"modelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. gpt-4"}]},{"displayName":"Name","name":"name","type":"string","default":"","description":"The name of the assistant. The maximum length is 256 characters.","placeholder":"e.g. 
My Assistant"},{"displayName":"Remove All Custom Tools (Functions)","name":"removeCustomTools","type":"boolean","default":false,"description":"Whether to remove all custom tools (functions) from the assistant"},{"displayName":"Output Randomness (Temperature)","name":"temperature","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive. We generally recommend altering this or temperature but not both.","type":"number"},{"displayName":"Output Randomness (Top P)","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"An alternative to sampling with temperature, controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"}],"displayOptions":{"show":{"operation":["update"],"resource":["assistant"]}}},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Generate Audio","value":"generate","action":"Generate audio","description":"Creates audio from a text prompt"},{"name":"Transcribe a Recording","value":"transcribe","action":"Transcribe a recording","description":"Transcribes audio into text"},{"name":"Translate a Recording","value":"translate","action":"Translate a recording","description":"Translates audio into text in English"}],"default":"generate","displayOptions":{"show":{"resource":["audio"]}}},{"displayName":"OpenAI API limits the size of the audio file to 25 MB","name":"fileSizeLimitNotice","type":"notice","default":" ","displayOptions":{"show":{"resource":["audio"],"operation":["translate","transcribe"]}}},{"displayName":"Model","name":"model","type":"options","default":"tts-1","options":[{"name":"TTS-1","value":"tts-1"},{"name":"TTS-1-HD","value":"tts-1-hd"}],"displayOptions":{"show":{"operation":["generate"],"resource":["audio"]}}},{"displayName":"Text Input","name":"input","type":"string","placeholder":"e.g. The quick brown fox jumped over the lazy dog","description":"The text to generate audio for. 
The maximum length is 4096 characters.","default":"","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["generate"],"resource":["audio"]}}},{"displayName":"Voice","name":"voice","type":"options","default":"alloy","description":"The voice to use when generating the audio","options":[{"name":"Alloy","value":"alloy"},{"name":"Echo","value":"echo"},{"name":"Fable","value":"fable"},{"name":"Nova","value":"nova"},{"name":"Onyx","value":"onyx"},{"name":"Shimmer","value":"shimmer"}],"displayOptions":{"show":{"operation":["generate"],"resource":["audio"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Response Format","name":"response_format","type":"options","default":"mp3","options":[{"name":"MP3","value":"mp3"},{"name":"OPUS","value":"opus"},{"name":"AAC","value":"aac"},{"name":"FLAC","value":"flac"}]},{"displayName":"Audio Speed","name":"speed","type":"number","default":1,"typeOptions":{"minValue":0.25,"maxValue":4,"numberPrecision":1}},{"displayName":"Put Output in Field","name":"binaryPropertyOutput","type":"string","default":"data","hint":"The name of the output field to put the binary file data in"}],"displayOptions":{"show":{"operation":["generate"],"resource":["audio"]}}},{"displayName":"Input Data Field Name","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. data","hint":"The name of the input field containing the binary file data to be processed","description":"Name of the binary property which contains the audio file in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm","displayOptions":{"show":{"operation":["transcribe"],"resource":["audio"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Language of the Audio File","name":"language","type":"string","description":"The language of the input audio. Supplying the input language in <a href=\"https://en.wikipedia.org/wiki/List_of_ISO_639_language_codes\" target=\"_blank\">ISO-639-1</a> format will improve accuracy and latency.","default":""},{"displayName":"Output Randomness (Temperature)","name":"temperature","type":"number","default":0,"typeOptions":{"minValue":0,"maxValue":1,"numberPrecision":1}}],"displayOptions":{"show":{"operation":["transcribe"],"resource":["audio"]}}},{"displayName":"Input Data Field Name","name":"binaryPropertyName","type":"string","default":"data","hint":"The name of the input field containing the binary file data to be processed","placeholder":"e.g. 
data","description":"Name of the binary property which contains the audio file in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm","displayOptions":{"show":{"operation":["translate"],"resource":["audio"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Output Randomness (Temperature)","name":"temperature","type":"number","default":0,"typeOptions":{"minValue":0,"maxValue":1,"numberPrecision":1}}],"displayOptions":{"show":{"operation":["translate"],"resource":["audio"]}}},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Delete a File","value":"deleteFile","action":"Delete a file","description":"Delete a file from the server"},{"name":"List Files","value":"list","action":"List files","description":"Returns a list of files that belong to the user's organization"},{"name":"Upload a File","value":"upload","action":"Upload a file","description":"Upload a file that can be used across various endpoints"}],"default":"upload","displayOptions":{"show":{"resource":["file"]}}},{"displayName":"Input Data Field Name","name":"binaryPropertyName","type":"string","default":"data","hint":"The name of the input field containing the binary file data to be processed","placeholder":"e.g. data","description":"Name of the binary property which contains the file. The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants.","displayOptions":{"show":{"operation":["upload"],"resource":["file"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Purpose","name":"purpose","type":"options","default":"assistants","description":"The intended purpose of the uploaded file, the 'Fine-tuning' only supports .jsonl files","options":[{"name":"Assistants","value":"assistants"},{"name":"Fine-Tune","value":"fine-tune"}]}],"displayOptions":{"show":{"operation":["upload"],"resource":["file"]}}},{"displayName":"File","name":"fileId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"fileSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","validation":[{"type":"regex","properties":{"regex":"file-[a-zA-Z0-9]","errorMessage":"Not a valid File ID"}}],"placeholder":"e.g. 
file-1234567890"}],"displayOptions":{"show":{"operation":["deleteFile"],"resource":["file"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Purpose","name":"purpose","type":"options","default":"any","description":"Only return files with the given purpose","options":[{"name":"Any [Default]","value":"any"},{"name":"Assistants","value":"assistants"},{"name":"Fine-Tune","value":"fine-tune"}]}],"displayOptions":{"show":{"operation":["list"],"resource":["file"]}}},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Analyze Image","value":"analyze","action":"Analyze image","description":"Take in images and answer questions about them"},{"name":"Generate an Image","value":"generate","action":"Generate an image","description":"Creates an image from a text prompt"}],"default":"generate","displayOptions":{"show":{"resource":["image"]}}},{"displayName":"Model","name":"model","type":"options","default":"dall-e-3","description":"The model to use for image generation","options":[{"name":"DALL·E 2","value":"dall-e-2"},{"name":"DALL·E 3","value":"dall-e-3"},{"name":"GPT Image 1","value":"gpt-image-1"}],"displayOptions":{"show":{"operation":["generate"],"resource":["image"]}}},{"displayName":"Prompt","name":"prompt","type":"string","placeholder":"e.g. A cute cat eating a dinosaur","description":"A text description of the desired image(s). The maximum length is 1000 characters for dall-e-2 and 4000 characters for dall-e-3.","default":"","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["generate"],"resource":["image"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Number of Images","name":"n","default":1,"description":"Number of images to generate","type":"number","typeOptions":{"minValue":1,"maxValue":10},"displayOptions":{"show":{"/model":["dall-e-2"]}}},{"displayName":"Quality","name":"dalleQuality","type":"options","description":"The quality of the image that will be generated, HD creates images with finer details and greater consistency across the image","options":[{"name":"HD","value":"hd"},{"name":"Standard","value":"standard"}],"displayOptions":{"show":{"/model":["dall-e-3"]}},"default":"standard"},{"displayName":"Quality","name":"quality","type":"options","description":"The quality of the image that will be generated, High creates images with finer details and greater consistency across the 
image","options":[{"name":"High","value":"high"},{"name":"Medium","value":"medium"},{"name":"Low","value":"low"}],"displayOptions":{"show":{"/model":["gpt-image-1"]}},"default":"medium"},{"displayName":"Resolution","name":"size","type":"options","options":[{"name":"256x256","value":"256x256"},{"name":"512x512","value":"512x512"},{"name":"1024x1024","value":"1024x1024"}],"displayOptions":{"show":{"/model":["dall-e-2"]}},"default":"1024x1024"},{"displayName":"Resolution","name":"size","type":"options","options":[{"name":"1024x1024","value":"1024x1024"},{"name":"1792x1024","value":"1792x1024"},{"name":"1024x1792","value":"1024x1792"}],"displayOptions":{"show":{"/model":["dall-e-3"]}},"default":"1024x1024"},{"displayName":"Resolution","name":"size","type":"options","options":[{"name":"1024x1024","value":"1024x1024"},{"name":"1024x1536","value":"1024x1536"},{"name":"1536x1024","value":"1536x1024"}],"displayOptions":{"show":{"/model":["gpt-image-1"]}},"default":"1024x1024"},{"displayName":"Style","name":"style","type":"options","options":[{"name":"Natural","value":"natural","description":"Produce more natural looking images"},{"name":"Vivid","value":"vivid","description":"Lean towards generating hyper-real and dramatic images"}],"displayOptions":{"show":{"/model":["dall-e-3"]}},"default":"vivid"},{"displayName":"Respond with Image URL(s)","name":"returnImageUrls","type":"boolean","default":false,"description":"Whether to return image URL(s) instead of binary file(s)","displayOptions":{"hide":{"/model":["gpt-image-1"]}}},{"displayName":"Put Output in Field","name":"binaryPropertyOutput","type":"string","default":"data","hint":"The name of the output field to put the binary file data in","displayOptions":{"show":{"returnImageUrls":[false]}}}],"displayOptions":{"show":{"operation":["generate"],"resource":["image"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"imageModelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. gpt-4"}],"displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.4}}],"operation":["analyze"],"resource":["image"]}}},{"displayName":"Text Input","name":"text","type":"string","placeholder":"e.g. What's in this image?","default":"What's in this image?","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"Input Type","name":"inputType","type":"options","default":"url","options":[{"name":"Image URL(s)","value":"url"},{"name":"Binary File(s)","value":"base64"}],"displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"URL(s)","name":"imageUrls","type":"string","placeholder":"e.g. https://example.com/image.jpeg","description":"URL(s) of the image(s) to analyze, multiple URLs can be added separated by comma","default":"","displayOptions":{"show":{"inputType":["url"],"operation":["analyze"],"resource":["image"]}}},{"displayName":"Input Data Field Name","name":"binaryPropertyName","type":"string","default":"data","placeholder":"e.g. 
data","hint":"The name of the input field containing the binary file data to be processed","description":"Name of the binary property which contains the image(s)","displayOptions":{"show":{"inputType":["base64"],"operation":["analyze"],"resource":["image"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":true,"description":"Whether to simplify the response or not","displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Detail","name":"detail","type":"options","default":"auto","options":[{"name":"Auto","value":"auto","description":"Model will look at the image input size and decide if it should use the low or high setting"},{"name":"Low","value":"low","description":"Return faster responses and consume fewer tokens"},{"name":"High","value":"high","description":"Return more detailed responses, consumes more tokens"}]},{"displayName":"Length of Description (Max Tokens)","description":"Fewer tokens will result in shorter, less detailed image description","name":"maxTokens","type":"number","default":300,"typeOptions":{"minValue":1}}],"displayOptions":{"show":{"operation":["analyze"],"resource":["image"]}}},{"displayName":"Operation","name":"operation","type":"options","noDataExpression":true,"options":[{"name":"Message a Model","value":"message","action":"Message a model","description":"Create a completion with GPT 3, 4, etc."},{"name":"Classify Text for Violations","value":"classify","action":"Classify text for violations","description":"Check whether content complies with usage policies"}],"default":"message","displayOptions":{"show":{"resource":["text"]}}},{"displayName":"Text Input","name":"input","type":"string","placeholder":"e.g. Sample text goes here","description":"The input text to classify if it is violates the moderation policy","default":"","typeOptions":{"rows":2},"displayOptions":{"show":{"operation":["classify"],"resource":["text"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":false,"description":"Whether to return a simplified version of the response instead of the raw data","displayOptions":{"show":{"operation":["classify"],"resource":["text"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Use Stable Model","name":"useStableModel","type":"boolean","default":false,"description":"Whether to use the stable version of the model instead of the latest version, accuracy may be slightly lower"}],"displayOptions":{"show":{"operation":["classify"],"resource":["text"]}}},{"displayName":"Model","name":"modelId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"modelSearch","searchable":true}},{"displayName":"ID","name":"id","type":"string","placeholder":"e.g. gpt-4"}],"displayOptions":{"show":{"operation":["message"],"resource":["text"]}}},{"displayName":"Messages","name":"messages","type":"fixedCollection","typeOptions":{"sortable":true,"multipleValues":true},"placeholder":"Add Message","default":{"values":[{"content":""}]},"options":[{"displayName":"Values","name":"values","values":[{"displayName":"Prompt","name":"content","type":"string","description":"The content of the message to be send","default":"","placeholder":"e.g. 
Hello, how can you help me?","typeOptions":{"rows":2}},{"displayName":"Role","name":"role","type":"options","description":"Role in shaping the model's response, it tells the model how it should behave and interact with the user","options":[{"name":"User","value":"user","description":"Send a message as a user and get a response from the model"},{"name":"Assistant","value":"assistant","description":"Tell the model to adopt a specific tone or personality"},{"name":"System","value":"system","description":"Usually used to set the model's behavior or context for the next user message"}],"default":"user"}]}],"displayOptions":{"show":{"operation":["message"],"resource":["text"]}}},{"displayName":"Simplify Output","name":"simplify","type":"boolean","default":true,"description":"Whether to return a simplified version of the response instead of the raw data","displayOptions":{"show":{"operation":["message"],"resource":["text"]}}},{"displayName":"Output Content as JSON","name":"jsonOutput","type":"boolean","description":"Whether to attempt to return the response in JSON format. Compatible with GPT-4 Turbo and all GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106.","default":false,"displayOptions":{"show":{"operation":["message"],"resource":["text"]}}},{"displayName":"Hide Tools","name":"hideTools","type":"hidden","default":"hide","displayOptions":{"show":{"modelId":["gpt-3.5-turbo-16k-0613","dall-e-3","text-embedding-3-large","dall-e-2","whisper-1","tts-1-hd-1106","tts-1-hd","gpt-4-0314","text-embedding-3-small","gpt-4-32k-0314","gpt-3.5-turbo-0301","gpt-4-vision-preview","gpt-3.5-turbo-16k","gpt-3.5-turbo-instruct-0914","tts-1","davinci-002","gpt-3.5-turbo-instruct","babbage-002","tts-1-1106","text-embedding-ada-002"],"@version":[{"_cnd":{"gte":1.2}}],"operation":["message"],"resource":["text"]}}},{"displayName":"Connect your own custom n8n tools to this node on the canvas","name":"noticeTools","type":"notice","default":"","displayOptions":{"hide":{"hideTools":["hide"]},"show":{"operation":["message"],"resource":["text"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Frequency Penalty","name":"frequency_penalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim","type":"number"},{"displayName":"Maximum Number of Tokens","name":"maxTokens","default":16,"description":"The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).","type":"number","typeOptions":{"maxValue":32768}},{"displayName":"Number of Completions","name":"n","default":1,"description":"How many completions to generate for each prompt. Note: Because this parameter generates many completions, it can quickly consume your token quota. 
Use carefully and ensure that you have reasonable settings for max_tokens and stop.","type":"number"},{"displayName":"Presence Penalty","name":"presence_penalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics","type":"number"},{"displayName":"Output Randomness (Temperature)","name":"temperature","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive. We generally recommend altering this or temperature but not both.","type":"number"},{"displayName":"Output Randomness (Top P)","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"An alternative to sampling with temperature, controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"},{"displayName":"Reasoning Effort","name":"reasoning_effort","default":"medium","description":"Controls the amount of reasoning tokens to use. A value of \"low\" will favor speed and economical token usage, \"high\" will favor more complete reasoning at the cost of more tokens generated and slower responses.","type":"options","options":[{"name":"Low","value":"low","description":"Favors speed and economical token usage"},{"name":"Medium","value":"medium","description":"Balance between speed and reasoning accuracy"},{"name":"High","value":"high","description":"Favors more complete reasoning at the cost of more tokens generated and slower responses"}],"displayOptions":{"show":{"/modelId":[{"_cnd":{"regex":"(^o1([-\\d]+)?$)|(^o[3-9].*)|(^gpt-5.*)"}}]}}},{"displayName":"Max Tool Calls Iterations","name":"maxToolsIterations","type":"number","default":15,"description":"The maximum number of tool iteration cycles the LLM will run before stopping. A single iteration can contain multiple tool calls. Set to 0 for no limit.","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.5}}]}}}],"displayOptions":{"show":{"operation":["message"],"resource":["text"]}}}],"iconUrl":{"light":"icons/@n8n/n8n-nodes-langchain/dist/nodes/vendors/OpenAi/openAi.svg","dark":"icons/@n8n/n8n-nodes-langchain/dist/nodes/vendors/OpenAi/openAi.dark.svg"}},
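The OpenAI node properties above gate their visibility through `displayOptions` conditions: plain values must match the referenced parameter exactly, a leading `/` (e.g. `/model`, `/modelId`) points at a root-level parameter, `@version` targets the node version, and `_cnd` wraps comparison operators such as `gte`, `lte`, `lt`, `between`, and `regex`. The TypeScript sketch below is an illustrative evaluator for those `_cnd` shapes only; the type and function names are hypothetical and are not taken from the n8n codebase.

```typescript
// Illustrative evaluator for the `_cnd` conditions used in the displayOptions above.
// Type and function names are hypothetical, not from the n8n codebase.
type Cnd =
  | { gte: number }
  | { lte: number }
  | { lt: number }
  | { between: { from: number; to: number } }
  | { regex: string };

type ShowValue = string | number | boolean | { _cnd: Cnd };

function matchesCondition(actual: string | number, expected: ShowValue): boolean {
  if (typeof expected !== 'object') return actual === expected;
  const cnd = expected._cnd;
  if ('gte' in cnd) return Number(actual) >= cnd.gte;
  if ('lte' in cnd) return Number(actual) <= cnd.lte;
  if ('lt' in cnd) return Number(actual) < cnd.lt;
  if ('between' in cnd) {
    const v = Number(actual);
    return v >= cnd.between.from && v <= cnd.between.to;
  }
  return new RegExp(cnd.regex).test(String(actual));
}

// The "Reasoning Effort" option above is only shown when /modelId matches this regex
// (o1, o3+ and gpt-5 families), and "Max Tool Calls Iterations" only from version 1.5.
console.log(
  matchesCondition('gpt-5-mini', {
    _cnd: { regex: '(^o1([-\\d]+)?$)|(^o[3-9].*)|(^gpt-5.*)' },
  }),
); // true
console.log(matchesCondition(1.5, { _cnd: { gte: 1.5 } })); // true
```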
- {"displayName":"AI Agent","name":"agent","icon":"fa:robot","iconColor":"black","group":["transform"],"description":"Generates an action plan and executes it. Can use external tools.","codex":{"alias":["LangChain","Chat","Conversational","Plan and Execute","ReAct","Tools"],"categories":["AI"],"subcategories":{"AI":["Agents","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.agent/"}]}},"defaultVersion":3,"version":[2,2.1,2.2],"defaults":{"name":"AI Agent","color":"#404040"},"inputs":"={{\n\t\t\t\t((hasOutputParser, needsFallback) => {\n\t\t\t\t\tfunction getInputs(hasMainInput, hasOutputParser, needsFallback) {\n const getInputData = (inputs) => {\n return inputs.map(({ type, filter, displayName, required }) => {\n const input = {\n type,\n displayName,\n required,\n maxConnections: [\"ai_languageModel\", \"ai_memory\", \"ai_outputParser\"].includes(type) ? 1 : void 0\n };\n if (filter) {\n input.filter = filter;\n }\n return input;\n });\n };\n let specialInputs = [\n {\n type: \"ai_languageModel\",\n displayName: \"Chat Model\",\n required: true,\n filter: {\n excludedNodes: [\n \"@n8n/n8n-nodes-langchain.lmCohere\",\n \"@n8n/n8n-nodes-langchain.lmOllama\",\n \"n8n/n8n-nodes-langchain.lmOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference\"\n ]\n }\n },\n {\n type: \"ai_languageModel\",\n displayName: \"Fallback Model\",\n required: true,\n filter: {\n excludedNodes: [\n \"@n8n/n8n-nodes-langchain.lmCohere\",\n \"@n8n/n8n-nodes-langchain.lmOllama\",\n \"n8n/n8n-nodes-langchain.lmOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference\"\n ]\n }\n },\n {\n displayName: \"Memory\",\n type: \"ai_memory\"\n },\n {\n displayName: \"Tool\",\n type: \"ai_tool\"\n },\n {\n displayName: \"Output Parser\",\n type: \"ai_outputParser\"\n }\n ];\n if (hasOutputParser === false) {\n specialInputs = specialInputs.filter((input) => input.type !== \"ai_outputParser\");\n }\n if (needsFallback === false) {\n specialInputs = specialInputs.filter((input) => input.displayName !== \"Fallback Model\");\n }\n const mainInputs = hasMainInput ? 
[\"main\"] : [];\n return [...mainInputs, ...getInputData(specialInputs)];\n};\n\t\t\t\t\treturn getInputs(true, hasOutputParser, needsFallback);\n\t\t\t\t})($parameter.hasOutputParser === undefined || $parameter.hasOutputParser === true, $parameter.needsFallback !== undefined && $parameter.needsFallback === true)\n\t\t\t}}","outputs":["main"],"properties":[{"displayName":"Tip: Get a feel for agents with our quick <a href=\"https://docs.n8n.io/advanced-ai/intro-tutorial/\" target=\"_blank\">tutorial</a> or see an <a href=\"/workflows/templates/1954\" target=\"_blank\">example</a> of how this node works","name":"aiAgentStarterCallout","type":"callout","default":""},{"displayName":"Get started faster with our","name":"preBuiltAgentsCallout","type":"callout","typeOptions":{"calloutAction":{"label":"pre-built agents","icon":"bot","type":"openPreBuiltAgentsCollection"}},"default":""},{"displayName":"Source for Prompt (User Message)","name":"promptType","type":"options","options":[{"name":"Connected Chat Trigger Node","value":"auto","description":"Looks for an input field called 'chatInput' that is coming from a directly connected Chat Trigger"},{"name":"Connected Guardrails Node","value":"guardrails","description":"Looks for an input field called 'guardrailsInput' that is coming from a directly connected Guardrails Node"},{"name":"Define below","value":"define","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"auto"},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.guardrailsInput }}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["guardrails"]}},"displayOptions":{"show":{"promptType":["guardrails"]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.chatInput }}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["auto"]}},"displayOptions":{"show":{"promptType":["auto"]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"","placeholder":"e.g. 
Hello, how can you help me?","typeOptions":{"rows":2},"displayOptions":{"show":{"promptType":["define"]}}},{"displayName":"Require Specific Output Format","name":"hasOutputParser","type":"boolean","default":false,"noDataExpression":true},{"displayName":"Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='ai_outputParser'>output parser</a> on the canvas to specify the output format you require","name":"notice","type":"notice","default":"","displayOptions":{"show":{"hasOutputParser":[true]}}},{"displayName":"Enable Fallback Model","name":"needsFallback","type":"boolean","default":false,"noDataExpression":true,"displayOptions":{"show":{"@version":[{"_cnd":{"gte":2.1}}]}}},{"displayName":"Connect an additional language model on the canvas to use it as a fallback if the main model fails","name":"fallbackNotice","type":"notice","default":"","displayOptions":{"show":{"needsFallback":[true]}}},{"displayName":"Options","name":"options","type":"collection","default":{},"placeholder":"Add Option","options":[{"displayName":"System Message","name":"systemMessage","type":"string","default":"You are a helpful assistant","description":"The message that will be sent to the agent before the conversation starts","typeOptions":{"rows":6}},{"displayName":"Max Iterations","name":"maxIterations","type":"number","default":10,"description":"The maximum number of iterations the agent will run before stopping"},{"displayName":"Return Intermediate Steps","name":"returnIntermediateSteps","type":"boolean","default":false,"description":"Whether or not the output should include intermediate steps the agent took"},{"displayName":"Automatically Passthrough Binary Images","name":"passthroughBinaryImages","type":"boolean","default":true,"description":"Whether or not binary images should be automatically passed through to the agent as image type messages"},{"displayName":"Batch Processing","name":"batching","type":"collection","placeholder":"Add Batch Processing Option","description":"Batch processing options for rate limiting","default":{},"options":[{"displayName":"Batch Size","name":"batchSize","default":1,"type":"number","description":"How many items to process in parallel. This is useful for rate limiting, but might impact the log output ordering."},{"displayName":"Delay Between Batches","name":"delayBetweenBatches","default":0,"type":"number","description":"Delay in milliseconds between batches. 
This is useful for rate limiting."}]},{"displayName":"Enable Streaming","name":"enableStreaming","type":"boolean","default":true,"description":"Whether this agent will stream the response in real-time as it generates text"}],"displayOptions":{"hide":{"@version":[{"_cnd":{"lt":2.2}}]}}},{"displayName":"Options","name":"options","type":"collection","default":{},"placeholder":"Add Option","options":[{"displayName":"System Message","name":"systemMessage","type":"string","default":"You are a helpful assistant","description":"The message that will be sent to the agent before the conversation starts","typeOptions":{"rows":6}},{"displayName":"Max Iterations","name":"maxIterations","type":"number","default":10,"description":"The maximum number of iterations the agent will run before stopping"},{"displayName":"Return Intermediate Steps","name":"returnIntermediateSteps","type":"boolean","default":false,"description":"Whether or not the output should include intermediate steps the agent took"},{"displayName":"Automatically Passthrough Binary Images","name":"passthroughBinaryImages","type":"boolean","default":true,"description":"Whether or not binary images should be automatically passed through to the agent as image type messages"},{"displayName":"Batch Processing","name":"batching","type":"collection","placeholder":"Add Batch Processing Option","description":"Batch processing options for rate limiting","default":{},"options":[{"displayName":"Batch Size","name":"batchSize","default":1,"type":"number","description":"How many items to process in parallel. This is useful for rate limiting, but might impact the log output ordering."},{"displayName":"Delay Between Batches","name":"delayBetweenBatches","default":0,"type":"number","description":"Delay in milliseconds between batches. This is useful for rate limiting."}]}],"displayOptions":{"show":{"@version":[{"_cnd":{"lt":2.2}}]}}}],"hints":[{"message":"You are using streaming responses. Make sure to set the response mode to \"Streaming Response\" on the connected trigger node.","type":"warning","location":"outputPane","whenToDisplay":"afterExecution","displayCondition":"={{ $parameter[\"enableStreaming\"] === true }}"}]},
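The `inputs` value of the AI Agent definitions (here and in the v3 and Agent Tool entries below) is an n8n expression embedding a minified `getInputs` function: it always offers a required Chat Model, adds a Fallback Model only when `needsFallback` is true, drops the Output Parser connector when `hasOutputParser` is false, caps language model, memory, and output parser connections at one, and prepends a `main` input for the root agent (the tool variant passes `hasMainInput = false`). The sketch below is a readable, roughly equivalent restatement of that embedded logic; the model exclusion filters are omitted for brevity and the names are illustrative.

```typescript
// De-minified sketch of the `getInputs` expression embedded in the AI Agent definitions.
// Exclusion filters on the model inputs are omitted for brevity.
interface AgentInput {
  type: string;
  displayName?: string;
  required?: boolean;
  maxConnections?: number;
}

function getAgentInputs(
  hasMainInput: boolean,
  hasOutputParser: boolean,
  needsFallback: boolean,
): Array<string | AgentInput> {
  const singleConnection = ['ai_languageModel', 'ai_memory', 'ai_outputParser'];
  let special: AgentInput[] = [
    { type: 'ai_languageModel', displayName: 'Chat Model', required: true },
    { type: 'ai_languageModel', displayName: 'Fallback Model', required: true },
    { type: 'ai_memory', displayName: 'Memory' },
    { type: 'ai_tool', displayName: 'Tool' },
    { type: 'ai_outputParser', displayName: 'Output Parser' },
  ];
  if (!hasOutputParser) special = special.filter((i) => i.type !== 'ai_outputParser');
  if (!needsFallback) special = special.filter((i) => i.displayName !== 'Fallback Model');
  const mapped = special.map((i) => ({
    ...i,
    maxConnections: singleConnection.includes(i.type) ? 1 : undefined,
  }));
  return [...(hasMainInput ? ['main'] : []), ...mapped];
}

// With the defaults (no output parser required, no fallback) the root agent exposes
// main + Chat Model + Memory + Tool on the canvas.
console.log(getAgentInputs(true, false, false).length); // 4
```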
- {"version":[1,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9],"displayName":"AI Agent","name":"agent","icon":"fa:robot","iconColor":"black","group":["transform"],"description":"Generates an action plan and executes it. Can use external tools.","codex":{"alias":["LangChain","Chat","Conversational","Plan and Execute","ReAct","Tools"],"categories":["AI"],"subcategories":{"AI":["Agents","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.agent/"}]}},"defaultVersion":3,"defaults":{"name":"AI Agent","color":"#404040"},"inputs":"={{\n\t\t\t\t((agent, hasOutputParser) => {\n\t\t\t\t\tfunction getInputs(agent, hasOutputParser) {\n const getInputData = (inputs) => {\n const displayNames = {\n ai_languageModel: \"Model\",\n ai_memory: \"Memory\",\n ai_tool: \"Tool\",\n ai_outputParser: \"Output Parser\"\n };\n return inputs.map(({ type, filter }) => {\n const isModelType = type === \"ai_languageModel\";\n let displayName = type in displayNames ? displayNames[type] : void 0;\n if (isModelType && [\"openAiFunctionsAgent\", \"toolsAgent\", \"conversationalAgent\"].includes(agent)) {\n displayName = \"Chat Model\";\n }\n const input = {\n type,\n displayName,\n required: isModelType,\n maxConnections: [\"ai_languageModel\", \"ai_memory\", \"ai_outputParser\"].includes(type) ? 1 : void 0\n };\n if (filter) {\n input.filter = filter;\n }\n return input;\n });\n };\n let specialInputs = [];\n if (agent === \"conversationalAgent\") {\n specialInputs = [\n {\n type: \"ai_languageModel\",\n filter: {\n nodes: [\n \"@n8n/n8n-nodes-langchain.lmChatAnthropic\",\n \"@n8n/n8n-nodes-langchain.lmChatAwsBedrock\",\n \"@n8n/n8n-nodes-langchain.lmChatGroq\",\n \"@n8n/n8n-nodes-langchain.lmChatLemonade\",\n \"@n8n/n8n-nodes-langchain.lmChatOllama\",\n \"@n8n/n8n-nodes-langchain.lmChatOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmChatGoogleGemini\",\n \"@n8n/n8n-nodes-langchain.lmChatGoogleVertex\",\n \"@n8n/n8n-nodes-langchain.lmChatMistralCloud\",\n \"@n8n/n8n-nodes-langchain.lmChatAzureOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmChatDeepSeek\",\n \"@n8n/n8n-nodes-langchain.lmChatOpenRouter\",\n \"@n8n/n8n-nodes-langchain.lmChatVercelAiGateway\",\n \"@n8n/n8n-nodes-langchain.lmChatXAiGrok\",\n \"@n8n/n8n-nodes-langchain.modelSelector\"\n ]\n }\n },\n {\n type: \"ai_memory\"\n },\n {\n type: \"ai_tool\"\n },\n {\n type: \"ai_outputParser\"\n }\n ];\n } else if (agent === \"toolsAgent\") {\n specialInputs = [\n {\n type: \"ai_languageModel\",\n filter: {\n nodes: [\n \"@n8n/n8n-nodes-langchain.lmChatAnthropic\",\n \"@n8n/n8n-nodes-langchain.lmChatAzureOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmChatAwsBedrock\",\n \"@n8n/n8n-nodes-langchain.lmChatLemonade\",\n \"@n8n/n8n-nodes-langchain.lmChatMistralCloud\",\n \"@n8n/n8n-nodes-langchain.lmChatOllama\",\n \"@n8n/n8n-nodes-langchain.lmChatOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmChatGroq\",\n \"@n8n/n8n-nodes-langchain.lmChatGoogleVertex\",\n \"@n8n/n8n-nodes-langchain.lmChatGoogleGemini\",\n \"@n8n/n8n-nodes-langchain.lmChatDeepSeek\",\n \"@n8n/n8n-nodes-langchain.lmChatOpenRouter\",\n \"@n8n/n8n-nodes-langchain.lmChatVercelAiGateway\",\n \"@n8n/n8n-nodes-langchain.lmChatXAiGrok\"\n ]\n }\n },\n {\n type: \"ai_memory\"\n },\n {\n type: \"ai_tool\",\n required: true\n },\n {\n type: \"ai_outputParser\"\n }\n ];\n } else if (agent === \"openAiFunctionsAgent\") {\n specialInputs = [\n {\n type: \"ai_languageModel\",\n filter: {\n nodes: [\n \"@n8n/n8n-nodes-langchain.lmChatOpenAi\",\n 
\"@n8n/n8n-nodes-langchain.lmChatAzureOpenAi\"\n ]\n }\n },\n {\n type: \"ai_memory\"\n },\n {\n type: \"ai_tool\",\n required: true\n },\n {\n type: \"ai_outputParser\"\n }\n ];\n } else if (agent === \"reActAgent\") {\n specialInputs = [\n {\n type: \"ai_languageModel\"\n },\n {\n type: \"ai_tool\"\n },\n {\n type: \"ai_outputParser\"\n }\n ];\n } else if (agent === \"sqlAgent\") {\n specialInputs = [\n {\n type: \"ai_languageModel\"\n },\n {\n type: \"ai_memory\"\n }\n ];\n } else if (agent === \"planAndExecuteAgent\") {\n specialInputs = [\n {\n type: \"ai_languageModel\"\n },\n {\n type: \"ai_tool\"\n },\n {\n type: \"ai_outputParser\"\n }\n ];\n }\n if (hasOutputParser === false) {\n specialInputs = specialInputs.filter((input) => input.type !== \"ai_outputParser\");\n }\n return [\"main\", ...getInputData(specialInputs)];\n};\n\t\t\t\t\treturn getInputs(agent, hasOutputParser)\n\t\t\t\t})($parameter.agent, $parameter.hasOutputParser === undefined || $parameter.hasOutputParser === true)\n\t\t\t}}","outputs":["main"],"credentials":[{"name":"mySql","required":true,"testedBy":"mysqlConnectionTest","displayOptions":{"show":{"agent":["sqlAgent"],"/dataSource":["mysql"]}}},{"name":"postgres","required":true,"displayOptions":{"show":{"agent":["sqlAgent"],"/dataSource":["postgres"]}}}],"properties":[{"displayName":"Tip: Get a feel for agents with our quick <a href=\"https://docs.n8n.io/advanced-ai/intro-tutorial/\" target=\"_blank\">tutorial</a> or see an <a href=\"/templates/1954\" target=\"_blank\">example</a> of how this node works","name":"aiAgentStarterCallout","type":"callout","default":"","displayOptions":{"show":{"agent":["conversationalAgent","toolsAgent"]}}},{"displayName":"Get started faster with our","name":"preBuiltAgentsCallout","type":"callout","typeOptions":{"calloutAction":{"label":"pre-built agents","icon":"bot","type":"openPreBuiltAgentsCollection"}},"default":""},{"displayName":"This node is using Agent that has been deprecated. Please switch to using 'Tools Agent' instead.","name":"deprecated","type":"notice","default":"","displayOptions":{"show":{"agent":["conversationalAgent","openAiFunctionsAgent","planAndExecuteAgent","reActAgent","sqlAgent"]}}},{"displayName":"Agent","name":"agent","type":"options","noDataExpression":true,"options":[{"name":"Conversational Agent","value":"conversationalAgent","description":"Describes tools in the system prompt and parses JSON responses for tool calls. More flexible but potentially less reliable than the Tools Agent. Suitable for simpler interactions or with models not supporting structured schemas."},{"name":"OpenAI Functions Agent","value":"openAiFunctionsAgent","description":"Leverages OpenAI's function calling capabilities to precisely select and execute tools. Excellent for tasks requiring structured outputs when working with OpenAI models."},{"name":"Plan and Execute Agent","value":"planAndExecuteAgent","description":"Creates a high-level plan for complex tasks and then executes each step. Suitable for multi-stage problems or when a strategic approach is needed."},{"name":"ReAct Agent","value":"reActAgent","description":"Combines reasoning and action in an iterative process. Effective for tasks that require careful analysis and step-by-step problem-solving."},{"name":"SQL Agent","value":"sqlAgent","description":"Specializes in interacting with SQL databases. 
Ideal for data analysis tasks, generating queries, or extracting insights from structured data."}],"default":"conversationalAgent","displayOptions":{"show":{"@version":[{"_cnd":{"lte":1.5}}]}}},{"displayName":"Agent","name":"agent","type":"options","noDataExpression":true,"options":[{"name":"Tools Agent","value":"toolsAgent","description":"Utilizes structured tool schemas for precise and reliable tool selection and execution. Recommended for complex tasks requiring accurate and consistent tool usage, but only usable with models that support tool calling."},{"name":"Conversational Agent","value":"conversationalAgent","description":"Describes tools in the system prompt and parses JSON responses for tool calls. More flexible but potentially less reliable than the Tools Agent. Suitable for simpler interactions or with models not supporting structured schemas."},{"name":"OpenAI Functions Agent","value":"openAiFunctionsAgent","description":"Leverages OpenAI's function calling capabilities to precisely select and execute tools. Excellent for tasks requiring structured outputs when working with OpenAI models."},{"name":"Plan and Execute Agent","value":"planAndExecuteAgent","description":"Creates a high-level plan for complex tasks and then executes each step. Suitable for multi-stage problems or when a strategic approach is needed."},{"name":"ReAct Agent","value":"reActAgent","description":"Combines reasoning and action in an iterative process. Effective for tasks that require careful analysis and step-by-step problem-solving."},{"name":"SQL Agent","value":"sqlAgent","description":"Specializes in interacting with SQL databases. Ideal for data analysis tasks, generating queries, or extracting insights from structured data."}],"default":"toolsAgent","displayOptions":{"show":{"@version":[{"_cnd":{"between":{"from":1.6,"to":1.7}}}]}}},{"displayName":"Agent","name":"agent","type":"hidden","noDataExpression":true,"options":[{"name":"Tools Agent","value":"toolsAgent","description":"Utilizes structured tool schemas for precise and reliable tool selection and execution. Recommended for complex tasks requiring accurate and consistent tool usage, but only usable with models that support tool calling."},{"name":"Conversational Agent","value":"conversationalAgent","description":"Describes tools in the system prompt and parses JSON responses for tool calls. More flexible but potentially less reliable than the Tools Agent. Suitable for simpler interactions or with models not supporting structured schemas."},{"name":"OpenAI Functions Agent","value":"openAiFunctionsAgent","description":"Leverages OpenAI's function calling capabilities to precisely select and execute tools. Excellent for tasks requiring structured outputs when working with OpenAI models."},{"name":"Plan and Execute Agent","value":"planAndExecuteAgent","description":"Creates a high-level plan for complex tasks and then executes each step. Suitable for multi-stage problems or when a strategic approach is needed."},{"name":"ReAct Agent","value":"reActAgent","description":"Combines reasoning and action in an iterative process. Effective for tasks that require careful analysis and step-by-step problem-solving."},{"name":"SQL Agent","value":"sqlAgent","description":"Specializes in interacting with SQL databases. 
Ideal for data analysis tasks, generating queries, or extracting insights from structured data."}],"default":"toolsAgent","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.8}}]}}},{"displayName":"Source for Prompt (User Message)","name":"promptType","type":"options","options":[{"name":"Connected Chat Trigger Node","value":"auto","description":"Looks for an input field called 'chatInput' that is coming from a directly connected Chat Trigger"},{"name":"Connected Guardrails Node","value":"guardrails","description":"Looks for an input field called 'guardrailsInput' that is coming from a directly connected Guardrails Node"},{"name":"Define below","value":"define","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"auto","displayOptions":{"hide":{"@version":[{"_cnd":{"lte":1.2}}],"agent":["sqlAgent"]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.guardrailsInput }}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["guardrails"]}},"displayOptions":{"show":{"promptType":["guardrails"],"@version":[{"_cnd":{"gte":1.7}}]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.chatInput }}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["auto"]}},"displayOptions":{"show":{"promptType":["auto"],"@version":[{"_cnd":{"gte":1.7}}]},"hide":{"agent":["sqlAgent"]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"","placeholder":"e.g. Hello, how can you help me?","typeOptions":{"rows":2},"displayOptions":{"show":{"promptType":["define"]},"hide":{"agent":["sqlAgent"]}}},{"displayName":"For more reliable structured output parsing, consider using the Tools agent","name":"notice","type":"notice","default":"","displayOptions":{"show":{"hasOutputParser":[true],"agent":["conversationalAgent","reActAgent","planAndExecuteAgent","openAiFunctionsAgent"]}}},{"displayName":"Require Specific Output Format","name":"hasOutputParser","type":"boolean","default":false,"noDataExpression":true,"displayOptions":{"hide":{"@version":[{"_cnd":{"lte":1.2}}],"agent":["sqlAgent"]}}},{"displayName":"Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='ai_outputParser'>output parser</a> on the canvas to specify the output format you require","name":"notice","type":"notice","default":"","displayOptions":{"show":{"hasOutputParser":[true],"agent":["toolsAgent"]}}},{"displayName":"Options","name":"options","type":"collection","displayOptions":{"show":{"agent":["toolsAgent"]}},"default":{},"placeholder":"Add Option","options":[{"displayName":"System Message","name":"systemMessage","type":"string","default":"You are a helpful assistant","description":"The message that will be sent to the agent before the conversation starts","typeOptions":{"rows":6}},{"displayName":"Max Iterations","name":"maxIterations","type":"number","default":10,"description":"The maximum number of iterations the agent will run before stopping"},{"displayName":"Return Intermediate Steps","name":"returnIntermediateSteps","type":"boolean","default":false,"description":"Whether or not the output should include intermediate steps the agent took"},{"displayName":"Automatically Passthrough Binary Images","name":"passthroughBinaryImages","type":"boolean","default":true,"description":"Whether or not binary images should be automatically passed through to the agent as image type 
messages"}]},{"displayName":"Text","name":"text","type":"string","required":true,"displayOptions":{"show":{"agent":["conversationalAgent"],"@version":[1]}},"default":"={{ $json.input }}"},{"displayName":"Text","name":"text","type":"string","required":true,"displayOptions":{"show":{"agent":["conversationalAgent"],"@version":[1.1]}},"default":"={{ $json.chat_input }}"},{"displayName":"Text","name":"text","type":"string","required":true,"displayOptions":{"show":{"agent":["conversationalAgent"],"@version":[1.2]}},"default":"={{ $json.chatInput }}"},{"displayName":"Options","name":"options","type":"collection","displayOptions":{"show":{"agent":["conversationalAgent"]}},"default":{},"placeholder":"Add Option","options":[{"displayName":"Human Message","name":"humanMessage","type":"string","default":"TOOLS\n------\nAssistant can ask the user to use tools to look up information that may be helpful in answering the users original question. The tools the human can use are:\n\n{tools}\n\n{format_instructions}\n\nUSER'S INPUT\n--------------------\nHere is the user's input (remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else):\n\n{{input}}","description":"The message that will provide the agent with a list of tools to use","typeOptions":{"rows":6}},{"displayName":"System Message","name":"systemMessage","type":"string","default":"Assistant is a large language model trained by OpenAI.\n\nAssistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n\nAssistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n\nOverall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. 
Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.","description":"The message that will be sent to the agent before the conversation starts","typeOptions":{"rows":6}},{"displayName":"Max Iterations","name":"maxIterations","type":"number","default":10,"description":"The maximum number of iterations the agent will run before stopping"},{"displayName":"Return Intermediate Steps","name":"returnIntermediateSteps","type":"boolean","default":false,"description":"Whether or not the output should include intermediate steps the agent took"}]},{"displayName":"Text","name":"text","type":"string","required":true,"displayOptions":{"show":{"agent":["openAiFunctionsAgent"],"@version":[1]}},"default":"={{ $json.input }}"},{"displayName":"Text","name":"text","type":"string","required":true,"displayOptions":{"show":{"agent":["openAiFunctionsAgent"],"@version":[1.1]}},"default":"={{ $json.chat_input }}"},{"displayName":"Text","name":"text","type":"string","required":true,"displayOptions":{"show":{"agent":["openAiFunctionsAgent"],"@version":[1.2]}},"default":"={{ $json.chatInput }}"},{"displayName":"Options","name":"options","type":"collection","displayOptions":{"show":{"agent":["openAiFunctionsAgent"]}},"default":{},"placeholder":"Add Option","options":[{"displayName":"System Message","name":"systemMessage","type":"string","default":"You are a helpful AI assistant.","description":"The message that will be sent to the agent before the conversation starts","typeOptions":{"rows":6}},{"displayName":"Max Iterations","name":"maxIterations","type":"number","default":10,"description":"The maximum number of iterations the agent will run before stopping"},{"displayName":"Return Intermediate Steps","name":"returnIntermediateSteps","type":"boolean","default":false,"description":"Whether or not the output should include intermediate steps the agent took"}]},{"displayName":"Text","name":"text","type":"string","required":true,"displayOptions":{"show":{"agent":["reActAgent"],"@version":[1]}},"default":"={{ $json.input }}"},{"displayName":"Text","name":"text","type":"string","required":true,"displayOptions":{"show":{"agent":["reActAgent"],"@version":[1.1]}},"default":"={{ $json.chat_input }}"},{"displayName":"Text","name":"text","type":"string","required":true,"displayOptions":{"show":{"agent":["reActAgent"],"@version":[1.2]}},"default":"={{ $json.chatInput }}"},{"displayName":"Options","name":"options","type":"collection","displayOptions":{"show":{"agent":["reActAgent"]}},"default":{},"placeholder":"Add Option","options":[{"displayName":"Human Message Template","name":"humanMessageTemplate","type":"string","default":"{input}\n\n{agent_scratchpad}","description":"String to use directly as the human message template","typeOptions":{"rows":6}},{"displayName":"Prefix Message","name":"prefix","type":"string","default":"Answer the following questions as best you can. You have access to the following tools:","description":"String to put before the list of tools","typeOptions":{"rows":6}},{"displayName":"Suffix Message for Chat Model","name":"suffixChat","type":"string","default":"Begin! 
Reminder to always use the exact characters `Final Answer` when responding.","description":"String to put after the list of tools that will be used if chat model is used","typeOptions":{"rows":6}},{"displayName":"Suffix Message for Regular Model","name":"suffix","type":"string","default":"Begin!\n\n\tQuestion: {input}\n\tThought:{agent_scratchpad}","description":"String to put after the list of tools that will be used if regular model is used","typeOptions":{"rows":6}},{"displayName":"Max Iterations","name":"maxIterations","type":"number","default":10,"description":"The maximum number of iterations the agent will run before stopping"},{"displayName":"Return Intermediate Steps","name":"returnIntermediateSteps","type":"boolean","default":false,"description":"Whether or not the output should include intermediate steps the agent took"}]},{"displayName":"Data Source","name":"dataSource","type":"options","displayOptions":{"show":{"agent":["sqlAgent"],"@version":[{"_cnd":{"lt":1.4}}]}},"default":"sqlite","description":"SQL database to connect to","options":[{"name":"MySQL","value":"mysql","description":"Connect to a MySQL database"},{"name":"Postgres","value":"postgres","description":"Connect to a Postgres database"},{"name":"SQLite","value":"sqlite","description":"Use SQLite by connecting a database file as binary input"}]},{"displayName":"Data Source","name":"dataSource","type":"options","displayOptions":{"show":{"agent":["sqlAgent"],"@version":[{"_cnd":{"gte":1.4}}]}},"default":"postgres","description":"SQL database to connect to","options":[{"name":"MySQL","value":"mysql","description":"Connect to a MySQL database"},{"name":"Postgres","value":"postgres","description":"Connect to a Postgres database"},{"name":"SQLite","value":"sqlite","description":"Use SQLite by connecting a database file as binary input"}]},{"displayName":"Credentials","name":"credentials","type":"credentials","default":""},{"displayName":"Pass the SQLite database into this node as binary data, e.g. 
by inserting a 'Read/Write Files from Disk' node beforehand","name":"sqLiteFileNotice","type":"notice","default":"","displayOptions":{"show":{"agent":["sqlAgent"],"dataSource":["sqlite"]}}},{"displayName":"Input Binary Field","name":"binaryPropertyName","type":"string","default":"data","required":true,"placeholder":"e.g data","hint":"The name of the input binary field containing the file to be extracted","displayOptions":{"show":{"agent":["sqlAgent"],"dataSource":["sqlite"]}}},{"displayName":"Prompt","name":"input","type":"string","displayOptions":{"show":{"agent":["sqlAgent"],"@version":[{"_cnd":{"lte":1.2}}]}},"default":"","required":true,"typeOptions":{"rows":5}},{"displayName":"Source for Prompt (User Message)","name":"promptType","type":"options","options":[{"name":"Connected Chat Trigger Node","value":"auto","description":"Looks for an input field called 'chatInput' that is coming from a directly connected Chat Trigger"},{"name":"Connected Guardrails Node","value":"guardrails","description":"Looks for an input field called 'guardrailsInput' that is coming from a directly connected Guardrails Node"},{"name":"Define below","value":"define","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"auto","displayOptions":{"hide":{"@version":[{"_cnd":{"lte":1.2}}]},"show":{"agent":["sqlAgent"]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.guardrailsInput }}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["guardrails"]}},"displayOptions":{"show":{"promptType":["guardrails"],"@version":[{"_cnd":{"gte":1.7}}],"agent":["sqlAgent"]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.chatInput }}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["auto"]}},"displayOptions":{"show":{"promptType":["auto"],"@version":[{"_cnd":{"gte":1.7}}],"agent":["sqlAgent"]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"","placeholder":"e.g. Hello, how can you help me?","typeOptions":{"rows":2},"displayOptions":{"show":{"promptType":["define"],"agent":["sqlAgent"]}}},{"displayName":"Options","name":"options","type":"collection","displayOptions":{"show":{"agent":["sqlAgent"]}},"default":{},"placeholder":"Add Option","options":[{"displayName":"Ignored Tables","name":"ignoredTables","type":"string","default":"","description":"Comma-separated list of tables to ignore from the database. If empty, no tables are ignored."},{"displayName":"Include Sample Rows","name":"includedSampleRows","type":"number","description":"Number of sample rows to include in the prompt to the agent. It helps the agent to understand the schema of the database but it also increases the amount of tokens used.","default":3},{"displayName":"Included Tables","name":"includedTables","type":"string","default":"","description":"Comma-separated list of tables to include in the database. 
If empty, all tables are included."},{"displayName":"Prefix Prompt","name":"prefixPrompt","type":"string","default":"You are an agent designed to interact with an SQL database.\nGiven an input question, create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.\nUnless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results using the LIMIT clause.\nYou can order the results by a relevant column to return the most interesting examples in the database.\nNever query for all the columns from a specific table, only ask for a the few relevant columns given the question.\nYou have access to tools for interacting with the database.\nOnly use the below tools. Only use the information returned by the below tools to construct your final answer.\nYou MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.\n\nDO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.\n\nIf the question does not seem related to the database, just return \"I don't know\" as the answer.","description":"Prefix prompt to use for the agent","typeOptions":{"rows":10}},{"displayName":"Suffix Prompt","name":"suffixPrompt","type":"string","default":"Begin!\nChat History:\n{chatHistory}\n\nQuestion: {input}\nThought: I should look at the tables in the database to see what I can query.\n{agent_scratchpad}","description":"Suffix prompt to use for the agent","typeOptions":{"rows":4}},{"displayName":"Limit","name":"topK","type":"number","default":10,"description":"The maximum number of results to return"}]},{"displayName":"Text","name":"text","type":"string","required":true,"displayOptions":{"show":{"agent":["planAndExecuteAgent"],"@version":[1]}},"default":"={{ $json.input }}"},{"displayName":"Text","name":"text","type":"string","required":true,"displayOptions":{"show":{"agent":["planAndExecuteAgent"],"@version":[1.1]}},"default":"={{ $json.chat_input }}"},{"displayName":"Text","name":"text","type":"string","required":true,"displayOptions":{"show":{"agent":["planAndExecuteAgent"],"@version":[1.2]}},"default":"={{ $json.chatInput }}"},{"displayName":"Options","name":"options","type":"collection","displayOptions":{"show":{"agent":["planAndExecuteAgent"]}},"default":{},"placeholder":"Add Option","options":[{"displayName":"Human Message Template","name":"humanMessageTemplate","type":"string","default":"Previous steps: {previous_steps}\n\nCurrent objective: {current_step}\n\n{agent_scratchpad}\n\nYou may extract and combine relevant data from your previous steps when responding to me.","description":"The message that will be sent to the agent during each step execution","typeOptions":{"rows":6}}]}]},
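The legacy (v1.x) definition above still ships the SQL Agent, whose default Prefix and Suffix prompts contain `{dialect}`, `{top_k}`, `{chatHistory}`, `{input}`, and `{agent_scratchpad}` placeholders that LangChain's prompt templates fill at run time. The snippet below is only a minimal, stand-alone illustration of that substitution; the `fillTemplate` helper is hypothetical and the prompt text is abridged from the default above.

```typescript
// Minimal illustration of placeholder substitution in the SQL Agent prompts.
// `fillTemplate` is a hypothetical helper; the real work is done by LangChain templates.
function fillTemplate(template: string, values: Record<string, string>): string {
  return template.replace(/\{(\w+)\}/g, (match, key: string) =>
    key in values ? values[key] : match,
  );
}

const prefix =
  'Given an input question, create a syntactically correct {dialect} query to run, ' +
  'then look at the results of the query and return the answer. ' +
  'Always limit your query to at most {top_k} results using the LIMIT clause.';

// The node defaults (version >= 1.4) are dataSource "postgres" and topK 10.
console.log(fillTemplate(prefix, { dialect: 'postgres', top_k: '10' }));
```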
- {"displayName":"AI Agent","name":"agent","icon":"fa:robot","iconColor":"black","group":["transform"],"description":"Generates an action plan and executes it. Can use external tools.","codex":{"alias":["LangChain","Chat","Conversational","Plan and Execute","ReAct","Tools"],"categories":["AI"],"subcategories":{"AI":["Agents","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.agent/"}]}},"defaultVersion":3,"version":[3],"defaults":{"name":"AI Agent","color":"#404040"},"inputs":"={{\n\t\t\t\t((hasOutputParser, needsFallback) => {\n\t\t\t\t\tfunction getInputs(hasMainInput, hasOutputParser, needsFallback) {\n const getInputData = (inputs) => {\n return inputs.map(({ type, filter, displayName, required }) => {\n const input = {\n type,\n displayName,\n required,\n maxConnections: [\"ai_languageModel\", \"ai_memory\", \"ai_outputParser\"].includes(type) ? 1 : void 0\n };\n if (filter) {\n input.filter = filter;\n }\n return input;\n });\n };\n let specialInputs = [\n {\n type: \"ai_languageModel\",\n displayName: \"Chat Model\",\n required: true,\n filter: {\n excludedNodes: [\n \"@n8n/n8n-nodes-langchain.lmCohere\",\n \"@n8n/n8n-nodes-langchain.lmOllama\",\n \"n8n/n8n-nodes-langchain.lmOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference\"\n ]\n }\n },\n {\n type: \"ai_languageModel\",\n displayName: \"Fallback Model\",\n required: true,\n filter: {\n excludedNodes: [\n \"@n8n/n8n-nodes-langchain.lmCohere\",\n \"@n8n/n8n-nodes-langchain.lmOllama\",\n \"n8n/n8n-nodes-langchain.lmOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference\"\n ]\n }\n },\n {\n displayName: \"Memory\",\n type: \"ai_memory\"\n },\n {\n displayName: \"Tool\",\n type: \"ai_tool\"\n },\n {\n displayName: \"Output Parser\",\n type: \"ai_outputParser\"\n }\n ];\n if (hasOutputParser === false) {\n specialInputs = specialInputs.filter((input) => input.type !== \"ai_outputParser\");\n }\n if (needsFallback === false) {\n specialInputs = specialInputs.filter((input) => input.displayName !== \"Fallback Model\");\n }\n const mainInputs = hasMainInput ? 
[\"main\"] : [];\n return [...mainInputs, ...getInputData(specialInputs)];\n};\n\t\t\t\t\treturn getInputs(true, hasOutputParser, needsFallback);\n\t\t\t\t})($parameter.hasOutputParser === undefined || $parameter.hasOutputParser === true, $parameter.needsFallback !== undefined && $parameter.needsFallback === true)\n\t\t\t}}","outputs":["main"],"properties":[{"displayName":"Tip: Get a feel for agents with our quick <a href=\"https://docs.n8n.io/advanced-ai/intro-tutorial/\" target=\"_blank\">tutorial</a> or see an <a href=\"/workflows/templates/1954\" target=\"_blank\">example</a> of how this node works","name":"aiAgentStarterCallout","type":"callout","default":""},{"displayName":"Source for Prompt (User Message)","name":"promptType","type":"options","options":[{"name":"Connected Chat Trigger Node","value":"auto","description":"Looks for an input field called 'chatInput' that is coming from a directly connected Chat Trigger"},{"name":"Connected Guardrails Node","value":"guardrails","description":"Looks for an input field called 'guardrailsInput' that is coming from a directly connected Guardrails Node"},{"name":"Define below","value":"define","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"auto"},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.guardrailsInput }}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["guardrails"]}},"displayOptions":{"show":{"promptType":["guardrails"]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.chatInput }}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["auto"]}},"displayOptions":{"show":{"promptType":["auto"]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"","placeholder":"e.g. 
Hello, how can you help me?","typeOptions":{"rows":2},"displayOptions":{"show":{"promptType":["define"]}}},{"displayName":"Require Specific Output Format","name":"hasOutputParser","type":"boolean","default":false,"noDataExpression":true},{"displayName":"Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='ai_outputParser'>output parser</a> on the canvas to specify the output format you require","name":"notice","type":"notice","default":"","displayOptions":{"show":{"hasOutputParser":[true]}}},{"displayName":"Enable Fallback Model","name":"needsFallback","type":"boolean","default":false,"noDataExpression":true},{"displayName":"Connect an additional language model on the canvas to use it as a fallback if the main model fails","name":"fallbackNotice","type":"notice","default":"","displayOptions":{"show":{"needsFallback":[true]}}},{"displayName":"Options","name":"options","type":"collection","default":{},"placeholder":"Add Option","options":[{"displayName":"System Message","name":"systemMessage","type":"string","default":"You are a helpful assistant","description":"The message that will be sent to the agent before the conversation starts","typeOptions":{"rows":6}},{"displayName":"Max Iterations","name":"maxIterations","type":"number","default":10,"description":"The maximum number of iterations the agent will run before stopping"},{"displayName":"Return Intermediate Steps","name":"returnIntermediateSteps","type":"boolean","default":false,"description":"Whether or not the output should include intermediate steps the agent took"},{"displayName":"Automatically Passthrough Binary Images","name":"passthroughBinaryImages","type":"boolean","default":true,"description":"Whether or not binary images should be automatically passed through to the agent as image type messages"},{"displayName":"Enable Streaming","name":"enableStreaming","type":"boolean","default":true,"description":"Whether this agent will stream the response in real-time as it generates text"},{"displayName":"Batch Processing","name":"batching","type":"collection","placeholder":"Add Batch Processing Option","description":"Batch processing options for rate limiting","default":{},"options":[{"displayName":"Batch Size","name":"batchSize","default":1,"type":"number","description":"How many items to process in parallel. This is useful for rate limiting, but might impact the log output ordering."},{"displayName":"Delay Between Batches","name":"delayBetweenBatches","default":0,"type":"number","description":"Delay in milliseconds between batches. This is useful for rate limiting."}]},{"displayName":"Max Tokens To Read From Memory","name":"maxTokensFromMemory","type":"hidden","default":0,"description":"The maximum number of tokens to read from the chat memory history. Set to 0 to read all history."}]}],"hints":[{"message":"You are using streaming responses. Make sure to set the response mode to \"Streaming Response\" on the connected trigger node.","type":"warning","location":"outputPane","whenToDisplay":"afterExecution","displayCondition":"={{ $parameter[\"enableStreaming\"] === true }}"}]},
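The v3 definition above carries the streaming hint plus the Batch Processing collection; per the option descriptions, `batchSize` items are processed in parallel (which may reorder log output) and `delayBetweenBatches` milliseconds elapse between batches. The following is a minimal sketch of those documented semantics, assuming only what the descriptions state; it is not n8n's internal executor.

```typescript
// Sketch of the Batch Size / Delay Between Batches semantics described above.
async function processInBatches<T, R>(
  items: T[],
  batchSize: number,
  delayBetweenBatches: number,
  handler: (item: T) => Promise<R>,
): Promise<R[]> {
  const results: R[] = [];
  for (let i = 0; i < items.length; i += batchSize) {
    const batch = items.slice(i, i + batchSize);
    // Items within a batch run in parallel, which can reorder log output.
    results.push(...(await Promise.all(batch.map((item) => handler(item)))));
    // Pause between batches, useful for rate limiting.
    if (delayBetweenBatches > 0 && i + batchSize < items.length) {
      await new Promise((resolve) => setTimeout(resolve, delayBetweenBatches));
    }
  }
  return results;
}
```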
- {"displayName":"AI Agent Tool","name":"agentTool","icon":"fa:robot","iconColor":"black","group":["transform"],"description":"Generates an action plan and executes it. Can use external tools.","codex":{"alias":["LangChain","Chat","Conversational","Plan and Execute","ReAct","Tools"],"categories":["AI"],"subcategories":{"AI":["Tools"],"Tools":["Other Tools"]}},"defaultVersion":2.2,"version":[2.2],"defaults":{"name":"AI Agent Tool","color":"#404040"},"inputs":"={{\n\t\t\t\t((hasOutputParser, needsFallback) => {\n\t\t\t\t\tfunction getInputs(hasMainInput, hasOutputParser, needsFallback) {\n const getInputData = (inputs) => {\n return inputs.map(({ type, filter, displayName, required }) => {\n const input = {\n type,\n displayName,\n required,\n maxConnections: [\"ai_languageModel\", \"ai_memory\", \"ai_outputParser\"].includes(type) ? 1 : void 0\n };\n if (filter) {\n input.filter = filter;\n }\n return input;\n });\n };\n let specialInputs = [\n {\n type: \"ai_languageModel\",\n displayName: \"Chat Model\",\n required: true,\n filter: {\n excludedNodes: [\n \"@n8n/n8n-nodes-langchain.lmCohere\",\n \"@n8n/n8n-nodes-langchain.lmOllama\",\n \"n8n/n8n-nodes-langchain.lmOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference\"\n ]\n }\n },\n {\n type: \"ai_languageModel\",\n displayName: \"Fallback Model\",\n required: true,\n filter: {\n excludedNodes: [\n \"@n8n/n8n-nodes-langchain.lmCohere\",\n \"@n8n/n8n-nodes-langchain.lmOllama\",\n \"n8n/n8n-nodes-langchain.lmOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference\"\n ]\n }\n },\n {\n displayName: \"Memory\",\n type: \"ai_memory\"\n },\n {\n displayName: \"Tool\",\n type: \"ai_tool\"\n },\n {\n displayName: \"Output Parser\",\n type: \"ai_outputParser\"\n }\n ];\n if (hasOutputParser === false) {\n specialInputs = specialInputs.filter((input) => input.type !== \"ai_outputParser\");\n }\n if (needsFallback === false) {\n specialInputs = specialInputs.filter((input) => input.displayName !== \"Fallback Model\");\n }\n const mainInputs = hasMainInput ? [\"main\"] : [];\n return [...mainInputs, ...getInputData(specialInputs)];\n};\n\t\t\t\t\treturn getInputs(false, hasOutputParser, needsFallback)\n\t\t\t\t})($parameter.hasOutputParser === undefined || $parameter.hasOutputParser === true, $parameter.needsFallback !== undefined && $parameter.needsFallback === true)\n\t\t\t}}","outputs":["ai_tool"],"properties":[{"displayName":"Description","name":"toolDescription","type":"string","default":"AI Agent that can call other tools","required":true,"typeOptions":{"rows":2},"description":"Explain to the LLM what this tool does, a good, specific description would allow LLMs to produce expected results much more often"},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"","placeholder":"e.g. 
Hello, how can you help me?","typeOptions":{"rows":2}},{"displayName":"Require Specific Output Format","name":"hasOutputParser","type":"boolean","default":false,"noDataExpression":true},{"displayName":"Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='ai_outputParser'>output parser</a> on the canvas to specify the output format you require","name":"notice","type":"notice","default":"","displayOptions":{"show":{"hasOutputParser":[true]}}},{"displayName":"Enable Fallback Model","name":"needsFallback","type":"boolean","default":false,"noDataExpression":true,"displayOptions":{"show":{"@version":[{"_cnd":{"gte":2.1}}]}}},{"displayName":"Connect an additional language model on the canvas to use it as a fallback if the main model fails","name":"fallbackNotice","type":"notice","default":"","displayOptions":{"show":{"needsFallback":[true]}}},{"displayName":"Options","name":"options","type":"collection","default":{},"placeholder":"Add Option","options":[{"displayName":"System Message","name":"systemMessage","type":"string","default":"You are a helpful assistant","description":"The message that will be sent to the agent before the conversation starts","typeOptions":{"rows":6}},{"displayName":"Max Iterations","name":"maxIterations","type":"number","default":10,"description":"The maximum number of iterations the agent will run before stopping"},{"displayName":"Return Intermediate Steps","name":"returnIntermediateSteps","type":"boolean","default":false,"description":"Whether or not the output should include intermediate steps the agent took"},{"displayName":"Automatically Passthrough Binary Images","name":"passthroughBinaryImages","type":"boolean","default":true,"description":"Whether or not binary images should be automatically passed through to the agent as image type messages"},{"displayName":"Batch Processing","name":"batching","type":"collection","placeholder":"Add Batch Processing Option","description":"Batch processing options for rate limiting","default":{},"options":[{"displayName":"Batch Size","name":"batchSize","default":1,"type":"number","description":"How many items to process in parallel. This is useful for rate limiting, but might impact the log output ordering."},{"displayName":"Delay Between Batches","name":"delayBetweenBatches","default":0,"type":"number","description":"Delay in milliseconds between batches. 
This is useful for rate limiting."}]}],"displayOptions":{"hide":{"@version":[{"_cnd":{"lt":2.2}}]}}},{"displayName":"Options","name":"options","type":"collection","default":{},"placeholder":"Add Option","options":[{"displayName":"System Message","name":"systemMessage","type":"string","default":"You are a helpful assistant","description":"The message that will be sent to the agent before the conversation starts","typeOptions":{"rows":6}},{"displayName":"Max Iterations","name":"maxIterations","type":"number","default":10,"description":"The maximum number of iterations the agent will run before stopping"},{"displayName":"Return Intermediate Steps","name":"returnIntermediateSteps","type":"boolean","default":false,"description":"Whether or not the output should include intermediate steps the agent took"},{"displayName":"Automatically Passthrough Binary Images","name":"passthroughBinaryImages","type":"boolean","default":true,"description":"Whether or not binary images should be automatically passed through to the agent as image type messages"},{"displayName":"Batch Processing","name":"batching","type":"collection","placeholder":"Add Batch Processing Option","description":"Batch processing options for rate limiting","default":{},"options":[{"displayName":"Batch Size","name":"batchSize","default":1,"type":"number","description":"How many items to process in parallel. This is useful for rate limiting, but might impact the log output ordering."},{"displayName":"Delay Between Batches","name":"delayBetweenBatches","default":0,"type":"number","description":"Delay in milliseconds between batches. This is useful for rate limiting."}]}],"displayOptions":{"show":{"@version":[{"_cnd":{"lt":2.2}}]}}}]},
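The `inputs` value in each agent definition above and below is an n8n expression: the escaped function inside the `={{ ... }}` string is evaluated to build the connection list at render time. For readability, here is that logic lifted out of the escaped string into a standalone TypeScript sketch; the types are illustrative additions and the `excludedNodes` filters on the two model inputs are omitted for brevity, but the filtering behaviour mirrors the embedded code.

    // Restatement of the getInputs logic embedded in the `inputs` expression.
    type AgentConnection = 'main' | 'ai_languageModel' | 'ai_memory' | 'ai_tool' | 'ai_outputParser';

    interface SpecialInput {
      type: AgentConnection;
      displayName: string;
      required?: boolean;
      maxConnections?: number;
    }

    function getInputs(hasMainInput: boolean, hasOutputParser: boolean, needsFallback: boolean) {
      let specialInputs: SpecialInput[] = [
        { type: 'ai_languageModel', displayName: 'Chat Model', required: true },
        { type: 'ai_languageModel', displayName: 'Fallback Model', required: true },
        { type: 'ai_memory', displayName: 'Memory' },
        { type: 'ai_tool', displayName: 'Tool' },
        { type: 'ai_outputParser', displayName: 'Output Parser' },
      ];

      // The parser slot disappears when "Require Specific Output Format" is off,
      // and the second model slot disappears when "Enable Fallback Model" is off.
      if (!hasOutputParser) specialInputs = specialInputs.filter((i) => i.type !== 'ai_outputParser');
      if (!needsFallback) specialInputs = specialInputs.filter((i) => i.displayName !== 'Fallback Model');

      // Language model, memory and output parser inputs accept at most one connection.
      const singleConnection: AgentConnection[] = ['ai_languageModel', 'ai_memory', 'ai_outputParser'];
      const mapped = specialInputs.map((input) => ({
        ...input,
        maxConnections: singleConnection.includes(input.type) ? 1 : undefined,
      }));

      return [...(hasMainInput ? ['main' as const] : []), ...mapped];
    }

    // The tool variant calls getInputs(false, ...), so it exposes no `main` input
    // and is consumed through its single `ai_tool` output instead.
    console.log(getInputs(false, true, false));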
+ {"displayName":"AI Agent","name":"agent","icon":"fa:robot","iconColor":"black","group":["transform"],"description":"Generates an action plan and executes it. Can use external tools.","codex":{"alias":["LangChain","Chat","Conversational","Plan and Execute","ReAct","Tools"],"categories":["AI"],"subcategories":{"AI":["Agents","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.agent/"}]}},"defaultVersion":3.1,"version":[3,3.1],"defaults":{"name":"AI Agent","color":"#404040"},"inputs":"={{\n\t\t\t\t((hasOutputParser, needsFallback) => {\n\t\t\t\t\tfunction getInputs(hasMainInput, hasOutputParser, needsFallback) {\n const getInputData = (inputs) => {\n return inputs.map(({ type, filter, displayName, required }) => {\n const input = {\n type,\n displayName,\n required,\n maxConnections: [\"ai_languageModel\", \"ai_memory\", \"ai_outputParser\"].includes(type) ? 1 : void 0\n };\n if (filter) {\n input.filter = filter;\n }\n return input;\n });\n };\n let specialInputs = [\n {\n type: \"ai_languageModel\",\n displayName: \"Chat Model\",\n required: true,\n filter: {\n excludedNodes: [\n \"@n8n/n8n-nodes-langchain.lmCohere\",\n \"@n8n/n8n-nodes-langchain.lmOllama\",\n \"n8n/n8n-nodes-langchain.lmOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference\"\n ]\n }\n },\n {\n type: \"ai_languageModel\",\n displayName: \"Fallback Model\",\n required: true,\n filter: {\n excludedNodes: [\n \"@n8n/n8n-nodes-langchain.lmCohere\",\n \"@n8n/n8n-nodes-langchain.lmOllama\",\n \"n8n/n8n-nodes-langchain.lmOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference\"\n ]\n }\n },\n {\n displayName: \"Memory\",\n type: \"ai_memory\"\n },\n {\n displayName: \"Tool\",\n type: \"ai_tool\"\n },\n {\n displayName: \"Output Parser\",\n type: \"ai_outputParser\"\n }\n ];\n if (hasOutputParser === false) {\n specialInputs = specialInputs.filter((input) => input.type !== \"ai_outputParser\");\n }\n if (needsFallback === false) {\n specialInputs = specialInputs.filter((input) => input.displayName !== \"Fallback Model\");\n }\n const mainInputs = hasMainInput ? 
[\"main\"] : [];\n return [...mainInputs, ...getInputData(specialInputs)];\n};\n\t\t\t\t\treturn getInputs(true, hasOutputParser, needsFallback);\n\t\t\t\t})($parameter.hasOutputParser === undefined || $parameter.hasOutputParser === true, $parameter.needsFallback !== undefined && $parameter.needsFallback === true)\n\t\t\t}}","outputs":["main"],"properties":[{"displayName":"Tip: Get a feel for agents with our quick <a href=\"https://docs.n8n.io/advanced-ai/intro-tutorial/\" target=\"_blank\">tutorial</a> or see an <a href=\"/workflows/templates/1954\" target=\"_blank\">example</a> of how this node works","name":"aiAgentStarterCallout","type":"callout","default":""},{"displayName":"Source for Prompt (User Message)","name":"promptType","type":"options","options":[{"name":"Connected Chat Trigger Node","value":"auto","description":"Looks for an input field called 'chatInput' that is coming from a directly connected Chat Trigger"},{"name":"Connected Guardrails Node","value":"guardrails","description":"Looks for an input field called 'guardrailsInput' that is coming from a directly connected Guardrails Node"},{"name":"Define below","value":"define","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"auto","displayOptions":{"show":{"@version":[{"_cnd":{"lt":3.1}}]}}},{"displayName":"Source for Prompt (User Message)","name":"promptType","type":"options","options":[{"name":"Connected Chat Trigger Node","value":"auto","description":"Looks for an input field called 'chatInput' that is coming from a directly connected Chat Trigger"},{"name":"Define below","value":"define","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"auto","displayOptions":{"show":{"@version":[{"_cnd":{"gte":3.1}}]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.guardrailsInput }}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["guardrails"]}},"displayOptions":{"show":{"promptType":["guardrails"]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.chatInput }}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["auto"]}},"displayOptions":{"show":{"promptType":["auto"]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"","placeholder":"e.g. 
Hello, how can you help me?","typeOptions":{"rows":2},"displayOptions":{"show":{"promptType":["define"]}}},{"displayName":"Require Specific Output Format","name":"hasOutputParser","type":"boolean","default":false,"noDataExpression":true},{"displayName":"Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='ai_outputParser'>output parser</a> on the canvas to specify the output format you require","name":"notice","type":"notice","default":"","displayOptions":{"show":{"hasOutputParser":[true]}}},{"displayName":"Enable Fallback Model","name":"needsFallback","type":"boolean","default":false,"noDataExpression":true},{"displayName":"Connect an additional language model on the canvas to use it as a fallback if the main model fails","name":"fallbackNotice","type":"notice","default":"","displayOptions":{"show":{"needsFallback":[true]}}},{"displayName":"Options","name":"options","type":"collection","default":{},"placeholder":"Add Option","options":[{"displayName":"System Message","name":"systemMessage","type":"string","default":"You are a helpful assistant","description":"The message that will be sent to the agent before the conversation starts","typeOptions":{"rows":6}},{"displayName":"Max Iterations","name":"maxIterations","type":"number","default":10,"description":"The maximum number of iterations the agent will run before stopping"},{"displayName":"Return Intermediate Steps","name":"returnIntermediateSteps","type":"boolean","default":false,"description":"Whether or not the output should include intermediate steps the agent took"},{"displayName":"Automatically Passthrough Binary Images","name":"passthroughBinaryImages","type":"boolean","default":true,"description":"Whether or not binary images should be automatically passed through to the agent as image type messages"},{"displayName":"Enable Streaming","name":"enableStreaming","type":"boolean","default":true,"description":"Whether this agent will stream the response in real-time as it generates text"},{"displayName":"Batch Processing","name":"batching","type":"collection","placeholder":"Add Batch Processing Option","description":"Batch processing options for rate limiting","default":{},"options":[{"displayName":"Batch Size","name":"batchSize","default":1,"type":"number","description":"How many items to process in parallel. This is useful for rate limiting, but might impact the log output ordering."},{"displayName":"Delay Between Batches","name":"delayBetweenBatches","default":0,"type":"number","description":"Delay in milliseconds between batches. This is useful for rate limiting."}]},{"displayName":"Max Tokens To Read From Memory","name":"maxTokensFromMemory","type":"hidden","default":0,"description":"The maximum number of tokens to read from the chat memory history. Set to 0 to read all history."}]}],"hints":[{"message":"You are using streaming responses. Make sure to set the response mode to \"Streaming Response\" on the connected trigger node.","type":"warning","location":"outputPane","whenToDisplay":"afterExecution","displayCondition":"={{ $parameter[\"enableStreaming\"] === true }}"}]},
+ {"displayName":"AI Agent","name":"agent","icon":"fa:robot","iconColor":"black","group":["transform"],"description":"Generates an action plan and executes it. Can use external tools.","codex":{"alias":["LangChain","Chat","Conversational","Plan and Execute","ReAct","Tools"],"categories":["AI"],"subcategories":{"AI":["Agents","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.agent/"}]}},"defaultVersion":3.1,"version":[2,2.1,2.2],"defaults":{"name":"AI Agent","color":"#404040"},"inputs":"={{\n\t\t\t\t((hasOutputParser, needsFallback) => {\n\t\t\t\t\tfunction getInputs(hasMainInput, hasOutputParser, needsFallback) {\n const getInputData = (inputs) => {\n return inputs.map(({ type, filter, displayName, required }) => {\n const input = {\n type,\n displayName,\n required,\n maxConnections: [\"ai_languageModel\", \"ai_memory\", \"ai_outputParser\"].includes(type) ? 1 : void 0\n };\n if (filter) {\n input.filter = filter;\n }\n return input;\n });\n };\n let specialInputs = [\n {\n type: \"ai_languageModel\",\n displayName: \"Chat Model\",\n required: true,\n filter: {\n excludedNodes: [\n \"@n8n/n8n-nodes-langchain.lmCohere\",\n \"@n8n/n8n-nodes-langchain.lmOllama\",\n \"n8n/n8n-nodes-langchain.lmOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference\"\n ]\n }\n },\n {\n type: \"ai_languageModel\",\n displayName: \"Fallback Model\",\n required: true,\n filter: {\n excludedNodes: [\n \"@n8n/n8n-nodes-langchain.lmCohere\",\n \"@n8n/n8n-nodes-langchain.lmOllama\",\n \"n8n/n8n-nodes-langchain.lmOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference\"\n ]\n }\n },\n {\n displayName: \"Memory\",\n type: \"ai_memory\"\n },\n {\n displayName: \"Tool\",\n type: \"ai_tool\"\n },\n {\n displayName: \"Output Parser\",\n type: \"ai_outputParser\"\n }\n ];\n if (hasOutputParser === false) {\n specialInputs = specialInputs.filter((input) => input.type !== \"ai_outputParser\");\n }\n if (needsFallback === false) {\n specialInputs = specialInputs.filter((input) => input.displayName !== \"Fallback Model\");\n }\n const mainInputs = hasMainInput ? 
[\"main\"] : [];\n return [...mainInputs, ...getInputData(specialInputs)];\n};\n\t\t\t\t\treturn getInputs(true, hasOutputParser, needsFallback);\n\t\t\t\t})($parameter.hasOutputParser === undefined || $parameter.hasOutputParser === true, $parameter.needsFallback !== undefined && $parameter.needsFallback === true)\n\t\t\t}}","outputs":["main"],"properties":[{"displayName":"Tip: Get a feel for agents with our quick <a href=\"https://docs.n8n.io/advanced-ai/intro-tutorial/\" target=\"_blank\">tutorial</a> or see an <a href=\"/workflows/templates/1954\" target=\"_blank\">example</a> of how this node works","name":"aiAgentStarterCallout","type":"callout","default":""},{"displayName":"Get started faster with our","name":"preBuiltAgentsCallout","type":"callout","typeOptions":{"calloutAction":{"label":"pre-built agents","icon":"bot","type":"openPreBuiltAgentsCollection"}},"default":""},{"displayName":"Source for Prompt (User Message)","name":"promptType","type":"options","options":[{"name":"Connected Chat Trigger Node","value":"auto","description":"Looks for an input field called 'chatInput' that is coming from a directly connected Chat Trigger"},{"name":"Connected Guardrails Node","value":"guardrails","description":"Looks for an input field called 'guardrailsInput' that is coming from a directly connected Guardrails Node"},{"name":"Define below","value":"define","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"auto"},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.guardrailsInput }}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["guardrails"]}},"displayOptions":{"show":{"promptType":["guardrails"]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.chatInput }}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["auto"]}},"displayOptions":{"show":{"promptType":["auto"]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"","placeholder":"e.g. 
Hello, how can you help me?","typeOptions":{"rows":2},"displayOptions":{"show":{"promptType":["define"]}}},{"displayName":"Require Specific Output Format","name":"hasOutputParser","type":"boolean","default":false,"noDataExpression":true},{"displayName":"Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='ai_outputParser'>output parser</a> on the canvas to specify the output format you require","name":"notice","type":"notice","default":"","displayOptions":{"show":{"hasOutputParser":[true]}}},{"displayName":"Enable Fallback Model","name":"needsFallback","type":"boolean","default":false,"noDataExpression":true,"displayOptions":{"show":{"@version":[{"_cnd":{"gte":2.1}}]}}},{"displayName":"Connect an additional language model on the canvas to use it as a fallback if the main model fails","name":"fallbackNotice","type":"notice","default":"","displayOptions":{"show":{"needsFallback":[true]}}},{"displayName":"Options","name":"options","type":"collection","default":{},"placeholder":"Add Option","options":[{"displayName":"System Message","name":"systemMessage","type":"string","default":"You are a helpful assistant","description":"The message that will be sent to the agent before the conversation starts","typeOptions":{"rows":6}},{"displayName":"Max Iterations","name":"maxIterations","type":"number","default":10,"description":"The maximum number of iterations the agent will run before stopping"},{"displayName":"Return Intermediate Steps","name":"returnIntermediateSteps","type":"boolean","default":false,"description":"Whether or not the output should include intermediate steps the agent took"},{"displayName":"Automatically Passthrough Binary Images","name":"passthroughBinaryImages","type":"boolean","default":true,"description":"Whether or not binary images should be automatically passed through to the agent as image type messages"},{"displayName":"Batch Processing","name":"batching","type":"collection","placeholder":"Add Batch Processing Option","description":"Batch processing options for rate limiting","default":{},"options":[{"displayName":"Batch Size","name":"batchSize","default":1,"type":"number","description":"How many items to process in parallel. This is useful for rate limiting, but might impact the log output ordering."},{"displayName":"Delay Between Batches","name":"delayBetweenBatches","default":0,"type":"number","description":"Delay in milliseconds between batches. 
This is useful for rate limiting."}]},{"displayName":"Enable Streaming","name":"enableStreaming","type":"boolean","default":true,"description":"Whether this agent will stream the response in real-time as it generates text"}],"displayOptions":{"hide":{"@version":[{"_cnd":{"lt":2.2}}]}}},{"displayName":"Options","name":"options","type":"collection","default":{},"placeholder":"Add Option","options":[{"displayName":"System Message","name":"systemMessage","type":"string","default":"You are a helpful assistant","description":"The message that will be sent to the agent before the conversation starts","typeOptions":{"rows":6}},{"displayName":"Max Iterations","name":"maxIterations","type":"number","default":10,"description":"The maximum number of iterations the agent will run before stopping"},{"displayName":"Return Intermediate Steps","name":"returnIntermediateSteps","type":"boolean","default":false,"description":"Whether or not the output should include intermediate steps the agent took"},{"displayName":"Automatically Passthrough Binary Images","name":"passthroughBinaryImages","type":"boolean","default":true,"description":"Whether or not binary images should be automatically passed through to the agent as image type messages"},{"displayName":"Batch Processing","name":"batching","type":"collection","placeholder":"Add Batch Processing Option","description":"Batch processing options for rate limiting","default":{},"options":[{"displayName":"Batch Size","name":"batchSize","default":1,"type":"number","description":"How many items to process in parallel. This is useful for rate limiting, but might impact the log output ordering."},{"displayName":"Delay Between Batches","name":"delayBetweenBatches","default":0,"type":"number","description":"Delay in milliseconds between batches. This is useful for rate limiting."}]}],"displayOptions":{"show":{"@version":[{"_cnd":{"lt":2.2}}]}}}],"hints":[{"message":"You are using streaming responses. Make sure to set the response mode to \"Streaming Response\" on the connected trigger node.","type":"warning","location":"outputPane","whenToDisplay":"afterExecution","displayCondition":"={{ $parameter[\"enableStreaming\"] === true }}"}]},
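The "Batch Processing" collection that appears in both Options variants above exposes `batchSize` (how many items run in parallel) and `delayBetweenBatches` (milliseconds to wait between batches). The sketch below illustrates that pattern with a generic async handler; it is a hypothetical helper, not the node's internal executor.

    // Illustrative batching helper: run `batchSize` items concurrently, then pause.
    async function processInBatches<T, R>(
      items: T[],
      handler: (item: T) => Promise<R>,
      batchSize = 1,
      delayBetweenBatches = 0,
    ): Promise<R[]> {
      const results: R[] = [];
      for (let start = 0; start < items.length; start += batchSize) {
        const batch = items.slice(start, start + batchSize);
        // Items inside a batch run in parallel, which is why log ordering may change.
        results.push(...(await Promise.all(batch.map(handler))));
        if (delayBetweenBatches > 0 && start + batchSize < items.length) {
          await new Promise((resolve) => setTimeout(resolve, delayBetweenBatches));
        }
      }
      return results;
    }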
+ {"version":[1,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9],"displayName":"AI Agent","name":"agent","icon":"fa:robot","iconColor":"black","group":["transform"],"description":"Generates an action plan and executes it. Can use external tools.","codex":{"alias":["LangChain","Chat","Conversational","Plan and Execute","ReAct","Tools"],"categories":["AI"],"subcategories":{"AI":["Agents","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.agent/"}]}},"defaultVersion":3.1,"defaults":{"name":"AI Agent","color":"#404040"},"inputs":"={{\n\t\t\t\t((agent, hasOutputParser) => {\n\t\t\t\t\tfunction getInputs(agent, hasOutputParser) {\n const getInputData = (inputs) => {\n const displayNames = {\n ai_languageModel: \"Model\",\n ai_memory: \"Memory\",\n ai_tool: \"Tool\",\n ai_outputParser: \"Output Parser\"\n };\n return inputs.map(({ type, filter }) => {\n const isModelType = type === \"ai_languageModel\";\n let displayName = type in displayNames ? displayNames[type] : void 0;\n if (isModelType && [\"openAiFunctionsAgent\", \"toolsAgent\", \"conversationalAgent\"].includes(agent)) {\n displayName = \"Chat Model\";\n }\n const input = {\n type,\n displayName,\n required: isModelType,\n maxConnections: [\"ai_languageModel\", \"ai_memory\", \"ai_outputParser\"].includes(type) ? 1 : void 0\n };\n if (filter) {\n input.filter = filter;\n }\n return input;\n });\n };\n let specialInputs = [];\n if (agent === \"conversationalAgent\") {\n specialInputs = [\n {\n type: \"ai_languageModel\",\n filter: {\n nodes: [\n \"@n8n/n8n-nodes-langchain.lmChatAnthropic\",\n \"@n8n/n8n-nodes-langchain.lmChatAwsBedrock\",\n \"@n8n/n8n-nodes-langchain.lmChatGroq\",\n \"@n8n/n8n-nodes-langchain.lmChatLemonade\",\n \"@n8n/n8n-nodes-langchain.lmChatOllama\",\n \"@n8n/n8n-nodes-langchain.lmChatOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmChatGoogleGemini\",\n \"@n8n/n8n-nodes-langchain.lmChatGoogleVertex\",\n \"@n8n/n8n-nodes-langchain.lmChatMistralCloud\",\n \"@n8n/n8n-nodes-langchain.lmChatAzureOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmChatDeepSeek\",\n \"@n8n/n8n-nodes-langchain.lmChatOpenRouter\",\n \"@n8n/n8n-nodes-langchain.lmChatVercelAiGateway\",\n \"@n8n/n8n-nodes-langchain.lmChatXAiGrok\",\n \"@n8n/n8n-nodes-langchain.modelSelector\"\n ]\n }\n },\n {\n type: \"ai_memory\"\n },\n {\n type: \"ai_tool\"\n },\n {\n type: \"ai_outputParser\"\n }\n ];\n } else if (agent === \"toolsAgent\") {\n specialInputs = [\n {\n type: \"ai_languageModel\",\n filter: {\n nodes: [\n \"@n8n/n8n-nodes-langchain.lmChatAnthropic\",\n \"@n8n/n8n-nodes-langchain.lmChatAzureOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmChatAwsBedrock\",\n \"@n8n/n8n-nodes-langchain.lmChatLemonade\",\n \"@n8n/n8n-nodes-langchain.lmChatMistralCloud\",\n \"@n8n/n8n-nodes-langchain.lmChatOllama\",\n \"@n8n/n8n-nodes-langchain.lmChatOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmChatGroq\",\n \"@n8n/n8n-nodes-langchain.lmChatGoogleVertex\",\n \"@n8n/n8n-nodes-langchain.lmChatGoogleGemini\",\n \"@n8n/n8n-nodes-langchain.lmChatDeepSeek\",\n \"@n8n/n8n-nodes-langchain.lmChatOpenRouter\",\n \"@n8n/n8n-nodes-langchain.lmChatVercelAiGateway\",\n \"@n8n/n8n-nodes-langchain.lmChatXAiGrok\"\n ]\n }\n },\n {\n type: \"ai_memory\"\n },\n {\n type: \"ai_tool\",\n required: true\n },\n {\n type: \"ai_outputParser\"\n }\n ];\n } else if (agent === \"openAiFunctionsAgent\") {\n specialInputs = [\n {\n type: \"ai_languageModel\",\n filter: {\n nodes: [\n \"@n8n/n8n-nodes-langchain.lmChatOpenAi\",\n 
\"@n8n/n8n-nodes-langchain.lmChatAzureOpenAi\"\n ]\n }\n },\n {\n type: \"ai_memory\"\n },\n {\n type: \"ai_tool\",\n required: true\n },\n {\n type: \"ai_outputParser\"\n }\n ];\n } else if (agent === \"reActAgent\") {\n specialInputs = [\n {\n type: \"ai_languageModel\"\n },\n {\n type: \"ai_tool\"\n },\n {\n type: \"ai_outputParser\"\n }\n ];\n } else if (agent === \"sqlAgent\") {\n specialInputs = [\n {\n type: \"ai_languageModel\"\n },\n {\n type: \"ai_memory\"\n }\n ];\n } else if (agent === \"planAndExecuteAgent\") {\n specialInputs = [\n {\n type: \"ai_languageModel\"\n },\n {\n type: \"ai_tool\"\n },\n {\n type: \"ai_outputParser\"\n }\n ];\n }\n if (hasOutputParser === false) {\n specialInputs = specialInputs.filter((input) => input.type !== \"ai_outputParser\");\n }\n return [\"main\", ...getInputData(specialInputs)];\n};\n\t\t\t\t\treturn getInputs(agent, hasOutputParser)\n\t\t\t\t})($parameter.agent, $parameter.hasOutputParser === undefined || $parameter.hasOutputParser === true)\n\t\t\t}}","outputs":["main"],"credentials":[{"name":"mySql","required":true,"testedBy":"mysqlConnectionTest","displayOptions":{"show":{"agent":["sqlAgent"],"/dataSource":["mysql"]}}},{"name":"postgres","required":true,"displayOptions":{"show":{"agent":["sqlAgent"],"/dataSource":["postgres"]}}}],"properties":[{"displayName":"Tip: Get a feel for agents with our quick <a href=\"https://docs.n8n.io/advanced-ai/intro-tutorial/\" target=\"_blank\">tutorial</a> or see an <a href=\"/templates/1954\" target=\"_blank\">example</a> of how this node works","name":"aiAgentStarterCallout","type":"callout","default":"","displayOptions":{"show":{"agent":["conversationalAgent","toolsAgent"]}}},{"displayName":"Get started faster with our","name":"preBuiltAgentsCallout","type":"callout","typeOptions":{"calloutAction":{"label":"pre-built agents","icon":"bot","type":"openPreBuiltAgentsCollection"}},"default":""},{"displayName":"This node is using Agent that has been deprecated. Please switch to using 'Tools Agent' instead.","name":"deprecated","type":"notice","default":"","displayOptions":{"show":{"agent":["conversationalAgent","openAiFunctionsAgent","planAndExecuteAgent","reActAgent","sqlAgent"]}}},{"displayName":"Agent","name":"agent","type":"options","noDataExpression":true,"options":[{"name":"Conversational Agent","value":"conversationalAgent","description":"Describes tools in the system prompt and parses JSON responses for tool calls. More flexible but potentially less reliable than the Tools Agent. Suitable for simpler interactions or with models not supporting structured schemas."},{"name":"OpenAI Functions Agent","value":"openAiFunctionsAgent","description":"Leverages OpenAI's function calling capabilities to precisely select and execute tools. Excellent for tasks requiring structured outputs when working with OpenAI models."},{"name":"Plan and Execute Agent","value":"planAndExecuteAgent","description":"Creates a high-level plan for complex tasks and then executes each step. Suitable for multi-stage problems or when a strategic approach is needed."},{"name":"ReAct Agent","value":"reActAgent","description":"Combines reasoning and action in an iterative process. Effective for tasks that require careful analysis and step-by-step problem-solving."},{"name":"SQL Agent","value":"sqlAgent","description":"Specializes in interacting with SQL databases. 
Ideal for data analysis tasks, generating queries, or extracting insights from structured data."}],"default":"conversationalAgent","displayOptions":{"show":{"@version":[{"_cnd":{"lte":1.5}}]}}},{"displayName":"Agent","name":"agent","type":"options","noDataExpression":true,"options":[{"name":"Tools Agent","value":"toolsAgent","description":"Utilizes structured tool schemas for precise and reliable tool selection and execution. Recommended for complex tasks requiring accurate and consistent tool usage, but only usable with models that support tool calling."},{"name":"Conversational Agent","value":"conversationalAgent","description":"Describes tools in the system prompt and parses JSON responses for tool calls. More flexible but potentially less reliable than the Tools Agent. Suitable for simpler interactions or with models not supporting structured schemas."},{"name":"OpenAI Functions Agent","value":"openAiFunctionsAgent","description":"Leverages OpenAI's function calling capabilities to precisely select and execute tools. Excellent for tasks requiring structured outputs when working with OpenAI models."},{"name":"Plan and Execute Agent","value":"planAndExecuteAgent","description":"Creates a high-level plan for complex tasks and then executes each step. Suitable for multi-stage problems or when a strategic approach is needed."},{"name":"ReAct Agent","value":"reActAgent","description":"Combines reasoning and action in an iterative process. Effective for tasks that require careful analysis and step-by-step problem-solving."},{"name":"SQL Agent","value":"sqlAgent","description":"Specializes in interacting with SQL databases. Ideal for data analysis tasks, generating queries, or extracting insights from structured data."}],"default":"toolsAgent","displayOptions":{"show":{"@version":[{"_cnd":{"between":{"from":1.6,"to":1.7}}}]}}},{"displayName":"Agent","name":"agent","type":"hidden","noDataExpression":true,"options":[{"name":"Tools Agent","value":"toolsAgent","description":"Utilizes structured tool schemas for precise and reliable tool selection and execution. Recommended for complex tasks requiring accurate and consistent tool usage, but only usable with models that support tool calling."},{"name":"Conversational Agent","value":"conversationalAgent","description":"Describes tools in the system prompt and parses JSON responses for tool calls. More flexible but potentially less reliable than the Tools Agent. Suitable for simpler interactions or with models not supporting structured schemas."},{"name":"OpenAI Functions Agent","value":"openAiFunctionsAgent","description":"Leverages OpenAI's function calling capabilities to precisely select and execute tools. Excellent for tasks requiring structured outputs when working with OpenAI models."},{"name":"Plan and Execute Agent","value":"planAndExecuteAgent","description":"Creates a high-level plan for complex tasks and then executes each step. Suitable for multi-stage problems or when a strategic approach is needed."},{"name":"ReAct Agent","value":"reActAgent","description":"Combines reasoning and action in an iterative process. Effective for tasks that require careful analysis and step-by-step problem-solving."},{"name":"SQL Agent","value":"sqlAgent","description":"Specializes in interacting with SQL databases. 
Ideal for data analysis tasks, generating queries, or extracting insights from structured data."}],"default":"toolsAgent","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.8}}]}}},{"displayName":"Source for Prompt (User Message)","name":"promptType","type":"options","options":[{"name":"Connected Chat Trigger Node","value":"auto","description":"Looks for an input field called 'chatInput' that is coming from a directly connected Chat Trigger"},{"name":"Connected Guardrails Node","value":"guardrails","description":"Looks for an input field called 'guardrailsInput' that is coming from a directly connected Guardrails Node"},{"name":"Define below","value":"define","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"auto","displayOptions":{"hide":{"@version":[{"_cnd":{"lte":1.2}}],"agent":["sqlAgent"]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.guardrailsInput }}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["guardrails"]}},"displayOptions":{"show":{"promptType":["guardrails"],"@version":[{"_cnd":{"gte":1.7}}]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.chatInput }}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["auto"]}},"displayOptions":{"show":{"promptType":["auto"],"@version":[{"_cnd":{"gte":1.7}}]},"hide":{"agent":["sqlAgent"]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"","placeholder":"e.g. Hello, how can you help me?","typeOptions":{"rows":2},"displayOptions":{"show":{"promptType":["define"]},"hide":{"agent":["sqlAgent"]}}},{"displayName":"For more reliable structured output parsing, consider using the Tools agent","name":"notice","type":"notice","default":"","displayOptions":{"show":{"hasOutputParser":[true],"agent":["conversationalAgent","reActAgent","planAndExecuteAgent","openAiFunctionsAgent"]}}},{"displayName":"Require Specific Output Format","name":"hasOutputParser","type":"boolean","default":false,"noDataExpression":true,"displayOptions":{"hide":{"@version":[{"_cnd":{"lte":1.2}}],"agent":["sqlAgent"]}}},{"displayName":"Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='ai_outputParser'>output parser</a> on the canvas to specify the output format you require","name":"notice","type":"notice","default":"","displayOptions":{"show":{"hasOutputParser":[true],"agent":["toolsAgent"]}}},{"displayName":"Options","name":"options","type":"collection","displayOptions":{"show":{"agent":["toolsAgent"]}},"default":{},"placeholder":"Add Option","options":[{"displayName":"System Message","name":"systemMessage","type":"string","default":"You are a helpful assistant","description":"The message that will be sent to the agent before the conversation starts","typeOptions":{"rows":6}},{"displayName":"Max Iterations","name":"maxIterations","type":"number","default":10,"description":"The maximum number of iterations the agent will run before stopping"},{"displayName":"Return Intermediate Steps","name":"returnIntermediateSteps","type":"boolean","default":false,"description":"Whether or not the output should include intermediate steps the agent took"},{"displayName":"Automatically Passthrough Binary Images","name":"passthroughBinaryImages","type":"boolean","default":true,"description":"Whether or not binary images should be automatically passed through to the agent as image type 
messages"}]},{"displayName":"Text","name":"text","type":"string","required":true,"displayOptions":{"show":{"agent":["conversationalAgent"],"@version":[1]}},"default":"={{ $json.input }}"},{"displayName":"Text","name":"text","type":"string","required":true,"displayOptions":{"show":{"agent":["conversationalAgent"],"@version":[1.1]}},"default":"={{ $json.chat_input }}"},{"displayName":"Text","name":"text","type":"string","required":true,"displayOptions":{"show":{"agent":["conversationalAgent"],"@version":[1.2]}},"default":"={{ $json.chatInput }}"},{"displayName":"Options","name":"options","type":"collection","displayOptions":{"show":{"agent":["conversationalAgent"]}},"default":{},"placeholder":"Add Option","options":[{"displayName":"Human Message","name":"humanMessage","type":"string","default":"TOOLS\n------\nAssistant can ask the user to use tools to look up information that may be helpful in answering the users original question. The tools the human can use are:\n\n{tools}\n\n{format_instructions}\n\nUSER'S INPUT\n--------------------\nHere is the user's input (remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else):\n\n{{input}}","description":"The message that will provide the agent with a list of tools to use","typeOptions":{"rows":6}},{"displayName":"System Message","name":"systemMessage","type":"string","default":"Assistant is a large language model trained by OpenAI.\n\nAssistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n\nAssistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n\nOverall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. 
Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.","description":"The message that will be sent to the agent before the conversation starts","typeOptions":{"rows":6}},{"displayName":"Max Iterations","name":"maxIterations","type":"number","default":10,"description":"The maximum number of iterations the agent will run before stopping"},{"displayName":"Return Intermediate Steps","name":"returnIntermediateSteps","type":"boolean","default":false,"description":"Whether or not the output should include intermediate steps the agent took"}]},{"displayName":"Text","name":"text","type":"string","required":true,"displayOptions":{"show":{"agent":["openAiFunctionsAgent"],"@version":[1]}},"default":"={{ $json.input }}"},{"displayName":"Text","name":"text","type":"string","required":true,"displayOptions":{"show":{"agent":["openAiFunctionsAgent"],"@version":[1.1]}},"default":"={{ $json.chat_input }}"},{"displayName":"Text","name":"text","type":"string","required":true,"displayOptions":{"show":{"agent":["openAiFunctionsAgent"],"@version":[1.2]}},"default":"={{ $json.chatInput }}"},{"displayName":"Options","name":"options","type":"collection","displayOptions":{"show":{"agent":["openAiFunctionsAgent"]}},"default":{},"placeholder":"Add Option","options":[{"displayName":"System Message","name":"systemMessage","type":"string","default":"You are a helpful AI assistant.","description":"The message that will be sent to the agent before the conversation starts","typeOptions":{"rows":6}},{"displayName":"Max Iterations","name":"maxIterations","type":"number","default":10,"description":"The maximum number of iterations the agent will run before stopping"},{"displayName":"Return Intermediate Steps","name":"returnIntermediateSteps","type":"boolean","default":false,"description":"Whether or not the output should include intermediate steps the agent took"}]},{"displayName":"Text","name":"text","type":"string","required":true,"displayOptions":{"show":{"agent":["reActAgent"],"@version":[1]}},"default":"={{ $json.input }}"},{"displayName":"Text","name":"text","type":"string","required":true,"displayOptions":{"show":{"agent":["reActAgent"],"@version":[1.1]}},"default":"={{ $json.chat_input }}"},{"displayName":"Text","name":"text","type":"string","required":true,"displayOptions":{"show":{"agent":["reActAgent"],"@version":[1.2]}},"default":"={{ $json.chatInput }}"},{"displayName":"Options","name":"options","type":"collection","displayOptions":{"show":{"agent":["reActAgent"]}},"default":{},"placeholder":"Add Option","options":[{"displayName":"Human Message Template","name":"humanMessageTemplate","type":"string","default":"{input}\n\n{agent_scratchpad}","description":"String to use directly as the human message template","typeOptions":{"rows":6}},{"displayName":"Prefix Message","name":"prefix","type":"string","default":"Answer the following questions as best you can. You have access to the following tools:","description":"String to put before the list of tools","typeOptions":{"rows":6}},{"displayName":"Suffix Message for Chat Model","name":"suffixChat","type":"string","default":"Begin! 
Reminder to always use the exact characters `Final Answer` when responding.","description":"String to put after the list of tools that will be used if chat model is used","typeOptions":{"rows":6}},{"displayName":"Suffix Message for Regular Model","name":"suffix","type":"string","default":"Begin!\n\n\tQuestion: {input}\n\tThought:{agent_scratchpad}","description":"String to put after the list of tools that will be used if regular model is used","typeOptions":{"rows":6}},{"displayName":"Max Iterations","name":"maxIterations","type":"number","default":10,"description":"The maximum number of iterations the agent will run before stopping"},{"displayName":"Return Intermediate Steps","name":"returnIntermediateSteps","type":"boolean","default":false,"description":"Whether or not the output should include intermediate steps the agent took"}]},{"displayName":"Data Source","name":"dataSource","type":"options","displayOptions":{"show":{"agent":["sqlAgent"],"@version":[{"_cnd":{"lt":1.4}}]}},"default":"sqlite","description":"SQL database to connect to","options":[{"name":"MySQL","value":"mysql","description":"Connect to a MySQL database"},{"name":"Postgres","value":"postgres","description":"Connect to a Postgres database"},{"name":"SQLite","value":"sqlite","description":"Use SQLite by connecting a database file as binary input"}]},{"displayName":"Data Source","name":"dataSource","type":"options","displayOptions":{"show":{"agent":["sqlAgent"],"@version":[{"_cnd":{"gte":1.4}}]}},"default":"postgres","description":"SQL database to connect to","options":[{"name":"MySQL","value":"mysql","description":"Connect to a MySQL database"},{"name":"Postgres","value":"postgres","description":"Connect to a Postgres database"},{"name":"SQLite","value":"sqlite","description":"Use SQLite by connecting a database file as binary input"}]},{"displayName":"Credentials","name":"credentials","type":"credentials","default":""},{"displayName":"Pass the SQLite database into this node as binary data, e.g. 
by inserting a 'Read/Write Files from Disk' node beforehand","name":"sqLiteFileNotice","type":"notice","default":"","displayOptions":{"show":{"agent":["sqlAgent"],"dataSource":["sqlite"]}}},{"displayName":"Input Binary Field","name":"binaryPropertyName","type":"string","default":"data","required":true,"placeholder":"e.g data","hint":"The name of the input binary field containing the file to be extracted","displayOptions":{"show":{"agent":["sqlAgent"],"dataSource":["sqlite"]}}},{"displayName":"Prompt","name":"input","type":"string","displayOptions":{"show":{"agent":["sqlAgent"],"@version":[{"_cnd":{"lte":1.2}}]}},"default":"","required":true,"typeOptions":{"rows":5}},{"displayName":"Source for Prompt (User Message)","name":"promptType","type":"options","options":[{"name":"Connected Chat Trigger Node","value":"auto","description":"Looks for an input field called 'chatInput' that is coming from a directly connected Chat Trigger"},{"name":"Connected Guardrails Node","value":"guardrails","description":"Looks for an input field called 'guardrailsInput' that is coming from a directly connected Guardrails Node"},{"name":"Define below","value":"define","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"auto","displayOptions":{"hide":{"@version":[{"_cnd":{"lte":1.2}}]},"show":{"agent":["sqlAgent"]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.guardrailsInput }}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["guardrails"]}},"displayOptions":{"show":{"promptType":["guardrails"],"@version":[{"_cnd":{"gte":1.7}}],"agent":["sqlAgent"]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.chatInput }}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["auto"]}},"displayOptions":{"show":{"promptType":["auto"],"@version":[{"_cnd":{"gte":1.7}}],"agent":["sqlAgent"]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"","placeholder":"e.g. Hello, how can you help me?","typeOptions":{"rows":2},"displayOptions":{"show":{"promptType":["define"],"agent":["sqlAgent"]}}},{"displayName":"Options","name":"options","type":"collection","displayOptions":{"show":{"agent":["sqlAgent"]}},"default":{},"placeholder":"Add Option","options":[{"displayName":"Ignored Tables","name":"ignoredTables","type":"string","default":"","description":"Comma-separated list of tables to ignore from the database. If empty, no tables are ignored."},{"displayName":"Include Sample Rows","name":"includedSampleRows","type":"number","description":"Number of sample rows to include in the prompt to the agent. It helps the agent to understand the schema of the database but it also increases the amount of tokens used.","default":3},{"displayName":"Included Tables","name":"includedTables","type":"string","default":"","description":"Comma-separated list of tables to include in the database. 
If empty, all tables are included."},{"displayName":"Prefix Prompt","name":"prefixPrompt","type":"string","default":"You are an agent designed to interact with an SQL database.\nGiven an input question, create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.\nUnless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results using the LIMIT clause.\nYou can order the results by a relevant column to return the most interesting examples in the database.\nNever query for all the columns from a specific table, only ask for a the few relevant columns given the question.\nYou have access to tools for interacting with the database.\nOnly use the below tools. Only use the information returned by the below tools to construct your final answer.\nYou MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.\n\nDO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.\n\nIf the question does not seem related to the database, just return \"I don't know\" as the answer.","description":"Prefix prompt to use for the agent","typeOptions":{"rows":10}},{"displayName":"Suffix Prompt","name":"suffixPrompt","type":"string","default":"Begin!\nChat History:\n{chatHistory}\n\nQuestion: {input}\nThought: I should look at the tables in the database to see what I can query.\n{agent_scratchpad}","description":"Suffix prompt to use for the agent","typeOptions":{"rows":4}},{"displayName":"Limit","name":"topK","type":"number","default":10,"description":"The maximum number of results to return"}]},{"displayName":"Text","name":"text","type":"string","required":true,"displayOptions":{"show":{"agent":["planAndExecuteAgent"],"@version":[1]}},"default":"={{ $json.input }}"},{"displayName":"Text","name":"text","type":"string","required":true,"displayOptions":{"show":{"agent":["planAndExecuteAgent"],"@version":[1.1]}},"default":"={{ $json.chat_input }}"},{"displayName":"Text","name":"text","type":"string","required":true,"displayOptions":{"show":{"agent":["planAndExecuteAgent"],"@version":[1.2]}},"default":"={{ $json.chatInput }}"},{"displayName":"Options","name":"options","type":"collection","displayOptions":{"show":{"agent":["planAndExecuteAgent"]}},"default":{},"placeholder":"Add Option","options":[{"displayName":"Human Message Template","name":"humanMessageTemplate","type":"string","default":"Previous steps: {previous_steps}\n\nCurrent objective: {current_step}\n\n{agent_scratchpad}\n\nYou may extract and combine relevant data from your previous steps when responding to me.","description":"The message that will be sent to the agent during each step execution","typeOptions":{"rows":6}}]}]},
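The SQL agent's default Prefix Prompt and Suffix Prompt above are prompt templates: `{dialect}` and `{top_k}` are filled in before the prompt reaches the model, and the suffix carries `{chatHistory}`, `{input}` and `{agent_scratchpad}`. The toy substitution helper below is shown only to make the placeholders concrete (LangChain's real prompt templating does considerably more) and is applied to a paraphrased excerpt of the prefix.

    // Toy placeholder substitution; hypothetical helper, not LangChain's PromptTemplate.
    function fillTemplate(template: string, values: Record<string, string | number>): string {
      return template.replace(/\{(\w+)\}/g, (whole, key: string) =>
        key in values ? String(values[key]) : whole,
      );
    }

    // Paraphrased excerpt of the default prefix prompt above.
    const prefixExcerpt =
      'Create a syntactically correct {dialect} query, and limit it to at most {top_k} results using the LIMIT clause.';

    console.log(fillTemplate(prefixExcerpt, { dialect: 'postgres', top_k: 10 }));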
+ {"displayName":"AI Agent Tool","name":"agentTool","icon":"fa:robot","iconColor":"black","group":["transform"],"description":"Generates an action plan and executes it. Can use external tools.","codex":{"alias":["LangChain","Chat","Conversational","Plan and Execute","ReAct","Tools"],"categories":["AI"],"subcategories":{"AI":["Tools"],"Tools":["Other Tools"]}},"defaultVersion":3,"version":[2.2],"defaults":{"name":"AI Agent Tool","color":"#404040"},"inputs":"={{\n\t\t\t\t((hasOutputParser, needsFallback) => {\n\t\t\t\t\tfunction getInputs(hasMainInput, hasOutputParser, needsFallback) {\n const getInputData = (inputs) => {\n return inputs.map(({ type, filter, displayName, required }) => {\n const input = {\n type,\n displayName,\n required,\n maxConnections: [\"ai_languageModel\", \"ai_memory\", \"ai_outputParser\"].includes(type) ? 1 : void 0\n };\n if (filter) {\n input.filter = filter;\n }\n return input;\n });\n };\n let specialInputs = [\n {\n type: \"ai_languageModel\",\n displayName: \"Chat Model\",\n required: true,\n filter: {\n excludedNodes: [\n \"@n8n/n8n-nodes-langchain.lmCohere\",\n \"@n8n/n8n-nodes-langchain.lmOllama\",\n \"n8n/n8n-nodes-langchain.lmOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference\"\n ]\n }\n },\n {\n type: \"ai_languageModel\",\n displayName: \"Fallback Model\",\n required: true,\n filter: {\n excludedNodes: [\n \"@n8n/n8n-nodes-langchain.lmCohere\",\n \"@n8n/n8n-nodes-langchain.lmOllama\",\n \"n8n/n8n-nodes-langchain.lmOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference\"\n ]\n }\n },\n {\n displayName: \"Memory\",\n type: \"ai_memory\"\n },\n {\n displayName: \"Tool\",\n type: \"ai_tool\"\n },\n {\n displayName: \"Output Parser\",\n type: \"ai_outputParser\"\n }\n ];\n if (hasOutputParser === false) {\n specialInputs = specialInputs.filter((input) => input.type !== \"ai_outputParser\");\n }\n if (needsFallback === false) {\n specialInputs = specialInputs.filter((input) => input.displayName !== \"Fallback Model\");\n }\n const mainInputs = hasMainInput ? [\"main\"] : [];\n return [...mainInputs, ...getInputData(specialInputs)];\n};\n\t\t\t\t\treturn getInputs(false, hasOutputParser, needsFallback)\n\t\t\t\t})($parameter.hasOutputParser === undefined || $parameter.hasOutputParser === true, $parameter.needsFallback !== undefined && $parameter.needsFallback === true)\n\t\t\t}}","outputs":["ai_tool"],"properties":[{"displayName":"Description","name":"toolDescription","type":"string","default":"AI Agent that can call other tools","required":true,"typeOptions":{"rows":2},"description":"Explain to the LLM what this tool does, a good, specific description would allow LLMs to produce expected results much more often"},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"","placeholder":"e.g. 
Hello, how can you help me?","typeOptions":{"rows":2}},{"displayName":"Require Specific Output Format","name":"hasOutputParser","type":"boolean","default":false,"noDataExpression":true},{"displayName":"Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='ai_outputParser'>output parser</a> on the canvas to specify the output format you require","name":"notice","type":"notice","default":"","displayOptions":{"show":{"hasOutputParser":[true]}}},{"displayName":"Enable Fallback Model","name":"needsFallback","type":"boolean","default":false,"noDataExpression":true,"displayOptions":{"show":{"@version":[{"_cnd":{"gte":2.1}}]}}},{"displayName":"Connect an additional language model on the canvas to use it as a fallback if the main model fails","name":"fallbackNotice","type":"notice","default":"","displayOptions":{"show":{"needsFallback":[true]}}},{"displayName":"Options","name":"options","type":"collection","default":{},"placeholder":"Add Option","options":[{"displayName":"System Message","name":"systemMessage","type":"string","default":"You are a helpful assistant","description":"The message that will be sent to the agent before the conversation starts","typeOptions":{"rows":6}},{"displayName":"Max Iterations","name":"maxIterations","type":"number","default":10,"description":"The maximum number of iterations the agent will run before stopping"},{"displayName":"Return Intermediate Steps","name":"returnIntermediateSteps","type":"boolean","default":false,"description":"Whether or not the output should include intermediate steps the agent took"},{"displayName":"Automatically Passthrough Binary Images","name":"passthroughBinaryImages","type":"boolean","default":true,"description":"Whether or not binary images should be automatically passed through to the agent as image type messages"},{"displayName":"Batch Processing","name":"batching","type":"collection","placeholder":"Add Batch Processing Option","description":"Batch processing options for rate limiting","default":{},"options":[{"displayName":"Batch Size","name":"batchSize","default":1,"type":"number","description":"How many items to process in parallel. This is useful for rate limiting, but might impact the log output ordering."},{"displayName":"Delay Between Batches","name":"delayBetweenBatches","default":0,"type":"number","description":"Delay in milliseconds between batches. 
This is useful for rate limiting."}]}],"displayOptions":{"hide":{"@version":[{"_cnd":{"lt":2.2}}]}}},{"displayName":"Options","name":"options","type":"collection","default":{},"placeholder":"Add Option","options":[{"displayName":"System Message","name":"systemMessage","type":"string","default":"You are a helpful assistant","description":"The message that will be sent to the agent before the conversation starts","typeOptions":{"rows":6}},{"displayName":"Max Iterations","name":"maxIterations","type":"number","default":10,"description":"The maximum number of iterations the agent will run before stopping"},{"displayName":"Return Intermediate Steps","name":"returnIntermediateSteps","type":"boolean","default":false,"description":"Whether or not the output should include intermediate steps the agent took"},{"displayName":"Automatically Passthrough Binary Images","name":"passthroughBinaryImages","type":"boolean","default":true,"description":"Whether or not binary images should be automatically passed through to the agent as image type messages"},{"displayName":"Batch Processing","name":"batching","type":"collection","placeholder":"Add Batch Processing Option","description":"Batch processing options for rate limiting","default":{},"options":[{"displayName":"Batch Size","name":"batchSize","default":1,"type":"number","description":"How many items to process in parallel. This is useful for rate limiting, but might impact the log output ordering."},{"displayName":"Delay Between Batches","name":"delayBetweenBatches","default":0,"type":"number","description":"Delay in milliseconds between batches. This is useful for rate limiting."}]}],"displayOptions":{"show":{"@version":[{"_cnd":{"lt":2.2}}]}}}]},
+ {"displayName":"AI Agent Tool","name":"agentTool","icon":"fa:robot","iconColor":"black","group":["transform"],"description":"Generates an action plan and executes it. Can use external tools.","codex":{"alias":["LangChain","Chat","Conversational","Plan and Execute","ReAct","Tools"],"categories":["AI"],"subcategories":{"AI":["Tools"],"Tools":["Other Tools"]}},"defaultVersion":3,"version":[3],"defaults":{"name":"AI Agent Tool","color":"#404040"},"inputs":"={{\n\t\t\t\t((hasOutputParser, needsFallback) => {\n\t\t\t\t\tfunction getInputs(hasMainInput, hasOutputParser, needsFallback) {\n const getInputData = (inputs) => {\n return inputs.map(({ type, filter, displayName, required }) => {\n const input = {\n type,\n displayName,\n required,\n maxConnections: [\"ai_languageModel\", \"ai_memory\", \"ai_outputParser\"].includes(type) ? 1 : void 0\n };\n if (filter) {\n input.filter = filter;\n }\n return input;\n });\n };\n let specialInputs = [\n {\n type: \"ai_languageModel\",\n displayName: \"Chat Model\",\n required: true,\n filter: {\n excludedNodes: [\n \"@n8n/n8n-nodes-langchain.lmCohere\",\n \"@n8n/n8n-nodes-langchain.lmOllama\",\n \"n8n/n8n-nodes-langchain.lmOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference\"\n ]\n }\n },\n {\n type: \"ai_languageModel\",\n displayName: \"Fallback Model\",\n required: true,\n filter: {\n excludedNodes: [\n \"@n8n/n8n-nodes-langchain.lmCohere\",\n \"@n8n/n8n-nodes-langchain.lmOllama\",\n \"n8n/n8n-nodes-langchain.lmOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference\"\n ]\n }\n },\n {\n displayName: \"Memory\",\n type: \"ai_memory\"\n },\n {\n displayName: \"Tool\",\n type: \"ai_tool\"\n },\n {\n displayName: \"Output Parser\",\n type: \"ai_outputParser\"\n }\n ];\n if (hasOutputParser === false) {\n specialInputs = specialInputs.filter((input) => input.type !== \"ai_outputParser\");\n }\n if (needsFallback === false) {\n specialInputs = specialInputs.filter((input) => input.displayName !== \"Fallback Model\");\n }\n const mainInputs = hasMainInput ? [\"main\"] : [];\n return [...mainInputs, ...getInputData(specialInputs)];\n};\n\t\t\t\t\treturn getInputs(false, hasOutputParser, needsFallback)\n\t\t\t\t})($parameter.hasOutputParser === undefined || $parameter.hasOutputParser === true, $parameter.needsFallback !== undefined && $parameter.needsFallback === true)\n\t\t\t}}","outputs":["ai_tool"],"properties":[{"displayName":"Description","name":"toolDescription","type":"string","default":"AI Agent that can call other tools","required":true,"typeOptions":{"rows":2},"description":"Explain to the LLM what this tool does, a good, specific description would allow LLMs to produce expected results much more often"},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"","placeholder":"e.g. 
Hello, how can you help me?","typeOptions":{"rows":2}},{"displayName":"Require Specific Output Format","name":"hasOutputParser","type":"boolean","default":false,"noDataExpression":true},{"displayName":"Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='ai_outputParser'>output parser</a> on the canvas to specify the output format you require","name":"notice","type":"notice","default":"","displayOptions":{"show":{"hasOutputParser":[true]}}},{"displayName":"Enable Fallback Model","name":"needsFallback","type":"boolean","default":false,"noDataExpression":true},{"displayName":"Connect an additional language model on the canvas to use it as a fallback if the main model fails","name":"fallbackNotice","type":"notice","default":"","displayOptions":{"show":{"needsFallback":[true]}}},{"displayName":"Options","name":"options","type":"collection","default":{},"placeholder":"Add Option","options":[{"displayName":"System Message","name":"systemMessage","type":"string","default":"You are a helpful assistant","description":"The message that will be sent to the agent before the conversation starts","typeOptions":{"rows":6}},{"displayName":"Max Iterations","name":"maxIterations","type":"number","default":10,"description":"The maximum number of iterations the agent will run before stopping"},{"displayName":"Return Intermediate Steps","name":"returnIntermediateSteps","type":"boolean","default":false,"description":"Whether or not the output should include intermediate steps the agent took"},{"displayName":"Automatically Passthrough Binary Images","name":"passthroughBinaryImages","type":"boolean","default":true,"description":"Whether or not binary images should be automatically passed through to the agent as image type messages"},{"displayName":"Enable Streaming","name":"enableStreaming","type":"boolean","default":true,"description":"Whether this agent will stream the response in real-time as it generates text"},{"displayName":"Batch Processing","name":"batching","type":"collection","placeholder":"Add Batch Processing Option","description":"Batch processing options for rate limiting","default":{},"options":[{"displayName":"Batch Size","name":"batchSize","default":1,"type":"number","description":"How many items to process in parallel. This is useful for rate limiting, but might impact the log output ordering."},{"displayName":"Delay Between Batches","name":"delayBetweenBatches","default":0,"type":"number","description":"Delay in milliseconds between batches. This is useful for rate limiting."}]},{"displayName":"Max Tokens To Read From Memory","name":"maxTokensFromMemory","type":"hidden","default":0,"description":"The maximum number of tokens to read from the chat memory history. Set to 0 to read all history."}]}]},
  {"displayName":"OpenAI Assistant","name":"openAiAssistant","hidden":true,"icon":"fa:robot","group":["transform"],"version":[1,1.1],"description":"Utilizes Assistant API from Open AI.","subtitle":"Open AI Assistant","defaults":{"name":"OpenAI Assistant","color":"#404040"},"codex":{"alias":["LangChain"],"categories":["AI"],"subcategories":{"AI":["Agents","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.openaiassistant/"}]}},"inputs":[{"type":"main"},{"type":"ai_tool","displayName":"Tools"}],"outputs":["main"],"credentials":[{"name":"openAiApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $parameter.options?.baseURL?.split(\"/\").slice(0,-1).join(\"/\") || \"https://api.openai.com\" }}"},"properties":[{"displayName":"Operation","name":"mode","type":"options","noDataExpression":true,"default":"existing","options":[{"name":"Use New Assistant","value":"new"},{"name":"Use Existing Assistant","value":"existing"}]},{"displayName":"Name","name":"name","type":"string","default":"","required":true,"displayOptions":{"show":{"/mode":["new"]}}},{"displayName":"Instructions","name":"instructions","type":"string","description":"How the Assistant and model should behave or respond","default":"","typeOptions":{"rows":5},"displayOptions":{"show":{"/mode":["new"]}}},{"displayName":"Model","name":"model","type":"options","description":"The model which will be used to power the assistant. <a href=\"https://beta.openai.com/docs/models/overview\">Learn more</a>. The Retrieval tool requires gpt-3.5-turbo-1106 and gpt-4-1106-preview models.","required":true,"displayOptions":{"show":{"/mode":["new"]}},"typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"={{ $parameter.options?.baseURL?.split(\"/\").slice(-1).pop() || \"v1\" }}/models"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"data"}},{"type":"filter","properties":{"pass":"={{ $responseItem.id.startsWith('gpt-') && !$responseItem.id.includes('instruct') }}"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.id}}","value":"={{$responseItem.id}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":"gpt-3.5-turbo-1106"},{"displayName":"Assistant","name":"assistantId","type":"options","noDataExpression":true,"displayOptions":{"show":{"/mode":["existing"]}},"description":"The assistant to use. 
<a href=\"https://beta.openai.com/docs/assistants/overview\">Learn more</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","headers":{"OpenAI-Beta":"assistants=v1"},"url":"={{ $parameter.options?.baseURL?.split(\"/\").slice(-1).pop() || \"v1\" }}/assistants"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"data"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.name}}","value":"={{$responseItem.id}}","description":"={{$responseItem.model}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"assistant"}},"required":true,"default":""},{"displayName":"Text","name":"text","type":"string","required":true,"default":"={{ $json.chat_input }}","displayOptions":{"show":{"@version":[1]}}},{"displayName":"Text","name":"text","type":"string","required":true,"default":"={{ $json.chatInput }}","displayOptions":{"show":{"@version":[1.1]}}},{"displayName":"OpenAI Tools","name":"nativeTools","type":"multiOptions","default":[],"options":[{"name":"Code Interpreter","value":"code_interpreter"},{"name":"Knowledge Retrieval","value":"retrieval"}]},{"displayName":"Connect your own custom tools to this node on the canvas","name":"noticeTools","type":"notice","default":""},{"displayName":"Upload files for retrieval using the <a href=\"https://platform.openai.com/playground\" target=\"_blank\">OpenAI website<a/>","name":"noticeTools","type":"notice","typeOptions":{"noticeTheme":"info"},"displayOptions":{"show":{"/nativeTools":["retrieval"]}},"default":""},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Base URL","name":"baseURL","default":"https://api.openai.com/v1","description":"Override the default base URL for the API","type":"string"},{"displayName":"Max Retries","name":"maxRetries","default":2,"description":"Maximum number of retries to attempt","type":"number"},{"displayName":"Timeout","name":"timeout","default":10000,"description":"Maximum amount of time a request is allowed to take in milliseconds","type":"number"}]}]},
  {"displayName":"Summarization Chain","name":"chainSummarization","icon":"fa:link","iconColor":"black","group":["transform"],"description":"Transforms text into a concise summary","codex":{"alias":["LangChain"],"categories":["AI"],"subcategories":{"AI":["Chains","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.chainsummarization/"}]}},"defaultVersion":2.1,"version":[2,2.1],"defaults":{"name":"Summarization Chain","color":"#909298"},"inputs":"={{ ((parameter) => { function getInputs(parameters) {\n const chunkingMode = parameters?.chunkingMode;\n const operationMode = parameters?.operationMode;\n const inputs = [\n { displayName: \"\", type: \"main\" },\n {\n displayName: \"Model\",\n maxConnections: 1,\n type: \"ai_languageModel\",\n required: true\n }\n ];\n if (operationMode === \"documentLoader\") {\n inputs.push({\n displayName: \"Document\",\n type: \"ai_document\",\n required: true,\n maxConnections: 1\n });\n return inputs;\n }\n if (chunkingMode === \"advanced\") {\n inputs.push({\n displayName: \"Text Splitter\",\n type: \"ai_textSplitter\",\n required: false,\n maxConnections: 1\n });\n return inputs;\n }\n return inputs;\n}; return getInputs(parameter) })($parameter) }}","outputs":["main"],"credentials":[],"properties":[{"displayName":"Save time with an <a href=\"/templates/1951\" target=\"_blank\">example</a> of how this node works","name":"notice","type":"notice","default":""},{"displayName":"Data to Summarize","name":"operationMode","noDataExpression":true,"type":"options","description":"How to pass data into the summarization chain","default":"nodeInputJson","options":[{"name":"Use Node Input (JSON)","value":"nodeInputJson","description":"Summarize the JSON data coming into this node from the previous one"},{"name":"Use Node Input (Binary)","value":"nodeInputBinary","description":"Summarize the binary data coming into this node from the previous one"},{"name":"Use Document Loader","value":"documentLoader","description":"Use a loader sub-node with more configuration options"}]},{"displayName":"Chunking Strategy","name":"chunkingMode","noDataExpression":true,"type":"options","description":"Chunk splitting strategy","default":"simple","options":[{"name":"Simple (Define Below)","value":"simple"},{"name":"Advanced","value":"advanced","description":"Use a splitter sub-node with more configuration options"}],"displayOptions":{"show":{"/operationMode":["nodeInputJson","nodeInputBinary"]}}},{"displayName":"Characters Per Chunk","name":"chunkSize","description":"Controls the max size (in terms of number of characters) of the final document chunk","type":"number","default":1000,"displayOptions":{"show":{"/chunkingMode":["simple"]}}},{"displayName":"Chunk Overlap (Characters)","name":"chunkOverlap","type":"number","description":"Specifies how much characters overlap there should be between chunks","default":200,"displayOptions":{"show":{"/chunkingMode":["simple"]}}},{"displayName":"Options","name":"options","type":"collection","default":{},"placeholder":"Add Option","options":[{"displayName":"Input Data Field Name","name":"binaryDataKey","type":"string","default":"data","description":"The name of the field in the agent or chain’s input that contains the binary file to be processed","displayOptions":{"show":{"/operationMode":["nodeInputBinary"]}}},{"displayName":"Summarization Method and 
Prompts","name":"summarizationMethodAndPrompts","type":"fixedCollection","default":{"values":{"summarizationMethod":"map_reduce","prompt":"Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:","combineMapPrompt":"Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:"}},"placeholder":"Add Option","typeOptions":{},"options":[{"name":"values","displayName":"Values","values":[{"displayName":"Summarization Method","name":"summarizationMethod","type":"options","description":"The type of summarization to run","default":"map_reduce","options":[{"name":"Map Reduce (Recommended)","value":"map_reduce","description":"Summarize each document (or chunk) individually, then summarize those summaries"},{"name":"Refine","value":"refine","description":"Summarize the first document (or chunk). Then update that summary based on the next document (or chunk), and repeat."},{"name":"Stuff","value":"stuff","description":"Pass all documents (or chunks) at once. Ideal for small datasets."}]},{"displayName":"Individual Summary Prompt","name":"combineMapPrompt","type":"string","hint":"The prompt to summarize an individual document (or chunk)","displayOptions":{"hide":{"/options.summarizationMethodAndPrompts.values.summarizationMethod":["stuff","refine"]}},"default":"Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:","typeOptions":{"rows":9}},{"displayName":"Final Prompt to Combine","name":"prompt","type":"string","default":"Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:","hint":"The prompt to combine individual summaries","displayOptions":{"hide":{"/options.summarizationMethodAndPrompts.values.summarizationMethod":["stuff","refine"]}},"typeOptions":{"rows":9}},{"displayName":"Prompt","name":"prompt","type":"string","default":"Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:","displayOptions":{"hide":{"/options.summarizationMethodAndPrompts.values.summarizationMethod":["refine","map_reduce"]}},"typeOptions":{"rows":9}},{"displayName":"Subsequent (Refine) Prompt","name":"refinePrompt","type":"string","displayOptions":{"hide":{"/options.summarizationMethodAndPrompts.values.summarizationMethod":["stuff","map_reduce"]}},"default":"Your job is to produce a final summary\nWe have provided an existing summary up to a certain point: \"{existing_answer}\"\nWe have the opportunity to refine the existing summary\n(only if needed) with some more context below.\n------------\n\"{text}\"\n------------\n\nGiven the new context, refine the original summary\nIf the context isn't useful, return the original summary.\n\nREFINED SUMMARY:","hint":"The prompt to refine the summary based on the next document (or chunk)","typeOptions":{"rows":9}},{"displayName":"Initial Prompt","name":"refineQuestionPrompt","type":"string","displayOptions":{"hide":{"/options.summarizationMethodAndPrompts.values.summarizationMethod":["stuff","map_reduce"]}},"default":"Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:","hint":"The prompt for the first document (or chunk)","typeOptions":{"rows":9}}]}]},{"displayName":"Batch Processing","name":"batching","type":"collection","placeholder":"Add Batch Processing Option","description":"Batch processing options for rate limiting","default":{},"options":[{"displayName":"Batch Size","name":"batchSize","default":5,"type":"number","description":"How many items to process in parallel. 
This is useful for rate limiting, but might impact the log output ordering."},{"displayName":"Delay Between Batches","name":"delayBetweenBatches","default":0,"type":"number","description":"Delay in milliseconds between batches. This is useful for rate limiting."}],"displayOptions":{"show":{"@version":[{"_cnd":{"gte":2.1}}]}}}]}]},
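The 2.x Summarization Chain computes its connections from `operationMode` and `chunkingMode` with the `inputs` expression above. A standalone TypeScript sketch of that logic (parameter and input shapes simplified for illustration):

```typescript
// Sketch of the getInputs() expression in the Summarization Chain (v2/2.1) definition.
type ChainInput = { displayName: string; type: string; required?: boolean; maxConnections?: number };

function getSummarizationInputs(parameters?: {
  operationMode?: string;
  chunkingMode?: string;
}): ChainInput[] {
  const inputs: ChainInput[] = [
    { displayName: '', type: 'main' },
    { displayName: 'Model', type: 'ai_languageModel', required: true, maxConnections: 1 },
  ];

  if (parameters?.operationMode === 'documentLoader') {
    // Document Loader mode hands ingestion to an ai_document sub-node.
    inputs.push({ displayName: 'Document', type: 'ai_document', required: true, maxConnections: 1 });
    return inputs;
  }
  if (parameters?.chunkingMode === 'advanced') {
    // Advanced chunking delegates splitting to an ai_textSplitter sub-node.
    inputs.push({ displayName: 'Text Splitter', type: 'ai_textSplitter', required: false, maxConnections: 1 });
  }
  return inputs;
}
```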
  {"displayName":"Summarization Chain","name":"chainSummarization","icon":"fa:link","iconColor":"black","group":["transform"],"description":"Transforms text into a concise summary","codex":{"alias":["LangChain"],"categories":["AI"],"subcategories":{"AI":["Chains","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.chainsummarization/"}]}},"defaultVersion":2.1,"version":1,"defaults":{"name":"Summarization Chain","color":"#909298"},"inputs":["main",{"displayName":"Model","maxConnections":1,"type":"ai_languageModel","required":true},{"displayName":"Document","maxConnections":1,"type":"ai_document","required":true}],"outputs":["main"],"credentials":[],"properties":[{"displayName":"Save time with an <a href=\"/templates/1951\" target=\"_blank\">example</a> of how this node works","name":"notice","type":"notice","default":""},{"displayName":"Type","name":"type","type":"options","description":"The type of summarization to run","default":"map_reduce","options":[{"name":"Map Reduce (Recommended)","value":"map_reduce","description":"Summarize each document (or chunk) individually, then summarize those summaries"},{"name":"Refine","value":"refine","description":"Summarize the first document (or chunk). Then update that summary based on the next document (or chunk), and repeat."},{"name":"Stuff","value":"stuff","description":"Pass all documents (or chunks) at once. Ideal for small datasets."}]},{"displayName":"Options","name":"options","type":"collection","default":{},"placeholder":"Add Option","options":[{"displayName":"Final Prompt to Combine","name":"combineMapPrompt","type":"string","hint":"The prompt to combine individual summaries","displayOptions":{"show":{"/type":["map_reduce"]}},"default":"Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:","typeOptions":{"rows":6}},{"displayName":"Individual Summary Prompt","name":"prompt","type":"string","default":"Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:","hint":"The prompt to summarize an individual document (or chunk)","displayOptions":{"show":{"/type":["map_reduce"]}},"typeOptions":{"rows":6}},{"displayName":"Prompt","name":"prompt","type":"string","default":"Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:","displayOptions":{"show":{"/type":["stuff"]}},"typeOptions":{"rows":6}},{"displayName":"Subsequent (Refine) Prompt","name":"refinePrompt","type":"string","displayOptions":{"show":{"/type":["refine"]}},"default":"Your job is to produce a final summary\nWe have provided an existing summary up to a certain point: \"{existing_answer}\"\nWe have the opportunity to refine the existing summary\n(only if needed) with some more context below.\n------------\n\"{text}\"\n------------\n\nGiven the new context, refine the original summary\nIf the context isn't useful, return the original summary.\n\nREFINED SUMMARY:","hint":"The prompt to refine the summary based on the next document (or chunk)","typeOptions":{"rows":6}},{"displayName":"Initial Prompt","name":"refineQuestionPrompt","type":"string","displayOptions":{"show":{"/type":["refine"]}},"default":"Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:","hint":"The prompt for the first document (or chunk)","typeOptions":{"rows":6}}]}]},
- {"displayName":"Basic LLM Chain","name":"chainLlm","icon":"fa:link","iconColor":"black","group":["transform"],"version":[1,1.1,1.2,1.3,1.4,1.5,1.6,1.7],"description":"A simple chain to prompt a large language model","defaults":{"name":"Basic LLM Chain","color":"#909298"},"codex":{"alias":["LangChain"],"categories":["AI"],"subcategories":{"AI":["Chains","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.chainllm/"}]}},"inputs":"={{ ((parameter) => { function getInputs(parameters) {\n const inputs = [\n { displayName: \"\", type: \"main\" },\n {\n displayName: \"Model\",\n maxConnections: 1,\n type: \"ai_languageModel\",\n required: true\n }\n ];\n const needsFallback = parameters?.needsFallback;\n if (needsFallback === true) {\n inputs.push({\n displayName: \"Fallback Model\",\n maxConnections: 1,\n type: \"ai_languageModel\",\n required: true\n });\n }\n const hasOutputParser = parameters?.hasOutputParser;\n if (hasOutputParser === void 0 || hasOutputParser === true) {\n inputs.push({\n displayName: \"Output Parser\",\n type: \"ai_outputParser\",\n maxConnections: 1,\n required: false\n });\n }\n return inputs;\n}; return getInputs(parameter) })($parameter) }}","outputs":["main"],"credentials":[],"properties":[{"displayName":"Save time with an <a href=\"/templates/1978\" target=\"_blank\">example</a> of how this node works","name":"notice","type":"notice","default":""},{"displayName":"Prompt","name":"prompt","type":"string","required":true,"default":"={{ $json.input }}","displayOptions":{"show":{"@version":[1]}}},{"displayName":"Prompt","name":"prompt","type":"string","required":true,"default":"={{ $json.chat_input }}","displayOptions":{"show":{"@version":[1.1,1.2]}}},{"displayName":"Prompt","name":"prompt","type":"string","required":true,"default":"={{ $json.chatInput }}","displayOptions":{"show":{"@version":[1.3]}}},{"displayName":"Source for Prompt (User Message)","name":"promptType","type":"options","options":[{"name":"Connected Chat Trigger Node","value":"auto","description":"Looks for an input field called 'chatInput' that is coming from a directly connected Chat Trigger"},{"name":"Connected Guardrails Node","value":"guardrails","description":"Looks for an input field called 'guardrailsInput' that is coming from a directly connected Guardrails Node"},{"name":"Define below","value":"define","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"auto","displayOptions":{"hide":{"@version":[1,1.1,1.2,1.3]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.guardrailsInput }}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["guardrails"]}},"displayOptions":{"show":{"promptType":["guardrails"],"@version":[{"_cnd":{"gte":1.5}}]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.chatInput }}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["auto"]}},"displayOptions":{"show":{"promptType":["auto"],"@version":[{"_cnd":{"gte":1.5}}]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"","placeholder":"e.g. 
Hello, how can you help me?","typeOptions":{"rows":2},"displayOptions":{"show":{"promptType":["define"]}}},{"displayName":"Require Specific Output Format","name":"hasOutputParser","type":"boolean","default":false,"noDataExpression":true,"displayOptions":{"hide":{"@version":[1,1.1,1.3]}}},{"displayName":"Enable Fallback Model","name":"needsFallback","type":"boolean","default":false,"noDataExpression":true,"displayOptions":{"hide":{"@version":[1,1.1,1.3]}}},{"displayName":"Chat Messages (if Using a Chat Model)","name":"messages","type":"fixedCollection","typeOptions":{"multipleValues":true},"default":{},"placeholder":"Add prompt","options":[{"name":"messageValues","displayName":"Prompt","values":[{"displayName":"Type Name or ID","name":"type","type":"options","options":[{"name":"AI","value":"AIMessagePromptTemplate"},{"name":"System","value":"SystemMessagePromptTemplate"},{"name":"User","value":"HumanMessagePromptTemplate"}],"default":"SystemMessagePromptTemplate"},{"displayName":"Message Type","name":"messageType","type":"options","displayOptions":{"show":{"type":["HumanMessagePromptTemplate"]}},"options":[{"name":"Text","value":"text","description":"Simple text message"},{"name":"Image (Binary)","value":"imageBinary","description":"Process the binary input from the previous node"},{"name":"Image (URL)","value":"imageUrl","description":"Process the image from the specified URL"}],"default":"text"},{"displayName":"Image Data Field Name","name":"binaryImageDataKey","type":"string","default":"data","required":true,"description":"The name of the field in the chain's input that contains the binary image file to be processed","displayOptions":{"show":{"messageType":["imageBinary"]}}},{"displayName":"Image URL","name":"imageUrl","type":"string","default":"","required":true,"description":"URL to the image to be processed","displayOptions":{"show":{"messageType":["imageUrl"]}}},{"displayName":"Image Details","description":"Control how the model processes the image and generates its textual understanding","name":"imageDetail","type":"options","displayOptions":{"show":{"type":["HumanMessagePromptTemplate"],"messageType":["imageBinary","imageUrl"]}},"options":[{"name":"Auto","value":"auto","description":"Model will use the auto setting which will look at the image input size and decide if it should use the low or high setting"},{"name":"Low","value":"low","description":"The model will receive a low-res 512px x 512px version of the image, and represent the image with a budget of 65 tokens. This allows the API to return faster responses and consume fewer input tokens for use cases that do not require high detail."},{"name":"High","value":"high","description":"Allows the model to see the low res image and then creates detailed crops of input images as 512px squares based on the input image size. Each of the detailed crops uses twice the token budget (65 tokens) for a total of 129 tokens."}],"default":"auto"},{"displayName":"Message","name":"message","type":"string","required":true,"displayOptions":{"hide":{"messageType":["imageBinary","imageUrl"]}},"default":""}]}]},{"displayName":"Batch Processing","name":"batching","type":"collection","placeholder":"Add Batch Processing Option","description":"Batch processing options for rate limiting","default":{},"options":[{"displayName":"Batch Size","name":"batchSize","default":5,"type":"number","description":"How many items to process in parallel. 
This is useful for rate limiting, but might impact the log output ordering."},{"displayName":"Delay Between Batches","name":"delayBetweenBatches","default":0,"type":"number","description":"Delay in milliseconds between batches. This is useful for rate limiting."}],"displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.7}}]}}},{"displayName":"Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='ai_outputParser'>output parser</a> on the canvas to specify the output format you require","name":"notice","type":"notice","default":"","displayOptions":{"show":{"hasOutputParser":[true]}}},{"displayName":"Connect an additional language model on the canvas to use it as a fallback if the main model fails","name":"fallbackNotice","type":"notice","default":"","displayOptions":{"show":{"needsFallback":[true]}}}]},
- {"displayName":"Question and Answer Chain","name":"chainRetrievalQa","icon":"fa:link","iconColor":"black","group":["transform"],"version":[1,1.1,1.2,1.3,1.4,1.5,1.6],"description":"Answer questions about retrieved documents","defaults":{"name":"Question and Answer Chain","color":"#909298"},"codex":{"alias":["LangChain"],"categories":["AI"],"subcategories":{"AI":["Chains","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.chainretrievalqa/"}]}},"inputs":["main",{"displayName":"Model","maxConnections":1,"type":"ai_languageModel","required":true},{"displayName":"Retriever","maxConnections":1,"type":"ai_retriever","required":true}],"outputs":["main"],"credentials":[],"properties":[{"displayName":"Save time with an <a href=\"/templates/1960\" target=\"_blank\">example</a> of how this node works","name":"notice","type":"notice","default":""},{"displayName":"Query","name":"query","type":"string","required":true,"default":"={{ $json.input }}","displayOptions":{"show":{"@version":[1]}}},{"displayName":"Query","name":"query","type":"string","required":true,"default":"={{ $json.chat_input }}","displayOptions":{"show":{"@version":[1.1]}}},{"displayName":"Query","name":"query","type":"string","required":true,"default":"={{ $json.chatInput }}","displayOptions":{"show":{"@version":[1.2]}}},{"displayName":"Source for Prompt (User Message)","name":"promptType","type":"options","options":[{"name":"Connected Chat Trigger Node","value":"auto","description":"Looks for an input field called 'chatInput' that is coming from a directly connected Chat Trigger"},{"name":"Connected Guardrails Node","value":"guardrails","description":"Looks for an input field called 'guardrailsInput' that is coming from a directly connected Guardrails Node"},{"name":"Define below","value":"define","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"auto","displayOptions":{"hide":{"@version":[{"_cnd":{"lte":1.2}}]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.guardrailsInput }}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["guardrails"]}},"displayOptions":{"show":{"promptType":["guardrails"],"@version":[{"_cnd":{"gte":1.4}}]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.chatInput }}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["auto"]}},"displayOptions":{"show":{"promptType":["auto"],"@version":[{"_cnd":{"gte":1.4}}]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"","placeholder":"e.g. Hello, how can you help me?","typeOptions":{"rows":2},"displayOptions":{"show":{"promptType":["define"]}}},{"displayName":"Options","name":"options","type":"collection","default":{},"placeholder":"Add Option","options":[{"displayName":"System Prompt Template","name":"systemPromptTemplate","type":"string","default":"You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question.\nIf you don't know the answer, just say that you don't know, don't try to make up an answer.\n----------------\nContext: {context}","typeOptions":{"rows":6},"description":"Template string used for the system prompt. This should include the variable `{context}` for the provided context. 
For text completion models, you should also include the variable `{question}` for the user’s query.","displayOptions":{"show":{"@version":[{"_cnd":{"lt":1.5}}]}}},{"displayName":"System Prompt Template","name":"systemPromptTemplate","type":"string","default":"You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question.\nIf you don't know the answer, just say that you don't know, don't try to make up an answer.\n----------------\nContext: {context}","typeOptions":{"rows":6},"description":"Template string used for the system prompt. This should include the variable `{context}` for the provided context. For text completion models, you should also include the variable `{input}` for the user’s query.","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.5}}]}}},{"displayName":"Batch Processing","name":"batching","type":"collection","placeholder":"Add Batch Processing Option","description":"Batch processing options for rate limiting","default":{},"options":[{"displayName":"Batch Size","name":"batchSize","default":5,"type":"number","description":"How many items to process in parallel. This is useful for rate limiting, but might impact the log output ordering."},{"displayName":"Delay Between Batches","name":"delayBetweenBatches","default":0,"type":"number","description":"Delay in milliseconds between batches. This is useful for rate limiting."}],"displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.6}}]}}}]}]},
+ {"displayName":"Basic LLM Chain","name":"chainLlm","icon":"fa:link","iconColor":"black","group":["transform"],"version":[1,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8],"description":"A simple chain to prompt a large language model","defaults":{"name":"Basic LLM Chain","color":"#909298"},"codex":{"alias":["LangChain"],"categories":["AI"],"subcategories":{"AI":["Chains","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.chainllm/"}]}},"inputs":"={{ ((parameter) => { function getInputs(parameters) {\n const inputs = [\n { displayName: \"\", type: \"main\" },\n {\n displayName: \"Model\",\n maxConnections: 1,\n type: \"ai_languageModel\",\n required: true\n }\n ];\n const needsFallback = parameters?.needsFallback;\n if (needsFallback === true) {\n inputs.push({\n displayName: \"Fallback Model\",\n maxConnections: 1,\n type: \"ai_languageModel\",\n required: true\n });\n }\n const hasOutputParser = parameters?.hasOutputParser;\n if (hasOutputParser === void 0 || hasOutputParser === true) {\n inputs.push({\n displayName: \"Output Parser\",\n type: \"ai_outputParser\",\n maxConnections: 1,\n required: false\n });\n }\n return inputs;\n}; return getInputs(parameter) })($parameter) }}","outputs":["main"],"credentials":[],"properties":[{"displayName":"Save time with an <a href=\"/templates/1978\" target=\"_blank\">example</a> of how this node works","name":"notice","type":"notice","default":""},{"displayName":"Prompt","name":"prompt","type":"string","required":true,"default":"={{ $json.input }}","displayOptions":{"show":{"@version":[1]}}},{"displayName":"Prompt","name":"prompt","type":"string","required":true,"default":"={{ $json.chat_input }}","displayOptions":{"show":{"@version":[1.1,1.2]}}},{"displayName":"Prompt","name":"prompt","type":"string","required":true,"default":"={{ $json.chatInput }}","displayOptions":{"show":{"@version":[1.3]}}},{"displayName":"Source for Prompt (User Message)","name":"promptType","type":"options","options":[{"name":"Connected Chat Trigger Node","value":"auto","description":"Looks for an input field called 'chatInput' that is coming from a directly connected Chat Trigger"},{"name":"Connected Guardrails Node","value":"guardrails","description":"Looks for an input field called 'guardrailsInput' that is coming from a directly connected Guardrails Node"},{"name":"Define below","value":"define","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"auto","displayOptions":{"hide":{"@version":[{"_cnd":{"lte":1.3}},{"_cnd":{"gte":1.8}}]}}},{"displayName":"Source for Prompt (User Message)","name":"promptType","type":"options","options":[{"name":"Connected Chat Trigger Node","value":"auto","description":"Looks for an input field called 'chatInput' that is coming from a directly connected Chat Trigger"},{"name":"Define below","value":"define","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"auto","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.8}}]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.guardrailsInput }}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["guardrails"]}},"displayOptions":{"show":{"promptType":["guardrails"],"@version":[{"_cnd":{"gte":1.5}}]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.chatInput 
}}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["auto"]}},"displayOptions":{"show":{"promptType":["auto"],"@version":[{"_cnd":{"gte":1.5}}]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"","placeholder":"e.g. Hello, how can you help me?","typeOptions":{"rows":2},"displayOptions":{"show":{"promptType":["define"]}}},{"displayName":"Require Specific Output Format","name":"hasOutputParser","type":"boolean","default":false,"noDataExpression":true,"displayOptions":{"hide":{"@version":[1,1.1,1.3]}}},{"displayName":"Enable Fallback Model","name":"needsFallback","type":"boolean","default":false,"noDataExpression":true,"displayOptions":{"hide":{"@version":[1,1.1,1.3]}}},{"displayName":"Chat Messages (if Using a Chat Model)","name":"messages","type":"fixedCollection","typeOptions":{"multipleValues":true},"default":{},"placeholder":"Add prompt","options":[{"name":"messageValues","displayName":"Prompt","values":[{"displayName":"Type Name or ID","name":"type","type":"options","options":[{"name":"AI","value":"AIMessagePromptTemplate"},{"name":"System","value":"SystemMessagePromptTemplate"},{"name":"User","value":"HumanMessagePromptTemplate"}],"default":"SystemMessagePromptTemplate"},{"displayName":"Message Type","name":"messageType","type":"options","displayOptions":{"show":{"type":["HumanMessagePromptTemplate"]}},"options":[{"name":"Text","value":"text","description":"Simple text message"},{"name":"Image (Binary)","value":"imageBinary","description":"Process the binary input from the previous node"},{"name":"Image (URL)","value":"imageUrl","description":"Process the image from the specified URL"}],"default":"text"},{"displayName":"Image Data Field Name","name":"binaryImageDataKey","type":"string","default":"data","required":true,"description":"The name of the field in the chain's input that contains the binary image file to be processed","displayOptions":{"show":{"messageType":["imageBinary"]}}},{"displayName":"Image URL","name":"imageUrl","type":"string","default":"","required":true,"description":"URL to the image to be processed","displayOptions":{"show":{"messageType":["imageUrl"]}}},{"displayName":"Image Details","description":"Control how the model processes the image and generates its textual understanding","name":"imageDetail","type":"options","displayOptions":{"show":{"type":["HumanMessagePromptTemplate"],"messageType":["imageBinary","imageUrl"]}},"options":[{"name":"Auto","value":"auto","description":"Model will use the auto setting which will look at the image input size and decide if it should use the low or high setting"},{"name":"Low","value":"low","description":"The model will receive a low-res 512px x 512px version of the image, and represent the image with a budget of 65 tokens. This allows the API to return faster responses and consume fewer input tokens for use cases that do not require high detail."},{"name":"High","value":"high","description":"Allows the model to see the low res image and then creates detailed crops of input images as 512px squares based on the input image size. 
Each of the detailed crops uses twice the token budget (65 tokens) for a total of 129 tokens."}],"default":"auto"},{"displayName":"Message","name":"message","type":"string","required":true,"displayOptions":{"hide":{"messageType":["imageBinary","imageUrl"]}},"default":""}]}]},{"displayName":"Batch Processing","name":"batching","type":"collection","placeholder":"Add Batch Processing Option","description":"Batch processing options for rate limiting","default":{},"options":[{"displayName":"Batch Size","name":"batchSize","default":5,"type":"number","description":"How many items to process in parallel. This is useful for rate limiting, but might impact the log output ordering."},{"displayName":"Delay Between Batches","name":"delayBetweenBatches","default":0,"type":"number","description":"Delay in milliseconds between batches. This is useful for rate limiting."}],"displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.7}}]}}},{"displayName":"Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='ai_outputParser'>output parser</a> on the canvas to specify the output format you require","name":"notice","type":"notice","default":"","displayOptions":{"show":{"hasOutputParser":[true]}}},{"displayName":"Connect an additional language model on the canvas to use it as a fallback if the main model fails","name":"fallbackNotice","type":"notice","default":"","displayOptions":{"show":{"needsFallback":[true]}}}]},
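The Basic LLM Chain's `inputs` expression adds connections based on `needsFallback` and `hasOutputParser`. A standalone TypeScript sketch of the same logic (shapes simplified for illustration):

```typescript
// Sketch of the getInputs() expression in the Basic LLM Chain definition above.
type LlmChainInput = { displayName: string; type: string; required?: boolean; maxConnections?: number };

function getLlmChainInputs(parameters?: {
  needsFallback?: boolean;
  hasOutputParser?: boolean;
}): LlmChainInput[] {
  const inputs: LlmChainInput[] = [
    { displayName: '', type: 'main' },
    { displayName: 'Model', type: 'ai_languageModel', required: true, maxConnections: 1 },
  ];
  if (parameters?.needsFallback === true) {
    inputs.push({ displayName: 'Fallback Model', type: 'ai_languageModel', required: true, maxConnections: 1 });
  }
  // The parser input is shown by default (undefined) and only removed when explicitly disabled.
  if (parameters?.hasOutputParser === undefined || parameters?.hasOutputParser === true) {
    inputs.push({ displayName: 'Output Parser', type: 'ai_outputParser', required: false, maxConnections: 1 });
  }
  return inputs;
}
```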
+ {"displayName":"Question and Answer Chain","name":"chainRetrievalQa","icon":"fa:link","iconColor":"black","group":["transform"],"version":[1,1.1,1.2,1.3,1.4,1.5,1.6,1.7],"description":"Answer questions about retrieved documents","defaults":{"name":"Question and Answer Chain","color":"#909298"},"codex":{"alias":["LangChain"],"categories":["AI"],"subcategories":{"AI":["Chains","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.chainretrievalqa/"}]}},"inputs":["main",{"displayName":"Model","maxConnections":1,"type":"ai_languageModel","required":true},{"displayName":"Retriever","maxConnections":1,"type":"ai_retriever","required":true}],"outputs":["main"],"credentials":[],"properties":[{"displayName":"Save time with an <a href=\"/templates/1960\" target=\"_blank\">example</a> of how this node works","name":"notice","type":"notice","default":""},{"displayName":"Query","name":"query","type":"string","required":true,"default":"={{ $json.input }}","displayOptions":{"show":{"@version":[1]}}},{"displayName":"Query","name":"query","type":"string","required":true,"default":"={{ $json.chat_input }}","displayOptions":{"show":{"@version":[1.1]}}},{"displayName":"Query","name":"query","type":"string","required":true,"default":"={{ $json.chatInput }}","displayOptions":{"show":{"@version":[1.2]}}},{"displayName":"Source for Prompt (User Message)","name":"promptType","type":"options","options":[{"name":"Connected Chat Trigger Node","value":"auto","description":"Looks for an input field called 'chatInput' that is coming from a directly connected Chat Trigger"},{"name":"Connected Guardrails Node","value":"guardrails","description":"Looks for an input field called 'guardrailsInput' that is coming from a directly connected Guardrails Node"},{"name":"Define below","value":"define","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"auto","displayOptions":{"hide":{"@version":[{"_cnd":{"lte":1.2}},{"_cnd":{"gte":1.7}}]}}},{"displayName":"Source for Prompt (User Message)","name":"promptType","type":"options","options":[{"name":"Connected Chat Trigger Node","value":"auto","description":"Looks for an input field called 'chatInput' that is coming from a directly connected Chat Trigger"},{"name":"Define below","value":"define","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"auto","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.7}}]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.guardrailsInput }}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["guardrails"]}},"displayOptions":{"show":{"promptType":["guardrails"],"@version":[{"_cnd":{"gte":1.4}}]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.chatInput }}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["auto"]}},"displayOptions":{"show":{"promptType":["auto"],"@version":[{"_cnd":{"gte":1.4}}]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"","placeholder":"e.g. 
Hello, how can you help me?","typeOptions":{"rows":2},"displayOptions":{"show":{"promptType":["define"]}}},{"displayName":"Options","name":"options","type":"collection","default":{},"placeholder":"Add Option","options":[{"displayName":"System Prompt Template","name":"systemPromptTemplate","type":"string","default":"You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question.\nIf you don't know the answer, just say that you don't know, don't try to make up an answer.\n----------------\nContext: {context}","typeOptions":{"rows":6},"description":"Template string used for the system prompt. This should include the variable `{context}` for the provided context. For text completion models, you should also include the variable `{question}` for the user’s query.","displayOptions":{"show":{"@version":[{"_cnd":{"lt":1.5}}]}}},{"displayName":"System Prompt Template","name":"systemPromptTemplate","type":"string","default":"You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question.\nIf you don't know the answer, just say that you don't know, don't try to make up an answer.\n----------------\nContext: {context}","typeOptions":{"rows":6},"description":"Template string used for the system prompt. This should include the variable `{context}` for the provided context. For text completion models, you should also include the variable `{input}` for the user’s query.","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.5}}]}}},{"displayName":"Batch Processing","name":"batching","type":"collection","placeholder":"Add Batch Processing Option","description":"Batch processing options for rate limiting","default":{},"options":[{"displayName":"Batch Size","name":"batchSize","default":5,"type":"number","description":"How many items to process in parallel. This is useful for rate limiting, but might impact the log output ordering."},{"displayName":"Delay Between Batches","name":"delayBetweenBatches","default":0,"type":"number","description":"Delay in milliseconds between batches. This is useful for rate limiting."}],"displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.6}}]}}}]}]},
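Both updated chains gate the two "Source for Prompt (User Message)" variants on node version using `_cnd` conditions (`lte`, `gte`, `lt`) inside `displayOptions`. The evaluator below is a hypothetical sketch of how such conditions can be read, inferred from the data above rather than taken from n8n's internal implementation:

```typescript
// Hypothetical evaluator for version conditions such as
// {"@version":[{"_cnd":{"lte":1.2}},{"_cnd":{"gte":1.7}}]},
// where the property matches when ANY listed entry matches.
type VersionCondition = number | { _cnd: { gte?: number; lte?: number; gt?: number; lt?: number } };

function matchesVersion(version: number, conditions: VersionCondition[]): boolean {
  return conditions.some((c) => {
    if (typeof c === 'number') return version === c;
    const { gte, lte, gt, lt } = c._cnd;
    if (gte !== undefined && !(version >= gte)) return false;
    if (lte !== undefined && !(version <= lte)) return false;
    if (gt !== undefined && !(version > gt)) return false;
    if (lt !== undefined && !(version < lt)) return false;
    return true;
  });
}

// Question and Answer Chain: the three-option promptType is hidden for versions
// <= 1.2 and >= 1.7, while the two-option variant is shown from 1.7 onward.
console.log(matchesVersion(1.7, [{ _cnd: { gte: 1.7 } }])); // true
console.log(matchesVersion(1.4, [{ _cnd: { lte: 1.2 } }, { _cnd: { gte: 1.7 } }])); // false
```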
  {"displayName":"Sentiment Analysis","name":"sentimentAnalysis","icon":"fa:balance-scale-left","iconColor":"black","group":["transform"],"version":[1,1.1],"description":"Analyze the sentiment of your text","codex":{"categories":["AI"],"subcategories":{"AI":["Chains","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.sentimentanalysis/"}]}},"defaults":{"name":"Sentiment Analysis"},"inputs":[{"displayName":"","type":"main"},{"displayName":"Model","maxConnections":1,"type":"ai_languageModel","required":true}],"outputs":"={{((parameters, defaultCategories) => {\n const options = parameters?.options ?? {};\n const categories = options?.categories ?? defaultCategories;\n const categoriesArray = categories.split(\",\").map((cat) => cat.trim());\n const ret = categoriesArray.map((cat) => ({ type: \"main\", displayName: cat }));\n return ret;\n})($parameter, \"Positive, Neutral, Negative\")}}","properties":[{"displayName":"Text to Analyze","name":"inputText","type":"string","required":true,"default":"","description":"Use an expression to reference data in previous nodes or enter static text","typeOptions":{"rows":2}},{"displayName":"Sentiment scores are LLM-generated estimates, not statistically rigorous measurements. They may be inconsistent across runs and should be used as rough indicators only.","name":"detailedResultsNotice","type":"notice","default":"","displayOptions":{"show":{"/options.includeDetailedResults":[true]}}},{"displayName":"Options","name":"options","type":"collection","default":{},"placeholder":"Add Option","options":[{"displayName":"Sentiment Categories","name":"categories","type":"string","default":"Positive, Neutral, Negative","description":"A comma-separated list of categories to analyze","noDataExpression":true,"typeOptions":{"rows":2}},{"displayName":"System Prompt Template","name":"systemPromptTemplate","type":"string","default":"You are highly intelligent and accurate sentiment analyzer. Analyze the sentiment of the provided text. Categorize it into one of the following: {categories}. Use the provided formatting instructions. Only output the JSON.","description":"String to use directly as the system prompt template","typeOptions":{"rows":6}},{"displayName":"Include Detailed Results","name":"includeDetailedResults","type":"boolean","default":false,"description":"Whether to include sentiment strength and confidence scores in the output"},{"displayName":"Enable Auto-Fixing","name":"enableAutoFixing","type":"boolean","default":true,"description":"Whether to enable auto-fixing (may trigger an additional LLM call if output is broken)"},{"displayName":"Batch Processing","name":"batching","type":"collection","placeholder":"Add Batch Processing Option","description":"Batch processing options for rate limiting","default":{},"options":[{"displayName":"Batch Size","name":"batchSize","default":5,"type":"number","description":"How many items to process in parallel. This is useful for rate limiting, but might impact the log output ordering."},{"displayName":"Delay Between Batches","name":"delayBetweenBatches","default":0,"type":"number","description":"Delay in milliseconds between batches. This is useful for rate limiting."}],"displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.1}}]}}}]}]},
  {"displayName":"Information Extractor","name":"informationExtractor","icon":"fa:project-diagram","iconColor":"black","group":["transform"],"version":[1,1.1,1.2],"defaultVersion":1.2,"description":"Extract information from text in a structured format","codex":{"alias":["NER","parse","parsing","JSON","data extraction","structured"],"categories":["AI"],"subcategories":{"AI":["Chains","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.information-extractor/"}]}},"defaults":{"name":"Information Extractor"},"inputs":[{"displayName":"","type":"main"},{"displayName":"Model","maxConnections":1,"type":"ai_languageModel","required":true}],"outputs":["main"],"properties":[{"displayName":"Text","name":"text","type":"string","default":"","description":"The text to extract information from","typeOptions":{"rows":2}},{"displayName":"Schema Type","name":"schemaType","type":"options","noDataExpression":true,"options":[{"name":"From Attribute Descriptions","value":"fromAttributes","description":"Extract specific attributes from the text based on types and descriptions"},{"name":"Generate From JSON Example","value":"fromJson","description":"Generate a schema from an example JSON object"},{"name":"Define using JSON Schema","value":"manual","description":"Define the JSON schema manually"}],"default":"fromAttributes","description":"How to specify the schema for the desired output"},{"displayName":"JSON Example","name":"jsonSchemaExample","type":"json","default":"{\n\t\"state\": \"California\",\n\t\"cities\": [\"Los Angeles\", \"San Francisco\", \"San Diego\"]\n}","noDataExpression":true,"typeOptions":{"rows":10},"displayOptions":{"show":{"schemaType":["fromJson"]}},"description":"Example JSON object to use to generate the schema"},{"displayName":"All properties will be required. To make them optional, use the 'JSON Schema' schema type instead","name":"notice","type":"notice","default":"","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.2}}],"schemaType":["fromJson"]}}},{"displayName":"Input Schema","name":"inputSchema","type":"json","default":"{\n\t\"type\": \"object\",\n\t\"properties\": {\n\t\t\"state\": {\n\t\t\t\"type\": \"string\"\n\t\t},\n\t\t\"cities\": {\n\t\t\t\"type\": \"array\",\n\t\t\t\"items\": {\n\t\t\t\t\"type\": \"string\"\n\t\t\t}\n\t\t}\n\t}\n}","noDataExpression":false,"typeOptions":{"rows":10},"displayOptions":{"show":{"schemaType":["manual"]}},"description":"Schema to use for the function","hint":"Use <a target=\"_blank\" href=\"https://json-schema.org/\">JSON Schema</a> format (<a target=\"_blank\" href=\"https://json-schema.org/learn/miscellaneous-examples.html\">examples</a>). $refs syntax is currently not supported."},{"displayName":"Attributes","name":"attributes","placeholder":"Add Attribute","type":"fixedCollection","default":{},"displayOptions":{"show":{"schemaType":["fromAttributes"]}},"typeOptions":{"multipleValues":true},"options":[{"name":"attributes","displayName":"Attribute List","values":[{"displayName":"Name","name":"name","type":"string","default":"","description":"Attribute to extract","placeholder":"e.g. 
company_name","required":true},{"displayName":"Type","name":"type","type":"options","description":"Data type of the attribute","required":true,"options":[{"name":"Boolean","value":"boolean"},{"name":"Date","value":"date"},{"name":"Number","value":"number"},{"name":"String","value":"string"}],"default":"string"},{"displayName":"Description","name":"description","type":"string","default":"","description":"Describe your attribute","placeholder":"Add description for the attribute","required":true},{"displayName":"Required","name":"required","type":"boolean","default":false,"description":"Whether attribute is required","required":true}]}]},{"displayName":"Options","name":"options","type":"collection","default":{},"placeholder":"Add Option","options":[{"displayName":"System Prompt Template","name":"systemPromptTemplate","type":"string","default":"You are an expert extraction algorithm.\nOnly extract relevant information from the text.\nIf you do not know the value of an attribute asked to extract, you may omit the attribute's value.","description":"String to use directly as the system prompt template","typeOptions":{"rows":6}},{"displayName":"Batch Processing","name":"batching","type":"collection","placeholder":"Add Batch Processing Option","description":"Batch processing options for rate limiting","default":{},"options":[{"displayName":"Batch Size","name":"batchSize","default":5,"type":"number","description":"How many items to process in parallel. This is useful for rate limiting, but might impact the log output ordering."},{"displayName":"Delay Between Batches","name":"delayBetweenBatches","default":0,"type":"number","description":"Delay in milliseconds between batches. This is useful for rate limiting."}],"displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.1}}]}}}]}]},
  {"displayName":"Text Classifier","name":"textClassifier","icon":"fa:tags","iconColor":"black","group":["transform"],"version":[1,1.1],"description":"Classify your text into distinct categories","codex":{"categories":["AI"],"subcategories":{"AI":["Chains","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.text-classifier/"}]}},"defaults":{"name":"Text Classifier"},"inputs":[{"displayName":"","type":"main"},{"displayName":"Model","maxConnections":1,"type":"ai_languageModel","required":true}],"outputs":"={{((parameters) => {\n const categories = parameters.categories?.categories ?? [];\n const fallback = parameters.options?.fallback;\n const ret = categories.map((cat) => {\n return { type: \"main\", displayName: cat.category };\n });\n if (fallback === \"other\") ret.push({ type: \"main\", displayName: \"Other\" });\n return ret;\n})($parameter)}}","properties":[{"displayName":"Text to Classify","name":"inputText","type":"string","required":true,"default":"","description":"Use an expression to reference data in previous nodes or enter static text","typeOptions":{"rows":2}},{"displayName":"Categories","name":"categories","placeholder":"Add Category","type":"fixedCollection","default":{},"typeOptions":{"multipleValues":true},"options":[{"name":"categories","displayName":"Categories","values":[{"displayName":"Category","name":"category","type":"string","default":"","description":"Category to add","required":true},{"displayName":"Description","name":"description","type":"string","default":"","description":"Describe your category if it's not obvious"}]}]},{"displayName":"Options","name":"options","type":"collection","default":{},"placeholder":"Add Option","options":[{"displayName":"Allow Multiple Classes To Be True","name":"multiClass","type":"boolean","default":false},{"displayName":"When No Clear Match","name":"fallback","type":"options","default":"discard","description":"What to do with items that don’t match the categories exactly","options":[{"name":"Discard Item","value":"discard","description":"Ignore the item and drop it from the output"},{"name":"Output on Extra, 'Other' Branch","value":"other","description":"Create a separate output branch called 'Other'"}]},{"displayName":"System Prompt Template","name":"systemPromptTemplate","type":"string","default":"Please classify the text provided by the user into one of the following categories: {categories}, and use the provided formatting instructions below. Don't explain, and only output the json.","description":"String to use directly as the system prompt template","typeOptions":{"rows":6}},{"displayName":"Enable Auto-Fixing","name":"enableAutoFixing","type":"boolean","default":true,"description":"Whether to enable auto-fixing (may trigger an additional LLM call if output is broken)"},{"displayName":"Batch Processing","name":"batching","type":"collection","placeholder":"Add Batch Processing Option","description":"Batch processing options for rate limiting","default":{},"options":[{"displayName":"Batch Size","name":"batchSize","default":5,"type":"number","description":"How many items to process in parallel. This is useful for rate limiting, but might impact the log output ordering."},{"displayName":"Delay Between Batches","name":"delayBetweenBatches","default":0,"type":"number","description":"Delay in milliseconds between batches. This is useful for rate limiting."}],"displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.1}}]}}}]}]},
@@ -86,7 +87,7 @@
  {"displayName":"Call n8n Workflow Tool","name":"toolWorkflow","icon":"fa:network-wired","iconColor":"black","group":["transform"],"description":"Uses another n8n workflow as a tool. Allows packaging any n8n node(s) as a tool.","codex":{"categories":["AI"],"subcategories":{"AI":["Tools"],"Tools":["Recommended Tools"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.toolworkflow/"}]}},"defaultVersion":2.2,"defaults":{"name":"Call n8n Workflow Tool"},"version":[2,2.1,2.2],"inputs":[],"outputs":["ai_tool"],"outputNames":["Tool"],"properties":[{"displayName":"This node must be connected to an AI agent. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"See an example of a workflow to suggest meeting slots using AI <a href=\"/templates/1953\" target=\"_blank\">here</a>.","name":"noticeTemplateExample","type":"notice","default":""},{"displayName":"Name","name":"name","type":"string","default":"","placeholder":"e.g. My_Color_Tool","validateType":"string-alphanumeric","description":"The name of the function to be called, could contain letters, numbers, and underscores only","displayOptions":{"show":{"@version":[{"_cnd":{"lte":2.1}}]}}},{"displayName":"Description","name":"description","type":"string","default":"","placeholder":"Call this tool to get a random color. The input should be a string with comma separated names of colors to exclude.","typeOptions":{"rows":3}},{"displayName":"This tool will call the workflow you define below, and look in the last node for the response. The workflow needs to start with an Execute Workflow trigger","name":"executeNotice","type":"notice","default":""},{"displayName":"Source","name":"source","type":"options","options":[{"name":"Database","value":"database","description":"Load the workflow from the database by ID"},{"name":"Define Below","value":"parameter","description":"Pass the JSON code of a workflow"}],"default":"database","description":"Where to get the workflow to execute from"},{"displayName":"Workflow","name":"workflowId","type":"workflowSelector","displayOptions":{"show":{"source":["database"]}},"default":"","required":true},{"displayName":"Workflow Inputs","name":"workflowInputs","type":"resourceMapper","noDataExpression":true,"default":{"mappingMode":"defineBelow","value":null},"required":true,"typeOptions":{"loadOptionsDependsOn":["workflowId.value"],"resourceMapper":{"localResourceMapperMethod":"loadSubWorkflowInputs","valuesLabel":"Workflow Inputs","mode":"map","fieldWords":{"singular":"workflow input","plural":"workflow inputs"},"addAllFields":true,"multiKeyMatch":false,"supportAutoMap":false}},"displayOptions":{"show":{"source":["database"]},"hide":{"workflowId":[""]}}},{"displayName":"Workflow JSON","name":"workflowJson","type":"json","typeOptions":{"rows":10},"displayOptions":{"show":{"source":["parameter"]}},"default":"\n\n\n\n\n\n\n\n\n","required":true,"description":"The workflow JSON code to execute"}]},
  {"displayName":"Call n8n Workflow Tool","name":"toolWorkflow","icon":"fa:network-wired","iconColor":"black","group":["transform"],"description":"Uses another n8n workflow as a tool. Allows packaging any n8n node(s) as a tool.","codex":{"categories":["AI"],"subcategories":{"AI":["Tools"],"Tools":["Recommended Tools"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.toolworkflow/"}]}},"defaultVersion":2.2,"version":[1,1.1,1.2,1.3],"defaults":{"name":"Call n8n Workflow Tool"},"inputs":[],"outputs":["ai_tool"],"outputNames":["Tool"],"properties":[{"displayName":"This node must be connected to an AI agent. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"See an example of a workflow to suggest meeting slots using AI <a href=\"/templates/1953\" target=\"_blank\">here</a>.","name":"noticeTemplateExample","type":"notice","default":""},{"displayName":"Name","name":"name","type":"string","default":"","placeholder":"My_Color_Tool","displayOptions":{"show":{"@version":[1]}}},{"displayName":"Name","name":"name","type":"string","default":"","placeholder":"e.g. My_Color_Tool","validateType":"string-alphanumeric","description":"The name of the function to be called, could contain letters, numbers, and underscores only","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.1}}]}}},{"displayName":"Description","name":"description","type":"string","default":"","placeholder":"Call this tool to get a random color. The input should be a string with comma separted names of colors to exclude.","typeOptions":{"rows":3}},{"displayName":"This tool will call the workflow you define below, and look in the last node for the response. The workflow needs to start with an Execute Workflow trigger","name":"executeNotice","type":"notice","default":""},{"displayName":"Source","name":"source","type":"options","options":[{"name":"Database","value":"database","description":"Load the workflow from the database by ID"},{"name":"Define Below","value":"parameter","description":"Pass the JSON code of a workflow"}],"default":"database","description":"Where to get the workflow to execute from"},{"displayName":"Workflow ID","name":"workflowId","type":"string","displayOptions":{"show":{"source":["database"],"@version":[{"_cnd":{"lte":1.1}}]}},"default":"","required":true,"description":"The workflow to execute","hint":"Can be found in the URL of the workflow"},{"displayName":"Workflow","name":"workflowId","type":"workflowSelector","displayOptions":{"show":{"source":["database"],"@version":[{"_cnd":{"gte":1.2}}]}},"default":"","required":true},{"displayName":"Workflow JSON","name":"workflowJson","type":"json","typeOptions":{"rows":10},"displayOptions":{"show":{"source":["parameter"]}},"default":"\n\n\n\n\n\n\n\n\n","required":true,"description":"The workflow JSON code to execute"},{"displayName":"Field to Return","name":"responsePropertyName","type":"string","default":"response","required":true,"hint":"The field in the last-executed node of the workflow that contains the response","description":"Where to find the data that this tool should return. 
n8n will look in the output of the last-executed node of the workflow for a field with this name, and return its value.","displayOptions":{"show":{"@version":[{"_cnd":{"lt":1.3}}]}}},{"displayName":"Extra Workflow Inputs","name":"fields","placeholder":"Add Value","type":"fixedCollection","description":"These will be output by the 'execute workflow' trigger of the workflow being called","typeOptions":{"multipleValues":true,"sortable":true},"default":{},"options":[{"name":"values","displayName":"Values","values":[{"displayName":"Name","name":"name","type":"string","default":"","placeholder":"e.g. fieldName","description":"Name of the field to set the value of. Supports dot-notation. Example: data.person[0].name.","requiresDataPath":"single"},{"displayName":"Type","name":"type","type":"options","description":"The field value type","options":[{"name":"String","value":"stringValue"},{"name":"Number","value":"numberValue"},{"name":"Boolean","value":"booleanValue"},{"name":"Array","value":"arrayValue"},{"name":"Object","value":"objectValue"}],"default":"stringValue"},{"displayName":"Value","name":"stringValue","type":"string","default":"","displayOptions":{"show":{"type":["stringValue"]}},"validateType":"string","ignoreValidationDuringExecution":true},{"displayName":"Value","name":"numberValue","type":"string","default":"","displayOptions":{"show":{"type":["numberValue"]}},"validateType":"number","ignoreValidationDuringExecution":true},{"displayName":"Value","name":"booleanValue","type":"options","default":"true","options":[{"name":"True","value":"true"},{"name":"False","value":"false"}],"displayOptions":{"show":{"type":["booleanValue"]}},"validateType":"boolean","ignoreValidationDuringExecution":true},{"displayName":"Value","name":"arrayValue","type":"string","default":"","placeholder":"e.g. [ arrayItem1, arrayItem2, arrayItem3 ]","displayOptions":{"show":{"type":["arrayValue"]}},"validateType":"array","ignoreValidationDuringExecution":true},{"displayName":"Value","name":"objectValue","type":"json","default":"={}","typeOptions":{"rows":2},"displayOptions":{"show":{"type":["objectValue"]}},"validateType":"object","ignoreValidationDuringExecution":true}]}]},{"displayName":"Specify Input Schema","name":"specifyInputSchema","type":"boolean","description":"Whether to specify the schema for the function. 
This would require the LLM to provide the input in the correct format and would validate it against the schema.","noDataExpression":true,"default":false},{"displayName":"Schema Type","name":"schemaType","type":"options","noDataExpression":true,"options":[{"name":"Generate From JSON Example","value":"fromJson","description":"Generate a schema from an example JSON object"},{"name":"Define using JSON Schema","value":"manual","description":"Define the JSON schema manually"}],"default":"fromJson","description":"How to specify the schema for the function","displayOptions":{"show":{"specifyInputSchema":[true]}}},{"displayName":"JSON Example","name":"jsonSchemaExample","type":"json","default":"{\n\t\"some_input\": \"some_value\"\n}","noDataExpression":true,"typeOptions":{"rows":10},"displayOptions":{"show":{"schemaType":["fromJson"]}},"description":"Example JSON object to use to generate the schema"},{"displayName":"Input Schema","name":"inputSchema","type":"json","default":"{\n\"type\": \"object\",\n\"properties\": {\n\t\"some_input\": {\n\t\t\"type\": \"string\",\n\t\t\"description\": \"Some input to the function\"\n\t\t}\n\t}\n}","noDataExpression":false,"typeOptions":{"rows":10},"displayOptions":{"show":{"schemaType":["manual"]}},"description":"Schema to use for the function","hint":"Use <a target=\"_blank\" href=\"https://json-schema.org/\">JSON Schema</a> format (<a target=\"_blank\" href=\"https://json-schema.org/learn/miscellaneous-examples.html\">examples</a>). $refs syntax is currently not supported."}]},
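  The 1.x definition above also offers a Schema Type of "Generate From JSON Example". As a rough illustration of that idea only (not the package's actual code; schemaFromExample is a hypothetical helper), deriving a JSON Schema from the example object could look like:

  // Illustrative only: infer a JSON Schema from an example object, mirroring the
  // "Generate From JSON Example" option above. Not the node's implementation.
  type JSONSchema = { type: string; properties?: Record<string, JSONSchema>; items?: JSONSchema };

  function schemaFromExample(value: unknown): JSONSchema {
    if (Array.isArray(value)) {
      return { type: 'array', items: value.length > 0 ? schemaFromExample(value[0]) : { type: 'string' } };
    }
    if (value === null) return { type: 'null' };
    if (typeof value === 'object') {
      const properties: Record<string, JSONSchema> = {};
      for (const [key, child] of Object.entries(value as Record<string, unknown>)) {
        properties[key] = schemaFromExample(child);
      }
      return { type: 'object', properties };
    }
    return { type: typeof value }; // 'string' | 'number' | 'boolean' for the common cases
  }

  schemaFromExample({ some_input: 'some_value' });
  // => { type: 'object', properties: { some_input: { type: 'string' } } }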
  {"displayName":"Manual Chat Trigger","name":"manualChatTrigger","icon":"fa:comments","group":["trigger"],"version":[1,1.1],"description":"Runs the flow on new manual chat message","eventTriggerDescription":"","maxNodes":1,"hidden":true,"defaults":{"name":"When chat message received","color":"#909298"},"codex":{"categories":["Core Nodes"],"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/core-nodes/n8n-nodes-langchain.chattrigger/"}]},"subcategories":{"Core Nodes":["Other Trigger Nodes"]}},"inputs":[],"outputs":["main"],"properties":[{"displayName":"This node is where a manual chat workflow execution starts. To make one, go back to the canvas and click ‘Chat’","name":"notice","type":"notice","default":""},{"displayName":"Chat and execute workflow","name":"openChat","type":"button","typeOptions":{"buttonConfig":{"action":"openChat"}},"default":""}]},
- {"displayName":"Chat Trigger","name":"chatTrigger","icon":"fa:comments","iconColor":"black","group":["trigger"],"version":[1,1.1,1.2,1.3,1.4],"defaultVersion":1.4,"description":"Runs the workflow when an n8n generated webchat is submitted","defaults":{"name":"When chat message received"},"codex":{"categories":["Core Nodes"],"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/core-nodes/n8n-nodes-langchain.chattrigger/"}]}},"maxNodes":1,"inputs":"={{ (() => {\n\t\t\tif (!['hostedChat', 'webhook'].includes($parameter.mode)) {\n\t\t\t\treturn [];\n\t\t\t}\n\t\t\tif ($parameter.options?.loadPreviousSession !== 'memory') {\n\t\t\t\treturn [];\n\t\t\t}\n\n\t\t\treturn [\n\t\t\t\t{\n\t\t\t\t\tdisplayName: 'Memory',\n\t\t\t\t\tmaxConnections: 1,\n\t\t\t\t\ttype: 'ai_memory',\n\t\t\t\t\trequired: true,\n\t\t\t\t}\n\t\t\t];\n\t\t })() }}","outputs":["main"],"credentials":[{"name":"httpBasicAuth","required":true,"displayOptions":{"show":{"authentication":["basicAuth"]}}}],"webhooks":[{"name":"setup","httpMethod":"GET","responseMode":"onReceived","path":"chat","ndvHideUrl":true},{"name":"default","httpMethod":"POST","responseMode":"={{$parameter.options?.[\"responseMode\"] || \"lastNode\" }}","path":"chat","ndvHideMethod":true,"ndvHideUrl":"={{ !$parameter.public }}"}],"eventTriggerDescription":"Waiting for you to submit the chat","activationMessage":"You can now make calls to your production chat URL.","triggerPanel":false,"properties":[{"displayName":"Make Chat Publicly Available","name":"public","type":"boolean","default":false,"description":"Whether the chat should be publicly available or only accessible through the manual chat interface"},{"displayName":"Mode","name":"mode","type":"options","options":[{"name":"Hosted Chat","value":"hostedChat","description":"Chat on a page served by n8n"},{"name":"Embedded Chat","value":"webhook","description":"Chat through a widget embedded in another page, or by calling a webhook"}],"default":"hostedChat","displayOptions":{"show":{"public":[true]}}},{"displayName":"Chat will be live at the URL above once you activate this workflow. Live executions will show up in the ‘executions’ tab","name":"hostedChatNotice","type":"notice","displayOptions":{"show":{"mode":["hostedChat"],"public":[true]}},"default":""},{"displayName":"Follow the instructions <a href=\"https://www.npmjs.com/package/@n8n/chat\" target=\"_blank\">here</a> to embed chat in a webpage (or just call the webhook URL at the top of this section). Chat will be live once you activate this workflow","name":"embeddedChatNotice","type":"notice","displayOptions":{"show":{"mode":["webhook"],"public":[true]}},"default":""},{"displayName":"Authentication","name":"authentication","type":"options","displayOptions":{"show":{"public":[true]}},"options":[{"name":"Basic Auth","value":"basicAuth","description":"Simple username and password (the same one for all users)"},{"name":"n8n User Auth","value":"n8nUserAuth","description":"Require user to be logged in with their n8n account"},{"name":"None","value":"none"}],"default":"none","description":"The way to authenticate"},{"displayName":"Initial Message(s)","name":"initialMessages","type":"string","displayOptions":{"show":{"mode":["hostedChat"],"public":[true]}},"typeOptions":{"rows":3},"default":"Hi there! 👋\nMy name is Nathan. 
How can I assist you today?","description":"Default messages shown at the start of the chat, one per line"},{"displayName":"Make Available in n8n Chat","name":"availableInChat","type":"boolean","default":false,"noDataExpression":true,"description":"Whether to make the agent available in n8n Chat","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.4}}]}}},{"displayName":"Agent Name","name":"agentName","type":"string","default":"","noDataExpression":true,"description":"The name of the agent on n8n Chat","displayOptions":{"show":{"availableInChat":[true]}}},{"displayName":"Agent Description","name":"agentDescription","type":"string","typeOptions":{"rows":2},"default":"","noDataExpression":true,"description":"The description of the agent on n8n Chat","displayOptions":{"show":{"availableInChat":[true]}}},{"displayName":"Options","name":"options","type":"collection","displayOptions":{"show":{"public":[false],"@version":[1,1.1]}},"placeholder":"Add Field","default":{},"options":[{"displayName":"Allow File Uploads","name":"allowFileUploads","type":"boolean","default":false,"description":"Whether to allow file uploads in the chat"},{"displayName":"Allowed File Mime Types","name":"allowedFilesMimeTypes","type":"string","default":"*","placeholder":"e.g. image/*, text/*, application/pdf","description":"Allowed file types for upload. Comma-separated list of <a href=\"https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Common_types\" target=\"_blank\">MIME types</a>."}]},{"displayName":"Options","name":"options","type":"collection","displayOptions":{"show":{"mode":["hostedChat","webhook"],"public":[true],"@version":[1,1.1]}},"placeholder":"Add Field","default":{},"options":[{"displayName":"Allowed Origins (CORS)","name":"allowedOrigins","type":"string","default":"*","description":"Comma-separated list of URLs allowed for cross-origin non-preflight requests. Use * (default) to allow all origins.","displayOptions":{"show":{"/mode":["hostedChat","webhook"]}}},{"displayName":"Allow File Uploads","name":"allowFileUploads","type":"boolean","default":false,"description":"Whether to allow file uploads in the chat","displayOptions":{"show":{"/mode":["hostedChat"]}}},{"displayName":"Allowed File Mime Types","name":"allowedFilesMimeTypes","type":"string","default":"*","placeholder":"e.g. image/*, text/*, application/pdf","description":"Allowed file types for upload. Comma-separated list of <a href=\"https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Common_types\" target=\"_blank\">MIME types</a>.","displayOptions":{"show":{"/mode":["hostedChat"]}}},{"displayName":"Input Placeholder","name":"inputPlaceholder","type":"string","displayOptions":{"show":{"/mode":["hostedChat"]}},"default":"Type your question..","placeholder":"e.g. 
Type your message here","description":"Shown as placeholder text in the chat input field"},{"displayName":"Load Previous Session","name":"loadPreviousSession","type":"options","options":[{"name":"Off","value":"notSupported","description":"Loading messages of previous session is turned off"},{"name":"From Memory","value":"memory","description":"Load session messages from memory"},{"name":"Manually","value":"manually","description":"Manually return messages of session"}],"default":"notSupported","description":"If loading messages of a previous session should be enabled"},{"displayName":"Require Button Click to Start Chat","name":"showWelcomeScreen","type":"boolean","displayOptions":{"show":{"/mode":["hostedChat"]}},"default":false,"description":"Whether to show the welcome screen at the start of the chat"},{"displayName":"Start Conversation Button Text","name":"getStarted","type":"string","displayOptions":{"show":{"showWelcomeScreen":[true],"/mode":["hostedChat"]}},"default":"New Conversation","placeholder":"e.g. New Conversation","description":"Shown as part of the welcome screen, in the middle of the chat window"},{"displayName":"Subtitle","name":"subtitle","type":"string","displayOptions":{"show":{"/mode":["hostedChat"]}},"default":"Start a chat. We're here to help you 24/7.","placeholder":"e.g. We're here for you","description":"Shown at the top of the chat, under the title"},{"displayName":"Title","name":"title","type":"string","displayOptions":{"show":{"/mode":["hostedChat"]}},"default":"Hi there! 👋","placeholder":"e.g. Welcome","description":"Shown at the top of the chat"},{"displayName":"Custom Chat Styling","name":"customCss","type":"string","typeOptions":{"rows":10,"editor":"cssEditor"},"displayOptions":{"show":{"/mode":["hostedChat"]}},"default":":root {\n /* Colors */\n --chat--color-primary: #e74266;\n --chat--color-primary-shade-50: #db4061;\n --chat--color-primary-shade-100: #cf3c5c;\n --chat--color-secondary: #20b69e;\n --chat--color-secondary-shade-50: #1ca08a;\n --chat--color-white: #ffffff;\n --chat--color-light: #f2f4f8;\n --chat--color-light-shade-50: #e6e9f1;\n --chat--color-light-shade-100: #c2c5cc;\n --chat--color-medium: #d2d4d9;\n --chat--color-dark: #101330;\n --chat--color-disabled: #d2d4d9;\n --chat--color-typing: #404040;\n\n /* Base Layout */\n --chat--spacing: 1rem;\n --chat--border-radius: 0.25rem;\n --chat--transition-duration: 0.15s;\n --chat--font-family: (\n -apple-system,\n BlinkMacSystemFont,\n 'Segoe UI',\n Roboto,\n Oxygen-Sans,\n Ubuntu,\n Cantarell,\n 'Helvetica Neue',\n sans-serif\n );\n\n /* Window Dimensions */\n --chat--window--width: 400px;\n --chat--window--height: 600px;\n --chat--window--bottom: var(--chat--spacing);\n --chat--window--right: var(--chat--spacing);\n --chat--window--z-index: 9999;\n --chat--window--border: 1px solid var(--chat--color-light-shade-50);\n --chat--window--border-radius: var(--chat--border-radius);\n --chat--window--margin-bottom: var(--chat--spacing);\n\n /* Header Styles */\n --chat--header-height: auto;\n --chat--header--padding: var(--chat--spacing);\n --chat--header--background: var(--chat--color-dark);\n --chat--header--color: var(--chat--color-light);\n --chat--header--border-top: none;\n --chat--header--border-bottom: none;\n --chat--header--border-left: none;\n --chat--header--border-right: none;\n --chat--heading--font-size: 2em;\n --chat--subtitle--font-size: inherit;\n --chat--subtitle--line-height: 1.8;\n\n /* Message Styles */\n --chat--message--font-size: 1rem;\n --chat--message--padding: 
var(--chat--spacing);\n --chat--message--border-radius: var(--chat--border-radius);\n --chat--message-line-height: 1.5;\n --chat--message--margin-bottom: calc(var(--chat--spacing) * 1);\n --chat--message--bot--background: var(--chat--color-white);\n --chat--message--bot--color: var(--chat--color-dark);\n --chat--message--bot--border: none;\n --chat--message--user--background: var(--chat--color-secondary);\n --chat--message--user--color: var(--chat--color-white);\n --chat--message--user--border: none;\n --chat--message--pre--background: rgba(0, 0, 0, 0.05);\n --chat--messages-list--padding: var(--chat--spacing);\n\n /* Toggle Button */\n --chat--toggle--size: 64px;\n --chat--toggle--width: var(--chat--toggle--size);\n --chat--toggle--height: var(--chat--toggle--size);\n --chat--toggle--border-radius: 50%;\n --chat--toggle--background: var(--chat--color-primary);\n --chat--toggle--hover--background: var(--chat--color-primary-shade-50);\n --chat--toggle--active--background: var(--chat--color-primary-shade-100);\n --chat--toggle--color: var(--chat--color-white);\n\n /* Input Area */\n --chat--textarea--height: 50px;\n --chat--textarea--max-height: 30rem;\n --chat--input--font-size: inherit;\n --chat--input--border: 0;\n --chat--input--border-radius: 0;\n --chat--input--padding: 0.8rem;\n --chat--input--background: var(--chat--color-white);\n --chat--input--text-color: initial;\n --chat--input--line-height: 1.5;\n --chat--input--placeholder--font-size: var(--chat--input--font-size);\n --chat--input--border-active: 0;\n --chat--input--left--panel--width: 2rem;\n\n /* Button Styles */\n --chat--button--color: var(--chat--color-light);\n --chat--button--background: var(--chat--color-primary);\n --chat--button--padding: calc(var(--chat--spacing) * 1 / 2) var(--chat--spacing);\n --chat--button--border-radius: var(--chat--border-radius);\n --chat--button--hover--color: var(--chat--color-light);\n --chat--button--hover--background: var(--chat--color-primary-shade-50);\n --chat--close--button--color-hover: var(--chat--color-primary);\n\n /* Send and File Buttons */\n --chat--input--send--button--background: var(--chat--color-white);\n --chat--input--send--button--color: var(--chat--color-secondary);\n --chat--input--send--button--background-hover: var(--chat--color-primary-shade-50);\n --chat--input--send--button--color-hover: var(--chat--color-secondary-shade-50);\n --chat--input--file--button--background: var(--chat--color-white);\n --chat--input--file--button--color: var(--chat--color-secondary);\n --chat--input--file--button--background-hover: var(--chat--input--file--button--background);\n --chat--input--file--button--color-hover: var(--chat--color-secondary-shade-50);\n --chat--files-spacing: 0.25rem;\n\n /* Body and Footer */\n --chat--body--background: var(--chat--color-light);\n --chat--footer--background: var(--chat--color-light);\n --chat--footer--color: var(--chat--color-dark);\n}\n\n\n/* You can override any class styles, too. Right-click inspect in Chat UI to find class to override. 
*/\n.chat-message {\n\tmax-width: 50%;\n}","description":"Override default styling of the public chat interface with CSS"},{"displayName":"Response Mode","name":"responseMode","type":"options","options":[{"name":"When Last Node Finishes","value":"lastNode","description":"Returns data of the last-executed node"},{"name":"Using 'Respond to Webhook' Node","value":"responseNode","description":"Response defined in that node"}],"default":"lastNode","description":"When and how to respond to the webhook"}]},{"displayName":"Options","name":"options","type":"collection","displayOptions":{"show":{"mode":["hostedChat","webhook"],"public":[true],"@version":[1.2]}},"placeholder":"Add Field","default":{},"options":[{"displayName":"Allowed Origins (CORS)","name":"allowedOrigins","type":"string","default":"*","description":"Comma-separated list of URLs allowed for cross-origin non-preflight requests. Use * (default) to allow all origins.","displayOptions":{"show":{"/mode":["hostedChat","webhook"]}}},{"displayName":"Allow File Uploads","name":"allowFileUploads","type":"boolean","default":false,"description":"Whether to allow file uploads in the chat","displayOptions":{"show":{"/mode":["hostedChat"]}}},{"displayName":"Allowed File Mime Types","name":"allowedFilesMimeTypes","type":"string","default":"*","placeholder":"e.g. image/*, text/*, application/pdf","description":"Allowed file types for upload. Comma-separated list of <a href=\"https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Common_types\" target=\"_blank\">MIME types</a>.","displayOptions":{"show":{"/mode":["hostedChat"]}}},{"displayName":"Input Placeholder","name":"inputPlaceholder","type":"string","displayOptions":{"show":{"/mode":["hostedChat"]}},"default":"Type your question..","placeholder":"e.g. Type your message here","description":"Shown as placeholder text in the chat input field"},{"displayName":"Load Previous Session","name":"loadPreviousSession","type":"options","options":[{"name":"Off","value":"notSupported","description":"Loading messages of previous session is turned off"},{"name":"From Memory","value":"memory","description":"Load session messages from memory"},{"name":"Manually","value":"manually","description":"Manually return messages of session"}],"default":"notSupported","description":"If loading messages of a previous session should be enabled"},{"displayName":"Require Button Click to Start Chat","name":"showWelcomeScreen","type":"boolean","displayOptions":{"show":{"/mode":["hostedChat"]}},"default":false,"description":"Whether to show the welcome screen at the start of the chat"},{"displayName":"Start Conversation Button Text","name":"getStarted","type":"string","displayOptions":{"show":{"showWelcomeScreen":[true],"/mode":["hostedChat"]}},"default":"New Conversation","placeholder":"e.g. New Conversation","description":"Shown as part of the welcome screen, in the middle of the chat window"},{"displayName":"Subtitle","name":"subtitle","type":"string","displayOptions":{"show":{"/mode":["hostedChat"]}},"default":"Start a chat. We're here to help you 24/7.","placeholder":"e.g. We're here for you","description":"Shown at the top of the chat, under the title"},{"displayName":"Title","name":"title","type":"string","displayOptions":{"show":{"/mode":["hostedChat"]}},"default":"Hi there! 👋","placeholder":"e.g. 
Welcome","description":"Shown at the top of the chat"},{"displayName":"Custom Chat Styling","name":"customCss","type":"string","typeOptions":{"rows":10,"editor":"cssEditor"},"displayOptions":{"show":{"/mode":["hostedChat"]}},"default":":root {\n /* Colors */\n --chat--color-primary: #e74266;\n --chat--color-primary-shade-50: #db4061;\n --chat--color-primary-shade-100: #cf3c5c;\n --chat--color-secondary: #20b69e;\n --chat--color-secondary-shade-50: #1ca08a;\n --chat--color-white: #ffffff;\n --chat--color-light: #f2f4f8;\n --chat--color-light-shade-50: #e6e9f1;\n --chat--color-light-shade-100: #c2c5cc;\n --chat--color-medium: #d2d4d9;\n --chat--color-dark: #101330;\n --chat--color-disabled: #d2d4d9;\n --chat--color-typing: #404040;\n\n /* Base Layout */\n --chat--spacing: 1rem;\n --chat--border-radius: 0.25rem;\n --chat--transition-duration: 0.15s;\n --chat--font-family: (\n -apple-system,\n BlinkMacSystemFont,\n 'Segoe UI',\n Roboto,\n Oxygen-Sans,\n Ubuntu,\n Cantarell,\n 'Helvetica Neue',\n sans-serif\n );\n\n /* Window Dimensions */\n --chat--window--width: 400px;\n --chat--window--height: 600px;\n --chat--window--bottom: var(--chat--spacing);\n --chat--window--right: var(--chat--spacing);\n --chat--window--z-index: 9999;\n --chat--window--border: 1px solid var(--chat--color-light-shade-50);\n --chat--window--border-radius: var(--chat--border-radius);\n --chat--window--margin-bottom: var(--chat--spacing);\n\n /* Header Styles */\n --chat--header-height: auto;\n --chat--header--padding: var(--chat--spacing);\n --chat--header--background: var(--chat--color-dark);\n --chat--header--color: var(--chat--color-light);\n --chat--header--border-top: none;\n --chat--header--border-bottom: none;\n --chat--header--border-left: none;\n --chat--header--border-right: none;\n --chat--heading--font-size: 2em;\n --chat--subtitle--font-size: inherit;\n --chat--subtitle--line-height: 1.8;\n\n /* Message Styles */\n --chat--message--font-size: 1rem;\n --chat--message--padding: var(--chat--spacing);\n --chat--message--border-radius: var(--chat--border-radius);\n --chat--message-line-height: 1.5;\n --chat--message--margin-bottom: calc(var(--chat--spacing) * 1);\n --chat--message--bot--background: var(--chat--color-white);\n --chat--message--bot--color: var(--chat--color-dark);\n --chat--message--bot--border: none;\n --chat--message--user--background: var(--chat--color-secondary);\n --chat--message--user--color: var(--chat--color-white);\n --chat--message--user--border: none;\n --chat--message--pre--background: rgba(0, 0, 0, 0.05);\n --chat--messages-list--padding: var(--chat--spacing);\n\n /* Toggle Button */\n --chat--toggle--size: 64px;\n --chat--toggle--width: var(--chat--toggle--size);\n --chat--toggle--height: var(--chat--toggle--size);\n --chat--toggle--border-radius: 50%;\n --chat--toggle--background: var(--chat--color-primary);\n --chat--toggle--hover--background: var(--chat--color-primary-shade-50);\n --chat--toggle--active--background: var(--chat--color-primary-shade-100);\n --chat--toggle--color: var(--chat--color-white);\n\n /* Input Area */\n --chat--textarea--height: 50px;\n --chat--textarea--max-height: 30rem;\n --chat--input--font-size: inherit;\n --chat--input--border: 0;\n --chat--input--border-radius: 0;\n --chat--input--padding: 0.8rem;\n --chat--input--background: var(--chat--color-white);\n --chat--input--text-color: initial;\n --chat--input--line-height: 1.5;\n --chat--input--placeholder--font-size: var(--chat--input--font-size);\n --chat--input--border-active: 0;\n 
--chat--input--left--panel--width: 2rem;\n\n /* Button Styles */\n --chat--button--color: var(--chat--color-light);\n --chat--button--background: var(--chat--color-primary);\n --chat--button--padding: calc(var(--chat--spacing) * 1 / 2) var(--chat--spacing);\n --chat--button--border-radius: var(--chat--border-radius);\n --chat--button--hover--color: var(--chat--color-light);\n --chat--button--hover--background: var(--chat--color-primary-shade-50);\n --chat--close--button--color-hover: var(--chat--color-primary);\n\n /* Send and File Buttons */\n --chat--input--send--button--background: var(--chat--color-white);\n --chat--input--send--button--color: var(--chat--color-secondary);\n --chat--input--send--button--background-hover: var(--chat--color-primary-shade-50);\n --chat--input--send--button--color-hover: var(--chat--color-secondary-shade-50);\n --chat--input--file--button--background: var(--chat--color-white);\n --chat--input--file--button--color: var(--chat--color-secondary);\n --chat--input--file--button--background-hover: var(--chat--input--file--button--background);\n --chat--input--file--button--color-hover: var(--chat--color-secondary-shade-50);\n --chat--files-spacing: 0.25rem;\n\n /* Body and Footer */\n --chat--body--background: var(--chat--color-light);\n --chat--footer--background: var(--chat--color-light);\n --chat--footer--color: var(--chat--color-dark);\n}\n\n\n/* You can override any class styles, too. Right-click inspect in Chat UI to find class to override. */\n.chat-message {\n\tmax-width: 50%;\n}","description":"Override default styling of the public chat interface with CSS"},{"displayName":"Response Mode","name":"responseMode","type":"options","options":[{"name":"When Last Node Finishes","value":"lastNode","description":"Returns data of the last-executed node"},{"name":"Using 'Respond to Webhook' Node","value":"responseNode","description":"Response defined in that node"},{"name":"Streaming","value":"streaming","description":"Streaming response from specified nodes (e.g. Agents)"}],"default":"lastNode","description":"When and how to respond to the webhook"}]},{"displayName":"Options","name":"options","type":"collection","displayOptions":{"show":{"public":[false],"@version":[{"_cnd":{"gte":1.3}}]}},"placeholder":"Add Field","default":{},"options":[{"displayName":"Allow File Uploads","name":"allowFileUploads","type":"boolean","default":false,"description":"Whether to allow file uploads in the chat"},{"displayName":"Allowed File Mime Types","name":"allowedFilesMimeTypes","type":"string","default":"*","placeholder":"e.g. image/*, text/*, application/pdf","description":"Allowed file types for upload. 
Comma-separated list of <a href=\"https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Common_types\" target=\"_blank\">MIME types</a>."},{"displayName":"Response Mode","name":"responseMode","type":"options","options":[{"name":"When Last Node Finishes","value":"lastNode","description":"Returns data of the last-executed node"},{"name":"Using Response Nodes","value":"responseNodes","description":"Send responses to the chat by using 'Respond to Chat' node"}],"default":"lastNode","description":"When and how to respond to the chat"}]},{"displayName":"Options","name":"options","type":"collection","displayOptions":{"show":{"mode":["hostedChat","webhook"],"public":[true],"@version":[{"_cnd":{"gte":1.3}}]}},"placeholder":"Add Field","default":{},"options":[{"displayName":"Allowed Origins (CORS)","name":"allowedOrigins","type":"string","default":"*","description":"Comma-separated list of URLs allowed for cross-origin non-preflight requests. Use * (default) to allow all origins.","displayOptions":{"show":{"/mode":["hostedChat","webhook"]}}},{"displayName":"Allow File Uploads","name":"allowFileUploads","type":"boolean","default":false,"description":"Whether to allow file uploads in the chat","displayOptions":{"show":{"/mode":["hostedChat"]}}},{"displayName":"Allowed File Mime Types","name":"allowedFilesMimeTypes","type":"string","default":"*","placeholder":"e.g. image/*, text/*, application/pdf","description":"Allowed file types for upload. Comma-separated list of <a href=\"https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Common_types\" target=\"_blank\">MIME types</a>.","displayOptions":{"show":{"/mode":["hostedChat"]}}},{"displayName":"Input Placeholder","name":"inputPlaceholder","type":"string","displayOptions":{"show":{"/mode":["hostedChat"]}},"default":"Type your question..","placeholder":"e.g. Type your message here","description":"Shown as placeholder text in the chat input field"},{"displayName":"Load Previous Session","name":"loadPreviousSession","type":"options","options":[{"name":"Off","value":"notSupported","description":"Loading messages of previous session is turned off"},{"name":"From Memory","value":"memory","description":"Load session messages from memory"},{"name":"Manually","value":"manually","description":"Manually return messages of session"}],"default":"notSupported","description":"If loading messages of a previous session should be enabled"},{"displayName":"Require Button Click to Start Chat","name":"showWelcomeScreen","type":"boolean","displayOptions":{"show":{"/mode":["hostedChat"]}},"default":false,"description":"Whether to show the welcome screen at the start of the chat"},{"displayName":"Start Conversation Button Text","name":"getStarted","type":"string","displayOptions":{"show":{"showWelcomeScreen":[true],"/mode":["hostedChat"]}},"default":"New Conversation","placeholder":"e.g. New Conversation","description":"Shown as part of the welcome screen, in the middle of the chat window"},{"displayName":"Subtitle","name":"subtitle","type":"string","displayOptions":{"show":{"/mode":["hostedChat"]}},"default":"Start a chat. We're here to help you 24/7.","placeholder":"e.g. We're here for you","description":"Shown at the top of the chat, under the title"},{"displayName":"Title","name":"title","type":"string","displayOptions":{"show":{"/mode":["hostedChat"]}},"default":"Hi there! 👋","placeholder":"e.g. 
Welcome","description":"Shown at the top of the chat"},{"displayName":"Custom Chat Styling","name":"customCss","type":"string","typeOptions":{"rows":10,"editor":"cssEditor"},"displayOptions":{"show":{"/mode":["hostedChat"]}},"default":":root {\n /* Colors */\n --chat--color-primary: #e74266;\n --chat--color-primary-shade-50: #db4061;\n --chat--color-primary-shade-100: #cf3c5c;\n --chat--color-secondary: #20b69e;\n --chat--color-secondary-shade-50: #1ca08a;\n --chat--color-white: #ffffff;\n --chat--color-light: #f2f4f8;\n --chat--color-light-shade-50: #e6e9f1;\n --chat--color-light-shade-100: #c2c5cc;\n --chat--color-medium: #d2d4d9;\n --chat--color-dark: #101330;\n --chat--color-disabled: #d2d4d9;\n --chat--color-typing: #404040;\n\n /* Base Layout */\n --chat--spacing: 1rem;\n --chat--border-radius: 0.25rem;\n --chat--transition-duration: 0.15s;\n --chat--font-family: (\n -apple-system,\n BlinkMacSystemFont,\n 'Segoe UI',\n Roboto,\n Oxygen-Sans,\n Ubuntu,\n Cantarell,\n 'Helvetica Neue',\n sans-serif\n );\n\n /* Window Dimensions */\n --chat--window--width: 400px;\n --chat--window--height: 600px;\n --chat--window--bottom: var(--chat--spacing);\n --chat--window--right: var(--chat--spacing);\n --chat--window--z-index: 9999;\n --chat--window--border: 1px solid var(--chat--color-light-shade-50);\n --chat--window--border-radius: var(--chat--border-radius);\n --chat--window--margin-bottom: var(--chat--spacing);\n\n /* Header Styles */\n --chat--header-height: auto;\n --chat--header--padding: var(--chat--spacing);\n --chat--header--background: var(--chat--color-dark);\n --chat--header--color: var(--chat--color-light);\n --chat--header--border-top: none;\n --chat--header--border-bottom: none;\n --chat--header--border-left: none;\n --chat--header--border-right: none;\n --chat--heading--font-size: 2em;\n --chat--subtitle--font-size: inherit;\n --chat--subtitle--line-height: 1.8;\n\n /* Message Styles */\n --chat--message--font-size: 1rem;\n --chat--message--padding: var(--chat--spacing);\n --chat--message--border-radius: var(--chat--border-radius);\n --chat--message-line-height: 1.5;\n --chat--message--margin-bottom: calc(var(--chat--spacing) * 1);\n --chat--message--bot--background: var(--chat--color-white);\n --chat--message--bot--color: var(--chat--color-dark);\n --chat--message--bot--border: none;\n --chat--message--user--background: var(--chat--color-secondary);\n --chat--message--user--color: var(--chat--color-white);\n --chat--message--user--border: none;\n --chat--message--pre--background: rgba(0, 0, 0, 0.05);\n --chat--messages-list--padding: var(--chat--spacing);\n\n /* Toggle Button */\n --chat--toggle--size: 64px;\n --chat--toggle--width: var(--chat--toggle--size);\n --chat--toggle--height: var(--chat--toggle--size);\n --chat--toggle--border-radius: 50%;\n --chat--toggle--background: var(--chat--color-primary);\n --chat--toggle--hover--background: var(--chat--color-primary-shade-50);\n --chat--toggle--active--background: var(--chat--color-primary-shade-100);\n --chat--toggle--color: var(--chat--color-white);\n\n /* Input Area */\n --chat--textarea--height: 50px;\n --chat--textarea--max-height: 30rem;\n --chat--input--font-size: inherit;\n --chat--input--border: 0;\n --chat--input--border-radius: 0;\n --chat--input--padding: 0.8rem;\n --chat--input--background: var(--chat--color-white);\n --chat--input--text-color: initial;\n --chat--input--line-height: 1.5;\n --chat--input--placeholder--font-size: var(--chat--input--font-size);\n --chat--input--border-active: 0;\n 
--chat--input--left--panel--width: 2rem;\n\n /* Button Styles */\n --chat--button--color: var(--chat--color-light);\n --chat--button--background: var(--chat--color-primary);\n --chat--button--padding: calc(var(--chat--spacing) * 1 / 2) var(--chat--spacing);\n --chat--button--border-radius: var(--chat--border-radius);\n --chat--button--hover--color: var(--chat--color-light);\n --chat--button--hover--background: var(--chat--color-primary-shade-50);\n --chat--close--button--color-hover: var(--chat--color-primary);\n\n /* Send and File Buttons */\n --chat--input--send--button--background: var(--chat--color-white);\n --chat--input--send--button--color: var(--chat--color-secondary);\n --chat--input--send--button--background-hover: var(--chat--color-primary-shade-50);\n --chat--input--send--button--color-hover: var(--chat--color-secondary-shade-50);\n --chat--input--file--button--background: var(--chat--color-white);\n --chat--input--file--button--color: var(--chat--color-secondary);\n --chat--input--file--button--background-hover: var(--chat--input--file--button--background);\n --chat--input--file--button--color-hover: var(--chat--color-secondary-shade-50);\n --chat--files-spacing: 0.25rem;\n\n /* Body and Footer */\n --chat--body--background: var(--chat--color-light);\n --chat--footer--background: var(--chat--color-light);\n --chat--footer--color: var(--chat--color-dark);\n}\n\n\n/* You can override any class styles, too. Right-click inspect in Chat UI to find class to override. */\n.chat-message {\n\tmax-width: 50%;\n}","description":"Override default styling of the public chat interface with CSS"},{"displayName":"Response Mode","name":"responseMode","type":"options","options":[{"name":"When Last Node Finishes","value":"lastNode","description":"Returns data of the last-executed node"},{"name":"Streaming","value":"streaming","description":"Streaming response from specified nodes (e.g. Agents)"},{"name":"Using 'Respond to Webhook' Node","value":"responseNode","description":"Response defined in that node"}],"default":"lastNode","description":"When and how to respond to the chat","displayOptions":{"show":{"/mode":["webhook"]}}},{"displayName":"Response Mode","name":"responseMode","type":"options","options":[{"name":"When Last Node Finishes","value":"lastNode","description":"Returns data of the last-executed node"},{"name":"Streaming","value":"streaming","description":"Streaming response from specified nodes (e.g. Agents)"},{"name":"Using Response Nodes","value":"responseNodes","description":"Send responses to the chat by using 'Respond to Chat' node"}],"default":"lastNode","description":"When and how to respond to the webhook","displayOptions":{"show":{"/mode":["hostedChat"]}}}]}]},
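  The main behavioural change between the removed Chat Trigger definition above and the added one below is the default webhook responseMode expression: the old version fell back with || to lastNode, while the new version uses ?? and falls back to streaming whenever availableInChat is enabled. A minimal TypeScript sketch of that fallback (resolveResponseMode is an illustrative name; the parameter names come from the JSON):

  // Sketch of the new default responseMode fallback used in the added definition below.
  // Illustrative helper only; parameter names taken from the node JSON.
  function resolveResponseMode(
    options: { responseMode?: string } | undefined,
    availableInChat: boolean,
  ): string {
    return options?.responseMode ?? (availableInChat ? 'streaming' : 'lastNode');
  }

  resolveResponseMode(undefined, true);                         // 'streaming'
  resolveResponseMode(undefined, false);                        // 'lastNode'
  resolveResponseMode({ responseMode: 'responseNode' }, false); // 'responseNode'

  Note that ?? only falls back when the option is null or undefined, whereas the previous || would also have treated an empty string as missing.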
+ {"displayName":"Chat Trigger","name":"chatTrigger","icon":"fa:comments","iconColor":"black","group":["trigger"],"version":[1,1.1,1.2,1.3,1.4],"defaultVersion":1.4,"description":"Runs the workflow when an n8n generated webchat is submitted","defaults":{"name":"When chat message received"},"codex":{"categories":["Core Nodes"],"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/core-nodes/n8n-nodes-langchain.chattrigger/"}]}},"maxNodes":1,"inputs":"={{ (() => {\n\t\t\tif (!['hostedChat', 'webhook'].includes($parameter.mode)) {\n\t\t\t\treturn [];\n\t\t\t}\n\t\t\tif ($parameter.options?.loadPreviousSession !== 'memory') {\n\t\t\t\treturn [];\n\t\t\t}\n\n\t\t\treturn [\n\t\t\t\t{\n\t\t\t\t\tdisplayName: 'Memory',\n\t\t\t\t\tmaxConnections: 1,\n\t\t\t\t\ttype: 'ai_memory',\n\t\t\t\t\trequired: true,\n\t\t\t\t}\n\t\t\t];\n\t\t })() }}","outputs":["main"],"credentials":[{"name":"httpBasicAuth","required":true,"displayOptions":{"show":{"authentication":["basicAuth"]}}}],"webhooks":[{"name":"setup","httpMethod":"GET","responseMode":"onReceived","path":"chat","ndvHideUrl":true},{"name":"default","httpMethod":"POST","responseMode":"={{$parameter.options?.[\"responseMode\"] ?? ($parameter.availableInChat ? \"streaming\" : \"lastNode\") }}","path":"chat","ndvHideMethod":true,"ndvHideUrl":"={{ !$parameter.public }}"}],"eventTriggerDescription":"Waiting for you to submit the chat","activationMessage":"You can now make calls to your production chat URL.","triggerPanel":false,"properties":[{"displayName":"Make Chat Publicly Available","name":"public","type":"boolean","default":false,"description":"Whether the chat should be publicly available or only accessible through the manual chat interface"},{"displayName":"Mode","name":"mode","type":"options","options":[{"name":"Hosted Chat","value":"hostedChat","description":"Chat on a page served by n8n"},{"name":"Embedded Chat","value":"webhook","description":"Chat through a widget embedded in another page, or by calling a webhook"}],"default":"hostedChat","displayOptions":{"show":{"public":[true]}}},{"displayName":"Chat will be live at the URL above once this workflow is published. Live executions will show up in the ‘executions’ tab","name":"hostedChatNotice","type":"notice","displayOptions":{"show":{"mode":["hostedChat"],"public":[true]}},"default":""},{"displayName":"Follow the instructions <a href=\"https://www.npmjs.com/package/@n8n/chat\" target=\"_blank\">here</a> to embed chat in a webpage (or just call the webhook URL at the top of this section). Chat will be live once you publish this workflow","name":"embeddedChatNotice","type":"notice","displayOptions":{"show":{"mode":["webhook"],"public":[true]}},"default":""},{"displayName":"Authentication","name":"authentication","type":"options","displayOptions":{"show":{"public":[true]}},"options":[{"name":"Basic Auth","value":"basicAuth","description":"Simple username and password (the same one for all users)"},{"name":"n8n User Auth","value":"n8nUserAuth","description":"Require user to be logged in with their n8n account"},{"name":"None","value":"none"}],"default":"none","description":"The way to authenticate"},{"displayName":"Initial Message(s)","name":"initialMessages","type":"string","displayOptions":{"show":{"mode":["hostedChat"],"public":[true]}},"typeOptions":{"rows":3},"default":"Hi there! 👋\nMy name is Nathan. 
How can I assist you today?","description":"Default messages shown at the start of the chat, one per line"},{"displayName":"Make Available in n8n Chat","name":"availableInChat","type":"boolean","default":false,"noDataExpression":true,"description":"Whether to make the agent available in n8n Chat"},{"displayName":"Your Chat Trigger node is out of date. To update, delete this node and insert a new Chat Trigger node.","name":"availableInChatNotice","type":"notice","displayOptions":{"show":{"availableInChat":[true],"@version":[{"_cnd":{"lt":1.2}}]}},"default":""},{"displayName":"Your n8n users will be able to use this agent in <a href=\"/home/chat/\" target=\"_blank\">Chat</a> once this workflow is published. Make sure to share this workflow with at least Project Chat User access to all users who should use it. Currently, only streaming response mode is supported.","name":"availableInChatNotice","type":"notice","displayOptions":{"show":{"availableInChat":[true],"@version":[{"_cnd":{"gte":1.2}}]}},"default":""},{"displayName":"Agent Name","name":"agentName","type":"string","default":"","noDataExpression":true,"description":"The name of the agent on n8n Chat. Name of the workflow is used if left empty.","displayOptions":{"show":{"availableInChat":[true],"@version":[{"_cnd":{"gte":1.2}}]}}},{"displayName":"Agent Description","name":"agentDescription","type":"string","typeOptions":{"rows":2},"default":"","noDataExpression":true,"description":"The description of the agent on n8n Chat","displayOptions":{"show":{"availableInChat":[true],"@version":[{"_cnd":{"gte":1.2}}]}}},{"displayName":"Options","name":"options","type":"collection","displayOptions":{"show":{"public":[false],"@version":[1,1.1]}},"placeholder":"Add Field","default":{},"options":[{"displayName":"Allow File Uploads","name":"allowFileUploads","type":"boolean","default":false,"description":"Whether to allow file uploads in the chat"},{"displayName":"Allowed File Mime Types","name":"allowedFilesMimeTypes","type":"string","default":"*","placeholder":"e.g. image/*, text/*, application/pdf","description":"Allowed file types for upload. Comma-separated list of <a href=\"https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Common_types\" target=\"_blank\">MIME types</a>."}]},{"displayName":"Options","name":"options","type":"collection","displayOptions":{"show":{"mode":["hostedChat","webhook"],"public":[true],"@version":[1,1.1]}},"placeholder":"Add Field","default":{},"options":[{"displayName":"Allowed Origins (CORS)","name":"allowedOrigins","type":"string","default":"*","description":"Comma-separated list of URLs allowed for cross-origin non-preflight requests. Use * (default) to allow all origins.","displayOptions":{"show":{"/mode":["hostedChat","webhook"]}}},{"displayName":"Allow File Uploads","name":"allowFileUploads","type":"boolean","default":false,"description":"Whether to allow file uploads in the chat","displayOptions":{"show":{"/mode":["hostedChat"]}}},{"displayName":"Allowed File Mime Types","name":"allowedFilesMimeTypes","type":"string","default":"*","placeholder":"e.g. image/*, text/*, application/pdf","description":"Allowed file types for upload. 
Comma-separated list of <a href=\"https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Common_types\" target=\"_blank\">MIME types</a>.","displayOptions":{"show":{"/mode":["hostedChat"]}}},{"displayName":"Input Placeholder","name":"inputPlaceholder","type":"string","displayOptions":{"show":{"/mode":["hostedChat"]}},"default":"Type your question..","placeholder":"e.g. Type your message here","description":"Shown as placeholder text in the chat input field"},{"displayName":"Load Previous Session","name":"loadPreviousSession","type":"options","options":[{"name":"Off","value":"notSupported","description":"Loading messages of previous session is turned off"},{"name":"From Memory","value":"memory","description":"Load session messages from memory"},{"name":"Manually","value":"manually","description":"Manually return messages of session"}],"default":"notSupported","description":"If loading messages of a previous session should be enabled"},{"displayName":"Require Button Click to Start Chat","name":"showWelcomeScreen","type":"boolean","displayOptions":{"show":{"/mode":["hostedChat"]}},"default":false,"description":"Whether to show the welcome screen at the start of the chat"},{"displayName":"Start Conversation Button Text","name":"getStarted","type":"string","displayOptions":{"show":{"showWelcomeScreen":[true],"/mode":["hostedChat"]}},"default":"New Conversation","placeholder":"e.g. New Conversation","description":"Shown as part of the welcome screen, in the middle of the chat window"},{"displayName":"Subtitle","name":"subtitle","type":"string","displayOptions":{"show":{"/mode":["hostedChat"]}},"default":"Start a chat. We're here to help you 24/7.","placeholder":"e.g. We're here for you","description":"Shown at the top of the chat, under the title"},{"displayName":"Title","name":"title","type":"string","displayOptions":{"show":{"/mode":["hostedChat"]}},"default":"Hi there! 👋","placeholder":"e.g. 
Welcome","description":"Shown at the top of the chat"},{"displayName":"Custom Chat Styling","name":"customCss","type":"string","typeOptions":{"rows":10,"editor":"cssEditor"},"displayOptions":{"show":{"/mode":["hostedChat"]}},"default":":root {\n /* Colors */\n --chat--color-primary: #e74266;\n --chat--color-primary-shade-50: #db4061;\n --chat--color-primary-shade-100: #cf3c5c;\n --chat--color-secondary: #20b69e;\n --chat--color-secondary-shade-50: #1ca08a;\n --chat--color-white: #ffffff;\n --chat--color-light: #f2f4f8;\n --chat--color-light-shade-50: #e6e9f1;\n --chat--color-light-shade-100: #c2c5cc;\n --chat--color-medium: #d2d4d9;\n --chat--color-dark: #101330;\n --chat--color-disabled: #d2d4d9;\n --chat--color-typing: #404040;\n\n /* Base Layout */\n --chat--spacing: 1rem;\n --chat--border-radius: 0.25rem;\n --chat--transition-duration: 0.15s;\n --chat--font-family: (\n -apple-system,\n BlinkMacSystemFont,\n 'Segoe UI',\n Roboto,\n Oxygen-Sans,\n Ubuntu,\n Cantarell,\n 'Helvetica Neue',\n sans-serif\n );\n\n /* Window Dimensions */\n --chat--window--width: 400px;\n --chat--window--height: 600px;\n --chat--window--bottom: var(--chat--spacing);\n --chat--window--right: var(--chat--spacing);\n --chat--window--z-index: 9999;\n --chat--window--border: 1px solid var(--chat--color-light-shade-50);\n --chat--window--border-radius: var(--chat--border-radius);\n --chat--window--margin-bottom: var(--chat--spacing);\n\n /* Header Styles */\n --chat--header-height: auto;\n --chat--header--padding: var(--chat--spacing);\n --chat--header--background: var(--chat--color-dark);\n --chat--header--color: var(--chat--color-light);\n --chat--header--border-top: none;\n --chat--header--border-bottom: none;\n --chat--header--border-left: none;\n --chat--header--border-right: none;\n --chat--heading--font-size: 2em;\n --chat--subtitle--font-size: inherit;\n --chat--subtitle--line-height: 1.8;\n\n /* Message Styles */\n --chat--message--font-size: 1rem;\n --chat--message--padding: var(--chat--spacing);\n --chat--message--border-radius: var(--chat--border-radius);\n --chat--message-line-height: 1.5;\n --chat--message--margin-bottom: calc(var(--chat--spacing) * 1);\n --chat--message--bot--background: var(--chat--color-white);\n --chat--message--bot--color: var(--chat--color-dark);\n --chat--message--bot--border: none;\n --chat--message--user--background: var(--chat--color-secondary);\n --chat--message--user--color: var(--chat--color-white);\n --chat--message--user--border: none;\n --chat--message--pre--background: rgba(0, 0, 0, 0.05);\n --chat--messages-list--padding: var(--chat--spacing);\n\n /* Toggle Button */\n --chat--toggle--size: 64px;\n --chat--toggle--width: var(--chat--toggle--size);\n --chat--toggle--height: var(--chat--toggle--size);\n --chat--toggle--border-radius: 50%;\n --chat--toggle--background: var(--chat--color-primary);\n --chat--toggle--hover--background: var(--chat--color-primary-shade-50);\n --chat--toggle--active--background: var(--chat--color-primary-shade-100);\n --chat--toggle--color: var(--chat--color-white);\n\n /* Input Area */\n --chat--textarea--height: 50px;\n --chat--textarea--max-height: 30rem;\n --chat--input--font-size: inherit;\n --chat--input--border: 0;\n --chat--input--border-radius: 0;\n --chat--input--padding: 0.8rem;\n --chat--input--background: var(--chat--color-white);\n --chat--input--text-color: initial;\n --chat--input--line-height: 1.5;\n --chat--input--placeholder--font-size: var(--chat--input--font-size);\n --chat--input--border-active: 0;\n 
--chat--input--left--panel--width: 2rem;\n\n /* Button Styles */\n --chat--button--color: var(--chat--color-light);\n --chat--button--background: var(--chat--color-primary);\n --chat--button--padding: calc(var(--chat--spacing) * 1 / 2) var(--chat--spacing);\n --chat--button--border-radius: var(--chat--border-radius);\n --chat--button--hover--color: var(--chat--color-light);\n --chat--button--hover--background: var(--chat--color-primary-shade-50);\n --chat--close--button--color-hover: var(--chat--color-primary);\n\n /* Send and File Buttons */\n --chat--input--send--button--background: var(--chat--color-white);\n --chat--input--send--button--color: var(--chat--color-secondary);\n --chat--input--send--button--background-hover: var(--chat--color-primary-shade-50);\n --chat--input--send--button--color-hover: var(--chat--color-secondary-shade-50);\n --chat--input--file--button--background: var(--chat--color-white);\n --chat--input--file--button--color: var(--chat--color-secondary);\n --chat--input--file--button--background-hover: var(--chat--input--file--button--background);\n --chat--input--file--button--color-hover: var(--chat--color-secondary-shade-50);\n --chat--files-spacing: 0.25rem;\n\n /* Body and Footer */\n --chat--body--background: var(--chat--color-light);\n --chat--footer--background: var(--chat--color-light);\n --chat--footer--color: var(--chat--color-dark);\n}\n\n\n/* You can override any class styles, too. Right-click inspect in Chat UI to find class to override. */\n.chat-message {\n\tmax-width: 50%;\n}","description":"Override default styling of the public chat interface with CSS"},{"displayName":"Response Mode","name":"responseMode","type":"options","options":[{"name":"When Last Node Finishes","value":"lastNode","description":"Returns data of the last-executed node"},{"name":"Using 'Respond to Webhook' Node","value":"responseNode","description":"Response defined in that node"}],"default":"lastNode","description":"When and how to respond to the webhook"}]},{"displayName":"Options","name":"options","type":"collection","displayOptions":{"show":{"mode":["hostedChat","webhook"],"public":[true],"@version":[1.2]}},"placeholder":"Add Field","default":{},"options":[{"displayName":"Allowed Origins (CORS)","name":"allowedOrigins","type":"string","default":"*","description":"Comma-separated list of URLs allowed for cross-origin non-preflight requests. Use * (default) to allow all origins.","displayOptions":{"show":{"/mode":["hostedChat","webhook"]}}},{"displayName":"Allow File Uploads","name":"allowFileUploads","type":"boolean","default":false,"description":"Whether to allow file uploads in the chat","displayOptions":{"show":{"/mode":["hostedChat"]}}},{"displayName":"Allowed File Mime Types","name":"allowedFilesMimeTypes","type":"string","default":"*","placeholder":"e.g. image/*, text/*, application/pdf","description":"Allowed file types for upload. Comma-separated list of <a href=\"https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Common_types\" target=\"_blank\">MIME types</a>.","displayOptions":{"show":{"/mode":["hostedChat"]}}},{"displayName":"Input Placeholder","name":"inputPlaceholder","type":"string","displayOptions":{"show":{"/mode":["hostedChat"]}},"default":"Type your question..","placeholder":"e.g. 
Type your message here","description":"Shown as placeholder text in the chat input field"},{"displayName":"Load Previous Session","name":"loadPreviousSession","type":"options","options":[{"name":"Off","value":"notSupported","description":"Loading messages of previous session is turned off"},{"name":"From Memory","value":"memory","description":"Load session messages from memory"},{"name":"Manually","value":"manually","description":"Manually return messages of session"}],"default":"notSupported","description":"If loading messages of a previous session should be enabled"},{"displayName":"Require Button Click to Start Chat","name":"showWelcomeScreen","type":"boolean","displayOptions":{"show":{"/mode":["hostedChat"]}},"default":false,"description":"Whether to show the welcome screen at the start of the chat"},{"displayName":"Start Conversation Button Text","name":"getStarted","type":"string","displayOptions":{"show":{"showWelcomeScreen":[true],"/mode":["hostedChat"]}},"default":"New Conversation","placeholder":"e.g. New Conversation","description":"Shown as part of the welcome screen, in the middle of the chat window"},{"displayName":"Subtitle","name":"subtitle","type":"string","displayOptions":{"show":{"/mode":["hostedChat"]}},"default":"Start a chat. We're here to help you 24/7.","placeholder":"e.g. We're here for you","description":"Shown at the top of the chat, under the title"},{"displayName":"Title","name":"title","type":"string","displayOptions":{"show":{"/mode":["hostedChat"]}},"default":"Hi there! 👋","placeholder":"e.g. Welcome","description":"Shown at the top of the chat"},{"displayName":"Custom Chat Styling","name":"customCss","type":"string","typeOptions":{"rows":10,"editor":"cssEditor"},"displayOptions":{"show":{"/mode":["hostedChat"]}},"default":":root {\n /* Colors */\n --chat--color-primary: #e74266;\n --chat--color-primary-shade-50: #db4061;\n --chat--color-primary-shade-100: #cf3c5c;\n --chat--color-secondary: #20b69e;\n --chat--color-secondary-shade-50: #1ca08a;\n --chat--color-white: #ffffff;\n --chat--color-light: #f2f4f8;\n --chat--color-light-shade-50: #e6e9f1;\n --chat--color-light-shade-100: #c2c5cc;\n --chat--color-medium: #d2d4d9;\n --chat--color-dark: #101330;\n --chat--color-disabled: #d2d4d9;\n --chat--color-typing: #404040;\n\n /* Base Layout */\n --chat--spacing: 1rem;\n --chat--border-radius: 0.25rem;\n --chat--transition-duration: 0.15s;\n --chat--font-family: (\n -apple-system,\n BlinkMacSystemFont,\n 'Segoe UI',\n Roboto,\n Oxygen-Sans,\n Ubuntu,\n Cantarell,\n 'Helvetica Neue',\n sans-serif\n );\n\n /* Window Dimensions */\n --chat--window--width: 400px;\n --chat--window--height: 600px;\n --chat--window--bottom: var(--chat--spacing);\n --chat--window--right: var(--chat--spacing);\n --chat--window--z-index: 9999;\n --chat--window--border: 1px solid var(--chat--color-light-shade-50);\n --chat--window--border-radius: var(--chat--border-radius);\n --chat--window--margin-bottom: var(--chat--spacing);\n\n /* Header Styles */\n --chat--header-height: auto;\n --chat--header--padding: var(--chat--spacing);\n --chat--header--background: var(--chat--color-dark);\n --chat--header--color: var(--chat--color-light);\n --chat--header--border-top: none;\n --chat--header--border-bottom: none;\n --chat--header--border-left: none;\n --chat--header--border-right: none;\n --chat--heading--font-size: 2em;\n --chat--subtitle--font-size: inherit;\n --chat--subtitle--line-height: 1.8;\n\n /* Message Styles */\n --chat--message--font-size: 1rem;\n --chat--message--padding: 
var(--chat--spacing);\n --chat--message--border-radius: var(--chat--border-radius);\n --chat--message-line-height: 1.5;\n --chat--message--margin-bottom: calc(var(--chat--spacing) * 1);\n --chat--message--bot--background: var(--chat--color-white);\n --chat--message--bot--color: var(--chat--color-dark);\n --chat--message--bot--border: none;\n --chat--message--user--background: var(--chat--color-secondary);\n --chat--message--user--color: var(--chat--color-white);\n --chat--message--user--border: none;\n --chat--message--pre--background: rgba(0, 0, 0, 0.05);\n --chat--messages-list--padding: var(--chat--spacing);\n\n /* Toggle Button */\n --chat--toggle--size: 64px;\n --chat--toggle--width: var(--chat--toggle--size);\n --chat--toggle--height: var(--chat--toggle--size);\n --chat--toggle--border-radius: 50%;\n --chat--toggle--background: var(--chat--color-primary);\n --chat--toggle--hover--background: var(--chat--color-primary-shade-50);\n --chat--toggle--active--background: var(--chat--color-primary-shade-100);\n --chat--toggle--color: var(--chat--color-white);\n\n /* Input Area */\n --chat--textarea--height: 50px;\n --chat--textarea--max-height: 30rem;\n --chat--input--font-size: inherit;\n --chat--input--border: 0;\n --chat--input--border-radius: 0;\n --chat--input--padding: 0.8rem;\n --chat--input--background: var(--chat--color-white);\n --chat--input--text-color: initial;\n --chat--input--line-height: 1.5;\n --chat--input--placeholder--font-size: var(--chat--input--font-size);\n --chat--input--border-active: 0;\n --chat--input--left--panel--width: 2rem;\n\n /* Button Styles */\n --chat--button--color: var(--chat--color-light);\n --chat--button--background: var(--chat--color-primary);\n --chat--button--padding: calc(var(--chat--spacing) * 1 / 2) var(--chat--spacing);\n --chat--button--border-radius: var(--chat--border-radius);\n --chat--button--hover--color: var(--chat--color-light);\n --chat--button--hover--background: var(--chat--color-primary-shade-50);\n --chat--close--button--color-hover: var(--chat--color-primary);\n\n /* Send and File Buttons */\n --chat--input--send--button--background: var(--chat--color-white);\n --chat--input--send--button--color: var(--chat--color-secondary);\n --chat--input--send--button--background-hover: var(--chat--color-primary-shade-50);\n --chat--input--send--button--color-hover: var(--chat--color-secondary-shade-50);\n --chat--input--file--button--background: var(--chat--color-white);\n --chat--input--file--button--color: var(--chat--color-secondary);\n --chat--input--file--button--background-hover: var(--chat--input--file--button--background);\n --chat--input--file--button--color-hover: var(--chat--color-secondary-shade-50);\n --chat--files-spacing: 0.25rem;\n\n /* Body and Footer */\n --chat--body--background: var(--chat--color-light);\n --chat--footer--background: var(--chat--color-light);\n --chat--footer--color: var(--chat--color-dark);\n}\n\n\n/* You can override any class styles, too. Right-click inspect in Chat UI to find class to override. 
*/\n.chat-message {\n\tmax-width: 50%;\n}","description":"Override default styling of the public chat interface with CSS"},{"displayName":"Response Mode","name":"responseMode","type":"options","options":[{"name":"When Last Node Finishes","value":"lastNode","description":"Returns data of the last-executed node"},{"name":"Using 'Respond to Webhook' Node","value":"responseNode","description":"Response defined in that node"},{"name":"Streaming","value":"streaming","description":"Streaming response from specified nodes (e.g. Agents)"}],"default":"lastNode","description":"When and how to respond to the webhook","displayOptions":{"show":{"/availableInChat":[false]}}},{"displayName":"Response Mode","name":"responseMode","type":"options","options":[{"name":"Streaming","value":"streaming","description":"Streaming response from specified nodes (e.g. Agents)"}],"default":"streaming","description":"When and how to respond to the webhook","displayOptions":{"show":{"/availableInChat":[true]}}}]},{"displayName":"Options","name":"options","type":"collection","displayOptions":{"show":{"public":[false],"@version":[{"_cnd":{"gte":1.3}}]}},"placeholder":"Add Field","default":{},"options":[{"displayName":"Allow File Uploads","name":"allowFileUploads","type":"boolean","default":false,"description":"Whether to allow file uploads in the chat"},{"displayName":"Allowed File Mime Types","name":"allowedFilesMimeTypes","type":"string","default":"*","placeholder":"e.g. image/*, text/*, application/pdf","description":"Allowed file types for upload. Comma-separated list of <a href=\"https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Common_types\" target=\"_blank\">MIME types</a>."},{"displayName":"Response Mode","name":"responseMode","type":"options","options":[{"name":"When Last Node Finishes","value":"lastNode","description":"Returns data of the last-executed node"},{"name":"Using Response Nodes","value":"responseNodes","description":"Send responses to the chat by using 'Respond to Chat' node"},{"name":"Streaming","value":"streaming","description":"Streaming response from specified nodes (e.g. Agents)"}],"default":"lastNode","description":"When and how to respond to the chat","displayOptions":{"show":{"/availableInChat":[false]}}},{"displayName":"Response Mode","name":"responseMode","type":"options","options":[{"name":"Streaming","value":"streaming","description":"Streaming response from specified nodes (e.g. Agents)"}],"default":"streaming","description":"When and how to respond to the webhook","displayOptions":{"show":{"/availableInChat":[true]}}}]},{"displayName":"Options","name":"options","type":"collection","displayOptions":{"show":{"mode":["hostedChat","webhook"],"public":[true],"@version":[{"_cnd":{"gte":1.3}}]}},"placeholder":"Add Field","default":{},"options":[{"displayName":"Allowed Origins (CORS)","name":"allowedOrigins","type":"string","default":"*","description":"Comma-separated list of URLs allowed for cross-origin non-preflight requests. Use * (default) to allow all origins.","displayOptions":{"show":{"/mode":["hostedChat","webhook"]}}},{"displayName":"Allow File Uploads","name":"allowFileUploads","type":"boolean","default":false,"description":"Whether to allow file uploads in the chat","displayOptions":{"show":{"/mode":["hostedChat"]}}},{"displayName":"Allowed File Mime Types","name":"allowedFilesMimeTypes","type":"string","default":"*","placeholder":"e.g. image/*, text/*, application/pdf","description":"Allowed file types for upload. 
Comma-separated list of <a href=\"https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Common_types\" target=\"_blank\">MIME types</a>.","displayOptions":{"show":{"/mode":["hostedChat"]}}},{"displayName":"Input Placeholder","name":"inputPlaceholder","type":"string","displayOptions":{"show":{"/mode":["hostedChat"]}},"default":"Type your question..","placeholder":"e.g. Type your message here","description":"Shown as placeholder text in the chat input field"},{"displayName":"Load Previous Session","name":"loadPreviousSession","type":"options","options":[{"name":"Off","value":"notSupported","description":"Loading messages of previous session is turned off"},{"name":"From Memory","value":"memory","description":"Load session messages from memory"},{"name":"Manually","value":"manually","description":"Manually return messages of session"}],"default":"notSupported","description":"If loading messages of a previous session should be enabled"},{"displayName":"Require Button Click to Start Chat","name":"showWelcomeScreen","type":"boolean","displayOptions":{"show":{"/mode":["hostedChat"]}},"default":false,"description":"Whether to show the welcome screen at the start of the chat"},{"displayName":"Start Conversation Button Text","name":"getStarted","type":"string","displayOptions":{"show":{"showWelcomeScreen":[true],"/mode":["hostedChat"]}},"default":"New Conversation","placeholder":"e.g. New Conversation","description":"Shown as part of the welcome screen, in the middle of the chat window"},{"displayName":"Subtitle","name":"subtitle","type":"string","displayOptions":{"show":{"/mode":["hostedChat"]}},"default":"Start a chat. We're here to help you 24/7.","placeholder":"e.g. We're here for you","description":"Shown at the top of the chat, under the title"},{"displayName":"Title","name":"title","type":"string","displayOptions":{"show":{"/mode":["hostedChat"]}},"default":"Hi there! 👋","placeholder":"e.g. 
Welcome","description":"Shown at the top of the chat"},{"displayName":"Custom Chat Styling","name":"customCss","type":"string","typeOptions":{"rows":10,"editor":"cssEditor"},"displayOptions":{"show":{"/mode":["hostedChat"]}},"default":":root {\n /* Colors */\n --chat--color-primary: #e74266;\n --chat--color-primary-shade-50: #db4061;\n --chat--color-primary-shade-100: #cf3c5c;\n --chat--color-secondary: #20b69e;\n --chat--color-secondary-shade-50: #1ca08a;\n --chat--color-white: #ffffff;\n --chat--color-light: #f2f4f8;\n --chat--color-light-shade-50: #e6e9f1;\n --chat--color-light-shade-100: #c2c5cc;\n --chat--color-medium: #d2d4d9;\n --chat--color-dark: #101330;\n --chat--color-disabled: #d2d4d9;\n --chat--color-typing: #404040;\n\n /* Base Layout */\n --chat--spacing: 1rem;\n --chat--border-radius: 0.25rem;\n --chat--transition-duration: 0.15s;\n --chat--font-family: (\n -apple-system,\n BlinkMacSystemFont,\n 'Segoe UI',\n Roboto,\n Oxygen-Sans,\n Ubuntu,\n Cantarell,\n 'Helvetica Neue',\n sans-serif\n );\n\n /* Window Dimensions */\n --chat--window--width: 400px;\n --chat--window--height: 600px;\n --chat--window--bottom: var(--chat--spacing);\n --chat--window--right: var(--chat--spacing);\n --chat--window--z-index: 9999;\n --chat--window--border: 1px solid var(--chat--color-light-shade-50);\n --chat--window--border-radius: var(--chat--border-radius);\n --chat--window--margin-bottom: var(--chat--spacing);\n\n /* Header Styles */\n --chat--header-height: auto;\n --chat--header--padding: var(--chat--spacing);\n --chat--header--background: var(--chat--color-dark);\n --chat--header--color: var(--chat--color-light);\n --chat--header--border-top: none;\n --chat--header--border-bottom: none;\n --chat--header--border-left: none;\n --chat--header--border-right: none;\n --chat--heading--font-size: 2em;\n --chat--subtitle--font-size: inherit;\n --chat--subtitle--line-height: 1.8;\n\n /* Message Styles */\n --chat--message--font-size: 1rem;\n --chat--message--padding: var(--chat--spacing);\n --chat--message--border-radius: var(--chat--border-radius);\n --chat--message-line-height: 1.5;\n --chat--message--margin-bottom: calc(var(--chat--spacing) * 1);\n --chat--message--bot--background: var(--chat--color-white);\n --chat--message--bot--color: var(--chat--color-dark);\n --chat--message--bot--border: none;\n --chat--message--user--background: var(--chat--color-secondary);\n --chat--message--user--color: var(--chat--color-white);\n --chat--message--user--border: none;\n --chat--message--pre--background: rgba(0, 0, 0, 0.05);\n --chat--messages-list--padding: var(--chat--spacing);\n\n /* Toggle Button */\n --chat--toggle--size: 64px;\n --chat--toggle--width: var(--chat--toggle--size);\n --chat--toggle--height: var(--chat--toggle--size);\n --chat--toggle--border-radius: 50%;\n --chat--toggle--background: var(--chat--color-primary);\n --chat--toggle--hover--background: var(--chat--color-primary-shade-50);\n --chat--toggle--active--background: var(--chat--color-primary-shade-100);\n --chat--toggle--color: var(--chat--color-white);\n\n /* Input Area */\n --chat--textarea--height: 50px;\n --chat--textarea--max-height: 30rem;\n --chat--input--font-size: inherit;\n --chat--input--border: 0;\n --chat--input--border-radius: 0;\n --chat--input--padding: 0.8rem;\n --chat--input--background: var(--chat--color-white);\n --chat--input--text-color: initial;\n --chat--input--line-height: 1.5;\n --chat--input--placeholder--font-size: var(--chat--input--font-size);\n --chat--input--border-active: 0;\n 
--chat--input--left--panel--width: 2rem;\n\n /* Button Styles */\n --chat--button--color: var(--chat--color-light);\n --chat--button--background: var(--chat--color-primary);\n --chat--button--padding: calc(var(--chat--spacing) * 1 / 2) var(--chat--spacing);\n --chat--button--border-radius: var(--chat--border-radius);\n --chat--button--hover--color: var(--chat--color-light);\n --chat--button--hover--background: var(--chat--color-primary-shade-50);\n --chat--close--button--color-hover: var(--chat--color-primary);\n\n /* Send and File Buttons */\n --chat--input--send--button--background: var(--chat--color-white);\n --chat--input--send--button--color: var(--chat--color-secondary);\n --chat--input--send--button--background-hover: var(--chat--color-primary-shade-50);\n --chat--input--send--button--color-hover: var(--chat--color-secondary-shade-50);\n --chat--input--file--button--background: var(--chat--color-white);\n --chat--input--file--button--color: var(--chat--color-secondary);\n --chat--input--file--button--background-hover: var(--chat--input--file--button--background);\n --chat--input--file--button--color-hover: var(--chat--color-secondary-shade-50);\n --chat--files-spacing: 0.25rem;\n\n /* Body and Footer */\n --chat--body--background: var(--chat--color-light);\n --chat--footer--background: var(--chat--color-light);\n --chat--footer--color: var(--chat--color-dark);\n}\n\n\n/* You can override any class styles, too. Right-click inspect in Chat UI to find class to override. */\n.chat-message {\n\tmax-width: 50%;\n}","description":"Override default styling of the public chat interface with CSS"},{"displayName":"Response Mode","name":"responseMode","type":"options","options":[{"name":"Streaming","value":"streaming","description":"Streaming response from specified nodes (e.g. Agents)"}],"default":"streaming","description":"When and how to respond to the webhook","displayOptions":{"show":{"/availableInChat":[true]}}},{"displayName":"Response Mode","name":"responseMode","type":"options","options":[{"name":"When Last Node Finishes","value":"lastNode","description":"Returns data of the last-executed node"},{"name":"Streaming","value":"streaming","description":"Streaming response from specified nodes (e.g. Agents)"},{"name":"Using 'Respond to Webhook' Node","value":"responseNode","description":"Response defined in that node"}],"default":"lastNode","description":"When and how to respond to the chat","displayOptions":{"show":{"/mode":["webhook"],"/availableInChat":[false]}}},{"displayName":"Response Mode","name":"responseMode","type":"options","options":[{"name":"When Last Node Finishes","value":"lastNode","description":"Returns data of the last-executed node"},{"name":"Streaming","value":"streaming","description":"Streaming response from specified nodes (e.g. Agents)"},{"name":"Using Response Nodes","value":"responseNodes","description":"Send responses to the chat by using 'Respond to Chat' node"}],"default":"lastNode","description":"When and how to respond to the webhook","displayOptions":{"show":{"/mode":["hostedChat"],"/availableInChat":[false]}}}]}]},
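Everything in the Chat Trigger's hosted-chat option block above is gated on `/mode` being `hostedChat`, and the `allowedOrigins` option spells out its CORS contract: a comma-separated list of origins, with `*` (the default) allowing all of them. As an illustration of that documented behaviour only (the helper below is a hypothetical sketch, not code from this package), the check could look like:

    // Hypothetical sketch of the documented `allowedOrigins` semantics:
    // "*" (the default) allows every origin; otherwise the request origin
    // must match one of the comma-separated entries.
    function isOriginAllowed(allowedOrigins: string, requestOrigin: string): boolean {
      const entries = allowedOrigins.split(',').map((entry) => entry.trim());
      if (entries.includes('*')) return true;
      return entries.includes(requestOrigin);
    }

    // The default "*" accepts any origin; a restricted list rejects everything else.
    console.log(isOriginAllowed('*', 'https://example.com')); // true
    console.log(isOriginAllowed('https://app.example.com', 'https://example.com')); // false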
  {"usableAsTool":true,"displayName":"Respond to Chat","name":"chat","icon":"fa:comments","iconColor":"black","group":["input"],"version":1,"description":"Send a message to a chat","defaults":{"name":"Respond to Chat"},"codex":{"categories":["Core Nodes","HITL"],"subcategories":{"HITL":["Human in the Loop"]},"alias":["human","wait","hitl"],"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/core-nodes/n8n-nodes-langchain.respondtochat/"}]}},"inputs":"={{ ((parameters) => {\n const inputs = [\n {\n type: \"main\",\n displayName: \"User Response\"\n }\n ];\n if (parameters.options?.memoryConnection) {\n return [\n ...inputs,\n {\n type: \"ai_memory\",\n displayName: \"Memory\",\n maxConnections: 1\n }\n ];\n }\n return inputs;\n})($parameter) }}","outputs":["main"],"properties":[{"displayName":"Verify you're using a chat trigger with the 'Response Mode' option set to 'Using Response Nodes'","name":"generalNotice","type":"notice","default":""},{"displayName":"Message","name":"message","type":"string","default":"","required":true,"typeOptions":{"rows":6}},{"displayName":"Wait for User Reply","name":"waitUserReply","type":"boolean","default":true,"noDataExpression":true},{"displayName":"Options","name":"options","type":"collection","placeholder":"Add Option","default":{},"displayOptions":{"hide":{"@tool":[true]}},"options":[{"displayName":"Add Memory Input Connection","name":"memoryConnection","type":"boolean","default":false},{"displayName":"Limit Wait Time","name":"limitWaitTime","type":"fixedCollection","description":"Whether to limit the time this node should wait for a user response before execution resumes","default":{"values":{"limitType":"afterTimeInterval","resumeAmount":45,"resumeUnit":"minutes"}},"options":[{"displayName":"Values","name":"values","values":[{"displayName":"Limit Type","name":"limitType","type":"options","default":"afterTimeInterval","description":"Sets the condition for the execution to resume. 
Can be a specified date or after some time.","options":[{"name":"After Time Interval","description":"Waits for a certain amount of time","value":"afterTimeInterval"},{"name":"At Specified Time","description":"Waits until the set date and time to continue","value":"atSpecifiedTime"}]},{"displayName":"Amount","name":"resumeAmount","type":"number","displayOptions":{"show":{"limitType":["afterTimeInterval"]}},"typeOptions":{"minValue":0,"numberPrecision":2},"default":1,"description":"The time to wait"},{"displayName":"Unit","name":"resumeUnit","type":"options","displayOptions":{"show":{"limitType":["afterTimeInterval"]}},"options":[{"name":"Minutes","value":"minutes"},{"name":"Hours","value":"hours"},{"name":"Days","value":"days"}],"default":"hours","description":"Unit of the interval value"},{"displayName":"Max Date and Time","name":"maxDateAndTime","type":"dateTime","displayOptions":{"show":{"limitType":["atSpecifiedTime"]}},"default":"","description":"Continue execution after the specified date and time"}]}],"displayOptions":{"show":{"/waitUserReply":[true]}}}]},{"displayName":"Options","name":"options","type":"collection","placeholder":"Add Option","default":{},"options":[{"displayName":"Limit Wait Time","name":"limitWaitTime","type":"fixedCollection","description":"Whether to limit the time this node should wait for a user response before execution resumes","default":{"values":{"limitType":"afterTimeInterval","resumeAmount":45,"resumeUnit":"minutes"}},"options":[{"displayName":"Values","name":"values","values":[{"displayName":"Limit Type","name":"limitType","type":"options","default":"afterTimeInterval","description":"Sets the condition for the execution to resume. Can be a specified date or after some time.","options":[{"name":"After Time Interval","description":"Waits for a certain amount of time","value":"afterTimeInterval"},{"name":"At Specified Time","description":"Waits until the set date and time to continue","value":"atSpecifiedTime"}]},{"displayName":"Amount","name":"resumeAmount","type":"number","displayOptions":{"show":{"limitType":["afterTimeInterval"]}},"typeOptions":{"minValue":0,"numberPrecision":2},"default":1,"description":"The time to wait"},{"displayName":"Unit","name":"resumeUnit","type":"options","displayOptions":{"show":{"limitType":["afterTimeInterval"]}},"options":[{"name":"Minutes","value":"minutes"},{"name":"Hours","value":"hours"},{"name":"Days","value":"days"}],"default":"hours","description":"Unit of the interval value"},{"displayName":"Max Date and Time","name":"maxDateAndTime","type":"dateTime","displayOptions":{"show":{"limitType":["atSpecifiedTime"]}},"default":"","description":"Continue execution after the specified date and time"}]}],"displayOptions":{"show":{"/waitUserReply":[true]}}}],"displayOptions":{"show":{"@tool":[true],"/waitUserReply":[true]}}}]},
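The Respond to Chat node computes its input ports with an embedded expression: an immediately invoked function over `$parameter` that always exposes a main "User Response" input and appends a single `ai_memory` connection only when the "Add Memory Input Connection" option is enabled. Restated as standalone TypeScript (the types and sample parameters are illustrative, not part of the package):

    // Illustrative restatement of the Respond to Chat `inputs` expression.
    type NodeInput = { type: string; displayName: string; maxConnections?: number };

    const resolveInputs = (parameters: { options?: { memoryConnection?: boolean } }): NodeInput[] => {
      const inputs: NodeInput[] = [{ type: 'main', displayName: 'User Response' }];
      if (parameters.options?.memoryConnection) {
        return [...inputs, { type: 'ai_memory', displayName: 'Memory', maxConnections: 1 }];
      }
      return inputs;
    };

    // Without the option only the main input exists; enabling it adds a Memory port.
    console.log(resolveInputs({}).map((input) => input.type)); // ['main']
    console.log(resolveInputs({ options: { memoryConnection: true } }).map((input) => input.type)); // ['main', 'ai_memory']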
  {"displayName":"Azure AI Search Vector Store","name":"vectorStoreAzureAISearch","description":"Work with your data in Azure AI Search Vector Store","group":["transform"],"version":[1,1.1,1.2,1.3],"defaults":{"name":"Azure AI Search Vector Store"},"codex":{"categories":["AI"],"subcategories":{"AI":["Vector Stores","Tools","Root Nodes"],"Vector Stores":["Other Vector Stores"],"Tools":["Other Tools"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.vectorstoreazureaisearch/"}]}},"credentials":[{"name":"azureAiSearchApi","required":true}],"inputs":"={{\n\t\t\t((parameters) => {\n\t\t\t\tconst mode = parameters?.mode;\n\t\t\t\tconst useReranker = parameters?.useReranker;\n\t\t\t\tconst inputs = [{ displayName: \"Embedding\", type: \"ai_embedding\", required: true, maxConnections: 1}]\n\n\t\t\t\tif (['load', 'retrieve', 'retrieve-as-tool'].includes(mode) && useReranker) {\n\t\t\t\t\tinputs.push({ displayName: \"Reranker\", type: \"ai_reranker\", required: true, maxConnections: 1})\n\t\t\t\t}\n\n\t\t\t\tif (mode === 'retrieve-as-tool') {\n\t\t\t\t\treturn inputs;\n\t\t\t\t}\n\n\t\t\t\tif (['insert', 'load', 'update'].includes(mode)) {\n\t\t\t\t\tinputs.push({ displayName: \"\", type: \"main\"})\n\t\t\t\t}\n\n\t\t\t\tif (['insert'].includes(mode)) {\n\t\t\t\t\tinputs.push({ displayName: \"Document\", type: \"ai_document\", required: true, maxConnections: 1})\n\t\t\t\t}\n\t\t\t\treturn inputs\n\t\t\t})($parameter)\n\t\t}}","outputs":"={{\n\t\t\t((parameters) => {\n\t\t\t\tconst mode = parameters?.mode ?? 'retrieve';\n\n\t\t\t\tif (mode === 'retrieve-as-tool') {\n\t\t\t\t\treturn [{ displayName: \"Tool\", type: \"ai_tool\"}]\n\t\t\t\t}\n\n\t\t\t\tif (mode === 'retrieve') {\n\t\t\t\t\treturn [{ displayName: \"Vector Store\", type: \"ai_vectorStore\"}]\n\t\t\t\t}\n\t\t\t\treturn [{ displayName: \"\", type: \"main\"}]\n\t\t\t})($parameter)\n\t\t}}","properties":[{"displayName":"Tip: Get a feel for vector stores in n8n with our","name":"ragStarterCallout","type":"callout","typeOptions":{"calloutAction":{"label":"RAG starter template","type":"openSampleWorkflowTemplate","templateId":"rag-starter-template"}},"default":""},{"displayName":"Operation Mode","name":"mode","type":"options","noDataExpression":true,"default":"retrieve","options":[{"name":"Get Many","value":"load","description":"Get many ranked documents from vector store for query","action":"Get ranked documents from vector store"},{"name":"Insert Documents","value":"insert","description":"Insert documents into vector store","action":"Add documents to vector store"},{"name":"Retrieve Documents (As Vector Store for Chain/Tool)","value":"retrieve","description":"Retrieve documents from vector store to be used as vector store with AI nodes","action":"Retrieve documents for Chain/Tool as Vector Store","outputConnectionType":"ai_vectorStore"},{"name":"Retrieve Documents (As Tool for AI Agent)","value":"retrieve-as-tool","description":"Retrieve documents from vector store to be used as tool with AI nodes","action":"Retrieve documents for AI Agent as Tool","outputConnectionType":"ai_tool"},{"name":"Update Documents","value":"update","description":"Update documents in vector store by ID","action":"Update vector store documents"}]},{"displayName":"This node must be connected to a vector store retriever. 
<a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='ai_retriever'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"},"displayOptions":{"show":{"mode":["retrieve"]}}},{"displayName":"Name","name":"toolName","type":"string","default":"","required":true,"description":"Name of the vector store","placeholder":"e.g. company_knowledge_base","validateType":"string-alphanumeric","displayOptions":{"show":{"@version":[{"_cnd":{"lte":1.2}}],"mode":["retrieve-as-tool"]}}},{"displayName":"Description","name":"toolDescription","type":"string","default":"","required":true,"typeOptions":{"rows":2},"description":"Explain to the LLM what this tool does, a good, specific description would allow LLMs to produce expected results much more often","placeholder":"e.g. Work with your data in Azure AI Search Vector Store","displayOptions":{"show":{"mode":["retrieve-as-tool"]}}},{"displayName":"Index Name","name":"indexName","type":"string","default":"n8n-vectorstore","description":"The name of the Azure AI Search index. Will be created automatically if it does not exist.","required":true},{"displayName":"Embedding Batch Size","name":"embeddingBatchSize","type":"number","default":200,"description":"Number of documents to embed in a single batch","displayOptions":{"show":{"mode":["insert"],"@version":[{"_cnd":{"gte":1.1}}]}}},{"displayName":"Options","name":"options","type":"collection","placeholder":"Add Option","default":{},"options":[{"displayName":"Clear Index","name":"clearIndex","type":"boolean","default":false,"description":"Whether to delete and recreate the index before inserting new data. Warning: This will reset any custom index configuration (semantic ranking, analyzers, etc.) 
to defaults."}],"displayOptions":{"show":{"mode":["insert"]}}},{"displayName":"Prompt","name":"prompt","type":"string","default":"","required":true,"description":"Search prompt to retrieve matching documents from the vector store using similarity-based ranking","displayOptions":{"show":{"mode":["load"]}}},{"displayName":"Limit","name":"topK","type":"number","default":4,"description":"Number of top results to fetch from vector store","displayOptions":{"show":{"mode":["load","retrieve-as-tool"]}}},{"displayName":"Include Metadata","name":"includeDocumentMetadata","type":"boolean","default":true,"description":"Whether or not to include document metadata","displayOptions":{"show":{"mode":["load","retrieve-as-tool"]}}},{"displayName":"Rerank Results","name":"useReranker","type":"boolean","default":false,"description":"Whether or not to rerank results","displayOptions":{"show":{"mode":["load","retrieve","retrieve-as-tool"]}}},{"displayName":"ID","name":"id","type":"string","default":"","required":true,"description":"ID of an embedding entry","displayOptions":{"show":{"mode":["update"]}}},{"displayName":"Options","name":"options","type":"collection","placeholder":"Add Option","default":{},"options":[{"displayName":"Query Type","name":"queryType","type":"options","default":"hybrid","description":"The type of search query to perform","options":[{"name":"Vector","value":"vector","description":"Vector similarity search only"},{"name":"Hybrid","value":"hybrid","description":"Combines vector and keyword search (recommended)"},{"name":"Semantic Hybrid","value":"semanticHybrid","description":"Hybrid search with semantic ranking (requires Basic tier or higher)"}]},{"displayName":"Filter","name":"filter","type":"string","default":"","description":"Filter results using OData syntax. Use metadata/fieldName for metadata fields. <a href=\"https://learn.microsoft.com/en-us/azure/search/search-query-odata-filter\" target=\"_blank\">Learn more</a>.","placeholder":"metadata/category eq 'technology' and metadata/author eq 'John'"},{"displayName":"Semantic Configuration","name":"semanticConfiguration","type":"string","default":"","description":"Name of the semantic configuration for semantic ranking (optional)","displayOptions":{"show":{"queryType":["semanticHybrid"]}}}],"displayOptions":{"show":{"mode":["load","retrieve-as-tool"]}}},{"displayName":"Options","name":"options","type":"collection","placeholder":"Add Option","default":{},"options":[{"displayName":"Query Type","name":"queryType","type":"options","default":"hybrid","description":"The type of search query to perform","options":[{"name":"Vector","value":"vector","description":"Vector similarity search only"},{"name":"Hybrid","value":"hybrid","description":"Combines vector and keyword search (recommended)"},{"name":"Semantic Hybrid","value":"semanticHybrid","description":"Hybrid search with semantic ranking (requires Basic tier or higher)"}]},{"displayName":"Filter","name":"filter","type":"string","default":"","description":"Filter results using OData syntax. Use metadata/fieldName for metadata fields. 
<a href=\"https://learn.microsoft.com/en-us/azure/search/search-query-odata-filter\" target=\"_blank\">Learn more</a>.","placeholder":"metadata/category eq 'technology' and metadata/author eq 'John'"},{"displayName":"Semantic Configuration","name":"semanticConfiguration","type":"string","default":"","description":"Name of the semantic configuration for semantic ranking (optional)","displayOptions":{"show":{"queryType":["semanticHybrid"]}}}],"displayOptions":{"show":{"mode":["retrieve"]}}}],"iconUrl":{"light":"icons/@n8n/n8n-nodes-langchain/dist/nodes/vector_store/VectorStoreAzureAISearch/azure-aisearch.svg","dark":"icons/@n8n/n8n-nodes-langchain/dist/nodes/vector_store/VectorStoreAzureAISearch/azure-aisearch.svg"}},
  {"displayName":"Simple Vector Store","name":"vectorStoreInMemory","description":"The easiest way to experiment with vector stores, without external setup.","icon":"fa:database","iconColor":"black","group":["transform"],"version":[1,1.1,1.2,1.3],"defaults":{"name":"Simple Vector Store"},"codex":{"categories":["AI"],"subcategories":{"AI":["Vector Stores","Tools","Root Nodes"],"Vector Stores":["For Beginners"],"Tools":["Other Tools"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.vectorstoreinmemory/"}]}},"inputs":"={{\n\t\t\t((parameters) => {\n\t\t\t\tconst mode = parameters?.mode;\n\t\t\t\tconst useReranker = parameters?.useReranker;\n\t\t\t\tconst inputs = [{ displayName: \"Embedding\", type: \"ai_embedding\", required: true, maxConnections: 1}]\n\n\t\t\t\tif (['load', 'retrieve', 'retrieve-as-tool'].includes(mode) && useReranker) {\n\t\t\t\t\tinputs.push({ displayName: \"Reranker\", type: \"ai_reranker\", required: true, maxConnections: 1})\n\t\t\t\t}\n\n\t\t\t\tif (mode === 'retrieve-as-tool') {\n\t\t\t\t\treturn inputs;\n\t\t\t\t}\n\n\t\t\t\tif (['insert', 'load', 'update'].includes(mode)) {\n\t\t\t\t\tinputs.push({ displayName: \"\", type: \"main\"})\n\t\t\t\t}\n\n\t\t\t\tif (['insert'].includes(mode)) {\n\t\t\t\t\tinputs.push({ displayName: \"Document\", type: \"ai_document\", required: true, maxConnections: 1})\n\t\t\t\t}\n\t\t\t\treturn inputs\n\t\t\t})($parameter)\n\t\t}}","outputs":"={{\n\t\t\t((parameters) => {\n\t\t\t\tconst mode = parameters?.mode ?? 'retrieve';\n\n\t\t\t\tif (mode === 'retrieve-as-tool') {\n\t\t\t\t\treturn [{ displayName: \"Tool\", type: \"ai_tool\"}]\n\t\t\t\t}\n\n\t\t\t\tif (mode === 'retrieve') {\n\t\t\t\t\treturn [{ displayName: \"Vector Store\", type: \"ai_vectorStore\"}]\n\t\t\t\t}\n\t\t\t\treturn [{ displayName: \"\", type: \"main\"}]\n\t\t\t})($parameter)\n\t\t}}","properties":[{"displayName":"Tip: Get a feel for vector stores in n8n with our","name":"ragStarterCallout","type":"callout","typeOptions":{"calloutAction":{"label":"RAG starter template","type":"openSampleWorkflowTemplate","templateId":"rag-starter-template"}},"default":""},{"displayName":"Operation Mode","name":"mode","type":"options","noDataExpression":true,"default":"retrieve","options":[{"name":"Get Many","value":"load","description":"Get many ranked documents from vector store for query","action":"Get ranked documents from vector store"},{"name":"Insert Documents","value":"insert","description":"Insert documents into vector store","action":"Add documents to vector store"},{"name":"Retrieve Documents (As Vector Store for Chain/Tool)","value":"retrieve","description":"Retrieve documents from vector store to be used as vector store with AI nodes","action":"Retrieve documents for Chain/Tool as Vector Store","outputConnectionType":"ai_vectorStore"},{"name":"Retrieve Documents (As Tool for AI Agent)","value":"retrieve-as-tool","description":"Retrieve documents from vector store to be used as tool with AI nodes","action":"Retrieve documents for AI Agent as Tool","outputConnectionType":"ai_tool"}]},{"displayName":"This node must be connected to a vector store retriever. 
<a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='ai_retriever'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"},"displayOptions":{"show":{"mode":["retrieve"]}}},{"displayName":"Name","name":"toolName","type":"string","default":"","required":true,"description":"Name of the vector store","placeholder":"e.g. company_knowledge_base","validateType":"string-alphanumeric","displayOptions":{"show":{"@version":[{"_cnd":{"lte":1.2}}],"mode":["retrieve-as-tool"]}}},{"displayName":"Description","name":"toolDescription","type":"string","default":"","required":true,"typeOptions":{"rows":2},"description":"Explain to the LLM what this tool does, a good, specific description would allow LLMs to produce expected results much more often","placeholder":"e.g. The easiest way to experiment with vector stores, without external setup.","displayOptions":{"show":{"mode":["retrieve-as-tool"]}}},{"displayName":"Memory Key","name":"memoryKey","type":"string","default":"vector_store_key","description":"The key to use to store the vector memory in the workflow data. The key will be prefixed with the workflow ID to avoid collisions.","displayOptions":{"show":{"@version":[{"_cnd":{"lte":1.1}}]}}},{"displayName":"Memory Key","name":"memoryKey","type":"resourceLocator","required":true,"default":{"mode":"list","value":"vector_store_key"},"description":"The key to use to store the vector memory in the workflow data. These keys are shared between workflows.","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.2}}]}},"modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"vectorStoresSearch","searchable":true,"allowNewResource":{"label":"resourceLocator.mode.list.addNewResource.vectorStoreInMemory","defaultName":"vector_store_key","method":"createVectorStore"}}},{"displayName":"Manual","name":"id","type":"string","placeholder":"vector_store_key"}]},{"displayName":"Embedding Batch Size","name":"embeddingBatchSize","type":"number","default":200,"description":"Number of documents to embed in a single batch","displayOptions":{"show":{"mode":["insert"],"@version":[{"_cnd":{"gte":1.1}}]}}},{"displayName":"Clear Store","name":"clearStore","type":"boolean","default":false,"description":"Whether to clear the store before inserting new data","displayOptions":{"show":{"mode":["insert"]}}},{"displayName":"<strong>For experimental use only</strong>: Data is stored in memory and will be lost if n8n restarts. Data may also be cleared if available memory gets low, and is accessible to all users of this instance. 
<a href=\"https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.vectorstoreinmemory/\">More info</a>","name":"notice","type":"notice","default":"","displayOptions":{"show":{"mode":["insert"]}}},{"displayName":"Prompt","name":"prompt","type":"string","default":"","required":true,"description":"Search prompt to retrieve matching documents from the vector store using similarity-based ranking","displayOptions":{"show":{"mode":["load"]}}},{"displayName":"Limit","name":"topK","type":"number","default":4,"description":"Number of top results to fetch from vector store","displayOptions":{"show":{"mode":["load","retrieve-as-tool"]}}},{"displayName":"Include Metadata","name":"includeDocumentMetadata","type":"boolean","default":true,"description":"Whether or not to include document metadata","displayOptions":{"show":{"mode":["load","retrieve-as-tool"]}}},{"displayName":"Rerank Results","name":"useReranker","type":"boolean","default":false,"description":"Whether or not to rerank results","displayOptions":{"show":{"mode":["load","retrieve","retrieve-as-tool"]}}},{"displayName":"ID","name":"id","type":"string","default":"","required":true,"description":"ID of an embedding entry","displayOptions":{"show":{"mode":["update"]}}},{"displayName":"<strong>For experimental use only</strong>: Data is stored in memory and will be lost if n8n restarts. Data may also be cleared if available memory gets low, and is accessible to all users of this instance. <a href=\"https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.vectorstoreinmemory/\">More info</a>","name":"notice","type":"notice","default":"","displayOptions":{"show":{"mode":["load","retrieve-as-tool"]}}},{"displayName":"<strong>For experimental use only</strong>: Data is stored in memory and will be lost if n8n restarts. Data may also be cleared if available memory gets low, and is accessible to all users of this instance. <a href=\"https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.vectorstoreinmemory/\">More info</a>","name":"notice","type":"notice","default":"","displayOptions":{"show":{"mode":["retrieve"]}}}]},