@n8n/n8n-nodes-langchain 1.53.1 → 1.54.0

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as published.
Files changed (69)
  1. package/README.md +1 -3
  2. package/dist/build.tsbuildinfo +1 -1
  3. package/dist/nodes/agents/Agent/agents/OpenAiFunctionsAgent/execute.js +1 -1
  4. package/dist/nodes/agents/Agent/agents/OpenAiFunctionsAgent/execute.js.map +1 -1
  5. package/dist/nodes/agents/Agent/agents/ToolsAgent/execute.js +1 -1
  6. package/dist/nodes/agents/Agent/agents/ToolsAgent/execute.js.map +1 -1
  7. package/dist/nodes/agents/OpenAiAssistant/OpenAiAssistant.node.js +1 -1
  8. package/dist/nodes/agents/OpenAiAssistant/OpenAiAssistant.node.js.map +1 -1
  9. package/dist/nodes/llms/LMChatAnthropic/LmChatAnthropic.node.js +1 -1
  10. package/dist/nodes/llms/LMChatAnthropic/LmChatAnthropic.node.js.map +1 -1
  11. package/dist/nodes/llms/LMChatOllama/LmChatOllama.node.js +1 -1
  12. package/dist/nodes/llms/LMChatOllama/LmChatOllama.node.js.map +1 -1
  13. package/dist/nodes/llms/LMChatOpenAi/LmChatOpenAi.node.js +1 -1
  14. package/dist/nodes/llms/LMChatOpenAi/LmChatOpenAi.node.js.map +1 -1
  15. package/dist/nodes/llms/LMCohere/LmCohere.node.js +1 -1
  16. package/dist/nodes/llms/LMCohere/LmCohere.node.js.map +1 -1
  17. package/dist/nodes/llms/LMOllama/LmOllama.node.js +1 -1
  18. package/dist/nodes/llms/LMOllama/LmOllama.node.js.map +1 -1
  19. package/dist/nodes/llms/LMOpenAi/LmOpenAi.node.js +1 -1
  20. package/dist/nodes/llms/LMOpenAi/LmOpenAi.node.js.map +1 -1
  21. package/dist/nodes/llms/LMOpenHuggingFaceInference/LmOpenHuggingFaceInference.node.js +1 -1
  22. package/dist/nodes/llms/LMOpenHuggingFaceInference/LmOpenHuggingFaceInference.node.js.map +1 -1
  23. package/dist/nodes/llms/LmChatAwsBedrock/LmChatAwsBedrock.node.js +1 -1
  24. package/dist/nodes/llms/LmChatAwsBedrock/LmChatAwsBedrock.node.js.map +1 -1
  25. package/dist/nodes/llms/LmChatAzureOpenAi/LmChatAzureOpenAi.node.js +1 -1
  26. package/dist/nodes/llms/LmChatAzureOpenAi/LmChatAzureOpenAi.node.js.map +1 -1
  27. package/dist/nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.js +1 -1
  28. package/dist/nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.js.map +1 -1
  29. package/dist/nodes/llms/LmChatGooglePalm/LmChatGooglePalm.node.js +1 -1
  30. package/dist/nodes/llms/LmChatGooglePalm/LmChatGooglePalm.node.js.map +1 -1
  31. package/dist/nodes/llms/LmChatGoogleVertex/LmChatGoogleVertex.node.js +1 -1
  32. package/dist/nodes/llms/LmChatGoogleVertex/LmChatGoogleVertex.node.js.map +1 -1
  33. package/dist/nodes/llms/LmChatGroq/LmChatGroq.node.js +1 -1
  34. package/dist/nodes/llms/LmChatGroq/LmChatGroq.node.js.map +1 -1
  35. package/dist/nodes/llms/LmChatMistralCloud/LmChatMistralCloud.node.js +1 -1
  36. package/dist/nodes/llms/LmChatMistralCloud/LmChatMistralCloud.node.js.map +1 -1
  37. package/dist/nodes/llms/LmGooglePalm/LmGooglePalm.node.js +1 -1
  38. package/dist/nodes/llms/LmGooglePalm/LmGooglePalm.node.js.map +1 -1
  39. package/dist/nodes/memory/MemoryBufferWindow/MemoryBufferWindow.node.js +1 -7
  40. package/dist/nodes/memory/MemoryBufferWindow/MemoryBufferWindow.node.js.map +1 -1
  41. package/dist/nodes/memory/MemoryPostgresChat/MemoryPostgresChat.node.js +11 -2
  42. package/dist/nodes/memory/MemoryPostgresChat/MemoryPostgresChat.node.js.map +1 -1
  43. package/dist/nodes/memory/MemoryRedisChat/MemoryRedisChat.node.js +11 -2
  44. package/dist/nodes/memory/MemoryRedisChat/MemoryRedisChat.node.js.map +1 -1
  45. package/dist/nodes/memory/MemoryXata/MemoryXata.node.js +11 -2
  46. package/dist/nodes/memory/MemoryXata/MemoryXata.node.js.map +1 -1
  47. package/dist/nodes/memory/descriptions.d.ts +1 -0
  48. package/dist/nodes/memory/descriptions.js +8 -1
  49. package/dist/nodes/memory/descriptions.js.map +1 -1
  50. package/dist/nodes/tools/ToolCode/ToolCode.node.js +1 -1
  51. package/dist/nodes/tools/ToolCode/ToolCode.node.js.map +1 -1
  52. package/dist/nodes/tools/ToolHttpRequest/ToolHttpRequest.node.js +16 -3
  53. package/dist/nodes/tools/ToolHttpRequest/ToolHttpRequest.node.js.map +1 -1
  54. package/dist/nodes/tools/ToolHttpRequest/utils.d.ts +4 -2
  55. package/dist/nodes/tools/ToolHttpRequest/utils.js +48 -8
  56. package/dist/nodes/tools/ToolHttpRequest/utils.js.map +1 -1
  57. package/dist/nodes/vendors/OpenAi/actions/assistant/message.operation.js +1 -1
  58. package/dist/nodes/vendors/OpenAi/actions/assistant/message.operation.js.map +1 -1
  59. package/dist/nodes/vendors/OpenAi/actions/text/message.operation.js +1 -1
  60. package/dist/nodes/vendors/OpenAi/actions/text/message.operation.js.map +1 -1
  61. package/dist/types/nodes.json +20 -20
  62. package/dist/utils/N8nTool.d.ts +10 -0
  63. package/dist/utils/N8nTool.js +86 -0
  64. package/dist/utils/N8nTool.js.map +1 -0
  65. package/dist/utils/helpers.d.ts +2 -2
  66. package/dist/utils/helpers.js +11 -3
  67. package/dist/utils/helpers.js.map +1 -1
  68. package/dist/utils/logWrapper.d.ts +1 -1
  69. package/package.json +5 -5
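Judging from the file list alone, the headline change is the new `dist/utils/N8nTool.js` (+86 lines), with matching wiring edits across the tool, LLM, and memory nodes; the hunk reproduced below, however, only covers the regenerated `dist/types/nodes.json`. The utility's actual API is not visible in this excerpt, so the following TypeScript sketch is purely illustrative: it assumes a wrapper built on LangChain's public `DynamicStructuredTool`/`DynamicTool` classes, and every name in it (`get_weather`, the schema, the fallback conversion) is made up.

```typescript
import { DynamicStructuredTool, DynamicTool } from '@langchain/core/tools';
import { z } from 'zod';

// Illustrative only: the real dist/utils/N8nTool.js is not shown in this diff.
const structured = new DynamicStructuredTool({
  name: 'get_weather',
  description: 'Fetch the weather for a city',
  schema: z.object({ city: z.string().describe('City name') }),
  func: async ({ city }) => `Sunny in ${city}`,
});

// Hypothetical fallback for agents without structured tool calling: accept one
// JSON string, validate it against the same Zod schema, then delegate to the
// structured tool.
const fallback = new DynamicTool({
  name: structured.name,
  description: structured.description,
  func: async (input: string) =>
    await structured.invoke(structured.schema.parse(JSON.parse(input))),
});
```

The point of such a fallback, if that is indeed what the utility does, would be that agents limited to single-string tool input can still drive schema-defined tools.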
package/dist/types/nodes.json
@@ -23,28 +23,28 @@
  {"displayName":"Embeddings Mistral Cloud","name":"@n8n/n8n-nodes-langchain.embeddingsMistralCloud","credentials":[{"name":"mistralCloudApi","required":true}],"group":["transform"],"version":1,"description":"Use Embeddings Mistral Cloud","defaults":{"name":"Embeddings Mistral Cloud"},"codex":{"categories":["AI"],"subcategories":{"AI":["Embeddings"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.embeddingsmistralcloud/"}]}},"inputs":[],"outputs":["ai_embedding"],"outputNames":["Embeddings"],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"https://api.mistral.ai/v1"},"properties":[{"displayName":"This node must be connected to a vector store. <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='ai_vectorStore'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"model","type":"options","description":"The model which will compute the embeddings. <a href=\"https://docs.mistral.ai/platform/endpoints/\">Learn more</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/models"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"data"}},{"type":"filter","properties":{"pass":"={{ $responseItem.id.includes('embed') }}"}},{"type":"setKeyValue","properties":{"name":"={{ $responseItem.id }}","value":"={{ $responseItem.id }}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":"mistral-embed"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Batch Size","name":"batchSize","default":512,"typeOptions":{"maxValue":2048},"description":"Maximum number of documents to send in each request","type":"number"},{"displayName":"Strip New Lines","name":"stripNewLines","default":true,"description":"Whether to strip new lines from the input text","type":"boolean"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/embeddings/EmbeddingsMistralCloud/mistral.svg"},
  {"displayName":"Embeddings OpenAI","name":"@n8n/n8n-nodes-langchain.embeddingsOpenAi","credentials":[{"name":"openAiApi","required":true}],"group":["transform"],"version":1,"description":"Use Embeddings OpenAI","defaults":{"name":"Embeddings OpenAI"},"codex":{"categories":["AI"],"subcategories":{"AI":["Embeddings"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.embeddingsopenai/"}]}},"inputs":[],"outputs":["ai_embedding"],"outputNames":["Embeddings"],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $parameter.options?.baseURL?.split(\"/\").slice(0,-1).join(\"/\") || \"https://api.openai.com\" }}"},"properties":[{"displayName":"This node must be connected to a vector store. <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='ai_vectorStore'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"model","type":"options","description":"The model which will generate the embeddings. <a href=\"https://platform.openai.com/docs/models/overview\">Learn more</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"={{ $parameter.options?.baseURL?.split(\"/\").slice(-1).pop() || \"v1\" }}/models"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"data"}},{"type":"filter","properties":{"pass":"={{ $responseItem.id.includes('embed') }}"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.id}}","value":"={{$responseItem.id}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":"text-embedding-ada-002","displayOptions":{"show":{"@version":[1]}}},{"displayName":"Model","name":"model","type":"options","description":"The model which will generate the embeddings. <a href=\"https://platform.openai.com/docs/models/overview\">Learn more</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"={{ $parameter.options?.baseURL?.split(\"/\").slice(-1).pop() || \"v1\" }}/models"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"data"}},{"type":"filter","properties":{"pass":"={{ $responseItem.id.includes('embed') }}"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.id}}","value":"={{$responseItem.id}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":"text-embedding-3-small","displayOptions":{"hide":{"@version":[1]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Base URL","name":"baseURL","default":"https://api.openai.com/v1","description":"Override the default base URL for the API","type":"string"},{"displayName":"Batch Size","name":"batchSize","default":512,"typeOptions":{"maxValue":2048},"description":"Maximum number of documents to send in each request","type":"number"},{"displayName":"Strip New Lines","name":"stripNewLines","default":true,"description":"Whether to strip new lines from the input text","type":"boolean"},{"displayName":"Timeout","name":"timeout","default":-1,"description":"Maximum amount of time a request is allowed to take in seconds. 
Set to -1 for no timeout.","type":"number"}]}],"iconUrl":{"light":"icons/@n8n/n8n-nodes-langchain/dist/nodes/embeddings/EmbeddingsOpenAI/openAiLight.svg","dark":"icons/@n8n/n8n-nodes-langchain/dist/nodes/embeddings/EmbeddingsOpenAI/openAiLight.dark.svg"}},
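The OpenAI entries above (and the OpenAI chat/completion nodes later in this hunk) derive their request URLs from the optional Base URL by splitting on `/`. In plain TypeScript, with a made-up proxy host for illustration, those expressions evaluate like this:

```typescript
// Example Base URL a user might enter (hypothetical host):
const baseURL = 'https://my-proxy.example/v1';

// requestDefaults.baseURL — drop the last path segment:
const root = baseURL.split('/').slice(0, -1).join('/');
console.log(root); // https://my-proxy.example

// loadOptions request URL — keep only the last segment as the API version:
const version = baseURL.split('/').slice(-1).pop();
console.log(version); // v1

// Together they rebuild the models endpoint:
console.log(`${root}/${version}/models`); // https://my-proxy.example/v1/models
```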
  {"displayName":"Embeddings Ollama","name":"@n8n/n8n-nodes-langchain.embeddingsOllama","group":["transform"],"version":1,"description":"Use Ollama Embeddings","defaults":{"name":"Embeddings Ollama"},"credentials":[{"name":"ollamaApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $credentials.baseUrl.replace(new RegExp(\"/$\"), \"\") }}"},"codex":{"categories":["AI"],"subcategories":{"AI":["Embeddings"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.embeddingsollama/"}]}},"inputs":[],"outputs":["ai_embedding"],"outputNames":["Embeddings"],"properties":[{"displayName":"This node must be connected to a vector store. <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='ai_vectorStore'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"model","type":"options","default":"llama2","description":"The model which will generate the completion. To download models, visit <a href=\"https://ollama.ai/library\">Ollama Models Library</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/api/tags"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"models"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.name}}","value":"={{$responseItem.name}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"required":true}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/embeddings/EmbeddingsOllama/ollama.svg"},
- {"displayName":"Anthropic Chat Model","name":"@n8n/n8n-nodes-langchain.lmChatAnthropic","group":["transform"],"version":[1,1.1,1.2],"defaultVersion":1.2,"description":"Language Model Anthropic","defaults":{"name":"Anthropic Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatanthropic/"}]},"alias":["claude","sonnet","opus"]},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"anthropicApi","required":true}],"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"model","type":"options","options":[{"name":"Claude 3 Opus(20240229)","value":"claude-3-opus-20240229"},{"name":"Claude 3.5 Sonnet(20240620)","value":"claude-3-5-sonnet-20240620"},{"name":"Claude 3 Sonnet(20240229)","value":"claude-3-sonnet-20240229"},{"name":"Claude 3 Haiku(20240307)","value":"claude-3-haiku-20240307"},{"name":"LEGACY: Claude 2","value":"claude-2"},{"name":"LEGACY: Claude 2.1","value":"claude-2.1"},{"name":"LEGACY: Claude Instant 1.2","value":"claude-instant-1.2"},{"name":"LEGACY: Claude Instant 1","value":"claude-instant-1"}],"description":"The model which will generate the completion. <a href=\"https://docs.anthropic.com/claude/docs/models-overview\">Learn more</a>.","default":"claude-2","displayOptions":{"show":{"@version":[1]}}},{"displayName":"Model","name":"model","type":"options","options":[{"name":"Claude 3 Opus(20240229)","value":"claude-3-opus-20240229"},{"name":"Claude 3.5 Sonnet(20240620)","value":"claude-3-5-sonnet-20240620"},{"name":"Claude 3 Sonnet(20240229)","value":"claude-3-sonnet-20240229"},{"name":"Claude 3 Haiku(20240307)","value":"claude-3-haiku-20240307"},{"name":"LEGACY: Claude 2","value":"claude-2"},{"name":"LEGACY: Claude 2.1","value":"claude-2.1"},{"name":"LEGACY: Claude Instant 1.2","value":"claude-instant-1.2"},{"name":"LEGACY: Claude Instant 1","value":"claude-instant-1"}],"description":"The model which will generate the completion. <a href=\"https://docs.anthropic.com/claude/docs/models-overview\">Learn more</a>.","default":"claude-3-sonnet-20240229","displayOptions":{"show":{"@version":[1.1]}}},{"displayName":"Model","name":"model","type":"options","options":[{"name":"Claude 3 Opus(20240229)","value":"claude-3-opus-20240229"},{"name":"Claude 3.5 Sonnet(20240620)","value":"claude-3-5-sonnet-20240620"},{"name":"Claude 3 Sonnet(20240229)","value":"claude-3-sonnet-20240229"},{"name":"Claude 3 Haiku(20240307)","value":"claude-3-haiku-20240307"}],"description":"The model which will generate the completion. 
<a href=\"https://docs.anthropic.com/claude/docs/models-overview\">Learn more</a>.","default":"claude-3-5-sonnet-20240620","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.2}}]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Maximum Number of Tokens","name":"maxTokensToSample","default":4096,"description":"The maximum number of tokens to generate in the completion","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Top K","name":"topK","default":-1,"typeOptions":{"maxValue":1,"minValue":-1,"numberPrecision":1},"description":"Used to remove \"long tail\" low probability responses. Defaults to -1, which disables it.","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LMChatAnthropic/anthropic.svg"},
- {"displayName":"Azure OpenAI Chat Model","name":"@n8n/n8n-nodes-langchain.lmChatAzureOpenAi","group":["transform"],"version":1,"description":"For advanced usage with an AI chain","defaults":{"name":"Azure OpenAI Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatazureopenai/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"azureOpenAiApi","required":true}],"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model (Deployment) Name","name":"model","type":"string","description":"The name of the model(deployment) to use","default":""},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Frequency Penalty","name":"frequencyPenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim","type":"number"},{"displayName":"Maximum Number of Tokens","name":"maxTokens","default":-1,"description":"The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).","type":"number","typeOptions":{"maxValue":32768}},{"displayName":"Presence Penalty","name":"presencePenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Timeout","name":"timeout","default":60000,"description":"Maximum amount of time a request is allowed to take in milliseconds","type":"number"},{"displayName":"Max Retries","name":"maxRetries","default":2,"description":"Maximum number of retries to attempt","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatAzureOpenAi/azure.svg"},
- {"displayName":"Google PaLM Language Model","name":"@n8n/n8n-nodes-langchain.lmGooglePalm","hidden":true,"group":["transform"],"version":1,"description":"Language Model Google PaLM","defaults":{"name":"Google PaLM Language Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models"],"Language Models":["Text Completion Models"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmgooglepalm/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"googlePalmApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $credentials.host }}"},"properties":[{"displayName":"Google PaLM API is <a href='https://ai.google.dev/palm_docs/deprecation' target='_blank'>deprecated</a>. Please use Google Vertex or Google Gemini nodes instead.","name":"deprecated","type":"notice","default":""},{"displayName":"Model","name":"modelName","type":"options","description":"The model which will generate the completion. <a href=\"https://developers.generativeai.google/api/rest/generativelanguage/models/list\">Learn more</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/v1beta3/models"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"models"}},{"type":"filter","properties":{"pass":"={{ $responseItem.name.startsWith('models/text') }}"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.name}}","value":"={{$responseItem.name}}","description":"={{$responseItem.description}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":"models/text-bison-001"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Maximum Number of Tokens","name":"maxOutputTokens","default":1024,"description":"The maximum number of tokens to generate in the completion","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Top K","name":"topK","default":40,"typeOptions":{"maxValue":1,"minValue":-1,"numberPrecision":1},"description":"Used to remove \"long tail\" low probability responses. Defaults to -1, which disables it.","type":"number"},{"displayName":"Top P","name":"topP","default":0.9,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmGooglePalm/google.svg"},
- {"displayName":"AWS Bedrock Chat Model","name":"@n8n/n8n-nodes-langchain.lmChatAwsBedrock","group":["transform"],"version":1,"description":"Language Model AWS Bedrock","defaults":{"name":"AWS Bedrock Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatawsbedrock/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"aws","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"=https://bedrock.{{$credentials?.region ?? \"eu-central-1\"}}.amazonaws.com"},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"model","type":"options","description":"The model which will generate the completion. <a href=\"https://docs.aws.amazon.com/bedrock/latest/userguide/foundation-models.html\">Learn more</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/foundation-models?&byOutputModality=TEXT&byInferenceType=ON_DEMAND"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"modelSummaries"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.modelName}}","description":"={{$responseItem.modelArn}}","value":"={{$responseItem.modelId}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":""},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Maximum Number of Tokens","name":"maxTokensToSample","default":2000,"description":"The maximum number of tokens to generate in the completion","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatAwsBedrock/bedrock.svg"},
- {"displayName":"Google PaLM Chat Model","name":"@n8n/n8n-nodes-langchain.lmChatGooglePalm","hidden":true,"group":["transform"],"version":1,"description":"Chat Model Google PaLM","defaults":{"name":"Google PaLM Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatgooglepalm/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"googlePalmApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $credentials.host }}"},"properties":[{"displayName":"Google PaLM API is <a href='https://ai.google.dev/palm_docs/deprecation' target='_blank'>deprecated</a>. Please use Google Vertex or Google Gemini nodes instead.","name":"deprecated","type":"notice","default":""},{"displayName":"Model","name":"modelName","type":"options","description":"The model which will generate the completion. <a href=\"https://developers.generativeai.google/api/rest/generativelanguage/models/list\">Learn more</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/v1beta3/models"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"models"}},{"type":"filter","properties":{"pass":"={{ $responseItem.name.startsWith('models/chat') }}"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.name}}","value":"={{$responseItem.name}}","description":"={{$responseItem.description}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":"models/chat-bison-001"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Top K","name":"topK","default":40,"typeOptions":{"maxValue":1,"minValue":-1,"numberPrecision":1},"description":"Used to remove \"long tail\" low probability responses. Defaults to -1, which disables it.","type":"number"},{"displayName":"Top P","name":"topP","default":0.9,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatGooglePalm/google.svg"},
- {"displayName":"Google Gemini Chat Model","name":"@n8n/n8n-nodes-langchain.lmChatGoogleGemini","group":["transform"],"version":1,"description":"Chat Model Google Gemini","defaults":{"name":"Google Gemini Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatgooglegemini/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"googlePalmApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $credentials.host }}"},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"modelName","type":"options","description":"The model which will generate the completion. <a href=\"https://developers.generativeai.google/api/rest/generativelanguage/models/list\">Learn more</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/v1beta/models"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"models"}},{"type":"filter","properties":{"pass":"={{ !$responseItem.name.includes('embedding') }}"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.name}}","value":"={{$responseItem.name}}","description":"={{$responseItem.description}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":"models/gemini-1.0-pro"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Maximum Number of Tokens","name":"maxOutputTokens","default":2048,"description":"The maximum number of tokens to generate in the completion","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.4,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Top K","name":"topK","default":32,"typeOptions":{"maxValue":40,"minValue":-1,"numberPrecision":1},"description":"Used to remove \"long tail\" low probability responses. Defaults to -1, which disables it.","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. 
We generally recommend altering this or temperature but not both.","type":"number"},{"displayName":"Safety Settings","name":"safetySettings","type":"fixedCollection","typeOptions":{"multipleValues":true},"default":{"values":{"category":"HARM_CATEGORY_HARASSMENT","threshold":"HARM_BLOCK_THRESHOLD_UNSPECIFIED"}},"placeholder":"Add Option","options":[{"name":"values","displayName":"Values","values":[{"displayName":"Safety Category","name":"category","type":"options","description":"The category of harmful content to block","default":"HARM_CATEGORY_UNSPECIFIED","options":[{"value":"HARM_CATEGORY_HARASSMENT","name":"HARM_CATEGORY_HARASSMENT","description":"Harassment content"},{"value":"HARM_CATEGORY_HATE_SPEECH","name":"HARM_CATEGORY_HATE_SPEECH","description":"Hate speech and content"},{"value":"HARM_CATEGORY_SEXUALLY_EXPLICIT","name":"HARM_CATEGORY_SEXUALLY_EXPLICIT","description":"Sexually explicit content"},{"value":"HARM_CATEGORY_DANGEROUS_CONTENT","name":"HARM_CATEGORY_DANGEROUS_CONTENT","description":"Dangerous content"}]},{"displayName":"Safety Threshold","name":"threshold","type":"options","description":"The threshold of harmful content to block","default":"HARM_BLOCK_THRESHOLD_UNSPECIFIED","options":[{"value":"HARM_BLOCK_THRESHOLD_UNSPECIFIED","name":"HARM_BLOCK_THRESHOLD_UNSPECIFIED","description":"Threshold is unspecified"},{"value":"BLOCK_LOW_AND_ABOVE","name":"BLOCK_LOW_AND_ABOVE","description":"Content with NEGLIGIBLE will be allowed"},{"value":"BLOCK_MEDIUM_AND_ABOVE","name":"BLOCK_MEDIUM_AND_ABOVE","description":"Content with NEGLIGIBLE and LOW will be allowed"},{"value":"BLOCK_ONLY_HIGH","name":"BLOCK_ONLY_HIGH","description":"Content with NEGLIGIBLE, LOW, and MEDIUM will be allowed"},{"value":"BLOCK_NONE","name":"BLOCK_NONE","description":"All content will be allowed"}]}]}]}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatGoogleGemini/google.svg"},
- {"displayName":"Google Vertex Chat Model","name":"@n8n/n8n-nodes-langchain.lmChatGoogleVertex","group":["transform"],"version":1,"description":"Chat Model Google Vertex","defaults":{"name":"Google Vertex Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatgooglevertex/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"googleApi","required":true}],"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Project ID","name":"projectId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"description":"Select or enter your Google Cloud project ID","modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"gcpProjectsList"}},{"displayName":"ID","name":"id","type":"string"}]},{"displayName":"Model Name","name":"modelName","type":"string","description":"The model which will generate the completion. <a href=\"https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models\">Learn more</a>.","default":"gemini-1.5-flash"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Maximum Number of Tokens","name":"maxOutputTokens","default":2048,"description":"The maximum number of tokens to generate in the completion","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.4,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Top K","name":"topK","default":32,"typeOptions":{"maxValue":40,"minValue":-1,"numberPrecision":1},"description":"Used to remove \"long tail\" low probability responses. Defaults to -1, which disables it.","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. 
We generally recommend altering this or temperature but not both.","type":"number"},{"displayName":"Safety Settings","name":"safetySettings","type":"fixedCollection","typeOptions":{"multipleValues":true},"default":{"values":{"category":"HARM_CATEGORY_HARASSMENT","threshold":"HARM_BLOCK_THRESHOLD_UNSPECIFIED"}},"placeholder":"Add Option","options":[{"name":"values","displayName":"Values","values":[{"displayName":"Safety Category","name":"category","type":"options","description":"The category of harmful content to block","default":"HARM_CATEGORY_UNSPECIFIED","options":[{"value":"HARM_CATEGORY_HARASSMENT","name":"HARM_CATEGORY_HARASSMENT","description":"Harassment content"},{"value":"HARM_CATEGORY_HATE_SPEECH","name":"HARM_CATEGORY_HATE_SPEECH","description":"Hate speech and content"},{"value":"HARM_CATEGORY_SEXUALLY_EXPLICIT","name":"HARM_CATEGORY_SEXUALLY_EXPLICIT","description":"Sexually explicit content"},{"value":"HARM_CATEGORY_DANGEROUS_CONTENT","name":"HARM_CATEGORY_DANGEROUS_CONTENT","description":"Dangerous content"}]},{"displayName":"Safety Threshold","name":"threshold","type":"options","description":"The threshold of harmful content to block","default":"HARM_BLOCK_THRESHOLD_UNSPECIFIED","options":[{"value":"HARM_BLOCK_THRESHOLD_UNSPECIFIED","name":"HARM_BLOCK_THRESHOLD_UNSPECIFIED","description":"Threshold is unspecified"},{"value":"BLOCK_LOW_AND_ABOVE","name":"BLOCK_LOW_AND_ABOVE","description":"Content with NEGLIGIBLE will be allowed"},{"value":"BLOCK_MEDIUM_AND_ABOVE","name":"BLOCK_MEDIUM_AND_ABOVE","description":"Content with NEGLIGIBLE and LOW will be allowed"},{"value":"BLOCK_ONLY_HIGH","name":"BLOCK_ONLY_HIGH","description":"Content with NEGLIGIBLE, LOW, and MEDIUM will be allowed"},{"value":"BLOCK_NONE","name":"BLOCK_NONE","description":"All content will be allowed"}]}]}]}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatGoogleVertex/google.svg"},
- {"displayName":"Groq Chat Model","name":"@n8n/n8n-nodes-langchain.lmChatGroq","group":["transform"],"version":1,"description":"Language Model Groq","defaults":{"name":"Groq Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatgroq/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"groqApi","required":true}],"requestDefaults":{"baseURL":"https://api.groq.com/openai/v1"},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"model","type":"options","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/models"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"data"}},{"type":"filter","properties":{"pass":"={{ $responseItem.active === true && $responseItem.object === \"model\" }}"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.id}}","value":"={{$responseItem.id}}"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"description":"The model which will generate the completion. <a href=\"https://console.groq.com/docs/models\">Learn more</a>.","default":"llama3-8b-8192"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Maximum Number of Tokens","name":"maxTokensToSample","default":4096,"description":"The maximum number of tokens to generate in the completion","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatGroq/groq.svg"},
- {"displayName":"Mistral Cloud Chat Model","name":"@n8n/n8n-nodes-langchain.lmChatMistralCloud","group":["transform"],"version":1,"description":"For advanced usage with an AI chain","defaults":{"name":"Mistral Cloud Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatmistralcloud/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"mistralCloudApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"https://api.mistral.ai/v1"},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"model","type":"options","description":"The model which will generate the completion. <a href=\"https://docs.mistral.ai/platform/endpoints/\">Learn more</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/models"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"data"}},{"type":"filter","properties":{"pass":"={{ !$responseItem.id.includes('embed') }}"}},{"type":"setKeyValue","properties":{"name":"={{ $responseItem.id }}","value":"={{ $responseItem.id }}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":"mistral-small"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Maximum Number of Tokens","name":"maxTokens","default":-1,"description":"The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).","type":"number","typeOptions":{"maxValue":32768}},{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Max Retries","name":"maxRetries","default":2,"description":"Maximum number of retries to attempt","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"},{"displayName":"Enable Safe Mode","name":"safeMode","default":false,"type":"boolean","description":"Whether to inject a safety prompt before all conversations"},{"displayName":"Random Seed","name":"randomSeed","type":"number","description":"The seed to use for random sampling. If set, different calls will generate deterministic results."}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatMistralCloud/mistral.svg"},
- {"displayName":"Ollama Chat Model","name":"@n8n/n8n-nodes-langchain.lmChatOllama","group":["transform"],"version":1,"description":"Language Model Ollama","defaults":{"name":"Ollama Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatollama/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"ollamaApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $credentials.baseUrl.replace(new RegExp(\"/$\"), \"\") }}"},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"model","type":"options","default":"llama2","description":"The model which will generate the completion. To download models, visit <a href=\"https://ollama.ai/library\">Ollama Models Library</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/api/tags"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"models"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.name}}","value":"={{$responseItem.name}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"required":true},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls the randomness of the generated text. Lower values make the output more focused and deterministic, while higher values make it more diverse and random.","type":"number"},{"displayName":"Top K","name":"topK","default":-1,"typeOptions":{"maxValue":100,"minValue":-1,"numberPrecision":1},"description":"Limits the number of highest probability vocabulary tokens to consider at each step. A higher value increases diversity but may reduce coherence. Set to -1 to disable.","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Chooses from the smallest possible set of tokens whose cumulative probability exceeds the probability top_p. Helps generate more human-like text by reducing repetitions.","type":"number"},{"displayName":"Frequency Penalty","name":"frequencyPenalty","type":"number","default":0,"typeOptions":{"minValue":0},"description":"Adjusts the penalty for tokens that have already appeared in the generated text. Higher values discourage repetition."},{"displayName":"Keep Alive","name":"keepAlive","type":"string","default":"5m","description":"Specifies the duration to keep the loaded model in memory after use. Useful for frequently used models. Format: 1h30m (1 hour 30 minutes)."},{"displayName":"Low VRAM Mode","name":"lowVram","type":"boolean","default":false,"description":"Whether to Activate low VRAM mode, which reduces memory usage at the cost of slower generation speed. 
Useful for GPUs with limited memory."},{"displayName":"Main GPU ID","name":"mainGpu","type":"number","default":0,"description":"Specifies the ID of the GPU to use for the main computation. Only change this if you have multiple GPUs."},{"displayName":"Context Batch Size","name":"numBatch","type":"number","default":512,"description":"Sets the batch size for prompt processing. Larger batch sizes may improve generation speed but increase memory usage."},{"displayName":"Context Length","name":"numCtx","type":"number","default":2048,"description":"The maximum number of tokens to use as context for generating the next token. Smaller values reduce memory usage, while larger values provide more context to the model."},{"displayName":"Number of GPUs","name":"numGpu","type":"number","default":-1,"description":"Specifies the number of GPUs to use for parallel processing. Set to -1 for auto-detection."},{"displayName":"Max Tokens to Generate","name":"numPredict","type":"number","default":-1,"description":"The maximum number of tokens to generate. Set to -1 for no limit. Be cautious when setting this to a large value, as it can lead to very long outputs."},{"displayName":"Number of CPU Threads","name":"numThread","type":"number","default":0,"description":"Specifies the number of CPU threads to use for processing. Set to 0 for auto-detection."},{"displayName":"Penalize Newlines","name":"penalizeNewline","type":"boolean","default":true,"description":"Whether the model will be less likely to generate newline characters, encouraging longer continuous sequences of text"},{"displayName":"Presence Penalty","name":"presencePenalty","type":"number","default":0,"description":"Adjusts the penalty for tokens based on their presence in the generated text so far. Positive values penalize tokens that have already appeared, encouraging diversity."},{"displayName":"Repetition Penalty","name":"repeatPenalty","type":"number","default":1,"description":"Adjusts the penalty factor for repeated tokens. Higher values more strongly discourage repetition. Set to 1.0 to disable repetition penalty."},{"displayName":"Use Memory Locking","name":"useMLock","type":"boolean","default":false,"description":"Whether to lock the model in memory to prevent swapping. This can improve performance but requires sufficient available memory."},{"displayName":"Use Memory Mapping","name":"useMMap","type":"boolean","default":true,"description":"Whether to use memory mapping for loading the model. This can reduce memory usage but may impact performance. Recommended to keep enabled."},{"displayName":"Load Vocabulary Only","name":"vocabOnly","type":"boolean","default":false,"description":"Whether to only load the model vocabulary without the weights. Useful for quickly testing tokenization."},{"displayName":"Output Format","name":"format","type":"options","options":[{"name":"Default","value":"default"},{"name":"JSON","value":"json"}],"default":"default","description":"Specifies the format of the API response"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LMChatOllama/ollama.svg"},
- {"displayName":"OpenAI Chat Model","name":"@n8n/n8n-nodes-langchain.lmChatOpenAi","group":["transform"],"version":1,"description":"For advanced usage with an AI chain","defaults":{"name":"OpenAI Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatopenai/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"openAiApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $parameter.options?.baseURL?.split(\"/\").slice(0,-1).join(\"/\") || \"https://api.openai.com\" }}"},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"If using JSON response format, you must include word \"json\" in the prompt in your chain or agent. Also, make sure to select latest models released post November 2023.","name":"notice","type":"notice","default":"","displayOptions":{"show":{"/options.responseFormat":["json_object"]}}},{"displayName":"Model","name":"model","type":"options","description":"The model which will generate the completion. <a href=\"https://beta.openai.com/docs/models/overview\">Learn more</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"={{ $parameter.options?.baseURL?.split(\"/\").slice(-1).pop() || \"v1\" }}/models"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"data"}},{"type":"filter","properties":{"pass":"={{\n\t\t\t\t\t\t\t\t\t\t\t\t($parameter.options?.baseURL && !$parameter.options?.baseURL?.includes('api.openai.com')) ||\n\t\t\t\t\t\t\t\t\t\t\t\t($responseItem.id.startsWith('gpt-') && !$responseItem.id.includes('instruct'))\n\t\t\t\t\t\t\t\t\t\t\t}}"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.id}}","value":"={{$responseItem.id}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":"gpt-3.5-turbo"},{"displayName":"When using non-OpenAI models via \"Base URL\" override, not all models might be chat-compatible or support other features, like tools calling or JSON response format","name":"notice","type":"notice","default":"","displayOptions":{"show":{"/options.baseURL":[{"_cnd":{"exists":true}}]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Base URL","name":"baseURL","default":"https://api.openai.com/v1","description":"Override the default base URL for the API","type":"string"},{"displayName":"Frequency Penalty","name":"frequencyPenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim","type":"number"},{"displayName":"Maximum Number of Tokens","name":"maxTokens","default":-1,"description":"The maximum number of tokens to generate in the completion. 
Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).","type":"number","typeOptions":{"maxValue":32768}},{"displayName":"Response Format","name":"responseFormat","default":"text","type":"options","options":[{"name":"Text","value":"text","description":"Regular text response"},{"name":"JSON","value":"json_object","description":"Enables JSON mode, which should guarantee the message the model generates is valid JSON"}]},{"displayName":"Presence Penalty","name":"presencePenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Timeout","name":"timeout","default":60000,"description":"Maximum amount of time a request is allowed to take in milliseconds","type":"number"},{"displayName":"Max Retries","name":"maxRetries","default":2,"description":"Maximum number of retries to attempt","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"}]}],"iconUrl":{"light":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LMChatOpenAi/openAiLight.svg","dark":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LMChatOpenAi/openAiLight.dark.svg"}},
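The model dropdown above filters the `/models` response with the `pass` expression shown: anything goes when a custom Base URL points away from api.openai.com, otherwise only chat-capable `gpt-*` models survive. The same logic as a plain predicate (the example model IDs and localhost URL are illustrative):

```typescript
// Mirrors the postReceive filter expression in the entry above.
function passesModelFilter(id: string, baseURL?: string): boolean {
  const customHost = baseURL !== undefined && !baseURL.includes('api.openai.com');
  return customHost || (id.startsWith('gpt-') && !id.includes('instruct'));
}

console.log(passesModelFilter('gpt-4o'));                 // true
console.log(passesModelFilter('gpt-3.5-turbo-instruct')); // false
console.log(passesModelFilter('my-local-model', 'http://localhost:1234/v1')); // true
```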
- {"displayName":"OpenAI Model","name":"@n8n/n8n-nodes-langchain.lmOpenAi","hidden":true,"group":["transform"],"version":1,"description":"For advanced usage with an AI chain","defaults":{"name":"OpenAI Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models"],"Language Models":["Text Completion Models"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmopenai/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"openAiApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $parameter.options?.baseURL?.split(\"/\").slice(0,-1).join(\"/\") || \"https://api.openai.com\" }}"},"properties":[{"displayName":"This node is using OpenAI completions which are now deprecated. Please use the OpenAI Chat Model node instead.","name":"deprecated","type":"notice","default":""},{"displayName":"Model","name":"model","type":"resourceLocator","default":{"mode":"list","value":"gpt-3.5-turbo-instruct"},"required":true,"description":"The model which will generate the completion. <a href=\"https://beta.openai.com/docs/models/overview\">Learn more</a>.","modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"openAiModelSearch"}},{"displayName":"ID","name":"id","type":"string"}],"routing":{"send":{"type":"body","property":"model","value":"={{$parameter.model.value}}"}}},{"displayName":"When using non OpenAI models via Base URL override, not all models might be chat-compatible or support other features, like tools calling or JSON response format.","name":"notice","type":"notice","default":"","displayOptions":{"show":{"/options.baseURL":[{"_cnd":{"exists":true}}]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Base URL","name":"baseURL","default":"https://api.openai.com/v1","description":"Override the default base URL for the API","type":"string"},{"displayName":"Frequency Penalty","name":"frequencyPenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim","type":"number"},{"displayName":"Maximum Number of Tokens","name":"maxTokens","default":-1,"description":"The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).","type":"number","typeOptions":{"maxValue":32768}},{"displayName":"Presence Penalty","name":"presencePenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. 
As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Timeout","name":"timeout","default":60000,"description":"Maximum amount of time a request is allowed to take in milliseconds","type":"number"},{"displayName":"Max Retries","name":"maxRetries","default":2,"description":"Maximum number of retries to attempt","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"}]}],"iconUrl":{"light":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LMOpenAi/openAiLight.svg","dark":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LMOpenAi/openAiLight.dark.svg"}},
- {"displayName":"Cohere Model","name":"@n8n/n8n-nodes-langchain.lmCohere","group":["transform"],"version":1,"description":"Language Model Cohere","defaults":{"name":"Cohere Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models"],"Language Models":["Text Completion Models"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmcohere/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"cohereApi","required":true}],"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Maximum Number of Tokens","name":"maxTokens","default":250,"description":"The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).","type":"number","typeOptions":{"maxValue":32768}},{"displayName":"Model","name":"model","type":"string","description":"The name of the model to use","default":""},{"displayName":"Sampling Temperature","name":"temperature","default":0,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"}]}],"iconUrl":{"light":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LMCohere/cohere.svg","dark":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LMCohere/cohere.dark.svg"}},
- {"displayName":"Ollama Model","name":"@n8n/n8n-nodes-langchain.lmOllama","group":["transform"],"version":1,"description":"Language Model Ollama","defaults":{"name":"Ollama Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models"],"Language Models":["Text Completion Models"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmollama/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"ollamaApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $credentials.baseUrl.replace(new RegExp(\"/$\"), \"\") }}"},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"model","type":"options","default":"llama2","description":"The model which will generate the completion. To download models, visit <a href=\"https://ollama.ai/library\">Ollama Models Library</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/api/tags"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"models"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.name}}","value":"={{$responseItem.name}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"required":true},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls the randomness of the generated text. Lower values make the output more focused and deterministic, while higher values make it more diverse and random.","type":"number"},{"displayName":"Top K","name":"topK","default":-1,"typeOptions":{"maxValue":100,"minValue":-1,"numberPrecision":1},"description":"Limits the number of highest probability vocabulary tokens to consider at each step. A higher value increases diversity but may reduce coherence. Set to -1 to disable.","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Chooses from the smallest possible set of tokens whose cumulative probability exceeds the probability top_p. Helps generate more human-like text by reducing repetitions.","type":"number"},{"displayName":"Frequency Penalty","name":"frequencyPenalty","type":"number","default":0,"typeOptions":{"minValue":0},"description":"Adjusts the penalty for tokens that have already appeared in the generated text. Higher values discourage repetition."},{"displayName":"Keep Alive","name":"keepAlive","type":"string","default":"5m","description":"Specifies the duration to keep the loaded model in memory after use. Useful for frequently used models. Format: 1h30m (1 hour 30 minutes)."},{"displayName":"Low VRAM Mode","name":"lowVram","type":"boolean","default":false,"description":"Whether to Activate low VRAM mode, which reduces memory usage at the cost of slower generation speed. 
Useful for GPUs with limited memory."},{"displayName":"Main GPU ID","name":"mainGpu","type":"number","default":0,"description":"Specifies the ID of the GPU to use for the main computation. Only change this if you have multiple GPUs."},{"displayName":"Context Batch Size","name":"numBatch","type":"number","default":512,"description":"Sets the batch size for prompt processing. Larger batch sizes may improve generation speed but increase memory usage."},{"displayName":"Context Length","name":"numCtx","type":"number","default":2048,"description":"The maximum number of tokens to use as context for generating the next token. Smaller values reduce memory usage, while larger values provide more context to the model."},{"displayName":"Number of GPUs","name":"numGpu","type":"number","default":-1,"description":"Specifies the number of GPUs to use for parallel processing. Set to -1 for auto-detection."},{"displayName":"Max Tokens to Generate","name":"numPredict","type":"number","default":-1,"description":"The maximum number of tokens to generate. Set to -1 for no limit. Be cautious when setting this to a large value, as it can lead to very long outputs."},{"displayName":"Number of CPU Threads","name":"numThread","type":"number","default":0,"description":"Specifies the number of CPU threads to use for processing. Set to 0 for auto-detection."},{"displayName":"Penalize Newlines","name":"penalizeNewline","type":"boolean","default":true,"description":"Whether the model will be less likely to generate newline characters, encouraging longer continuous sequences of text"},{"displayName":"Presence Penalty","name":"presencePenalty","type":"number","default":0,"description":"Adjusts the penalty for tokens based on their presence in the generated text so far. Positive values penalize tokens that have already appeared, encouraging diversity."},{"displayName":"Repetition Penalty","name":"repeatPenalty","type":"number","default":1,"description":"Adjusts the penalty factor for repeated tokens. Higher values more strongly discourage repetition. Set to 1.0 to disable repetition penalty."},{"displayName":"Use Memory Locking","name":"useMLock","type":"boolean","default":false,"description":"Whether to lock the model in memory to prevent swapping. This can improve performance but requires sufficient available memory."},{"displayName":"Use Memory Mapping","name":"useMMap","type":"boolean","default":true,"description":"Whether to use memory mapping for loading the model. This can reduce memory usage but may impact performance. Recommended to keep enabled."},{"displayName":"Load Vocabulary Only","name":"vocabOnly","type":"boolean","default":false,"description":"Whether to only load the model vocabulary without the weights. Useful for quickly testing tokenization."},{"displayName":"Output Format","name":"format","type":"options","options":[{"name":"Default","value":"default"},{"name":"JSON","value":"json"}],"default":"default","description":"Specifies the format of the API response"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LMOllama/ollama.svg"},
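The Ollama entries in this hunk (the text-completion node above and the Ollama Chat Model added below) fill their model dropdown through the same declarative `loadOptions` pipeline: GET `/api/tags`, unwrap the `models` root property, map each item to a `{ name, value }` pair via `setKeyValue`, then `sort` by name. A minimal TypeScript sketch of that post-processing, assuming the documented shape of Ollama's `/api/tags` response (`toDropdownOptions` is an illustrative name, not part of the package):

```typescript
// Sketch of the declarative postReceive pipeline in the Ollama "model" property:
// rootProperty "models" -> setKeyValue { name, value } -> sort by "name".
interface OllamaTagsResponse {
  models: Array<{ name: string }>; // shape assumed from Ollama's /api/tags endpoint
}

function toDropdownOptions(res: OllamaTagsResponse): Array<{ name: string; value: string }> {
  return res.models
    .map((m) => ({ name: m.name, value: m.name })) // the "setKeyValue" step
    .sort((a, b) => a.name.localeCompare(b.name)); // the "sort" step
}

// Example:
// toDropdownOptions({ models: [{ name: 'mistral' }, { name: 'llama2' }] })
// -> [ { name: 'llama2', value: 'llama2' }, { name: 'mistral', value: 'mistral' } ]
```

Expressing the pipeline as data (`rootProperty`, `setKeyValue`, `sort` steps) presumably lets the host run it generically, without node-specific loading code.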
- {"displayName":"Hugging Face Inference Model","name":"@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference","group":["transform"],"version":1,"description":"Language Model HuggingFaceInference","defaults":{"name":"Hugging Face Inference Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models"],"Language Models":["Text Completion Models"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmopenhuggingfaceinference/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"huggingFaceApi","required":true}],"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"model","type":"string","default":"gpt2"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Custom Inference Endpoint","name":"endpointUrl","default":"","description":"Custom endpoint URL","type":"string"},{"displayName":"Frequency Penalty","name":"frequencyPenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim","type":"number"},{"displayName":"Maximum Number of Tokens","name":"maxTokens","default":128,"description":"The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).","type":"number","typeOptions":{"maxValue":32768}},{"displayName":"Presence Penalty","name":"presencePenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Top K","name":"topK","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls the top tokens to consider within the sample operation to create new text","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LMOpenHuggingFaceInference/huggingface.svg"},
- {"displayName":"Window Buffer Memory (easiest)","name":"@n8n/n8n-nodes-langchain.memoryBufferWindow","icon":"fa:database","group":["transform"],"version":[1,1.1,1.2],"description":"Stores in n8n memory, so no credentials required","defaults":{"name":"Window Buffer Memory"},"codex":{"categories":["AI"],"subcategories":{"AI":["Memory"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.memorybufferwindow/"}]}},"inputs":[],"outputs":["ai_memory"],"outputNames":["Memory"],"properties":[{"displayName":"This node must be connected to an AI agent. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Session Key","name":"sessionKey","type":"string","default":"chat_history","description":"The key to use to store the memory in the workflow data","displayOptions":{"show":{"@version":[1]}}},{"displayName":"Session ID","name":"sessionKey","type":"string","default":"={{ $json.sessionId }}","description":"The key to use to store the memory","displayOptions":{"show":{"@version":[1.1]}}},{"displayName":"Session ID","name":"sessionIdType","type":"options","options":[{"name":"Take from previous node automatically","value":"fromInput","description":"Looks for an input field called sessionId"},{"name":"Define below","value":"customKey","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"fromInput","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.2}}]}}},{"displayName":"Key","name":"sessionKey","type":"string","default":"","description":"The key to use to store session ID in the memory","displayOptions":{"show":{"sessionIdType":["customKey"]}}},{"displayName":"Context Window Length","name":"contextWindowLength","type":"number","default":5,"description":"The number of previous messages to consider for context"}]},
+ {"displayName":"Anthropic Chat Model","name":"@n8n/n8n-nodes-langchain.lmChatAnthropic","group":["transform"],"version":[1,1.1,1.2],"defaultVersion":1.2,"description":"Language Model Anthropic","defaults":{"name":"Anthropic Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatanthropic/"}]},"alias":["claude","sonnet","opus"]},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"anthropicApi","required":true}],"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"model","type":"options","options":[{"name":"Claude 3 Opus(20240229)","value":"claude-3-opus-20240229"},{"name":"Claude 3.5 Sonnet(20240620)","value":"claude-3-5-sonnet-20240620"},{"name":"Claude 3 Sonnet(20240229)","value":"claude-3-sonnet-20240229"},{"name":"Claude 3 Haiku(20240307)","value":"claude-3-haiku-20240307"},{"name":"LEGACY: Claude 2","value":"claude-2"},{"name":"LEGACY: Claude 2.1","value":"claude-2.1"},{"name":"LEGACY: Claude Instant 1.2","value":"claude-instant-1.2"},{"name":"LEGACY: Claude Instant 1","value":"claude-instant-1"}],"description":"The model which will generate the completion. <a href=\"https://docs.anthropic.com/claude/docs/models-overview\">Learn more</a>.","default":"claude-2","displayOptions":{"show":{"@version":[1]}}},{"displayName":"Model","name":"model","type":"options","options":[{"name":"Claude 3 Opus(20240229)","value":"claude-3-opus-20240229"},{"name":"Claude 3.5 Sonnet(20240620)","value":"claude-3-5-sonnet-20240620"},{"name":"Claude 3 Sonnet(20240229)","value":"claude-3-sonnet-20240229"},{"name":"Claude 3 Haiku(20240307)","value":"claude-3-haiku-20240307"},{"name":"LEGACY: Claude 2","value":"claude-2"},{"name":"LEGACY: Claude 2.1","value":"claude-2.1"},{"name":"LEGACY: Claude Instant 1.2","value":"claude-instant-1.2"},{"name":"LEGACY: Claude Instant 1","value":"claude-instant-1"}],"description":"The model which will generate the completion. <a href=\"https://docs.anthropic.com/claude/docs/models-overview\">Learn more</a>.","default":"claude-3-sonnet-20240229","displayOptions":{"show":{"@version":[1.1]}}},{"displayName":"Model","name":"model","type":"options","options":[{"name":"Claude 3 Opus(20240229)","value":"claude-3-opus-20240229"},{"name":"Claude 3.5 Sonnet(20240620)","value":"claude-3-5-sonnet-20240620"},{"name":"Claude 3 Sonnet(20240229)","value":"claude-3-sonnet-20240229"},{"name":"Claude 3 Haiku(20240307)","value":"claude-3-haiku-20240307"}],"description":"The model which will generate the completion. 
<a href=\"https://docs.anthropic.com/claude/docs/models-overview\">Learn more</a>.","default":"claude-3-5-sonnet-20240620","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.2}}]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Maximum Number of Tokens","name":"maxTokensToSample","default":4096,"description":"The maximum number of tokens to generate in the completion","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Top K","name":"topK","default":-1,"typeOptions":{"maxValue":1,"minValue":-1,"numberPrecision":1},"description":"Used to remove \"long tail\" low probability responses. Defaults to -1, which disables it.","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LMChatAnthropic/anthropic.svg"},
+ {"displayName":"Azure OpenAI Chat Model","name":"@n8n/n8n-nodes-langchain.lmChatAzureOpenAi","group":["transform"],"version":1,"description":"For advanced usage with an AI chain","defaults":{"name":"Azure OpenAI Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatazureopenai/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"azureOpenAiApi","required":true}],"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model (Deployment) Name","name":"model","type":"string","description":"The name of the model(deployment) to use","default":""},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Frequency Penalty","name":"frequencyPenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim","type":"number"},{"displayName":"Maximum Number of Tokens","name":"maxTokens","default":-1,"description":"The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).","type":"number","typeOptions":{"maxValue":32768}},{"displayName":"Presence Penalty","name":"presencePenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Timeout","name":"timeout","default":60000,"description":"Maximum amount of time a request is allowed to take in milliseconds","type":"number"},{"displayName":"Max Retries","name":"maxRetries","default":2,"description":"Maximum number of retries to attempt","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatAzureOpenAi/azure.svg"},
+ {"displayName":"Google PaLM Language Model","name":"@n8n/n8n-nodes-langchain.lmGooglePalm","hidden":true,"group":["transform"],"version":1,"description":"Language Model Google PaLM","defaults":{"name":"Google PaLM Language Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Text Completion Models"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmgooglepalm/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"googlePalmApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $credentials.host }}"},"properties":[{"displayName":"Google PaLM API is <a href='https://ai.google.dev/palm_docs/deprecation' target='_blank'>deprecated</a>. Please use Google Vertex or Google Gemini nodes instead.","name":"deprecated","type":"notice","default":""},{"displayName":"Model","name":"modelName","type":"options","description":"The model which will generate the completion. <a href=\"https://developers.generativeai.google/api/rest/generativelanguage/models/list\">Learn more</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/v1beta3/models"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"models"}},{"type":"filter","properties":{"pass":"={{ $responseItem.name.startsWith('models/text') }}"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.name}}","value":"={{$responseItem.name}}","description":"={{$responseItem.description}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":"models/text-bison-001"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Maximum Number of Tokens","name":"maxOutputTokens","default":1024,"description":"The maximum number of tokens to generate in the completion","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Top K","name":"topK","default":40,"typeOptions":{"maxValue":1,"minValue":-1,"numberPrecision":1},"description":"Used to remove \"long tail\" low probability responses. Defaults to -1, which disables it.","type":"number"},{"displayName":"Top P","name":"topP","default":0.9,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmGooglePalm/google.svg"},
+ {"displayName":"AWS Bedrock Chat Model","name":"@n8n/n8n-nodes-langchain.lmChatAwsBedrock","group":["transform"],"version":1,"description":"Language Model AWS Bedrock","defaults":{"name":"AWS Bedrock Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatawsbedrock/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"aws","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"=https://bedrock.{{$credentials?.region ?? \"eu-central-1\"}}.amazonaws.com"},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"model","type":"options","description":"The model which will generate the completion. <a href=\"https://docs.aws.amazon.com/bedrock/latest/userguide/foundation-models.html\">Learn more</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/foundation-models?&byOutputModality=TEXT&byInferenceType=ON_DEMAND"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"modelSummaries"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.modelName}}","description":"={{$responseItem.modelArn}}","value":"={{$responseItem.modelId}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":""},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Maximum Number of Tokens","name":"maxTokensToSample","default":2000,"description":"The maximum number of tokens to generate in the completion","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatAwsBedrock/bedrock.svg"},
+ {"displayName":"Google PaLM Chat Model","name":"@n8n/n8n-nodes-langchain.lmChatGooglePalm","hidden":true,"group":["transform"],"version":1,"description":"Chat Model Google PaLM","defaults":{"name":"Google PaLM Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatgooglepalm/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"googlePalmApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $credentials.host }}"},"properties":[{"displayName":"Google PaLM API is <a href='https://ai.google.dev/palm_docs/deprecation' target='_blank'>deprecated</a>. Please use Google Vertex or Google Gemini nodes instead.","name":"deprecated","type":"notice","default":""},{"displayName":"Model","name":"modelName","type":"options","description":"The model which will generate the completion. <a href=\"https://developers.generativeai.google/api/rest/generativelanguage/models/list\">Learn more</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/v1beta3/models"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"models"}},{"type":"filter","properties":{"pass":"={{ $responseItem.name.startsWith('models/chat') }}"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.name}}","value":"={{$responseItem.name}}","description":"={{$responseItem.description}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":"models/chat-bison-001"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Top K","name":"topK","default":40,"typeOptions":{"maxValue":1,"minValue":-1,"numberPrecision":1},"description":"Used to remove \"long tail\" low probability responses. Defaults to -1, which disables it.","type":"number"},{"displayName":"Top P","name":"topP","default":0.9,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatGooglePalm/google.svg"},
+ {"displayName":"Google Gemini Chat Model","name":"@n8n/n8n-nodes-langchain.lmChatGoogleGemini","group":["transform"],"version":1,"description":"Chat Model Google Gemini","defaults":{"name":"Google Gemini Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatgooglegemini/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"googlePalmApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $credentials.host }}"},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"modelName","type":"options","description":"The model which will generate the completion. <a href=\"https://developers.generativeai.google/api/rest/generativelanguage/models/list\">Learn more</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/v1beta/models"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"models"}},{"type":"filter","properties":{"pass":"={{ !$responseItem.name.includes('embedding') }}"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.name}}","value":"={{$responseItem.name}}","description":"={{$responseItem.description}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":"models/gemini-1.0-pro"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Maximum Number of Tokens","name":"maxOutputTokens","default":2048,"description":"The maximum number of tokens to generate in the completion","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.4,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Top K","name":"topK","default":32,"typeOptions":{"maxValue":40,"minValue":-1,"numberPrecision":1},"description":"Used to remove \"long tail\" low probability responses. Defaults to -1, which disables it.","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. 
We generally recommend altering this or temperature but not both.","type":"number"},{"displayName":"Safety Settings","name":"safetySettings","type":"fixedCollection","typeOptions":{"multipleValues":true},"default":{"values":{"category":"HARM_CATEGORY_HARASSMENT","threshold":"HARM_BLOCK_THRESHOLD_UNSPECIFIED"}},"placeholder":"Add Option","options":[{"name":"values","displayName":"Values","values":[{"displayName":"Safety Category","name":"category","type":"options","description":"The category of harmful content to block","default":"HARM_CATEGORY_UNSPECIFIED","options":[{"value":"HARM_CATEGORY_HARASSMENT","name":"HARM_CATEGORY_HARASSMENT","description":"Harassment content"},{"value":"HARM_CATEGORY_HATE_SPEECH","name":"HARM_CATEGORY_HATE_SPEECH","description":"Hate speech and content"},{"value":"HARM_CATEGORY_SEXUALLY_EXPLICIT","name":"HARM_CATEGORY_SEXUALLY_EXPLICIT","description":"Sexually explicit content"},{"value":"HARM_CATEGORY_DANGEROUS_CONTENT","name":"HARM_CATEGORY_DANGEROUS_CONTENT","description":"Dangerous content"}]},{"displayName":"Safety Threshold","name":"threshold","type":"options","description":"The threshold of harmful content to block","default":"HARM_BLOCK_THRESHOLD_UNSPECIFIED","options":[{"value":"HARM_BLOCK_THRESHOLD_UNSPECIFIED","name":"HARM_BLOCK_THRESHOLD_UNSPECIFIED","description":"Threshold is unspecified"},{"value":"BLOCK_LOW_AND_ABOVE","name":"BLOCK_LOW_AND_ABOVE","description":"Content with NEGLIGIBLE will be allowed"},{"value":"BLOCK_MEDIUM_AND_ABOVE","name":"BLOCK_MEDIUM_AND_ABOVE","description":"Content with NEGLIGIBLE and LOW will be allowed"},{"value":"BLOCK_ONLY_HIGH","name":"BLOCK_ONLY_HIGH","description":"Content with NEGLIGIBLE, LOW, and MEDIUM will be allowed"},{"value":"BLOCK_NONE","name":"BLOCK_NONE","description":"All content will be allowed"}]}]}]}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatGoogleGemini/google.svg"},
+ {"displayName":"Google Vertex Chat Model","name":"@n8n/n8n-nodes-langchain.lmChatGoogleVertex","group":["transform"],"version":1,"description":"Chat Model Google Vertex","defaults":{"name":"Google Vertex Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatgooglevertex/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"googleApi","required":true}],"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Project ID","name":"projectId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"description":"Select or enter your Google Cloud project ID","modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"gcpProjectsList"}},{"displayName":"ID","name":"id","type":"string"}]},{"displayName":"Model Name","name":"modelName","type":"string","description":"The model which will generate the completion. <a href=\"https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models\">Learn more</a>.","default":"gemini-1.5-flash"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Maximum Number of Tokens","name":"maxOutputTokens","default":2048,"description":"The maximum number of tokens to generate in the completion","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.4,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Top K","name":"topK","default":32,"typeOptions":{"maxValue":40,"minValue":-1,"numberPrecision":1},"description":"Used to remove \"long tail\" low probability responses. Defaults to -1, which disables it.","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. 
We generally recommend altering this or temperature but not both.","type":"number"},{"displayName":"Safety Settings","name":"safetySettings","type":"fixedCollection","typeOptions":{"multipleValues":true},"default":{"values":{"category":"HARM_CATEGORY_HARASSMENT","threshold":"HARM_BLOCK_THRESHOLD_UNSPECIFIED"}},"placeholder":"Add Option","options":[{"name":"values","displayName":"Values","values":[{"displayName":"Safety Category","name":"category","type":"options","description":"The category of harmful content to block","default":"HARM_CATEGORY_UNSPECIFIED","options":[{"value":"HARM_CATEGORY_HARASSMENT","name":"HARM_CATEGORY_HARASSMENT","description":"Harassment content"},{"value":"HARM_CATEGORY_HATE_SPEECH","name":"HARM_CATEGORY_HATE_SPEECH","description":"Hate speech and content"},{"value":"HARM_CATEGORY_SEXUALLY_EXPLICIT","name":"HARM_CATEGORY_SEXUALLY_EXPLICIT","description":"Sexually explicit content"},{"value":"HARM_CATEGORY_DANGEROUS_CONTENT","name":"HARM_CATEGORY_DANGEROUS_CONTENT","description":"Dangerous content"}]},{"displayName":"Safety Threshold","name":"threshold","type":"options","description":"The threshold of harmful content to block","default":"HARM_BLOCK_THRESHOLD_UNSPECIFIED","options":[{"value":"HARM_BLOCK_THRESHOLD_UNSPECIFIED","name":"HARM_BLOCK_THRESHOLD_UNSPECIFIED","description":"Threshold is unspecified"},{"value":"BLOCK_LOW_AND_ABOVE","name":"BLOCK_LOW_AND_ABOVE","description":"Content with NEGLIGIBLE will be allowed"},{"value":"BLOCK_MEDIUM_AND_ABOVE","name":"BLOCK_MEDIUM_AND_ABOVE","description":"Content with NEGLIGIBLE and LOW will be allowed"},{"value":"BLOCK_ONLY_HIGH","name":"BLOCK_ONLY_HIGH","description":"Content with NEGLIGIBLE, LOW, and MEDIUM will be allowed"},{"value":"BLOCK_NONE","name":"BLOCK_NONE","description":"All content will be allowed"}]}]}]}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatGoogleVertex/google.svg"},
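Both Google chat entries (Gemini above and Vertex here) collect the same `safetySettings` fixed collection: each row pairs one harm category with one blocking threshold. A sketch of the row shape those collections produce, with union types transcribed from the option values listed in the JSON (the type and variable names are illustrative, not the package's):

```typescript
// Shape of one safetySettings row as collected by the Gemini/Vertex entries above.
type HarmCategory =
  | 'HARM_CATEGORY_HARASSMENT'
  | 'HARM_CATEGORY_HATE_SPEECH'
  | 'HARM_CATEGORY_SEXUALLY_EXPLICIT'
  | 'HARM_CATEGORY_DANGEROUS_CONTENT';

type HarmBlockThreshold =
  | 'HARM_BLOCK_THRESHOLD_UNSPECIFIED'
  | 'BLOCK_LOW_AND_ABOVE'    // only NEGLIGIBLE content allowed
  | 'BLOCK_MEDIUM_AND_ABOVE' // NEGLIGIBLE and LOW allowed
  | 'BLOCK_ONLY_HIGH'        // NEGLIGIBLE, LOW, and MEDIUM allowed
  | 'BLOCK_NONE';            // everything allowed

interface SafetySetting {
  category: HarmCategory;
  threshold: HarmBlockThreshold;
}

// The collection's default row from the node definition:
const defaultSetting: SafetySetting = {
  category: 'HARM_CATEGORY_HARASSMENT',
  threshold: 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
};
```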
+ {"displayName":"Groq Chat Model","name":"@n8n/n8n-nodes-langchain.lmChatGroq","group":["transform"],"version":1,"description":"Language Model Groq","defaults":{"name":"Groq Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatgroq/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"groqApi","required":true}],"requestDefaults":{"baseURL":"https://api.groq.com/openai/v1"},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"model","type":"options","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/models"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"data"}},{"type":"filter","properties":{"pass":"={{ $responseItem.active === true && $responseItem.object === \"model\" }}"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.id}}","value":"={{$responseItem.id}}"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"description":"The model which will generate the completion. <a href=\"https://console.groq.com/docs/models\">Learn more</a>.","default":"llama3-8b-8192"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Maximum Number of Tokens","name":"maxTokensToSample","default":4096,"description":"The maximum number of tokens to generate in the completion","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatGroq/groq.svg"},
+ {"displayName":"Mistral Cloud Chat Model","name":"@n8n/n8n-nodes-langchain.lmChatMistralCloud","group":["transform"],"version":1,"description":"For advanced usage with an AI chain","defaults":{"name":"Mistral Cloud Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatmistralcloud/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"mistralCloudApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"https://api.mistral.ai/v1"},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"model","type":"options","description":"The model which will generate the completion. <a href=\"https://docs.mistral.ai/platform/endpoints/\">Learn more</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/models"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"data"}},{"type":"filter","properties":{"pass":"={{ !$responseItem.id.includes('embed') }}"}},{"type":"setKeyValue","properties":{"name":"={{ $responseItem.id }}","value":"={{ $responseItem.id }}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":"mistral-small"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Maximum Number of Tokens","name":"maxTokens","default":-1,"description":"The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).","type":"number","typeOptions":{"maxValue":32768}},{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Max Retries","name":"maxRetries","default":2,"description":"Maximum number of retries to attempt","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"},{"displayName":"Enable Safe Mode","name":"safeMode","default":false,"type":"boolean","description":"Whether to inject a safety prompt before all conversations"},{"displayName":"Random Seed","name":"randomSeed","type":"number","description":"The seed to use for random sampling. If set, different calls will generate deterministic results."}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatMistralCloud/mistral.svg"},
+ {"displayName":"Ollama Chat Model","name":"@n8n/n8n-nodes-langchain.lmChatOllama","group":["transform"],"version":1,"description":"Language Model Ollama","defaults":{"name":"Ollama Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatollama/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"ollamaApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $credentials.baseUrl.replace(new RegExp(\"/$\"), \"\") }}"},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"model","type":"options","default":"llama2","description":"The model which will generate the completion. To download models, visit <a href=\"https://ollama.ai/library\">Ollama Models Library</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/api/tags"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"models"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.name}}","value":"={{$responseItem.name}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"required":true},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls the randomness of the generated text. Lower values make the output more focused and deterministic, while higher values make it more diverse and random.","type":"number"},{"displayName":"Top K","name":"topK","default":-1,"typeOptions":{"maxValue":100,"minValue":-1,"numberPrecision":1},"description":"Limits the number of highest probability vocabulary tokens to consider at each step. A higher value increases diversity but may reduce coherence. Set to -1 to disable.","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Chooses from the smallest possible set of tokens whose cumulative probability exceeds the probability top_p. Helps generate more human-like text by reducing repetitions.","type":"number"},{"displayName":"Frequency Penalty","name":"frequencyPenalty","type":"number","default":0,"typeOptions":{"minValue":0},"description":"Adjusts the penalty for tokens that have already appeared in the generated text. Higher values discourage repetition."},{"displayName":"Keep Alive","name":"keepAlive","type":"string","default":"5m","description":"Specifies the duration to keep the loaded model in memory after use. Useful for frequently used models. Format: 1h30m (1 hour 30 minutes)."},{"displayName":"Low VRAM Mode","name":"lowVram","type":"boolean","default":false,"description":"Whether to Activate low VRAM mode, which reduces memory usage at the cost of slower generation speed. 
Useful for GPUs with limited memory."},{"displayName":"Main GPU ID","name":"mainGpu","type":"number","default":0,"description":"Specifies the ID of the GPU to use for the main computation. Only change this if you have multiple GPUs."},{"displayName":"Context Batch Size","name":"numBatch","type":"number","default":512,"description":"Sets the batch size for prompt processing. Larger batch sizes may improve generation speed but increase memory usage."},{"displayName":"Context Length","name":"numCtx","type":"number","default":2048,"description":"The maximum number of tokens to use as context for generating the next token. Smaller values reduce memory usage, while larger values provide more context to the model."},{"displayName":"Number of GPUs","name":"numGpu","type":"number","default":-1,"description":"Specifies the number of GPUs to use for parallel processing. Set to -1 for auto-detection."},{"displayName":"Max Tokens to Generate","name":"numPredict","type":"number","default":-1,"description":"The maximum number of tokens to generate. Set to -1 for no limit. Be cautious when setting this to a large value, as it can lead to very long outputs."},{"displayName":"Number of CPU Threads","name":"numThread","type":"number","default":0,"description":"Specifies the number of CPU threads to use for processing. Set to 0 for auto-detection."},{"displayName":"Penalize Newlines","name":"penalizeNewline","type":"boolean","default":true,"description":"Whether the model will be less likely to generate newline characters, encouraging longer continuous sequences of text"},{"displayName":"Presence Penalty","name":"presencePenalty","type":"number","default":0,"description":"Adjusts the penalty for tokens based on their presence in the generated text so far. Positive values penalize tokens that have already appeared, encouraging diversity."},{"displayName":"Repetition Penalty","name":"repeatPenalty","type":"number","default":1,"description":"Adjusts the penalty factor for repeated tokens. Higher values more strongly discourage repetition. Set to 1.0 to disable repetition penalty."},{"displayName":"Use Memory Locking","name":"useMLock","type":"boolean","default":false,"description":"Whether to lock the model in memory to prevent swapping. This can improve performance but requires sufficient available memory."},{"displayName":"Use Memory Mapping","name":"useMMap","type":"boolean","default":true,"description":"Whether to use memory mapping for loading the model. This can reduce memory usage but may impact performance. Recommended to keep enabled."},{"displayName":"Load Vocabulary Only","name":"vocabOnly","type":"boolean","default":false,"description":"Whether to only load the model vocabulary without the weights. Useful for quickly testing tokenization."},{"displayName":"Output Format","name":"format","type":"options","options":[{"name":"Default","value":"default"},{"name":"JSON","value":"json"}],"default":"default","description":"Specifies the format of the API response"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LMChatOllama/ollama.svg"},
+ {"displayName":"OpenAI Chat Model","name":"@n8n/n8n-nodes-langchain.lmChatOpenAi","group":["transform"],"version":1,"description":"For advanced usage with an AI chain","defaults":{"name":"OpenAI Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatopenai/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"openAiApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $parameter.options?.baseURL?.split(\"/\").slice(0,-1).join(\"/\") || \"https://api.openai.com\" }}"},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"If using JSON response format, you must include word \"json\" in the prompt in your chain or agent. Also, make sure to select latest models released post November 2023.","name":"notice","type":"notice","default":"","displayOptions":{"show":{"/options.responseFormat":["json_object"]}}},{"displayName":"Model","name":"model","type":"options","description":"The model which will generate the completion. <a href=\"https://beta.openai.com/docs/models/overview\">Learn more</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"={{ $parameter.options?.baseURL?.split(\"/\").slice(-1).pop() || \"v1\" }}/models"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"data"}},{"type":"filter","properties":{"pass":"={{\n\t\t\t\t\t\t\t\t\t\t\t\t($parameter.options?.baseURL && !$parameter.options?.baseURL?.includes('api.openai.com')) ||\n\t\t\t\t\t\t\t\t\t\t\t\t($responseItem.id.startsWith('gpt-') && !$responseItem.id.includes('instruct'))\n\t\t\t\t\t\t\t\t\t\t\t}}"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.id}}","value":"={{$responseItem.id}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":"gpt-3.5-turbo"},{"displayName":"When using non-OpenAI models via \"Base URL\" override, not all models might be chat-compatible or support other features, like tools calling or JSON response format","name":"notice","type":"notice","default":"","displayOptions":{"show":{"/options.baseURL":[{"_cnd":{"exists":true}}]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Base URL","name":"baseURL","default":"https://api.openai.com/v1","description":"Override the default base URL for the API","type":"string"},{"displayName":"Frequency Penalty","name":"frequencyPenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim","type":"number"},{"displayName":"Maximum Number of Tokens","name":"maxTokens","default":-1,"description":"The maximum number of tokens to generate in the completion. 
Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).","type":"number","typeOptions":{"maxValue":32768}},{"displayName":"Response Format","name":"responseFormat","default":"text","type":"options","options":[{"name":"Text","value":"text","description":"Regular text response"},{"name":"JSON","value":"json_object","description":"Enables JSON mode, which should guarantee the message the model generates is valid JSON"}]},{"displayName":"Presence Penalty","name":"presencePenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Timeout","name":"timeout","default":60000,"description":"Maximum amount of time a request is allowed to take in milliseconds","type":"number"},{"displayName":"Max Retries","name":"maxRetries","default":2,"description":"Maximum number of retries to attempt","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"}]}],"iconUrl":{"light":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LMChatOpenAi/openAiLight.svg","dark":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LMChatOpenAi/openAiLight.dark.svg"}},
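The OpenAI nodes in this hunk derive `requestDefaults.baseURL` from the optional `baseURL` override by dropping its last path segment (the `/v1`-style suffix) and falling back to `https://api.openai.com`; the model-list request then re-appends that segment with `split("/").slice(-1).pop() || "v1"`. A small TypeScript sketch of the first expression, treating it as plain string manipulation (`resolveBaseUrl` and the example proxy URL are hypothetical, chosen only for illustration):

```typescript
// Sketch of: {{ $parameter.options?.baseURL?.split("/").slice(0,-1).join("/")
//              || "https://api.openai.com" }}
function resolveBaseUrl(baseURL?: string): string {
  // Drop the final path segment (typically "v1"); fall back to the public API host.
  return baseURL?.split('/').slice(0, -1).join('/') || 'https://api.openai.com';
}

resolveBaseUrl('https://my-proxy.example.com/openai/v1'); // 'https://my-proxy.example.com/openai'
resolveBaseUrl(undefined);                                // 'https://api.openai.com'
```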
+ {"displayName":"OpenAI Model","name":"@n8n/n8n-nodes-langchain.lmOpenAi","hidden":true,"group":["transform"],"version":1,"description":"For advanced usage with an AI chain","defaults":{"name":"OpenAI Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Text Completion Models"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmopenai/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"openAiApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $parameter.options?.baseURL?.split(\"/\").slice(0,-1).join(\"/\") || \"https://api.openai.com\" }}"},"properties":[{"displayName":"This node is using OpenAI completions which are now deprecated. Please use the OpenAI Chat Model node instead.","name":"deprecated","type":"notice","default":""},{"displayName":"Model","name":"model","type":"resourceLocator","default":{"mode":"list","value":"gpt-3.5-turbo-instruct"},"required":true,"description":"The model which will generate the completion. <a href=\"https://beta.openai.com/docs/models/overview\">Learn more</a>.","modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"openAiModelSearch"}},{"displayName":"ID","name":"id","type":"string"}],"routing":{"send":{"type":"body","property":"model","value":"={{$parameter.model.value}}"}}},{"displayName":"When using non OpenAI models via Base URL override, not all models might be chat-compatible or support other features, like tools calling or JSON response format.","name":"notice","type":"notice","default":"","displayOptions":{"show":{"/options.baseURL":[{"_cnd":{"exists":true}}]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Base URL","name":"baseURL","default":"https://api.openai.com/v1","description":"Override the default base URL for the API","type":"string"},{"displayName":"Frequency Penalty","name":"frequencyPenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim","type":"number"},{"displayName":"Maximum Number of Tokens","name":"maxTokens","default":-1,"description":"The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).","type":"number","typeOptions":{"maxValue":32768}},{"displayName":"Presence Penalty","name":"presencePenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. 
As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Timeout","name":"timeout","default":60000,"description":"Maximum amount of time a request is allowed to take in milliseconds","type":"number"},{"displayName":"Max Retries","name":"maxRetries","default":2,"description":"Maximum number of retries to attempt","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"}]}],"iconUrl":{"light":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LMOpenAi/openAiLight.svg","dark":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LMOpenAi/openAiLight.dark.svg"}},
+ {"displayName":"Cohere Model","name":"@n8n/n8n-nodes-langchain.lmCohere","group":["transform"],"version":1,"description":"Language Model Cohere","defaults":{"name":"Cohere Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Text Completion Models"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmcohere/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"cohereApi","required":true}],"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Maximum Number of Tokens","name":"maxTokens","default":250,"description":"The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).","type":"number","typeOptions":{"maxValue":32768}},{"displayName":"Model","name":"model","type":"string","description":"The name of the model to use","default":""},{"displayName":"Sampling Temperature","name":"temperature","default":0,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"}]}],"iconUrl":{"light":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LMCohere/cohere.svg","dark":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LMCohere/cohere.dark.svg"}},
+ {"displayName":"Ollama Model","name":"@n8n/n8n-nodes-langchain.lmOllama","group":["transform"],"version":1,"description":"Language Model Ollama","defaults":{"name":"Ollama Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Text Completion Models"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmollama/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"ollamaApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $credentials.baseUrl.replace(new RegExp(\"/$\"), \"\") }}"},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"model","type":"options","default":"llama2","description":"The model which will generate the completion. To download models, visit <a href=\"https://ollama.ai/library\">Ollama Models Library</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/api/tags"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"models"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.name}}","value":"={{$responseItem.name}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"required":true},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls the randomness of the generated text. Lower values make the output more focused and deterministic, while higher values make it more diverse and random.","type":"number"},{"displayName":"Top K","name":"topK","default":-1,"typeOptions":{"maxValue":100,"minValue":-1,"numberPrecision":1},"description":"Limits the number of highest probability vocabulary tokens to consider at each step. A higher value increases diversity but may reduce coherence. Set to -1 to disable.","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Chooses from the smallest possible set of tokens whose cumulative probability exceeds the probability top_p. Helps generate more human-like text by reducing repetitions.","type":"number"},{"displayName":"Frequency Penalty","name":"frequencyPenalty","type":"number","default":0,"typeOptions":{"minValue":0},"description":"Adjusts the penalty for tokens that have already appeared in the generated text. Higher values discourage repetition."},{"displayName":"Keep Alive","name":"keepAlive","type":"string","default":"5m","description":"Specifies the duration to keep the loaded model in memory after use. Useful for frequently used models. Format: 1h30m (1 hour 30 minutes)."},{"displayName":"Low VRAM Mode","name":"lowVram","type":"boolean","default":false,"description":"Whether to Activate low VRAM mode, which reduces memory usage at the cost of slower generation speed. 
Useful for GPUs with limited memory."},{"displayName":"Main GPU ID","name":"mainGpu","type":"number","default":0,"description":"Specifies the ID of the GPU to use for the main computation. Only change this if you have multiple GPUs."},{"displayName":"Context Batch Size","name":"numBatch","type":"number","default":512,"description":"Sets the batch size for prompt processing. Larger batch sizes may improve generation speed but increase memory usage."},{"displayName":"Context Length","name":"numCtx","type":"number","default":2048,"description":"The maximum number of tokens to use as context for generating the next token. Smaller values reduce memory usage, while larger values provide more context to the model."},{"displayName":"Number of GPUs","name":"numGpu","type":"number","default":-1,"description":"Specifies the number of GPUs to use for parallel processing. Set to -1 for auto-detection."},{"displayName":"Max Tokens to Generate","name":"numPredict","type":"number","default":-1,"description":"The maximum number of tokens to generate. Set to -1 for no limit. Be cautious when setting this to a large value, as it can lead to very long outputs."},{"displayName":"Number of CPU Threads","name":"numThread","type":"number","default":0,"description":"Specifies the number of CPU threads to use for processing. Set to 0 for auto-detection."},{"displayName":"Penalize Newlines","name":"penalizeNewline","type":"boolean","default":true,"description":"Whether the model will be less likely to generate newline characters, encouraging longer continuous sequences of text"},{"displayName":"Presence Penalty","name":"presencePenalty","type":"number","default":0,"description":"Adjusts the penalty for tokens based on their presence in the generated text so far. Positive values penalize tokens that have already appeared, encouraging diversity."},{"displayName":"Repetition Penalty","name":"repeatPenalty","type":"number","default":1,"description":"Adjusts the penalty factor for repeated tokens. Higher values more strongly discourage repetition. Set to 1.0 to disable repetition penalty."},{"displayName":"Use Memory Locking","name":"useMLock","type":"boolean","default":false,"description":"Whether to lock the model in memory to prevent swapping. This can improve performance but requires sufficient available memory."},{"displayName":"Use Memory Mapping","name":"useMMap","type":"boolean","default":true,"description":"Whether to use memory mapping for loading the model. This can reduce memory usage but may impact performance. Recommended to keep enabled."},{"displayName":"Load Vocabulary Only","name":"vocabOnly","type":"boolean","default":false,"description":"Whether to only load the model vocabulary without the weights. Useful for quickly testing tokenization."},{"displayName":"Output Format","name":"format","type":"options","options":[{"name":"Default","value":"default"},{"name":"JSON","value":"json"}],"default":"default","description":"Specifies the format of the API response"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LMOllama/ollama.svg"},
+ {"displayName":"Hugging Face Inference Model","name":"@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference","group":["transform"],"version":1,"description":"Language Model HuggingFaceInference","defaults":{"name":"Hugging Face Inference Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Text Completion Models"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmopenhuggingfaceinference/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"huggingFaceApi","required":true}],"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"model","type":"string","default":"gpt2"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Custom Inference Endpoint","name":"endpointUrl","default":"","description":"Custom endpoint URL","type":"string"},{"displayName":"Frequency Penalty","name":"frequencyPenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim","type":"number"},{"displayName":"Maximum Number of Tokens","name":"maxTokens","default":128,"description":"The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).","type":"number","typeOptions":{"maxValue":32768}},{"displayName":"Presence Penalty","name":"presencePenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Top K","name":"topK","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls the top tokens to consider within the sample operation to create new text","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LMOpenHuggingFaceInference/huggingface.svg"},
+ {"displayName":"Window Buffer Memory (easiest)","name":"@n8n/n8n-nodes-langchain.memoryBufferWindow","icon":"fa:database","group":["transform"],"version":[1,1.1,1.2],"description":"Stores in n8n memory, so no credentials required","defaults":{"name":"Window Buffer Memory"},"codex":{"categories":["AI"],"subcategories":{"AI":["Memory"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.memorybufferwindow/"}]}},"inputs":[],"outputs":["ai_memory"],"outputNames":["Memory"],"properties":[{"displayName":"This node must be connected to an AI agent. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Session Key","name":"sessionKey","type":"string","default":"chat_history","description":"The key to use to store the memory in the workflow data","displayOptions":{"show":{"@version":[1]}}},{"displayName":"Session ID","name":"sessionKey","type":"string","default":"={{ $json.sessionId }}","description":"The key to use to store the memory","displayOptions":{"show":{"@version":[1.1]}}},{"displayName":"Session ID","name":"sessionIdType","type":"options","options":[{"name":"Take from previous node automatically","value":"fromInput","description":"Looks for an input field called sessionId"},{"name":"Define below","value":"customKey","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"fromInput","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.2}}]}}},{"displayName":"Key","name":"sessionKey","type":"string","default":"","description":"The key to use to store session ID in the memory","displayOptions":{"show":{"sessionIdType":["customKey"]}}},{"displayName":"Context Window Length","name":"contextWindowLength","type":"number","default":5,"hint":"How many past interactions the model receives as context"}]},
  {"displayName":"Motorhead","name":"@n8n/n8n-nodes-langchain.memoryMotorhead","icon":"fa:file-export","group":["transform"],"version":[1,1.1,1.2],"description":"Use Motorhead Memory","defaults":{"name":"Motorhead"},"codex":{"categories":["AI"],"subcategories":{"AI":["Memory"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.memorymotorhead/"}]}},"inputs":[],"outputs":["ai_memory"],"outputNames":["Memory"],"credentials":[{"name":"motorheadApi","required":true}],"properties":[{"displayName":"This node must be connected to an AI agent. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Session ID","name":"sessionId","type":"string","required":true,"default":"","displayOptions":{"show":{"@version":[1]}}},{"displayName":"Session ID","name":"sessionId","type":"string","default":"={{ $json.sessionId }}","description":"The key to use to store the memory","displayOptions":{"show":{"@version":[1.1]}}},{"displayName":"Session ID","name":"sessionIdType","type":"options","options":[{"name":"Take from previous node automatically","value":"fromInput","description":"Looks for an input field called sessionId"},{"name":"Define below","value":"customKey","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"fromInput","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.2}}]}}},{"displayName":"Key","name":"sessionKey","type":"string","default":"","description":"The key to use to store session ID in the memory","displayOptions":{"show":{"sessionIdType":["customKey"]}}}]},
- {"displayName":"Postgres Chat Memory","name":"@n8n/n8n-nodes-langchain.memoryPostgresChat","group":["transform"],"version":[1],"description":"Stores the chat history in Postgres table.","defaults":{"name":"Postgres Chat Memory"},"credentials":[{"name":"postgres","required":true,"testedBy":"postgresConnectionTest"}],"codex":{"categories":["AI"],"subcategories":{"AI":["Memory"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.memorypostgreschat/"}]}},"inputs":[],"outputs":["ai_memory"],"outputNames":["Memory"],"properties":[{"displayName":"This node must be connected to an AI agent. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Session ID","name":"sessionIdType","type":"options","options":[{"name":"Take from previous node automatically","value":"fromInput","description":"Looks for an input field called sessionId"},{"name":"Define below","value":"customKey","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"fromInput"},{"displayName":"Key","name":"sessionKey","type":"string","default":"","description":"The key to use to store session ID in the memory","displayOptions":{"show":{"sessionIdType":["customKey"]}}},{"displayName":"Table Name","name":"tableName","type":"string","default":"n8n_chat_histories","description":"The table name to store the chat history in. If table does not exist, it will be created."}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/memory/MemoryPostgresChat/postgres.svg"},
- {"displayName":"Redis Chat Memory","name":"@n8n/n8n-nodes-langchain.memoryRedisChat","group":["transform"],"version":[1,1.1,1.2],"description":"Stores the chat history in Redis.","defaults":{"name":"Redis Chat Memory"},"credentials":[{"name":"redis","required":true}],"codex":{"categories":["AI"],"subcategories":{"AI":["Memory"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.memoryredischat/"}]}},"inputs":[],"outputs":["ai_memory"],"outputNames":["Memory"],"properties":[{"displayName":"This node must be connected to an AI agent. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Session Key","name":"sessionKey","type":"string","default":"chat_history","description":"The key to use to store the memory in the workflow data","displayOptions":{"show":{"@version":[1]}}},{"displayName":"Session ID","name":"sessionKey","type":"string","default":"={{ $json.sessionId }}","description":"The key to use to store the memory","displayOptions":{"show":{"@version":[1.1]}}},{"displayName":"Session ID","name":"sessionIdType","type":"options","options":[{"name":"Take from previous node automatically","value":"fromInput","description":"Looks for an input field called sessionId"},{"name":"Define below","value":"customKey","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"fromInput","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.2}}]}}},{"displayName":"Key","name":"sessionKey","type":"string","default":"","description":"The key to use to store session ID in the memory","displayOptions":{"show":{"sessionIdType":["customKey"]}}},{"displayName":"Session Time To Live","name":"sessionTTL","type":"number","default":0,"description":"For how long the session should be stored in seconds. If set to 0 it will not expire."}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/memory/MemoryRedisChat/redis.svg"},
+ {"displayName":"Postgres Chat Memory","name":"@n8n/n8n-nodes-langchain.memoryPostgresChat","group":["transform"],"version":[1,1.1],"description":"Stores the chat history in Postgres table.","defaults":{"name":"Postgres Chat Memory"},"credentials":[{"name":"postgres","required":true,"testedBy":"postgresConnectionTest"}],"codex":{"categories":["AI"],"subcategories":{"AI":["Memory"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.memorypostgreschat/"}]}},"inputs":[],"outputs":["ai_memory"],"outputNames":["Memory"],"properties":[{"displayName":"This node must be connected to an AI agent. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Session ID","name":"sessionIdType","type":"options","options":[{"name":"Take from previous node automatically","value":"fromInput","description":"Looks for an input field called sessionId"},{"name":"Define below","value":"customKey","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"fromInput"},{"displayName":"Key","name":"sessionKey","type":"string","default":"","description":"The key to use to store session ID in the memory","displayOptions":{"show":{"sessionIdType":["customKey"]}}},{"displayName":"Table Name","name":"tableName","type":"string","default":"n8n_chat_histories","description":"The table name to store the chat history in. If table does not exist, it will be created."},{"displayName":"Context Window Length","name":"contextWindowLength","type":"number","default":5,"hint":"How many past interactions the model receives as context","displayOptions":{"hide":{"@version":[{"_cnd":{"lt":1.1}}]}}}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/memory/MemoryPostgresChat/postgres.svg"},
+ {"displayName":"Redis Chat Memory","name":"@n8n/n8n-nodes-langchain.memoryRedisChat","group":["transform"],"version":[1,1.1,1.2,1.3],"description":"Stores the chat history in Redis.","defaults":{"name":"Redis Chat Memory"},"credentials":[{"name":"redis","required":true}],"codex":{"categories":["AI"],"subcategories":{"AI":["Memory"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.memoryredischat/"}]}},"inputs":[],"outputs":["ai_memory"],"outputNames":["Memory"],"properties":[{"displayName":"This node must be connected to an AI agent. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Session Key","name":"sessionKey","type":"string","default":"chat_history","description":"The key to use to store the memory in the workflow data","displayOptions":{"show":{"@version":[1]}}},{"displayName":"Session ID","name":"sessionKey","type":"string","default":"={{ $json.sessionId }}","description":"The key to use to store the memory","displayOptions":{"show":{"@version":[1.1]}}},{"displayName":"Session ID","name":"sessionIdType","type":"options","options":[{"name":"Take from previous node automatically","value":"fromInput","description":"Looks for an input field called sessionId"},{"name":"Define below","value":"customKey","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"fromInput","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.2}}]}}},{"displayName":"Key","name":"sessionKey","type":"string","default":"","description":"The key to use to store session ID in the memory","displayOptions":{"show":{"sessionIdType":["customKey"]}}},{"displayName":"Session Time To Live","name":"sessionTTL","type":"number","default":0,"description":"For how long the session should be stored in seconds. If set to 0 it will not expire."},{"displayName":"Context Window Length","name":"contextWindowLength","type":"number","default":5,"hint":"How many past interactions the model receives as context","displayOptions":{"hide":{"@version":[{"_cnd":{"lt":1.3}}]}}}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/memory/MemoryRedisChat/redis.svg"},
  {"displayName":"Chat Memory Manager","name":"@n8n/n8n-nodes-langchain.memoryManager","icon":"fa:database","iconColor":"black","group":["transform"],"version":[1,1.1],"description":"Manage chat messages memory and use it in the workflow","defaults":{"name":"Chat Memory Manager"},"codex":{"categories":["AI"],"subcategories":{"AI":["Miscellaneous","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.memorymanager/"}]}},"inputs":[{"displayName":"","type":"main"},{"displayName":"Memory","type":"ai_memory","required":true,"maxConnections":1}],"outputs":[{"displayName":"","type":"main"}],"properties":[{"displayName":"Operation Mode","name":"mode","type":"options","noDataExpression":true,"default":"load","options":[{"name":"Get Many Messages","description":"Retrieve chat messages from connected memory","value":"load"},{"name":"Insert Messages","description":"Insert chat messages into connected memory","value":"insert"},{"name":"Delete Messages","description":"Delete chat messages from connected memory","value":"delete"}]},{"displayName":"Insert Mode","name":"insertMode","type":"options","description":"Choose how new messages are inserted into the memory","noDataExpression":true,"default":"insert","options":[{"name":"Insert Messages","value":"insert","description":"Add messages alongside existing ones"},{"name":"Override All Messages","value":"override","description":"Replace the current memory with new messages"}],"displayOptions":{"show":{"mode":["insert"]}}},{"displayName":"Delete Mode","name":"deleteMode","type":"options","description":"How messages are deleted from memory","noDataExpression":true,"default":"lastN","options":[{"name":"Last N","value":"lastN","description":"Delete the last N messages"},{"name":"All Messages","value":"all","description":"Clear all messages from memory"}],"displayOptions":{"show":{"mode":["delete"]}}},{"displayName":"Chat Messages","name":"messages","description":"Chat messages to insert into memory","type":"fixedCollection","typeOptions":{"multipleValues":true},"default":{},"placeholder":"Add message","options":[{"name":"messageValues","displayName":"Message","values":[{"displayName":"Type Name or ID","name":"type","type":"options","options":[{"name":"AI","value":"ai"},{"name":"System","value":"system"},{"name":"User","value":"user"}],"default":"system"},{"displayName":"Message","name":"message","type":"string","required":true,"default":""},{"displayName":"Hide Message in Chat","name":"hideFromUI","type":"boolean","required":true,"default":false,"description":"Whether to hide the message from the chat UI"}]}],"displayOptions":{"show":{"mode":["insert"]}}},{"displayName":"Messages Count","name":"lastMessagesCount","type":"number","description":"The amount of last messages to delete","default":2,"displayOptions":{"show":{"mode":["delete"],"deleteMode":["lastN"]}}},{"displayName":"Simplify Output","name":"simplifyOutput","type":"boolean","description":"Whether to simplify the output to only include the sender and the text","default":true,"displayOptions":{"show":{"mode":["load"]}}},{"displayName":"Options","name":"options","placeholder":"Add Option","type":"collection","default":{},"options":[{"displayName":"Group Messages","name":"groupMessages","type":"boolean","default":true,"description":"Whether to group messages into a single item or return each message as a separate item"}],"displayOptions":{"show":{"mode":["load"]}}}]},
  {"displayName":"Chat Messages Retriever","name":"@n8n/n8n-nodes-langchain.memoryChatRetriever","icon":"fa:database","group":["transform"],"hidden":true,"version":1,"description":"Retrieve chat messages from memory and use them in the workflow","defaults":{"name":"Chat Messages Retriever"},"codex":{"categories":["AI"],"subcategories":{"AI":["Miscellaneous"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.memorymanager/"}]}},"inputs":["main",{"displayName":"Memory","maxConnections":1,"type":"ai_memory","required":true}],"outputs":["main"],"properties":[{"displayName":"This node is deprecated. Use 'Chat Memory Manager' node instead.","type":"notice","default":"","name":"deprecatedNotice"},{"displayName":"Simplify Output","name":"simplifyOutput","type":"boolean","description":"Whether to simplify the output to only include the sender and the text","default":true}]},
- {"displayName":"Xata","name":"@n8n/n8n-nodes-langchain.memoryXata","group":["transform"],"version":[1,1.1,1.2],"description":"Use Xata Memory","defaults":{"name":"Xata","color":"#1321A7"},"codex":{"categories":["AI"],"subcategories":{"AI":["Memory"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.memoryxata/"}]}},"inputs":[],"outputs":["ai_memory"],"outputNames":["Memory"],"credentials":[{"name":"xataApi","required":true}],"properties":[{"displayName":"This node must be connected to an AI agent. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Session ID","name":"sessionId","type":"string","required":true,"default":"","displayOptions":{"show":{"@version":[1]}}},{"displayName":"Session ID","name":"sessionId","type":"string","default":"={{ $json.sessionId }}","description":"The key to use to store the memory","displayOptions":{"show":{"@version":[1.1]}}},{"displayName":"Session ID","name":"sessionIdType","type":"options","options":[{"name":"Take from previous node automatically","value":"fromInput","description":"Looks for an input field called sessionId"},{"name":"Define below","value":"customKey","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"fromInput","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.2}}]}}},{"displayName":"Key","name":"sessionKey","type":"string","default":"","description":"The key to use to store session ID in the memory","displayOptions":{"show":{"sessionIdType":["customKey"]}}}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/memory/MemoryXata/xata.svg"},
+ {"displayName":"Xata","name":"@n8n/n8n-nodes-langchain.memoryXata","group":["transform"],"version":[1,1.1,1.2,1.3],"description":"Use Xata Memory","defaults":{"name":"Xata","color":"#1321A7"},"codex":{"categories":["AI"],"subcategories":{"AI":["Memory"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.memoryxata/"}]}},"inputs":[],"outputs":["ai_memory"],"outputNames":["Memory"],"credentials":[{"name":"xataApi","required":true}],"properties":[{"displayName":"This node must be connected to an AI agent. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Session ID","name":"sessionId","type":"string","required":true,"default":"","displayOptions":{"show":{"@version":[1]}}},{"displayName":"Session ID","name":"sessionId","type":"string","default":"={{ $json.sessionId }}","description":"The key to use to store the memory","displayOptions":{"show":{"@version":[1.1]}}},{"displayName":"Session ID","name":"sessionIdType","type":"options","options":[{"name":"Take from previous node automatically","value":"fromInput","description":"Looks for an input field called sessionId"},{"name":"Define below","value":"customKey","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"fromInput","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.2}}]}}},{"displayName":"Key","name":"sessionKey","type":"string","default":"","description":"The key to use to store session ID in the memory","displayOptions":{"show":{"sessionIdType":["customKey"]}}},{"displayName":"Context Window Length","name":"contextWindowLength","type":"number","default":5,"hint":"How many past interactions the model receives as context","displayOptions":{"hide":{"@version":[{"_cnd":{"lt":1.3}}]}}}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/memory/MemoryXata/xata.svg"},
  {"displayName":"Zep","name":"@n8n/n8n-nodes-langchain.memoryZep","group":["transform"],"version":[1,1.1,1.2],"description":"Use Zep Memory","defaults":{"name":"Zep"},"codex":{"categories":["AI"],"subcategories":{"AI":["Memory"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.memoryzep/"}]}},"inputs":[],"outputs":["ai_memory"],"outputNames":["Memory"],"credentials":[{"name":"zepApi","required":true}],"properties":[{"displayName":"This node must be connected to an AI agent. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Session ID","name":"sessionId","type":"string","required":true,"default":"","displayOptions":{"show":{"@version":[1]}}},{"displayName":"Session ID","name":"sessionId","type":"string","default":"={{ $json.sessionId }}","description":"The key to use to store the memory","displayOptions":{"show":{"@version":[1.1]}}},{"displayName":"Session ID","name":"sessionIdType","type":"options","options":[{"name":"Take from previous node automatically","value":"fromInput","description":"Looks for an input field called sessionId"},{"name":"Define below","value":"customKey","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"fromInput","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.2}}]}}},{"displayName":"Key","name":"sessionKey","type":"string","default":"","description":"The key to use to store session ID in the memory","displayOptions":{"show":{"sessionIdType":["customKey"]}}}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/memory/MemoryZep/zep.png"},
  {"displayName":"Auto-fixing Output Parser","name":"@n8n/n8n-nodes-langchain.outputParserAutofixing","icon":"fa:tools","group":["transform"],"version":1,"description":"Automatically fix the output if it is not in the correct format","defaults":{"name":"Auto-fixing Output Parser"},"codex":{"categories":["AI"],"subcategories":{"AI":["Output Parsers"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.outputparserautofixing/"}]}},"inputs":[{"displayName":"Model","maxConnections":1,"type":"ai_languageModel","required":true},{"displayName":"Output Parser","maxConnections":1,"required":true,"type":"ai_outputParser"}],"outputs":["ai_outputParser"],"outputNames":["Output Parser"],"properties":[{"displayName":"This node wraps another output parser. If the first one fails it calls an LLM to fix the format","name":"info","type":"notice","default":""},{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}}]},
  {"displayName":"Item List Output Parser","name":"@n8n/n8n-nodes-langchain.outputParserItemList","icon":"fa:bars","group":["transform"],"version":1,"description":"Return the results as separate items","defaults":{"name":"Item List Output Parser"},"codex":{"categories":["AI"],"subcategories":{"AI":["Output Parsers"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.outputparseritemlist/"}]}},"inputs":[],"outputs":["ai_outputParser"],"outputNames":["Output Parser"],"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Options","name":"options","type":"collection","placeholder":"Add Option","default":{},"options":[{"displayName":"Number Of Items","name":"numberOfItems","type":"number","default":-1,"description":"Defines how many items should be returned maximally. If set to -1, there is no limit."},{"displayName":"Separator","name":"separator","type":"string","default":"\\n","description":"Defines the separator that should be used to split the results into separate items. Defaults to a new line but can be changed depending on the data that should be returned."}]}]},
@@ -58,7 +58,7 @@
  {"displayName":"Token Splitter","name":"@n8n/n8n-nodes-langchain.textSplitterTokenSplitter","icon":"fa:grip-lines-vertical","group":["transform"],"version":1,"description":"Split text into chunks by tokens","defaults":{"name":"Token Splitter"},"codex":{"categories":["AI"],"subcategories":{"AI":["Text Splitters"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.textsplittertokensplitter/"}]}},"inputs":[],"outputs":["ai_textSplitter"],"outputNames":["Text Splitter"],"properties":[{"displayName":"This node must be connected to a document loader. <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='ai_document'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Chunk Size","name":"chunkSize","type":"number","default":1000},{"displayName":"Chunk Overlap","name":"chunkOverlap","type":"number","default":0}]},
  {"displayName":"Calculator","name":"@n8n/n8n-nodes-langchain.toolCalculator","icon":"fa:calculator","group":["transform"],"version":1,"description":"Make it easier for AI agents to perform arithmetic","defaults":{"name":"Calculator"},"codex":{"categories":["AI"],"subcategories":{"AI":["Tools"],"Tools":["Other Tools"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.toolcalculator/"}]}},"inputs":[],"outputs":["ai_tool"],"outputNames":["Tool"],"properties":[{"displayName":"This node must be connected to an AI agent. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}}]},
  {"displayName":"Code Tool","name":"@n8n/n8n-nodes-langchain.toolCode","icon":"fa:code","group":["transform"],"version":[1,1.1],"description":"Write a tool in JS or Python","defaults":{"name":"Code Tool"},"codex":{"categories":["AI"],"subcategories":{"AI":["Tools"],"Tools":["Recommended Tools"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.toolcode/"}]}},"inputs":[],"outputs":["ai_tool"],"outputNames":["Tool"],"properties":[{"displayName":"This node must be connected to an AI agent. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"See an example of a conversational agent with custom tool written in JavaScript <a href=\"/templates/1963\" target=\"_blank\">here</a>.","name":"noticeTemplateExample","type":"notice","default":""},{"displayName":"Name","name":"name","type":"string","default":"","placeholder":"My_Tool","displayOptions":{"show":{"@version":[1]}}},{"displayName":"Name","name":"name","type":"string","default":"","placeholder":"e.g. My_Tool","validateType":"string-alphanumeric","description":"The name of the function to be called, could contain letters, numbers, and underscores only","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.1}}]}}},{"displayName":"Description","name":"description","type":"string","default":"","placeholder":"Call this tool to get a random color. The input should be a string with comma separted names of colors to exclude.","typeOptions":{"rows":3}},{"displayName":"Language","name":"language","type":"options","noDataExpression":true,"options":[{"name":"JavaScript","value":"javaScript"},{"name":"Python (Beta)","value":"python"}],"default":"javaScript"},{"displayName":"JavaScript","name":"jsCode","type":"string","displayOptions":{"show":{"language":["javaScript"]}},"typeOptions":{"editor":"jsEditor"},"default":"// Example: convert the incoming query to uppercase and return it\nreturn query.toUpperCase()","hint":"You can access the input the tool receives via the input property \"query\". The returned value should be a single string.","description":"E.g. Converts any text to uppercase","noDataExpression":true},{"displayName":"Python","name":"pythonCode","type":"string","displayOptions":{"show":{"language":["python"]}},"typeOptions":{"editor":"codeNodeEditor","editorLanguage":"python"},"default":"# Example: convert the incoming query to uppercase and return it\nreturn query.upper()","hint":"You can access the input the tool receives via the input property \"query\". The returned value should be a single string.","description":"E.g. Converts any text to uppercase","noDataExpression":true}]},
- {"displayName":"HTTP Request Tool","name":"@n8n/n8n-nodes-langchain.toolHttpRequest","group":["output"],"version":1,"description":"Makes an HTTP request and returns the response data","subtitle":"={{ $parameter.toolDescription }}","defaults":{"name":"HTTP Request"},"credentials":[],"codex":{"categories":["AI"],"subcategories":{"AI":["Tools"],"Tools":["Recommended Tools"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.toolhttprequest/"}]}},"inputs":[],"outputs":["ai_tool"],"outputNames":["Tool"],"properties":[{"displayName":"This node must be connected to an AI agent. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Description","name":"toolDescription","type":"string","description":"Explain to LLM what this tool does, better description would allow LLM to produce expected result","placeholder":"e.g. Get the current weather in the requested city","default":"","typeOptions":{"rows":3}},{"displayName":"Method","name":"method","type":"options","options":[{"name":"DELETE","value":"DELETE"},{"name":"GET","value":"GET"},{"name":"PATCH","value":"PATCH"},{"name":"POST","value":"POST"},{"name":"PUT","value":"PUT"}],"default":"GET"},{"displayName":"Tip: You can use a {placeholder} for any part of the request to be filled by the model. Provide more context about them in the placeholders section","name":"placeholderNotice","type":"notice","default":""},{"displayName":"URL","name":"url","type":"string","default":"","required":true,"placeholder":"e.g. http://www.example.com/{path}"},{"displayName":"Authentication","name":"authentication","description":"Select the type of authentication to use if needed, authentication would be done by n8n and your credentials will not be shared with the LLM","noDataExpression":true,"type":"options","options":[{"name":"None","value":"none"},{"name":"Predefined Credential Type","value":"predefinedCredentialType","description":"We've already implemented auth for many services so that you don't have to set it up manually"},{"name":"Generic Credential Type","value":"genericCredentialType","description":"Fully customizable. 
Choose between basic, header, OAuth2, etc."}],"default":"none"},{"displayName":"Credential Type","name":"nodeCredentialType","type":"credentialsSelect","noDataExpression":true,"required":true,"default":"","credentialTypes":["extends:oAuth2Api","extends:oAuth1Api","has:authenticate"],"displayOptions":{"show":{"authentication":["predefinedCredentialType"]}}},{"displayName":"Make sure you have specified the scope(s) for the Service Account in the credential","name":"googleApiWarning","type":"notice","default":"","displayOptions":{"show":{"nodeCredentialType":["googleApi"]}}},{"displayName":"Generic Auth Type","name":"genericAuthType","type":"credentialsSelect","required":true,"default":"","credentialTypes":["has:genericAuth"],"displayOptions":{"show":{"authentication":["genericCredentialType"]}}},{"displayName":"Send Query Parameters","name":"sendQuery","type":"boolean","default":false,"noDataExpression":true,"description":"Whether the request has query params or not"},{"displayName":"Specify Query Parameters","name":"specifyQuery","type":"options","options":[{"name":"Using Fields Below","value":"keypair"},{"name":"Using JSON Below","value":"json"},{"name":"Let Model Specify Entire Body","value":"model"}],"default":"keypair","displayOptions":{"show":{"sendQuery":[true]}}},{"displayName":"Query Parameters","name":"parametersQuery","type":"fixedCollection","typeOptions":{"multipleValues":true},"placeholder":"Add Parameter","default":{"values":[{"name":""}]},"options":[{"name":"values","displayName":"Values","values":[{"displayName":"Name","name":"name","type":"string","default":""},{"displayName":"Value Provided","name":"valueProvider","type":"options","options":[{"name":"By Model (and is required)","value":"modelRequired"},{"name":"By Model (but is optional)","value":"modelOptional"},{"name":"Using Field Below","value":"fieldValue"}],"default":"modelRequired"},{"displayName":"Value","name":"value","type":"string","default":"","hint":"Use a {placeholder} for any data to be filled in by the model","displayOptions":{"show":{"valueProvider":["fieldValue"]}}}]}],"displayOptions":{"show":{"sendQuery":[true],"specifyQuery":["keypair"]}}},{"displayName":"JSON","name":"jsonQuery","type":"string","typeOptions":{"rows":5},"hint":"Use a {placeholder} for any data to be filled in by the model","default":"","displayOptions":{"show":{"sendQuery":[true],"specifyQuery":["json"]}}},{"displayName":"Send Headers","name":"sendHeaders","type":"boolean","default":false,"noDataExpression":true,"description":"Whether the request has headers or not"},{"displayName":"Specify Headers","name":"specifyHeaders","type":"options","options":[{"name":"Using Fields Below","value":"keypair"},{"name":"Using JSON Below","value":"json"},{"name":"Let Model Specify Entire Body","value":"model"}],"default":"keypair","displayOptions":{"show":{"sendHeaders":[true]}}},{"displayName":"Header Parameters","name":"parametersHeaders","type":"fixedCollection","typeOptions":{"multipleValues":true},"placeholder":"Add Parameter","default":{"values":[{"name":""}]},"options":[{"name":"values","displayName":"Values","values":[{"displayName":"Name","name":"name","type":"string","default":""},{"displayName":"Value Provided","name":"valueProvider","type":"options","options":[{"name":"By Model (and is required)","value":"modelRequired"},{"name":"By Model (but is optional)","value":"modelOptional"},{"name":"Using Field Below","value":"fieldValue"}],"default":"modelRequired"},{"displayName":"Value","name":"value","type":"string","default":"","hint":"Use a 
{placeholder} for any data to be filled in by the model","displayOptions":{"show":{"valueProvider":["fieldValue"]}}}]}],"displayOptions":{"show":{"sendHeaders":[true],"specifyHeaders":["keypair"]}}},{"displayName":"JSON","name":"jsonHeaders","type":"string","typeOptions":{"rows":5},"hint":"Use a {placeholder} for any data to be filled in by the model","default":"","displayOptions":{"show":{"sendHeaders":[true],"specifyHeaders":["json"]}}},{"displayName":"Send Body","name":"sendBody","type":"boolean","default":false,"noDataExpression":true,"description":"Whether the request has body or not"},{"displayName":"Specify Body","name":"specifyBody","type":"options","options":[{"name":"Using Fields Below","value":"keypair"},{"name":"Using JSON Below","value":"json"},{"name":"Let Model Specify Entire Body","value":"model"}],"default":"keypair","displayOptions":{"show":{"sendBody":[true]}}},{"displayName":"Body Parameters","name":"parametersBody","type":"fixedCollection","typeOptions":{"multipleValues":true},"placeholder":"Add Parameter","default":{"values":[{"name":""}]},"options":[{"name":"values","displayName":"Values","values":[{"displayName":"Name","name":"name","type":"string","default":""},{"displayName":"Value Provided","name":"valueProvider","type":"options","options":[{"name":"By Model (and is required)","value":"modelRequired"},{"name":"By Model (but is optional)","value":"modelOptional"},{"name":"Using Field Below","value":"fieldValue"}],"default":"modelRequired"},{"displayName":"Value","name":"value","type":"string","default":"","hint":"Use a {placeholder} for any data to be filled in by the model","displayOptions":{"show":{"valueProvider":["fieldValue"]}}}]}],"displayOptions":{"show":{"sendBody":[true],"specifyBody":["keypair"]}}},{"displayName":"JSON","name":"jsonBody","type":"string","typeOptions":{"rows":5},"hint":"Use a {placeholder} for any data to be filled in by the model","default":"","displayOptions":{"show":{"sendBody":[true],"specifyBody":["json"]}}},{"displayName":"Placeholder Definitions","name":"placeholderDefinitions","type":"fixedCollection","typeOptions":{"multipleValues":true},"placeholder":"Add Definition","default":[],"options":[{"name":"values","displayName":"Values","values":[{"displayName":"Placeholder Name","name":"name","type":"string","default":""},{"displayName":"Description","name":"description","type":"string","default":""},{"displayName":"Type","name":"type","type":"options","options":[{"name":"Not Specified (Default)","value":"not specified"},{"name":"String","value":"string"},{"name":"Number","value":"number"},{"name":"Boolean","value":"boolean"},{"name":"JSON","value":"json"}],"default":"not specified"}]}]},{"displayName":"Optimize Response","name":"optimizeResponse","type":"boolean","default":false,"noDataExpression":true,"description":"Whether the optimize the tool response to reduce amount of data passed to the LLM that could lead to better result and reduce cost"},{"displayName":"Expected Response Type","name":"responseType","type":"options","displayOptions":{"show":{"optimizeResponse":[true]}},"options":[{"name":"JSON","value":"json"},{"name":"HTML","value":"html"},{"name":"Text","value":"text"}],"default":"json"},{"displayName":"Field Containing Data","name":"dataField","type":"string","default":"","placeholder":"e.g. 
records","description":"Specify the name of the field in the response containing the data","hint":"leave blank to use whole response","requiresDataPath":"single","displayOptions":{"show":{"optimizeResponse":[true],"responseType":["json"]}}},{"displayName":"Include Fields","name":"fieldsToInclude","type":"options","description":"What fields response object should include","default":"all","displayOptions":{"show":{"optimizeResponse":[true],"responseType":["json"]}},"options":[{"name":"All","value":"all","description":"Include all fields"},{"name":"Selected","value":"selected","description":"Include only fields specified below"},{"name":"Except","value":"except","description":"Exclude fields specified below"}]},{"displayName":"Fields","name":"fields","type":"string","default":"","placeholder":"e.g. field1,field2","description":"Comma-separated list of the field names. Supports dot notation. You can drag the selected fields from the input panel.","requiresDataPath":"multiple","displayOptions":{"show":{"optimizeResponse":[true],"responseType":["json"]},"hide":{"fieldsToInclude":["all"]}}},{"displayName":"Selector (CSS)","name":"cssSelector","type":"string","description":"Select specific element(e.g. body) or multiple elements(e.g. div) of chosen type in the response HTML.","placeholder":"e.g. body","default":"body","displayOptions":{"show":{"optimizeResponse":[true],"responseType":["html"]}}},{"displayName":"Return Only Content","name":"onlyContent","type":"boolean","default":false,"description":"Whether to return only content of html elements, stripping html tags and attributes","hint":"Uses less tokens and may be easier for model to understand","displayOptions":{"show":{"optimizeResponse":[true],"responseType":["html"]}}},{"displayName":"Elements To Omit","name":"elementsToOmit","type":"string","displayOptions":{"show":{"optimizeResponse":[true],"responseType":["html"],"onlyContent":[true]}},"default":"","placeholder":"e.g. img, .className, #ItemId","description":"Comma-separated list of selectors that would be excluded when extracting content"},{"displayName":"Truncate Response","name":"truncateResponse","type":"boolean","default":false,"hint":"Helps save tokens","displayOptions":{"show":{"optimizeResponse":[true],"responseType":["text","html"]}}},{"displayName":"Max Response Characters","name":"maxLength","type":"number","default":1000,"typeOptions":{"minValue":1},"displayOptions":{"show":{"optimizeResponse":[true],"responseType":["text","html"],"truncateResponse":[true]}}}],"iconUrl":{"light":"icons/@n8n/n8n-nodes-langchain/dist/nodes/tools/ToolHttpRequest/httprequest.svg","dark":"icons/@n8n/n8n-nodes-langchain/dist/nodes/tools/ToolHttpRequest/httprequest.dark.svg"}},
+ {"displayName":"HTTP Request Tool","name":"@n8n/n8n-nodes-langchain.toolHttpRequest","group":["output"],"version":[1,1.1],"description":"Makes an HTTP request and returns the response data","subtitle":"={{ $parameter.toolDescription }}","defaults":{"name":"HTTP Request"},"credentials":[],"codex":{"categories":["AI"],"subcategories":{"AI":["Tools"],"Tools":["Recommended Tools"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.toolhttprequest/"}]}},"inputs":[],"outputs":["ai_tool"],"outputNames":["Tool"],"properties":[{"displayName":"This node must be connected to an AI agent. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Description","name":"toolDescription","type":"string","description":"Explain to LLM what this tool does, better description would allow LLM to produce expected result","placeholder":"e.g. Get the current weather in the requested city","default":"","typeOptions":{"rows":3}},{"displayName":"Method","name":"method","type":"options","options":[{"name":"DELETE","value":"DELETE"},{"name":"GET","value":"GET"},{"name":"PATCH","value":"PATCH"},{"name":"POST","value":"POST"},{"name":"PUT","value":"PUT"}],"default":"GET"},{"displayName":"Tip: You can use a {placeholder} for any part of the request to be filled by the model. Provide more context about them in the placeholders section","name":"placeholderNotice","type":"notice","default":""},{"displayName":"URL","name":"url","type":"string","default":"","required":true,"placeholder":"e.g. http://www.example.com/{path}"},{"displayName":"Authentication","name":"authentication","description":"Select the type of authentication to use if needed, authentication would be done by n8n and your credentials will not be shared with the LLM","noDataExpression":true,"type":"options","options":[{"name":"None","value":"none"},{"name":"Predefined Credential Type","value":"predefinedCredentialType","description":"We've already implemented auth for many services so that you don't have to set it up manually"},{"name":"Generic Credential Type","value":"genericCredentialType","description":"Fully customizable. 
Choose between basic, header, OAuth2, etc."}],"default":"none"},{"displayName":"Credential Type","name":"nodeCredentialType","type":"credentialsSelect","noDataExpression":true,"required":true,"default":"","credentialTypes":["extends:oAuth2Api","extends:oAuth1Api","has:authenticate"],"displayOptions":{"show":{"authentication":["predefinedCredentialType"]}}},{"displayName":"Make sure you have specified the scope(s) for the Service Account in the credential","name":"googleApiWarning","type":"notice","default":"","displayOptions":{"show":{"nodeCredentialType":["googleApi"]}}},{"displayName":"Generic Auth Type","name":"genericAuthType","type":"credentialsSelect","required":true,"default":"","credentialTypes":["has:genericAuth"],"displayOptions":{"show":{"authentication":["genericCredentialType"]}}},{"displayName":"Send Query Parameters","name":"sendQuery","type":"boolean","default":false,"noDataExpression":true,"description":"Whether the request has query params or not"},{"displayName":"Specify Query Parameters","name":"specifyQuery","type":"options","options":[{"name":"Using Fields Below","value":"keypair"},{"name":"Using JSON Below","value":"json"},{"name":"Let Model Specify Entire Body","value":"model"}],"default":"keypair","displayOptions":{"show":{"sendQuery":[true]}}},{"displayName":"Query Parameters","name":"parametersQuery","type":"fixedCollection","typeOptions":{"multipleValues":true},"placeholder":"Add Parameter","default":{"values":[{"name":""}]},"options":[{"name":"values","displayName":"Values","values":[{"displayName":"Name","name":"name","type":"string","default":""},{"displayName":"Value Provided","name":"valueProvider","type":"options","options":[{"name":"By Model (and is required)","value":"modelRequired"},{"name":"By Model (but is optional)","value":"modelOptional"},{"name":"Using Field Below","value":"fieldValue"}],"default":"modelRequired"},{"displayName":"Value","name":"value","type":"string","default":"","hint":"Use a {placeholder} for any data to be filled in by the model","displayOptions":{"show":{"valueProvider":["fieldValue"]}}}]}],"displayOptions":{"show":{"sendQuery":[true],"specifyQuery":["keypair"]}}},{"displayName":"JSON","name":"jsonQuery","type":"string","typeOptions":{"rows":5},"hint":"Use a {placeholder} for any data to be filled in by the model","default":"","displayOptions":{"show":{"sendQuery":[true],"specifyQuery":["json"]}}},{"displayName":"Send Headers","name":"sendHeaders","type":"boolean","default":false,"noDataExpression":true,"description":"Whether the request has headers or not"},{"displayName":"Specify Headers","name":"specifyHeaders","type":"options","options":[{"name":"Using Fields Below","value":"keypair"},{"name":"Using JSON Below","value":"json"},{"name":"Let Model Specify Entire Body","value":"model"}],"default":"keypair","displayOptions":{"show":{"sendHeaders":[true]}}},{"displayName":"Header Parameters","name":"parametersHeaders","type":"fixedCollection","typeOptions":{"multipleValues":true},"placeholder":"Add Parameter","default":{"values":[{"name":""}]},"options":[{"name":"values","displayName":"Values","values":[{"displayName":"Name","name":"name","type":"string","default":""},{"displayName":"Value Provided","name":"valueProvider","type":"options","options":[{"name":"By Model (and is required)","value":"modelRequired"},{"name":"By Model (but is optional)","value":"modelOptional"},{"name":"Using Field Below","value":"fieldValue"}],"default":"modelRequired"},{"displayName":"Value","name":"value","type":"string","default":"","hint":"Use a 
{placeholder} for any data to be filled in by the model","displayOptions":{"show":{"valueProvider":["fieldValue"]}}}]}],"displayOptions":{"show":{"sendHeaders":[true],"specifyHeaders":["keypair"]}}},{"displayName":"JSON","name":"jsonHeaders","type":"string","typeOptions":{"rows":5},"hint":"Use a {placeholder} for any data to be filled in by the model","default":"","displayOptions":{"show":{"sendHeaders":[true],"specifyHeaders":["json"]}}},{"displayName":"Send Body","name":"sendBody","type":"boolean","default":false,"noDataExpression":true,"description":"Whether the request has body or not"},{"displayName":"Specify Body","name":"specifyBody","type":"options","options":[{"name":"Using Fields Below","value":"keypair"},{"name":"Using JSON Below","value":"json"},{"name":"Let Model Specify Entire Body","value":"model"}],"default":"keypair","displayOptions":{"show":{"sendBody":[true]}}},{"displayName":"Body Parameters","name":"parametersBody","type":"fixedCollection","typeOptions":{"multipleValues":true},"placeholder":"Add Parameter","default":{"values":[{"name":""}]},"options":[{"name":"values","displayName":"Values","values":[{"displayName":"Name","name":"name","type":"string","default":""},{"displayName":"Value Provided","name":"valueProvider","type":"options","options":[{"name":"By Model (and is required)","value":"modelRequired"},{"name":"By Model (but is optional)","value":"modelOptional"},{"name":"Using Field Below","value":"fieldValue"}],"default":"modelRequired"},{"displayName":"Value","name":"value","type":"string","default":"","hint":"Use a {placeholder} for any data to be filled in by the model","displayOptions":{"show":{"valueProvider":["fieldValue"]}}}]}],"displayOptions":{"show":{"sendBody":[true],"specifyBody":["keypair"]}}},{"displayName":"JSON","name":"jsonBody","type":"string","typeOptions":{"rows":5},"hint":"Use a {placeholder} for any data to be filled in by the model","default":"","displayOptions":{"show":{"sendBody":[true],"specifyBody":["json"]}}},{"displayName":"Placeholder Definitions","name":"placeholderDefinitions","type":"fixedCollection","typeOptions":{"multipleValues":true},"placeholder":"Add Definition","default":[],"options":[{"name":"values","displayName":"Values","values":[{"displayName":"Placeholder Name","name":"name","type":"string","default":""},{"displayName":"Description","name":"description","type":"string","default":""},{"displayName":"Type","name":"type","type":"options","options":[{"name":"Not Specified (Default)","value":"not specified"},{"name":"String","value":"string"},{"name":"Number","value":"number"},{"name":"Boolean","value":"boolean"},{"name":"JSON","value":"json"}],"default":"not specified"}]}]},{"displayName":"Optimize Response","name":"optimizeResponse","type":"boolean","default":false,"noDataExpression":true,"description":"Whether the optimize the tool response to reduce amount of data passed to the LLM that could lead to better result and reduce cost"},{"displayName":"Expected Response Type","name":"responseType","type":"options","displayOptions":{"show":{"optimizeResponse":[true]}},"options":[{"name":"JSON","value":"json"},{"name":"HTML","value":"html"},{"name":"Text","value":"text"}],"default":"json"},{"displayName":"Field Containing Data","name":"dataField","type":"string","default":"","placeholder":"e.g. 
records","description":"Specify the name of the field in the response containing the data","hint":"leave blank to use whole response","requiresDataPath":"single","displayOptions":{"show":{"optimizeResponse":[true],"responseType":["json"]}}},{"displayName":"Include Fields","name":"fieldsToInclude","type":"options","description":"What fields response object should include","default":"all","displayOptions":{"show":{"optimizeResponse":[true],"responseType":["json"]}},"options":[{"name":"All","value":"all","description":"Include all fields"},{"name":"Selected","value":"selected","description":"Include only fields specified below"},{"name":"Except","value":"except","description":"Exclude fields specified below"}]},{"displayName":"Fields","name":"fields","type":"string","default":"","placeholder":"e.g. field1,field2","description":"Comma-separated list of the field names. Supports dot notation. You can drag the selected fields from the input panel.","requiresDataPath":"multiple","displayOptions":{"show":{"optimizeResponse":[true],"responseType":["json"]},"hide":{"fieldsToInclude":["all"]}}},{"displayName":"Selector (CSS)","name":"cssSelector","type":"string","description":"Select specific element(e.g. body) or multiple elements(e.g. div) of chosen type in the response HTML.","placeholder":"e.g. body","default":"body","displayOptions":{"show":{"optimizeResponse":[true],"responseType":["html"]}}},{"displayName":"Return Only Content","name":"onlyContent","type":"boolean","default":false,"description":"Whether to return only content of html elements, stripping html tags and attributes","hint":"Uses less tokens and may be easier for model to understand","displayOptions":{"show":{"optimizeResponse":[true],"responseType":["html"]}}},{"displayName":"Elements To Omit","name":"elementsToOmit","type":"string","displayOptions":{"show":{"optimizeResponse":[true],"responseType":["html"],"onlyContent":[true]}},"default":"","placeholder":"e.g. img, .className, #ItemId","description":"Comma-separated list of selectors that would be excluded when extracting content"},{"displayName":"Truncate Response","name":"truncateResponse","type":"boolean","default":false,"hint":"Helps save tokens","displayOptions":{"show":{"optimizeResponse":[true],"responseType":["text","html"]}}},{"displayName":"Max Response Characters","name":"maxLength","type":"number","default":1000,"typeOptions":{"minValue":1},"displayOptions":{"show":{"optimizeResponse":[true],"responseType":["text","html"],"truncateResponse":[true]}}}],"iconUrl":{"light":"icons/@n8n/n8n-nodes-langchain/dist/nodes/tools/ToolHttpRequest/httprequest.svg","dark":"icons/@n8n/n8n-nodes-langchain/dist/nodes/tools/ToolHttpRequest/httprequest.dark.svg"}},
  {"displayName":"SerpApi (Google Search)","name":"@n8n/n8n-nodes-langchain.toolSerpApi","group":["transform"],"version":1,"description":"Search in Google using SerpAPI","defaults":{"name":"SerpAPI"},"codex":{"categories":["AI"],"subcategories":{"AI":["Tools"],"Tools":["Other Tools"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.toolserpapi/"}]}},"inputs":[],"outputs":["ai_tool"],"outputNames":["Tool"],"credentials":[{"name":"serpApi","required":true}],"properties":[{"displayName":"This node must be connected to an AI agent. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Options","name":"options","type":"collection","placeholder":"Add Option","default":{},"options":[{"displayName":"Country","name":"gl","type":"string","default":"us","description":"Defines the country to use for search. Head to <a href=\"https://serpapi.com/google-countries\">Google countries page</a> for a full list of supported countries."},{"displayName":"Device","name":"device","type":"options","options":[{"name":"Desktop","value":"desktop"},{"name":"Mobile","value":"mobile"},{"name":"Tablet","value":"tablet"}],"default":"desktop","description":"Device to use to get the results"},{"displayName":"Explicit Array","name":"no_cache","type":"boolean","default":false,"description":"Whether to force SerpApi to fetch the Google results even if a cached version is already present. Cache expires after 1h. Cached searches are free, and are not counted towards your searches per month."},{"displayName":"Google Domain","name":"google_domain","type":"string","default":"google.com","description":"Defines the domain to use for search. Head to <a href=\"https://serpapi.com/google-domains\">Google domains page</a> for a full list of supported domains."},{"displayName":"Language","name":"hl","type":"string","default":"en","description":"Defines the language to use. It's a two-letter language code. (e.g., `en` for English, `es` for Spanish, or `fr` for French). Head to <a href=\"https://serpapi.com/google-languages\">Google languages page</a> for a full list of supported languages."}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/tools/ToolSerpApi/serpApi.svg"},
  {"displayName":"Vector Store Tool","name":"@n8n/n8n-nodes-langchain.toolVectorStore","icon":"fa:database","group":["transform"],"version":[1],"description":"Retrieve context from vector store","defaults":{"name":"Vector Store Tool"},"codex":{"categories":["AI"],"subcategories":{"AI":["Tools"],"Tools":["Other Tools"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.toolvectorstore/"}]}},"inputs":[{"displayName":"Vector Store","maxConnections":1,"type":"ai_vectorStore","required":true},{"displayName":"Model","maxConnections":1,"type":"ai_languageModel","required":true}],"outputs":["ai_tool"],"outputNames":["Tool"],"properties":[{"displayName":"This node must be connected to an AI agent. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Name","name":"name","type":"string","default":"","placeholder":"e.g. state_of_union_address","validateType":"string-alphanumeric","description":"Name of the vector store"},{"displayName":"Description","name":"description","type":"string","default":"","placeholder":"The most recent state of the Union address","typeOptions":{"rows":3}},{"displayName":"Limit","name":"topK","type":"number","default":4,"description":"The maximum number of results to return"}]},
  {"displayName":"Wikipedia","name":"@n8n/n8n-nodes-langchain.toolWikipedia","group":["transform"],"version":1,"description":"Search in Wikipedia","defaults":{"name":"Wikipedia"},"codex":{"categories":["AI"],"subcategories":{"AI":["Tools"],"Tools":["Other Tools"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.toolwikipedia/"}]}},"inputs":[],"outputs":["ai_tool"],"outputNames":["Tool"],"properties":[{"displayName":"This node must be connected to an AI agent. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/tools/ToolWikipedia/wikipedia.svg"},