@n8n/n8n-nodes-langchain 1.100.0 → 1.101.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/credentials/WeaviateApi.credentials.js +164 -0
- package/dist/credentials/WeaviateApi.credentials.js.map +1 -0
- package/dist/known/credentials.json +7 -0
- package/dist/known/nodes.json +4 -0
- package/dist/nodes/agents/Agent/Agent.node.js +5 -2
- package/dist/nodes/agents/Agent/Agent.node.js.map +1 -1
- package/dist/nodes/agents/Agent/V1/AgentV1.node.js +1 -4
- package/dist/nodes/agents/Agent/V1/AgentV1.node.js.map +1 -1
- package/dist/nodes/agents/Agent/V2/AgentV2.node.js +23 -6
- package/dist/nodes/agents/Agent/V2/AgentV2.node.js.map +1 -1
- package/dist/nodes/agents/Agent/agents/SqlAgent/description.js +0 -1
- package/dist/nodes/agents/Agent/agents/SqlAgent/description.js.map +1 -1
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V2/description.js +29 -1
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V2/description.js.map +1 -1
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V2/execute.js +94 -4
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V2/execute.js.map +1 -1
- package/dist/nodes/agents/Agent/agents/ToolsAgent/common.js.map +1 -1
- package/dist/nodes/agents/OpenAiAssistant/OpenAiAssistant.node.js +0 -1
- package/dist/nodes/agents/OpenAiAssistant/OpenAiAssistant.node.js.map +1 -1
- package/dist/nodes/chains/ChainRetrievalQA/ChainRetrievalQa.node.js +0 -1
- package/dist/nodes/chains/ChainRetrievalQA/ChainRetrievalQa.node.js.map +1 -1
- package/dist/nodes/chains/ChainSummarization/V1/ChainSummarizationV1.node.js +0 -1
- package/dist/nodes/chains/ChainSummarization/V1/ChainSummarizationV1.node.js.map +1 -1
- package/dist/nodes/chains/ChainSummarization/V2/ChainSummarizationV2.node.js +0 -1
- package/dist/nodes/chains/ChainSummarization/V2/ChainSummarizationV2.node.js.map +1 -1
- package/dist/nodes/code/Code.node.js.map +1 -1
- package/dist/nodes/document_loaders/DocumentBinaryInputLoader/DocumentBinaryInputLoader.node.js +0 -2
- package/dist/nodes/document_loaders/DocumentBinaryInputLoader/DocumentBinaryInputLoader.node.js.map +1 -1
- package/dist/nodes/document_loaders/DocumentDefaultDataLoader/DocumentDefaultDataLoader.node.js +0 -2
- package/dist/nodes/document_loaders/DocumentDefaultDataLoader/DocumentDefaultDataLoader.node.js.map +1 -1
- package/dist/nodes/document_loaders/DocumentGithubLoader/DocumentGithubLoader.node.js +0 -2
- package/dist/nodes/document_loaders/DocumentGithubLoader/DocumentGithubLoader.node.js.map +1 -1
- package/dist/nodes/document_loaders/DocumentJSONInputLoader/DocumentJsonInputLoader.node.js +0 -2
- package/dist/nodes/document_loaders/DocumentJSONInputLoader/DocumentJsonInputLoader.node.js.map +1 -1
- package/dist/nodes/embeddings/EmbeddingsAwsBedrock/EmbeddingsAwsBedrock.node.js +0 -2
- package/dist/nodes/embeddings/EmbeddingsAwsBedrock/EmbeddingsAwsBedrock.node.js.map +1 -1
- package/dist/nodes/embeddings/EmbeddingsAzureOpenAi/EmbeddingsAzureOpenAi.node.js +0 -2
- package/dist/nodes/embeddings/EmbeddingsAzureOpenAi/EmbeddingsAzureOpenAi.node.js.map +1 -1
- package/dist/nodes/embeddings/EmbeddingsCohere/EmbeddingsCohere.node.js +0 -2
- package/dist/nodes/embeddings/EmbeddingsCohere/EmbeddingsCohere.node.js.map +1 -1
- package/dist/nodes/embeddings/EmbeddingsGoogleGemini/EmbeddingsGoogleGemini.node.js +0 -2
- package/dist/nodes/embeddings/EmbeddingsGoogleGemini/EmbeddingsGoogleGemini.node.js.map +1 -1
- package/dist/nodes/embeddings/EmbeddingsGoogleVertex/EmbeddingsGoogleVertex.node.js +0 -2
- package/dist/nodes/embeddings/EmbeddingsGoogleVertex/EmbeddingsGoogleVertex.node.js.map +1 -1
- package/dist/nodes/embeddings/EmbeddingsHuggingFaceInference/EmbeddingsHuggingFaceInference.node.js +0 -2
- package/dist/nodes/embeddings/EmbeddingsHuggingFaceInference/EmbeddingsHuggingFaceInference.node.js.map +1 -1
- package/dist/nodes/embeddings/EmbeddingsMistralCloud/EmbeddingsMistralCloud.node.js +0 -2
- package/dist/nodes/embeddings/EmbeddingsMistralCloud/EmbeddingsMistralCloud.node.js.map +1 -1
- package/dist/nodes/embeddings/EmbeddingsOllama/EmbeddingsOllama.node.js +0 -2
- package/dist/nodes/embeddings/EmbeddingsOllama/EmbeddingsOllama.node.js.map +1 -1
- package/dist/nodes/embeddings/EmbeddingsOpenAI/EmbeddingsOpenAi.node.js +0 -2
- package/dist/nodes/embeddings/EmbeddingsOpenAI/EmbeddingsOpenAi.node.js.map +1 -1
- package/dist/nodes/llms/LMChatAnthropic/LmChatAnthropic.node.js +4 -5
- package/dist/nodes/llms/LMChatAnthropic/LmChatAnthropic.node.js.map +1 -1
- package/dist/nodes/llms/LMChatOllama/LmChatOllama.node.js +0 -3
- package/dist/nodes/llms/LMChatOllama/LmChatOllama.node.js.map +1 -1
- package/dist/nodes/llms/LMChatOpenAi/LmChatOpenAi.node.js +6 -6
- package/dist/nodes/llms/LMChatOpenAi/LmChatOpenAi.node.js.map +1 -1
- package/dist/nodes/llms/LMChatOpenAi/methods/loadModels.js +3 -1
- package/dist/nodes/llms/LMChatOpenAi/methods/loadModels.js.map +1 -1
- package/dist/nodes/llms/LMCohere/LmCohere.node.js +0 -3
- package/dist/nodes/llms/LMCohere/LmCohere.node.js.map +1 -1
- package/dist/nodes/llms/LMOllama/LmOllama.node.js +0 -3
- package/dist/nodes/llms/LMOllama/LmOllama.node.js.map +1 -1
- package/dist/nodes/llms/LMOpenAi/LmOpenAi.node.js +3 -4
- package/dist/nodes/llms/LMOpenAi/LmOpenAi.node.js.map +1 -1
- package/dist/nodes/llms/LMOpenHuggingFaceInference/LmOpenHuggingFaceInference.node.js +0 -3
- package/dist/nodes/llms/LMOpenHuggingFaceInference/LmOpenHuggingFaceInference.node.js.map +1 -1
- package/dist/nodes/llms/LmChatAwsBedrock/LmChatAwsBedrock.node.js +1 -5
- package/dist/nodes/llms/LmChatAwsBedrock/LmChatAwsBedrock.node.js.map +1 -1
- package/dist/nodes/llms/LmChatAzureOpenAi/LmChatAzureOpenAi.node.js +3 -4
- package/dist/nodes/llms/LmChatAzureOpenAi/LmChatAzureOpenAi.node.js.map +1 -1
- package/dist/nodes/llms/LmChatDeepSeek/LmChatDeepSeek.node.js +3 -4
- package/dist/nodes/llms/LmChatDeepSeek/LmChatDeepSeek.node.js.map +1 -1
- package/dist/nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.js +0 -3
- package/dist/nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.js.map +1 -1
- package/dist/nodes/llms/LmChatGoogleVertex/LmChatGoogleVertex.node.js +0 -3
- package/dist/nodes/llms/LmChatGoogleVertex/LmChatGoogleVertex.node.js.map +1 -1
- package/dist/nodes/llms/LmChatGroq/LmChatGroq.node.js +1 -4
- package/dist/nodes/llms/LmChatGroq/LmChatGroq.node.js.map +1 -1
- package/dist/nodes/llms/LmChatMistralCloud/LmChatMistralCloud.node.js +0 -3
- package/dist/nodes/llms/LmChatMistralCloud/LmChatMistralCloud.node.js.map +1 -1
- package/dist/nodes/llms/LmChatOpenRouter/LmChatOpenRouter.node.js +3 -3
- package/dist/nodes/llms/LmChatOpenRouter/LmChatOpenRouter.node.js.map +1 -1
- package/dist/nodes/llms/LmChatXAiGrok/LmChatXAiGrok.node.js +3 -4
- package/dist/nodes/llms/LmChatXAiGrok/LmChatXAiGrok.node.js.map +1 -1
- package/dist/nodes/llms/N8nLlmTracing.js.map +1 -1
- package/dist/nodes/llms/N8nNonEstimatingTracing.js.map +1 -1
- package/dist/nodes/llms/n8nDefaultFailedAttemptHandler.js +1 -1
- package/dist/nodes/llms/n8nDefaultFailedAttemptHandler.js.map +1 -1
- package/dist/nodes/memory/MemoryBufferWindow/MemoryBufferWindow.node.js +0 -2
- package/dist/nodes/memory/MemoryBufferWindow/MemoryBufferWindow.node.js.map +1 -1
- package/dist/nodes/memory/MemoryChatRetriever/MemoryChatRetriever.node.js +0 -2
- package/dist/nodes/memory/MemoryChatRetriever/MemoryChatRetriever.node.js.map +1 -1
- package/dist/nodes/memory/MemoryManager/MemoryManager.node.js +0 -2
- package/dist/nodes/memory/MemoryManager/MemoryManager.node.js.map +1 -1
- package/dist/nodes/memory/MemoryMotorhead/MemoryMotorhead.node.js +0 -2
- package/dist/nodes/memory/MemoryMotorhead/MemoryMotorhead.node.js.map +1 -1
- package/dist/nodes/memory/MemoryPostgresChat/MemoryPostgresChat.node.js +0 -2
- package/dist/nodes/memory/MemoryPostgresChat/MemoryPostgresChat.node.js.map +1 -1
- package/dist/nodes/memory/MemoryRedisChat/MemoryRedisChat.node.js +0 -2
- package/dist/nodes/memory/MemoryRedisChat/MemoryRedisChat.node.js.map +1 -1
- package/dist/nodes/memory/MemoryXata/MemoryXata.node.js +0 -2
- package/dist/nodes/memory/MemoryXata/MemoryXata.node.js.map +1 -1
- package/dist/nodes/memory/MemoryZep/MemoryZep.node.js +0 -2
- package/dist/nodes/memory/MemoryZep/MemoryZep.node.js.map +1 -1
- package/dist/nodes/output_parser/OutputParserAutofixing/OutputParserAutofixing.node.js +0 -2
- package/dist/nodes/output_parser/OutputParserAutofixing/OutputParserAutofixing.node.js.map +1 -1
- package/dist/nodes/output_parser/OutputParserItemList/OutputParserItemList.node.js +0 -2
- package/dist/nodes/output_parser/OutputParserItemList/OutputParserItemList.node.js.map +1 -1
- package/dist/nodes/output_parser/OutputParserStructured/OutputParserStructured.node.js +0 -1
- package/dist/nodes/output_parser/OutputParserStructured/OutputParserStructured.node.js.map +1 -1
- package/dist/nodes/rerankers/RerankerCohere/RerankerCohere.node.js.map +1 -1
- package/dist/nodes/retrievers/RetrieverContextualCompression/RetrieverContextualCompression.node.js +0 -1
- package/dist/nodes/retrievers/RetrieverContextualCompression/RetrieverContextualCompression.node.js.map +1 -1
- package/dist/nodes/retrievers/RetrieverMultiQuery/RetrieverMultiQuery.node.js +0 -1
- package/dist/nodes/retrievers/RetrieverMultiQuery/RetrieverMultiQuery.node.js.map +1 -1
- package/dist/nodes/retrievers/RetrieverVectorStore/RetrieverVectorStore.node.js +0 -2
- package/dist/nodes/retrievers/RetrieverVectorStore/RetrieverVectorStore.node.js.map +1 -1
- package/dist/nodes/retrievers/RetrieverWorkflow/RetrieverWorkflow.node.js.map +1 -1
- package/dist/nodes/text_splitters/TextSplitterCharacterTextSplitter/TextSplitterCharacterTextSplitter.node.js +0 -2
- package/dist/nodes/text_splitters/TextSplitterCharacterTextSplitter/TextSplitterCharacterTextSplitter.node.js.map +1 -1
- package/dist/nodes/text_splitters/TextSplitterRecursiveCharacterTextSplitter/TextSplitterRecursiveCharacterTextSplitter.node.js +0 -2
- package/dist/nodes/text_splitters/TextSplitterRecursiveCharacterTextSplitter/TextSplitterRecursiveCharacterTextSplitter.node.js.map +1 -1
- package/dist/nodes/text_splitters/TextSplitterTokenSplitter/TextSplitterTokenSplitter.node.js +0 -2
- package/dist/nodes/text_splitters/TextSplitterTokenSplitter/TextSplitterTokenSplitter.node.js.map +1 -1
- package/dist/nodes/text_splitters/TextSplitterTokenSplitter/TokenTextSplitter.js.map +1 -1
- package/dist/nodes/tools/ToolCalculator/ToolCalculator.node.js +0 -2
- package/dist/nodes/tools/ToolCalculator/ToolCalculator.node.js.map +1 -1
- package/dist/nodes/tools/ToolCode/ToolCode.node.js +0 -2
- package/dist/nodes/tools/ToolCode/ToolCode.node.js.map +1 -1
- package/dist/nodes/tools/ToolHttpRequest/ToolHttpRequest.node.js +0 -2
- package/dist/nodes/tools/ToolHttpRequest/ToolHttpRequest.node.js.map +1 -1
- package/dist/nodes/tools/ToolHttpRequest/utils.js.map +1 -1
- package/dist/nodes/tools/ToolSerpApi/ToolSerpApi.node.js +0 -2
- package/dist/nodes/tools/ToolSerpApi/ToolSerpApi.node.js.map +1 -1
- package/dist/nodes/tools/ToolThink/ToolThink.node.js.map +1 -1
- package/dist/nodes/tools/ToolVectorStore/ToolVectorStore.node.js +0 -2
- package/dist/nodes/tools/ToolVectorStore/ToolVectorStore.node.js.map +1 -1
- package/dist/nodes/tools/ToolWikipedia/ToolWikipedia.node.js +0 -2
- package/dist/nodes/tools/ToolWikipedia/ToolWikipedia.node.js.map +1 -1
- package/dist/nodes/tools/ToolWolframAlpha/ToolWolframAlpha.node.js +0 -2
- package/dist/nodes/tools/ToolWolframAlpha/ToolWolframAlpha.node.js.map +1 -1
- package/dist/nodes/tools/ToolWorkflow/v1/versionDescription.js +0 -2
- package/dist/nodes/tools/ToolWorkflow/v1/versionDescription.js.map +1 -1
- package/dist/nodes/tools/ToolWorkflow/v2/versionDescription.js.map +1 -1
- package/dist/nodes/trigger/ChatTrigger/ChatTrigger.node.js +215 -153
- package/dist/nodes/trigger/ChatTrigger/ChatTrigger.node.js.map +1 -1
- package/dist/nodes/vector_store/VectorStoreInMemory/VectorStoreInMemory.node.js +1 -1
- package/dist/nodes/vector_store/VectorStoreInMemory/VectorStoreInMemory.node.js.map +1 -1
- package/dist/nodes/vector_store/VectorStoreInMemoryInsert/VectorStoreInMemoryInsert.node.js +0 -1
- package/dist/nodes/vector_store/VectorStoreInMemoryInsert/VectorStoreInMemoryInsert.node.js.map +1 -1
- package/dist/nodes/vector_store/VectorStoreInMemoryLoad/VectorStoreInMemoryLoad.node.js +0 -1
- package/dist/nodes/vector_store/VectorStoreInMemoryLoad/VectorStoreInMemoryLoad.node.js.map +1 -1
- package/dist/nodes/vector_store/VectorStorePGVector/VectorStorePGVector.node.js +2 -2
- package/dist/nodes/vector_store/VectorStorePGVector/VectorStorePGVector.node.js.map +1 -1
- package/dist/nodes/vector_store/VectorStoreQdrant/VectorStoreQdrant.node.js.map +1 -1
- package/dist/nodes/vector_store/VectorStoreSupabaseLoad/VectorStoreSupabaseLoad.node.js.map +1 -1
- package/dist/nodes/vector_store/VectorStoreWeaviate/VectorStoreWeaviate.node.js +242 -0
- package/dist/nodes/vector_store/VectorStoreWeaviate/VectorStoreWeaviate.node.js.map +1 -0
- package/dist/nodes/vector_store/VectorStoreWeaviate/Weaviate.utils.js +127 -0
- package/dist/nodes/vector_store/VectorStoreWeaviate/Weaviate.utils.js.map +1 -0
- package/dist/nodes/vector_store/VectorStoreWeaviate/weaviate.svg +2 -0
- package/dist/nodes/vector_store/shared/createVectorStoreNode/createVectorStoreNode.js +0 -1
- package/dist/nodes/vector_store/shared/createVectorStoreNode/createVectorStoreNode.js.map +1 -1
- package/dist/nodes/vector_store/shared/createVectorStoreNode/methods/listSearch.js +15 -2
- package/dist/nodes/vector_store/shared/createVectorStoreNode/methods/listSearch.js.map +1 -1
- package/dist/nodes/vector_store/shared/descriptions.js +26 -2
- package/dist/nodes/vector_store/shared/descriptions.js.map +1 -1
- package/dist/nodes/vendors/OpenAi/actions/assistant/message.operation.js +0 -1
- package/dist/nodes/vendors/OpenAi/actions/assistant/message.operation.js.map +1 -1
- package/dist/nodes/vendors/OpenAi/actions/versionDescription.js.map +1 -1
- package/dist/types/credentials.json +1 -0
- package/dist/types/nodes.json +4 -3
- package/dist/utils/httpProxyAgent.js +20 -6
- package/dist/utils/httpProxyAgent.js.map +1 -1
- package/dist/utils/logWrapper.js.map +1 -1
- package/package.json +21 -15
package/dist/nodes/llms/LmChatDeepSeek/LmChatDeepSeek.node.js.map
@@ -1 +1 @@
-
{"version":3,"sources":["../../../../nodes/llms/LmChatDeepSeek/LmChatDeepSeek.node.ts"],"sourcesContent":["/* eslint-disable n8n-nodes-base/node-dirname-against-convention */\n\nimport { ChatOpenAI, type ClientOptions } from '@langchain/openai';\nimport {\n\tNodeConnectionTypes,\n\ttype INodeType,\n\ttype INodeTypeDescription,\n\ttype ISupplyDataFunctions,\n\ttype SupplyData,\n} from 'n8n-workflow';\n\nimport { getHttpProxyAgent } from '@utils/httpProxyAgent';\nimport { getConnectionHintNoticeField } from '@utils/sharedFields';\n\nimport type { OpenAICompatibleCredential } from '../../../types/types';\nimport { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';\nimport { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';\nimport { N8nLlmTracing } from '../N8nLlmTracing';\n\nexport class LmChatDeepSeek implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'DeepSeek Chat Model',\n\t\t// eslint-disable-next-line n8n-nodes-base/node-class-description-name-miscased\n\t\tname: 'lmChatDeepSeek',\n\t\ticon: 'file:deepseek.svg',\n\t\tgroup: ['transform'],\n\t\tversion: [1],\n\t\tdescription: 'For advanced usage with an AI chain',\n\t\tdefaults: {\n\t\t\tname: 'DeepSeek Chat Model',\n\t\t},\n\t\tcodex: {\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Language Models', 'Root Nodes'],\n\t\t\t\t'Language Models': ['Chat Models (Recommended)'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatdeepseek/',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\t\t// eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node\n\t\tinputs: [],\n\t\t// eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong\n\t\toutputs: [NodeConnectionTypes.AiLanguageModel],\n\t\toutputNames: ['Model'],\n\t\tcredentials: [\n\t\t\t{\n\t\t\t\tname: 'deepSeekApi',\n\t\t\t\trequired: true,\n\t\t\t},\n\t\t],\n\t\trequestDefaults: {\n\t\t\tignoreHttpStatusErrors: true,\n\t\t\tbaseURL: '={{ $credentials?.url }}',\n\t\t},\n\t\tproperties: [\n\t\t\tgetConnectionHintNoticeField([NodeConnectionTypes.AiChain, NodeConnectionTypes.AiAgent]),\n\t\t\t{\n\t\t\t\tdisplayName:\n\t\t\t\t\t'If using JSON response format, you must include word \"json\" in the prompt in your chain or agent. Also, make sure to select latest models released post November 2023.',\n\t\t\t\tname: 'notice',\n\t\t\t\ttype: 'notice',\n\t\t\t\tdefault: '',\n\t\t\t\tdisplayOptions: {\n\t\t\t\t\tshow: {\n\t\t\t\t\t\t'/options.responseFormat': ['json_object'],\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Model',\n\t\t\t\tname: 'model',\n\t\t\t\ttype: 'options',\n\t\t\t\tdescription:\n\t\t\t\t\t'The model which will generate the completion. 
<a href=\"https://api-docs.deepseek.com/quick_start/pricing\">Learn more</a>.',\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tloadOptions: {\n\t\t\t\t\t\trouting: {\n\t\t\t\t\t\t\trequest: {\n\t\t\t\t\t\t\t\tmethod: 'GET',\n\t\t\t\t\t\t\t\turl: '/models',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\toutput: {\n\t\t\t\t\t\t\t\tpostReceive: [\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'rootProperty',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tproperty: 'data',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'setKeyValue',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tname: '={{$responseItem.id}}',\n\t\t\t\t\t\t\t\t\t\t\tvalue: '={{$responseItem.id}}',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'sort',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tkey: 'name',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trouting: {\n\t\t\t\t\tsend: {\n\t\t\t\t\t\ttype: 'body',\n\t\t\t\t\t\tproperty: 'model',\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tdefault: 'deepseek-chat',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Options',\n\t\t\t\tname: 'options',\n\t\t\t\tplaceholder: 'Add Option',\n\t\t\t\tdescription: 'Additional options to add',\n\t\t\t\ttype: 'collection',\n\t\t\t\tdefault: {},\n\t\t\t\toptions: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Frequency Penalty',\n\t\t\t\t\t\tname: 'frequencyPenalty',\n\t\t\t\t\t\tdefault: 0,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim\",\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Maximum Number of Tokens',\n\t\t\t\t\t\tname: 'maxTokens',\n\t\t\t\t\t\tdefault: -1,\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'The maximum number of tokens to generate in the completion. 
Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t\ttypeOptions: {\n\t\t\t\t\t\t\tmaxValue: 32768,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Response Format',\n\t\t\t\t\t\tname: 'responseFormat',\n\t\t\t\t\t\tdefault: 'text',\n\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\toptions: [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Text',\n\t\t\t\t\t\t\t\tvalue: 'text',\n\t\t\t\t\t\t\t\tdescription: 'Regular text response',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'JSON',\n\t\t\t\t\t\t\t\tvalue: 'json_object',\n\t\t\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\t\t'Enables JSON mode, which should guarantee the message the model generates is valid JSON',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t],\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Presence Penalty',\n\t\t\t\t\t\tname: 'presencePenalty',\n\t\t\t\t\t\tdefault: 0,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics\",\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Sampling Temperature',\n\t\t\t\t\t\tname: 'temperature',\n\t\t\t\t\t\tdefault: 0.7,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: 0, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Timeout',\n\t\t\t\t\t\tname: 'timeout',\n\t\t\t\t\t\tdefault: 360000,\n\t\t\t\t\t\tdescription: 'Maximum amount of time a request is allowed to take in milliseconds',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Max Retries',\n\t\t\t\t\t\tname: 'maxRetries',\n\t\t\t\t\t\tdefault: 2,\n\t\t\t\t\t\tdescription: 'Maximum number of retries to attempt',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Top P',\n\t\t\t\t\t\tname: 'topP',\n\t\t\t\t\t\tdefault: 1,\n\t\t\t\t\t\ttypeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t],\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tconst credentials = await this.getCredentials<OpenAICompatibleCredential>('deepSeekApi');\n\n\t\tconst modelName = this.getNodeParameter('model', itemIndex) as string;\n\n\t\tconst options = this.getNodeParameter('options', itemIndex, {}) as {\n\t\t\tfrequencyPenalty?: number;\n\t\t\tmaxTokens?: number;\n\t\t\tmaxRetries: number;\n\t\t\ttimeout: number;\n\t\t\tpresencePenalty?: number;\n\t\t\ttemperature?: number;\n\t\t\ttopP?: number;\n\t\t\tresponseFormat?: 'text' | 'json_object';\n\t\t};\n\n\t\tconst configuration: ClientOptions = {\n\t\t\tbaseURL: credentials.url,\n\t\t\thttpAgent: getHttpProxyAgent(),\n\t\t};\n\n\t\tconst model = new ChatOpenAI({\n\t\t\topenAIApiKey: credentials.apiKey,\n\t\t\tmodel: modelName,\n\t\t\t...options,\n\t\t\ttimeout: options.timeout ?? 
60000,\n\t\t\tmaxRetries: options.maxRetries ?? 2,\n\t\t\tconfiguration,\n\t\t\tcallbacks: [new N8nLlmTracing(this)],\n\t\t\tmodelKwargs: options.responseFormat\n\t\t\t\t? {\n\t\t\t\t\t\tresponse_format: { type: options.responseFormat },\n\t\t\t\t\t}\n\t\t\t\t: undefined,\n\t\t\tonFailedAttempt: makeN8nLlmFailedAttemptHandler(this, openAiFailedAttemptHandler),\n\t\t});\n\n\t\treturn {\n\t\t\tresponse: model,\n\t\t};\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAEA,oBAA+C;AAC/C,0BAMO;AAEP,4BAAkC;AAClC,0BAA6C;AAG7C,4BAA2C;AAC3C,wCAA+C;AAC/C,2BAA8B;AAEvB,MAAM,eAAoC;AAAA,EAA1C;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA;AAAA,MAEb,MAAM;AAAA,MACN,MAAM;AAAA,MACN,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS,CAAC,CAAC;AAAA,MACX,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,mBAAmB,YAAY;AAAA,UACpC,mBAAmB,CAAC,2BAA2B;AAAA,QAChD;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA;AAAA,MAEA,QAAQ,CAAC;AAAA;AAAA,MAET,SAAS,CAAC,wCAAoB,eAAe;AAAA,MAC7C,aAAa,CAAC,OAAO;AAAA,MACrB,aAAa;AAAA,QACZ;AAAA,UACC,MAAM;AAAA,UACN,UAAU;AAAA,QACX;AAAA,MACD;AAAA,MACA,iBAAiB;AAAA,QAChB,wBAAwB;AAAA,QACxB,SAAS;AAAA,MACV;AAAA,MACA,YAAY;AAAA,YACX,kDAA6B,CAAC,wCAAoB,SAAS,wCAAoB,OAAO,CAAC;AAAA,QACvF;AAAA,UACC,aACC;AAAA,UACD,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,UACT,gBAAgB;AAAA,YACf,MAAM;AAAA,cACL,2BAA2B,CAAC,aAAa;AAAA,YAC1C;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,aACC;AAAA,UACD,aAAa;AAAA,YACZ,aAAa;AAAA,cACZ,SAAS;AAAA,gBACR,SAAS;AAAA,kBACR,QAAQ;AAAA,kBACR,KAAK;AAAA,gBACN;AAAA,gBACA,QAAQ;AAAA,kBACP,aAAa;AAAA,oBACZ;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,UAAU;AAAA,sBACX;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,MAAM;AAAA,wBACN,OAAO;AAAA,sBACR;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,KAAK;AAAA,sBACN;AAAA,oBACD;AAAA,kBACD;AAAA,gBACD;AAAA,cACD;AAAA,YACD;AAAA,UACD;AAAA,UACA,SAAS;AAAA,YACR,MAAM;AAAA,cACL,MAAM;AAAA,cACN,UAAU;AAAA,YACX;AAAA,UACD;AAAA,UACA,SAAS;AAAA,QACV;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,aAAa;AAAA,UACb,aAAa;AAAA,UACb,MAAM;AAAA,UACN,SAAS,CAAC;AAAA,UACV,SAAS;AAAA,YACR;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,IAAI,iBAAiB,EAAE;AAAA,cAC7D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aACC;AAAA,cACD,MAAM;AAAA,cACN,aAAa;AAAA,gBACZ,UAAU;AAAA,cACX;AAAA,YACD;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,MAAM;AAAA,cACN,SAAS;AAAA,gBACR;AAAA,kBACC,MAAM;AAAA,kBACN,OAAO;AAAA,kBACP,aAAa;AAAA,gBACd;AAAA,gBACA;AAAA,kBACC,MAAM;AAAA,kBACN,OAAO;AAAA,kBACP,aACC;AAAA,gBACF;AAAA,cACD;AAAA,YACD;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,IAAI,iBAAiB,EAAE;AAAA,cAC7D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,cAC5D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa;AAAA,cACb,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa;AAAA,cACb,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,cAC5D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA;AAAA,EAEA,MAAM,WAAuC,WAAwC;AACpF,UAAM,cAAc,MAAM,KAAK,eAA2C,aAAa;AAEvF,UAAM,YAAY,KAAK,iBAAiB,SAAS,SAAS;AAE1D,UAAM,UAAU,KAAK,iBAAiB,WAAW,WAAW,CAAC,CAAC;AAW9
D,UAAM,gBAA+B;AAAA,MACpC,SAAS,YAAY;AAAA,MACrB,eAAW,yCAAkB;AAAA,IAC9B;AAEA,UAAM,QAAQ,IAAI,yBAAW;AAAA,MAC5B,cAAc,YAAY;AAAA,MAC1B,OAAO;AAAA,MACP,GAAG;AAAA,MACH,SAAS,QAAQ,WAAW;AAAA,MAC5B,YAAY,QAAQ,cAAc;AAAA,MAClC;AAAA,MACA,WAAW,CAAC,IAAI,mCAAc,IAAI,CAAC;AAAA,MACnC,aAAa,QAAQ,iBAClB;AAAA,QACA,iBAAiB,EAAE,MAAM,QAAQ,eAAe;AAAA,MACjD,IACC;AAAA,MACH,qBAAiB,kEAA+B,MAAM,gDAA0B;AAAA,IACjF,CAAC;AAED,WAAO;AAAA,MACN,UAAU;AAAA,IACX;AAAA,EACD;AACD;","names":[]}
+
{"version":3,"sources":["../../../../nodes/llms/LmChatDeepSeek/LmChatDeepSeek.node.ts"],"sourcesContent":["import { ChatOpenAI, type ClientOptions } from '@langchain/openai';\nimport {\n\tNodeConnectionTypes,\n\ttype INodeType,\n\ttype INodeTypeDescription,\n\ttype ISupplyDataFunctions,\n\ttype SupplyData,\n} from 'n8n-workflow';\n\nimport { getProxyAgent } from '@utils/httpProxyAgent';\nimport { getConnectionHintNoticeField } from '@utils/sharedFields';\n\nimport type { OpenAICompatibleCredential } from '../../../types/types';\nimport { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';\nimport { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';\nimport { N8nLlmTracing } from '../N8nLlmTracing';\n\nexport class LmChatDeepSeek implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'DeepSeek Chat Model',\n\n\t\tname: 'lmChatDeepSeek',\n\t\ticon: 'file:deepseek.svg',\n\t\tgroup: ['transform'],\n\t\tversion: [1],\n\t\tdescription: 'For advanced usage with an AI chain',\n\t\tdefaults: {\n\t\t\tname: 'DeepSeek Chat Model',\n\t\t},\n\t\tcodex: {\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Language Models', 'Root Nodes'],\n\t\t\t\t'Language Models': ['Chat Models (Recommended)'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatdeepseek/',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\n\t\tinputs: [],\n\n\t\toutputs: [NodeConnectionTypes.AiLanguageModel],\n\t\toutputNames: ['Model'],\n\t\tcredentials: [\n\t\t\t{\n\t\t\t\tname: 'deepSeekApi',\n\t\t\t\trequired: true,\n\t\t\t},\n\t\t],\n\t\trequestDefaults: {\n\t\t\tignoreHttpStatusErrors: true,\n\t\t\tbaseURL: '={{ $credentials?.url }}',\n\t\t},\n\t\tproperties: [\n\t\t\tgetConnectionHintNoticeField([NodeConnectionTypes.AiChain, NodeConnectionTypes.AiAgent]),\n\t\t\t{\n\t\t\t\tdisplayName:\n\t\t\t\t\t'If using JSON response format, you must include word \"json\" in the prompt in your chain or agent. Also, make sure to select latest models released post November 2023.',\n\t\t\t\tname: 'notice',\n\t\t\t\ttype: 'notice',\n\t\t\t\tdefault: '',\n\t\t\t\tdisplayOptions: {\n\t\t\t\t\tshow: {\n\t\t\t\t\t\t'/options.responseFormat': ['json_object'],\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Model',\n\t\t\t\tname: 'model',\n\t\t\t\ttype: 'options',\n\t\t\t\tdescription:\n\t\t\t\t\t'The model which will generate the completion. 
<a href=\"https://api-docs.deepseek.com/quick_start/pricing\">Learn more</a>.',\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tloadOptions: {\n\t\t\t\t\t\trouting: {\n\t\t\t\t\t\t\trequest: {\n\t\t\t\t\t\t\t\tmethod: 'GET',\n\t\t\t\t\t\t\t\turl: '/models',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\toutput: {\n\t\t\t\t\t\t\t\tpostReceive: [\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'rootProperty',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tproperty: 'data',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'setKeyValue',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tname: '={{$responseItem.id}}',\n\t\t\t\t\t\t\t\t\t\t\tvalue: '={{$responseItem.id}}',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'sort',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tkey: 'name',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trouting: {\n\t\t\t\t\tsend: {\n\t\t\t\t\t\ttype: 'body',\n\t\t\t\t\t\tproperty: 'model',\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tdefault: 'deepseek-chat',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Options',\n\t\t\t\tname: 'options',\n\t\t\t\tplaceholder: 'Add Option',\n\t\t\t\tdescription: 'Additional options to add',\n\t\t\t\ttype: 'collection',\n\t\t\t\tdefault: {},\n\t\t\t\toptions: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Frequency Penalty',\n\t\t\t\t\t\tname: 'frequencyPenalty',\n\t\t\t\t\t\tdefault: 0,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim\",\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Maximum Number of Tokens',\n\t\t\t\t\t\tname: 'maxTokens',\n\t\t\t\t\t\tdefault: -1,\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'The maximum number of tokens to generate in the completion. 
Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t\ttypeOptions: {\n\t\t\t\t\t\t\tmaxValue: 32768,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Response Format',\n\t\t\t\t\t\tname: 'responseFormat',\n\t\t\t\t\t\tdefault: 'text',\n\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\toptions: [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Text',\n\t\t\t\t\t\t\t\tvalue: 'text',\n\t\t\t\t\t\t\t\tdescription: 'Regular text response',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'JSON',\n\t\t\t\t\t\t\t\tvalue: 'json_object',\n\t\t\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\t\t'Enables JSON mode, which should guarantee the message the model generates is valid JSON',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t],\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Presence Penalty',\n\t\t\t\t\t\tname: 'presencePenalty',\n\t\t\t\t\t\tdefault: 0,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics\",\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Sampling Temperature',\n\t\t\t\t\t\tname: 'temperature',\n\t\t\t\t\t\tdefault: 0.7,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: 0, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Timeout',\n\t\t\t\t\t\tname: 'timeout',\n\t\t\t\t\t\tdefault: 360000,\n\t\t\t\t\t\tdescription: 'Maximum amount of time a request is allowed to take in milliseconds',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Max Retries',\n\t\t\t\t\t\tname: 'maxRetries',\n\t\t\t\t\t\tdefault: 2,\n\t\t\t\t\t\tdescription: 'Maximum number of retries to attempt',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Top P',\n\t\t\t\t\t\tname: 'topP',\n\t\t\t\t\t\tdefault: 1,\n\t\t\t\t\t\ttypeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. 
We generally recommend altering this or temperature but not both.',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t],\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tconst credentials = await this.getCredentials<OpenAICompatibleCredential>('deepSeekApi');\n\n\t\tconst modelName = this.getNodeParameter('model', itemIndex) as string;\n\n\t\tconst options = this.getNodeParameter('options', itemIndex, {}) as {\n\t\t\tfrequencyPenalty?: number;\n\t\t\tmaxTokens?: number;\n\t\t\tmaxRetries: number;\n\t\t\ttimeout: number;\n\t\t\tpresencePenalty?: number;\n\t\t\ttemperature?: number;\n\t\t\ttopP?: number;\n\t\t\tresponseFormat?: 'text' | 'json_object';\n\t\t};\n\n\t\tconst configuration: ClientOptions = {\n\t\t\tbaseURL: credentials.url,\n\t\t\tfetchOptions: {\n\t\t\t\tdispatcher: getProxyAgent(credentials.url),\n\t\t\t},\n\t\t};\n\n\t\tconst model = new ChatOpenAI({\n\t\t\topenAIApiKey: credentials.apiKey,\n\t\t\tmodel: modelName,\n\t\t\t...options,\n\t\t\ttimeout: options.timeout ?? 60000,\n\t\t\tmaxRetries: options.maxRetries ?? 2,\n\t\t\tconfiguration,\n\t\t\tcallbacks: [new N8nLlmTracing(this)],\n\t\t\tmodelKwargs: options.responseFormat\n\t\t\t\t? {\n\t\t\t\t\t\tresponse_format: { type: options.responseFormat },\n\t\t\t\t\t}\n\t\t\t\t: undefined,\n\t\t\tonFailedAttempt: makeN8nLlmFailedAttemptHandler(this, openAiFailedAttemptHandler),\n\t\t});\n\n\t\treturn {\n\t\t\tresponse: model,\n\t\t};\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,oBAA+C;AAC/C,0BAMO;AAEP,4BAA8B;AAC9B,0BAA6C;AAG7C,4BAA2C;AAC3C,wCAA+C;AAC/C,2BAA8B;AAEvB,MAAM,eAAoC;AAAA,EAA1C;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA,MAEb,MAAM;AAAA,MACN,MAAM;AAAA,MACN,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS,CAAC,CAAC;AAAA,MACX,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,mBAAmB,YAAY;AAAA,UACpC,mBAAmB,CAAC,2BAA2B;AAAA,QAChD;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MAEA,QAAQ,CAAC;AAAA,MAET,SAAS,CAAC,wCAAoB,eAAe;AAAA,MAC7C,aAAa,CAAC,OAAO;AAAA,MACrB,aAAa;AAAA,QACZ;AAAA,UACC,MAAM;AAAA,UACN,UAAU;AAAA,QACX;AAAA,MACD;AAAA,MACA,iBAAiB;AAAA,QAChB,wBAAwB;AAAA,QACxB,SAAS;AAAA,MACV;AAAA,MACA,YAAY;AAAA,YACX,kDAA6B,CAAC,wCAAoB,SAAS,wCAAoB,OAAO,CAAC;AAAA,QACvF;AAAA,UACC,aACC;AAAA,UACD,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,UACT,gBAAgB;AAAA,YACf,MAAM;AAAA,cACL,2BAA2B,CAAC,aAAa;AAAA,YAC1C;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,aACC;AAAA,UACD,aAAa;AAAA,YACZ,aAAa;AAAA,cACZ,SAAS;AAAA,gBACR,SAAS;AAAA,kBACR,QAAQ;AAAA,kBACR,KAAK;AAAA,gBACN;AAAA,gBACA,QAAQ;AAAA,kBACP,aAAa;AAAA,oBACZ;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,UAAU;AAAA,sBACX;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,MAAM;AAAA,wBACN,OAAO;AAAA,sBACR;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,KAAK;AAAA,sBACN;AAAA,oBACD;AAAA,kBACD;AAAA,gBACD;AAAA,cACD;AAAA,YACD;AAAA,UACD;AAAA,UACA,SAAS;AAAA,YACR,MAAM;AAAA,cACL,MAAM;AAAA,cACN,UAAU;AAAA,YACX;AAAA,UACD;AAAA,UACA,SAAS;AAAA,QACV;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,aAAa;AAAA,UACb,aAAa;AAAA,UACb,MAAM;AAAA,UACN,SAAS,CAAC;AAAA,UACV,SAAS;AAAA,YACR;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,IAAI,iBAAiB,EAAE;AAAA,cAC7D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aACC;AAAA,cACD,MAAM;AAAA,cACN,aAAa;AAAA,gBACZ,UAAU;AAAA,cACX;AAAA,YACD;AAAA,YACA;AAAA,cACC,aA
Aa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,MAAM;AAAA,cACN,SAAS;AAAA,gBACR;AAAA,kBACC,MAAM;AAAA,kBACN,OAAO;AAAA,kBACP,aAAa;AAAA,gBACd;AAAA,gBACA;AAAA,kBACC,MAAM;AAAA,kBACN,OAAO;AAAA,kBACP,aACC;AAAA,gBACF;AAAA,cACD;AAAA,YACD;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,IAAI,iBAAiB,EAAE;AAAA,cAC7D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,cAC5D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa;AAAA,cACb,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa;AAAA,cACb,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,cAC5D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA;AAAA,EAEA,MAAM,WAAuC,WAAwC;AACpF,UAAM,cAAc,MAAM,KAAK,eAA2C,aAAa;AAEvF,UAAM,YAAY,KAAK,iBAAiB,SAAS,SAAS;AAE1D,UAAM,UAAU,KAAK,iBAAiB,WAAW,WAAW,CAAC,CAAC;AAW9D,UAAM,gBAA+B;AAAA,MACpC,SAAS,YAAY;AAAA,MACrB,cAAc;AAAA,QACb,gBAAY,qCAAc,YAAY,GAAG;AAAA,MAC1C;AAAA,IACD;AAEA,UAAM,QAAQ,IAAI,yBAAW;AAAA,MAC5B,cAAc,YAAY;AAAA,MAC1B,OAAO;AAAA,MACP,GAAG;AAAA,MACH,SAAS,QAAQ,WAAW;AAAA,MAC5B,YAAY,QAAQ,cAAc;AAAA,MAClC;AAAA,MACA,WAAW,CAAC,IAAI,mCAAc,IAAI,CAAC;AAAA,MACnC,aAAa,QAAQ,iBAClB;AAAA,QACA,iBAAiB,EAAE,MAAM,QAAQ,eAAe;AAAA,MACjD,IACC;AAAA,MACH,qBAAiB,kEAA+B,MAAM,gDAA0B;AAAA,IACjF,CAAC;AAED,WAAO;AAAA,MACN,UAAU;AAAA,IACX;AAAA,EACD;AACD;","names":[]}
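The map above carries the one substantive change to this node in 1.101.0: the embedded source switches from `httpAgent: getHttpProxyAgent()` to an undici dispatcher passed through `fetchOptions`. A minimal sketch of that client-options wiring, assuming undici's `ProxyAgent` in place of the package's internal `getProxyAgent` helper (which is not public API):

```ts
// Sketch under stated assumptions: undici's ProxyAgent stands in for n8n's
// internal getProxyAgent helper; the baseURL and env vars are placeholders.
import { ProxyAgent } from 'undici';
import { ChatOpenAI, type ClientOptions } from '@langchain/openai';

const proxyUrl = process.env.HTTPS_PROXY;

const configuration: ClientOptions = {
  baseURL: 'https://api.deepseek.com',
  // 1.101.0 style: route the client's fetch() calls through a dispatcher,
  // replacing the old httpAgent option used in 1.100.0.
  fetchOptions: proxyUrl ? { dispatcher: new ProxyAgent(proxyUrl) } : undefined,
};

const model = new ChatOpenAI({
  apiKey: process.env.DEEPSEEK_API_KEY,
  model: 'deepseek-chat',
  configuration,
});
```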
package/dist/nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.js
@@ -37,7 +37,6 @@ class LmChatGoogleGemini {
     constructor() {
       this.description = {
         displayName: "Google Gemini Chat Model",
-        // eslint-disable-next-line n8n-nodes-base/node-class-description-name-miscased
         name: "lmChatGoogleGemini",
         icon: "file:google.svg",
         group: ["transform"],
@@ -60,9 +59,7 @@ class LmChatGoogleGemini {
           ]
         }
       },
-      // eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node
       inputs: [],
-      // eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong
       outputs: [import_n8n_workflow.NodeConnectionTypes.AiLanguageModel],
       outputNames: ["Model"],
       credentials: [
package/dist/nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.js.map
@@ -1 +1 @@
-
{"version":3,"sources":["../../../../nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.ts"],"sourcesContent":["
+
{"version":3,"sources":["../../../../nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.ts"],"sourcesContent":["import type { SafetySetting } from '@google/generative-ai';\nimport { ChatGoogleGenerativeAI } from '@langchain/google-genai';\nimport { NodeConnectionTypes } from 'n8n-workflow';\nimport type {\n\tNodeError,\n\tINodeType,\n\tINodeTypeDescription,\n\tISupplyDataFunctions,\n\tSupplyData,\n} from 'n8n-workflow';\n\nimport { getConnectionHintNoticeField } from '@utils/sharedFields';\n\nimport { additionalOptions } from '../gemini-common/additional-options';\nimport { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';\nimport { N8nLlmTracing } from '../N8nLlmTracing';\n\nfunction errorDescriptionMapper(error: NodeError) {\n\tif (error.description?.includes('properties: should be non-empty for OBJECT type')) {\n\t\treturn 'Google Gemini requires at least one <a href=\"https://docs.n8n.io/advanced-ai/examples/using-the-fromai-function/\" target=\"_blank\">dynamic parameter</a> when using tools';\n\t}\n\n\treturn error.description ?? 'Unknown error';\n}\nexport class LmChatGoogleGemini implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'Google Gemini Chat Model',\n\n\t\tname: 'lmChatGoogleGemini',\n\t\ticon: 'file:google.svg',\n\t\tgroup: ['transform'],\n\t\tversion: 1,\n\t\tdescription: 'Chat Model Google Gemini',\n\t\tdefaults: {\n\t\t\tname: 'Google Gemini Chat Model',\n\t\t},\n\t\tcodex: {\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Language Models', 'Root Nodes'],\n\t\t\t\t'Language Models': ['Chat Models (Recommended)'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatgooglegemini/',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\n\t\tinputs: [],\n\n\t\toutputs: [NodeConnectionTypes.AiLanguageModel],\n\t\toutputNames: ['Model'],\n\t\tcredentials: [\n\t\t\t{\n\t\t\t\tname: 'googlePalmApi',\n\t\t\t\trequired: true,\n\t\t\t},\n\t\t],\n\t\trequestDefaults: {\n\t\t\tignoreHttpStatusErrors: true,\n\t\t\tbaseURL: '={{ $credentials.host }}',\n\t\t},\n\t\tproperties: [\n\t\t\tgetConnectionHintNoticeField([NodeConnectionTypes.AiChain, NodeConnectionTypes.AiAgent]),\n\t\t\t{\n\t\t\t\tdisplayName: 'Model',\n\t\t\t\tname: 'modelName',\n\t\t\t\ttype: 'options',\n\t\t\t\tdescription:\n\t\t\t\t\t'The model which will generate the completion. 
<a href=\"https://developers.generativeai.google/api/rest/generativelanguage/models/list\">Learn more</a>.',\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tloadOptions: {\n\t\t\t\t\t\trouting: {\n\t\t\t\t\t\t\trequest: {\n\t\t\t\t\t\t\t\tmethod: 'GET',\n\t\t\t\t\t\t\t\turl: '/v1beta/models',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\toutput: {\n\t\t\t\t\t\t\t\tpostReceive: [\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'rootProperty',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tproperty: 'models',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'filter',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tpass: \"={{ !$responseItem.name.includes('embedding') }}\",\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'setKeyValue',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tname: '={{$responseItem.name}}',\n\t\t\t\t\t\t\t\t\t\t\tvalue: '={{$responseItem.name}}',\n\t\t\t\t\t\t\t\t\t\t\tdescription: '={{$responseItem.description}}',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'sort',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tkey: 'name',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trouting: {\n\t\t\t\t\tsend: {\n\t\t\t\t\t\ttype: 'body',\n\t\t\t\t\t\tproperty: 'model',\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tdefault: 'models/gemini-2.5-flash',\n\t\t\t},\n\t\t\tadditionalOptions,\n\t\t],\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tconst credentials = await this.getCredentials('googlePalmApi');\n\n\t\tconst modelName = this.getNodeParameter('modelName', itemIndex) as string;\n\t\tconst options = this.getNodeParameter('options', itemIndex, {\n\t\t\tmaxOutputTokens: 1024,\n\t\t\ttemperature: 0.7,\n\t\t\ttopK: 40,\n\t\t\ttopP: 0.9,\n\t\t}) as {\n\t\t\tmaxOutputTokens: number;\n\t\t\ttemperature: number;\n\t\t\ttopK: number;\n\t\t\ttopP: number;\n\t\t};\n\n\t\tconst safetySettings = this.getNodeParameter(\n\t\t\t'options.safetySettings.values',\n\t\t\titemIndex,\n\t\t\tnull,\n\t\t) as SafetySetting[];\n\n\t\tconst model = new ChatGoogleGenerativeAI({\n\t\t\tapiKey: credentials.apiKey as string,\n\t\t\tbaseUrl: credentials.host as string,\n\t\t\tmodel: modelName,\n\t\t\ttopK: options.topK,\n\t\t\ttopP: options.topP,\n\t\t\ttemperature: options.temperature,\n\t\t\tmaxOutputTokens: options.maxOutputTokens,\n\t\t\tsafetySettings,\n\t\t\tcallbacks: [new N8nLlmTracing(this, { errorDescriptionMapper })],\n\t\t\tonFailedAttempt: makeN8nLlmFailedAttemptHandler(this),\n\t\t});\n\n\t\treturn {\n\t\t\tresponse: 
model,\n\t\t};\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AACA,0BAAuC;AACvC,0BAAoC;AASpC,0BAA6C;AAE7C,gCAAkC;AAClC,wCAA+C;AAC/C,2BAA8B;AAE9B,SAAS,uBAAuB,OAAkB;AACjD,MAAI,MAAM,aAAa,SAAS,iDAAiD,GAAG;AACnF,WAAO;AAAA,EACR;AAEA,SAAO,MAAM,eAAe;AAC7B;AACO,MAAM,mBAAwC;AAAA,EAA9C;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA,MAEb,MAAM;AAAA,MACN,MAAM;AAAA,MACN,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS;AAAA,MACT,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,mBAAmB,YAAY;AAAA,UACpC,mBAAmB,CAAC,2BAA2B;AAAA,QAChD;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MAEA,QAAQ,CAAC;AAAA,MAET,SAAS,CAAC,wCAAoB,eAAe;AAAA,MAC7C,aAAa,CAAC,OAAO;AAAA,MACrB,aAAa;AAAA,QACZ;AAAA,UACC,MAAM;AAAA,UACN,UAAU;AAAA,QACX;AAAA,MACD;AAAA,MACA,iBAAiB;AAAA,QAChB,wBAAwB;AAAA,QACxB,SAAS;AAAA,MACV;AAAA,MACA,YAAY;AAAA,YACX,kDAA6B,CAAC,wCAAoB,SAAS,wCAAoB,OAAO,CAAC;AAAA,QACvF;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,aACC;AAAA,UACD,aAAa;AAAA,YACZ,aAAa;AAAA,cACZ,SAAS;AAAA,gBACR,SAAS;AAAA,kBACR,QAAQ;AAAA,kBACR,KAAK;AAAA,gBACN;AAAA,gBACA,QAAQ;AAAA,kBACP,aAAa;AAAA,oBACZ;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,UAAU;AAAA,sBACX;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,MAAM;AAAA,sBACP;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,MAAM;AAAA,wBACN,OAAO;AAAA,wBACP,aAAa;AAAA,sBACd;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,KAAK;AAAA,sBACN;AAAA,oBACD;AAAA,kBACD;AAAA,gBACD;AAAA,cACD;AAAA,YACD;AAAA,UACD;AAAA,UACA,SAAS;AAAA,YACR,MAAM;AAAA,cACL,MAAM;AAAA,cACN,UAAU;AAAA,YACX;AAAA,UACD;AAAA,UACA,SAAS;AAAA,QACV;AAAA,QACA;AAAA,MACD;AAAA,IACD;AAAA;AAAA,EAEA,MAAM,WAAuC,WAAwC;AACpF,UAAM,cAAc,MAAM,KAAK,eAAe,eAAe;AAE7D,UAAM,YAAY,KAAK,iBAAiB,aAAa,SAAS;AAC9D,UAAM,UAAU,KAAK,iBAAiB,WAAW,WAAW;AAAA,MAC3D,iBAAiB;AAAA,MACjB,aAAa;AAAA,MACb,MAAM;AAAA,MACN,MAAM;AAAA,IACP,CAAC;AAOD,UAAM,iBAAiB,KAAK;AAAA,MAC3B;AAAA,MACA;AAAA,MACA;AAAA,IACD;AAEA,UAAM,QAAQ,IAAI,2CAAuB;AAAA,MACxC,QAAQ,YAAY;AAAA,MACpB,SAAS,YAAY;AAAA,MACrB,OAAO;AAAA,MACP,MAAM,QAAQ;AAAA,MACd,MAAM,QAAQ;AAAA,MACd,aAAa,QAAQ;AAAA,MACrB,iBAAiB,QAAQ;AAAA,MACzB;AAAA,MACA,WAAW,CAAC,IAAI,mCAAc,MAAM,EAAE,uBAAuB,CAAC,CAAC;AAAA,MAC/D,qBAAiB,kEAA+B,IAAI;AAAA,IACrD,CAAC;AAED,WAAO;AAAA,MACN,UAAU;AAAA,IACX;AAAA,EACD;AACD;","names":[]}
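Condensing the updated embedded source above, the node builds its model as in the following sketch; the API key is a placeholder, and the option values are the defaults the new source declares (including `models/gemini-2.5-flash` as the default model):

```ts
import { ChatGoogleGenerativeAI } from '@langchain/google-genai';

// Option values mirror the defaults in the embedded source above; the key is
// a placeholder, not something the package ships.
const model = new ChatGoogleGenerativeAI({
  apiKey: process.env.GOOGLE_API_KEY ?? '',
  model: 'models/gemini-2.5-flash',
  maxOutputTokens: 1024,
  temperature: 0.7,
  topK: 40,
  topP: 0.9,
});
```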
package/dist/nodes/llms/LmChatGoogleVertex/LmChatGoogleVertex.node.js
@@ -34,7 +34,6 @@ class LmChatGoogleVertex {
     constructor() {
       this.description = {
         displayName: "Google Vertex Chat Model",
-        // eslint-disable-next-line n8n-nodes-base/node-class-description-name-miscased
         name: "lmChatGoogleVertex",
         icon: "file:google.svg",
         group: ["transform"],
@@ -57,9 +56,7 @@ class LmChatGoogleVertex {
           ]
         }
       },
-      // eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node
       inputs: [],
-      // eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong
       outputs: [import_n8n_workflow.NodeConnectionTypes.AiLanguageModel],
       outputNames: ["Model"],
       credentials: [
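The Vertex node's embedded source (see the map below) resolves the Project ID resource locator with a `gcpProjectsList` search. That lookup reduces to roughly the following sketch; the credential values here are placeholder inputs, where the real node reads them from its `googleApi` credential:

```ts
import { ProjectsClient } from '@google-cloud/resource-manager';

// Placeholder inputs; the node derives these from its googleApi credential.
async function listGcpProjects(clientEmail: string, privateKey: string) {
  const client = new ProjectsClient({
    credentials: { client_email: clientEmail, private_key: privateKey },
  });
  const [projects] = await client.searchProjects();
  return projects.flatMap((p) =>
    p.projectId ? [{ name: p.displayName ?? p.projectId, value: p.projectId }] : [],
  );
}
```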
package/dist/nodes/llms/LmChatGoogleVertex/LmChatGoogleVertex.node.js.map
@@ -1 +1 @@
-
{"version":3,"sources":["../../../../nodes/llms/LmChatGoogleVertex/LmChatGoogleVertex.node.ts"],"sourcesContent":["/* eslint-disable n8n-nodes-base/node-dirname-against-convention */\nimport type { SafetySetting } from '@google/generative-ai';\nimport { ProjectsClient } from '@google-cloud/resource-manager';\nimport { ChatVertexAI } from '@langchain/google-vertexai';\nimport { formatPrivateKey } from 'n8n-nodes-base/dist/utils/utilities';\nimport {\n\tNodeConnectionTypes,\n\ttype INodeType,\n\ttype INodeTypeDescription,\n\ttype ISupplyDataFunctions,\n\ttype SupplyData,\n\ttype ILoadOptionsFunctions,\n\ttype JsonObject,\n\tNodeOperationError,\n} from 'n8n-workflow';\n\nimport { getConnectionHintNoticeField } from '@utils/sharedFields';\n\nimport { makeErrorFromStatus } from './error-handling';\nimport { additionalOptions } from '../gemini-common/additional-options';\nimport { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';\nimport { N8nLlmTracing } from '../N8nLlmTracing';\n\nexport class LmChatGoogleVertex implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'Google Vertex Chat Model',\n\t\t// eslint-disable-next-line n8n-nodes-base/node-class-description-name-miscased\n\t\tname: 'lmChatGoogleVertex',\n\t\ticon: 'file:google.svg',\n\t\tgroup: ['transform'],\n\t\tversion: 1,\n\t\tdescription: 'Chat Model Google Vertex',\n\t\tdefaults: {\n\t\t\tname: 'Google Vertex Chat Model',\n\t\t},\n\t\tcodex: {\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Language Models', 'Root Nodes'],\n\t\t\t\t'Language Models': ['Chat Models (Recommended)'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatgooglevertex/',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\t\t// eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node\n\t\tinputs: [],\n\t\t// eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong\n\t\toutputs: [NodeConnectionTypes.AiLanguageModel],\n\t\toutputNames: ['Model'],\n\t\tcredentials: [\n\t\t\t{\n\t\t\t\tname: 'googleApi',\n\t\t\t\trequired: true,\n\t\t\t},\n\t\t],\n\t\tproperties: [\n\t\t\tgetConnectionHintNoticeField([NodeConnectionTypes.AiChain, NodeConnectionTypes.AiAgent]),\n\t\t\t{\n\t\t\t\tdisplayName: 'Project ID',\n\t\t\t\tname: 'projectId',\n\t\t\t\ttype: 'resourceLocator',\n\t\t\t\tdefault: { mode: 'list', value: '' },\n\t\t\t\trequired: true,\n\t\t\t\tdescription: 'Select or enter your Google Cloud project ID',\n\t\t\t\tmodes: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'From List',\n\t\t\t\t\t\tname: 'list',\n\t\t\t\t\t\ttype: 'list',\n\t\t\t\t\t\ttypeOptions: {\n\t\t\t\t\t\t\tsearchListMethod: 'gcpProjectsList',\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'ID',\n\t\t\t\t\t\tname: 'id',\n\t\t\t\t\t\ttype: 'string',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Model Name',\n\t\t\t\tname: 'modelName',\n\t\t\t\ttype: 'string',\n\t\t\t\tdescription:\n\t\t\t\t\t'The model which will generate the completion. 
<a href=\"https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models\">Learn more</a>.',\n\t\t\t\tdefault: 'gemini-1.5-flash',\n\t\t\t},\n\t\t\tadditionalOptions,\n\t\t],\n\t};\n\n\tmethods = {\n\t\tlistSearch: {\n\t\t\tasync gcpProjectsList(this: ILoadOptionsFunctions) {\n\t\t\t\tconst results: Array<{ name: string; value: string }> = [];\n\n\t\t\t\tconst credentials = await this.getCredentials('googleApi');\n\t\t\t\tconst privateKey = formatPrivateKey(credentials.privateKey as string);\n\t\t\t\tconst email = (credentials.email as string).trim();\n\n\t\t\t\tconst client = new ProjectsClient({\n\t\t\t\t\tcredentials: {\n\t\t\t\t\t\tclient_email: email,\n\t\t\t\t\t\tprivate_key: privateKey,\n\t\t\t\t\t},\n\t\t\t\t});\n\n\t\t\t\tconst [projects] = await client.searchProjects();\n\n\t\t\t\tfor (const project of projects) {\n\t\t\t\t\tif (project.projectId) {\n\t\t\t\t\t\tresults.push({\n\t\t\t\t\t\t\tname: project.displayName ?? project.projectId,\n\t\t\t\t\t\t\tvalue: project.projectId,\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn { results };\n\t\t\t},\n\t\t},\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tconst credentials = await this.getCredentials('googleApi');\n\t\tconst privateKey = formatPrivateKey(credentials.privateKey as string);\n\t\tconst email = (credentials.email as string).trim();\n\t\tconst region = credentials.region as string;\n\n\t\tconst modelName = this.getNodeParameter('modelName', itemIndex) as string;\n\n\t\tconst projectId = this.getNodeParameter('projectId', itemIndex, '', {\n\t\t\textractValue: true,\n\t\t}) as string;\n\n\t\tconst options = this.getNodeParameter('options', itemIndex, {\n\t\t\tmaxOutputTokens: 2048,\n\t\t\ttemperature: 0.4,\n\t\t\ttopK: 40,\n\t\t\ttopP: 0.9,\n\t\t}) as {\n\t\t\tmaxOutputTokens: number;\n\t\t\ttemperature: number;\n\t\t\ttopK: number;\n\t\t\ttopP: number;\n\t\t};\n\n\t\tconst safetySettings = this.getNodeParameter(\n\t\t\t'options.safetySettings.values',\n\t\t\titemIndex,\n\t\t\tnull,\n\t\t) as SafetySetting[];\n\n\t\ttry {\n\t\t\tconst model = new ChatVertexAI({\n\t\t\t\tauthOptions: {\n\t\t\t\t\tprojectId,\n\t\t\t\t\tcredentials: {\n\t\t\t\t\t\tclient_email: email,\n\t\t\t\t\t\tprivate_key: privateKey,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tlocation: region,\n\t\t\t\tmodel: modelName,\n\t\t\t\ttopK: options.topK,\n\t\t\t\ttopP: options.topP,\n\t\t\t\ttemperature: options.temperature,\n\t\t\t\tmaxOutputTokens: options.maxOutputTokens,\n\t\t\t\tsafetySettings,\n\t\t\t\tcallbacks: [new N8nLlmTracing(this)],\n\t\t\t\t// Handle ChatVertexAI invocation errors to provide better error messages\n\t\t\t\tonFailedAttempt: makeN8nLlmFailedAttemptHandler(this, (error: any) => {\n\t\t\t\t\t// eslint-disable-next-line @typescript-eslint/no-unsafe-member-access\n\t\t\t\t\tconst customError = makeErrorFromStatus(Number(error?.response?.status), {\n\t\t\t\t\t\tmodelName,\n\t\t\t\t\t});\n\n\t\t\t\t\tif (customError) {\n\t\t\t\t\t\tthrow new NodeOperationError(this.getNode(), error as JsonObject, customError);\n\t\t\t\t\t}\n\n\t\t\t\t\tthrow error;\n\t\t\t\t}),\n\t\t\t});\n\n\t\t\treturn {\n\t\t\t\tresponse: model,\n\t\t\t};\n\t\t} catch (e) {\n\t\t\t// Catch model name validation error from LangChain (https://github.com/langchain-ai/langchainjs/blob/ef201d0ee85ee4049078270a0cfd7a1767e624f8/libs/langchain-google-common/src/utils/common.ts#L124)\n\t\t\t// to show more helpful error message\n\t\t\tif (e?.message?.startsWith('Unable to verify model params')) {\n\t\t\t\tthrow new 
NodeOperationError(this.getNode(), e as JsonObject, {\n\t\t\t\t\tmessage: 'Unsupported model',\n\t\t\t\t\tdescription: \"Only models starting with 'gemini' are supported.\",\n\t\t\t\t});\n\t\t\t}\n\n\t\t\t// Assume all other exceptions while creating a new ChatVertexAI instance are parameter validation errors\n\t\t\tthrow new NodeOperationError(this.getNode(), e as JsonObject, {\n\t\t\t\tmessage: 'Invalid options',\n\t\t\t\tdescription: e.message,\n\t\t\t});\n\t\t}\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAEA,8BAA+B;AAC/B,6BAA6B;AAC7B,uBAAiC;AACjC,0BASO;AAEP,0BAA6C;AAE7C,4BAAoC;AACpC,gCAAkC;AAClC,wCAA+C;AAC/C,2BAA8B;AAEvB,MAAM,mBAAwC;AAAA,EAA9C;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA;AAAA,MAEb,MAAM;AAAA,MACN,MAAM;AAAA,MACN,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS;AAAA,MACT,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,mBAAmB,YAAY;AAAA,UACpC,mBAAmB,CAAC,2BAA2B;AAAA,QAChD;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA;AAAA,MAEA,QAAQ,CAAC;AAAA;AAAA,MAET,SAAS,CAAC,wCAAoB,eAAe;AAAA,MAC7C,aAAa,CAAC,OAAO;AAAA,MACrB,aAAa;AAAA,QACZ;AAAA,UACC,MAAM;AAAA,UACN,UAAU;AAAA,QACX;AAAA,MACD;AAAA,MACA,YAAY;AAAA,YACX,kDAA6B,CAAC,wCAAoB,SAAS,wCAAoB,OAAO,CAAC;AAAA,QACvF;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS,EAAE,MAAM,QAAQ,OAAO,GAAG;AAAA,UACnC,UAAU;AAAA,UACV,aAAa;AAAA,UACb,OAAO;AAAA,YACN;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,MAAM;AAAA,cACN,aAAa;AAAA,gBACZ,kBAAkB;AAAA,cACnB;AAAA,YACD;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,MAAM;AAAA,YACP;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,aACC;AAAA,UACD,SAAS;AAAA,QACV;AAAA,QACA;AAAA,MACD;AAAA,IACD;AAEA,mBAAU;AAAA,MACT,YAAY;AAAA,QACX,MAAM,kBAA6C;AAClD,gBAAM,UAAkD,CAAC;AAEzD,gBAAM,cAAc,MAAM,KAAK,eAAe,WAAW;AACzD,gBAAM,iBAAa,mCAAiB,YAAY,UAAoB;AACpE,gBAAM,QAAS,YAAY,MAAiB,KAAK;AAEjD,gBAAM,SAAS,IAAI,uCAAe;AAAA,YACjC,aAAa;AAAA,cACZ,cAAc;AAAA,cACd,aAAa;AAAA,YACd;AAAA,UACD,CAAC;AAED,gBAAM,CAAC,QAAQ,IAAI,MAAM,OAAO,eAAe;AAE/C,qBAAW,WAAW,UAAU;AAC/B,gBAAI,QAAQ,WAAW;AACtB,sBAAQ,KAAK;AAAA,gBACZ,MAAM,QAAQ,eAAe,QAAQ;AAAA,gBACrC,OAAO,QAAQ;AAAA,cAChB,CAAC;AAAA,YACF;AAAA,UACD;AAEA,iBAAO,EAAE,QAAQ;AAAA,QAClB;AAAA,MACD;AAAA,IACD;AAAA;AAAA,EAEA,MAAM,WAAuC,WAAwC;AACpF,UAAM,cAAc,MAAM,KAAK,eAAe,WAAW;AACzD,UAAM,iBAAa,mCAAiB,YAAY,UAAoB;AACpE,UAAM,QAAS,YAAY,MAAiB,KAAK;AACjD,UAAM,SAAS,YAAY;AAE3B,UAAM,YAAY,KAAK,iBAAiB,aAAa,SAAS;AAE9D,UAAM,YAAY,KAAK,iBAAiB,aAAa,WAAW,IAAI;AAAA,MACnE,cAAc;AAAA,IACf,CAAC;AAED,UAAM,UAAU,KAAK,iBAAiB,WAAW,WAAW;AAAA,MAC3D,iBAAiB;AAAA,MACjB,aAAa;AAAA,MACb,MAAM;AAAA,MACN,MAAM;AAAA,IACP,CAAC;AAOD,UAAM,iBAAiB,KAAK;AAAA,MAC3B;AAAA,MACA;AAAA,MACA;AAAA,IACD;AAEA,QAAI;AACH,YAAM,QAAQ,IAAI,oCAAa;AAAA,QAC9B,aAAa;AAAA,UACZ;AAAA,UACA,aAAa;AAAA,YACZ,cAAc;AAAA,YACd,aAAa;AAAA,UACd;AAAA,QACD;AAAA,QACA,UAAU;AAAA,QACV,OAAO;AAAA,QACP,MAAM,QAAQ;AAAA,QACd,MAAM,QAAQ;AAAA,QACd,aAAa,QAAQ;AAAA,QACrB,iBAAiB,QAAQ;AAAA,QACzB;AAAA,QACA,WAAW,CAAC,IAAI,mCAAc,IAAI,CAAC;AAAA;AAAA,QAEnC,qBAAiB,kEAA+B,MAAM,CAAC,UAAe;AAErE,gBAAM,kBAAc,2CAAoB,OAAO,OAAO,UAAU,MAAM,GAAG;AAAA,YACxE;AAAA,UACD,CAAC;AAED,cAAI,aAAa;AAChB,kBAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,OAAqB,WAAW;AAAA,UAC9E;AAEA,gBAAM;AAAA,QACP,CAAC;AAAA,MACF,CAAC;AAED,aAAO;AAAA,QACN,UAAU;AAAA,MACX;AAAA,IACD,SAAS,GAAG;AAGX,UAAI,GAAG,SAAS,WAAW,+BAA+B,GAAG;AAC5D,cAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,GAAiB;AAAA,UAC7D,SAAS;AAAA,UACT,aAAa;AAAA,QACd,CAAC;AAAA,MACF;AAGA,YAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,GAAiB;AAAA,QAC7D,SAAS;AAAA,QACT,aAAa,EAAE;AAAA,MAChB,CAAC;AAAA,IACF;AAAA,
EACD;AACD;","names":[]}
+
{"version":3,"sources":["../../../../nodes/llms/LmChatGoogleVertex/LmChatGoogleVertex.node.ts"],"sourcesContent":["import type { SafetySetting } from '@google/generative-ai';\nimport { ProjectsClient } from '@google-cloud/resource-manager';\nimport { ChatVertexAI } from '@langchain/google-vertexai';\nimport { formatPrivateKey } from 'n8n-nodes-base/dist/utils/utilities';\nimport {\n\tNodeConnectionTypes,\n\ttype INodeType,\n\ttype INodeTypeDescription,\n\ttype ISupplyDataFunctions,\n\ttype SupplyData,\n\ttype ILoadOptionsFunctions,\n\ttype JsonObject,\n\tNodeOperationError,\n} from 'n8n-workflow';\n\nimport { getConnectionHintNoticeField } from '@utils/sharedFields';\n\nimport { makeErrorFromStatus } from './error-handling';\nimport { additionalOptions } from '../gemini-common/additional-options';\nimport { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';\nimport { N8nLlmTracing } from '../N8nLlmTracing';\n\nexport class LmChatGoogleVertex implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'Google Vertex Chat Model',\n\n\t\tname: 'lmChatGoogleVertex',\n\t\ticon: 'file:google.svg',\n\t\tgroup: ['transform'],\n\t\tversion: 1,\n\t\tdescription: 'Chat Model Google Vertex',\n\t\tdefaults: {\n\t\t\tname: 'Google Vertex Chat Model',\n\t\t},\n\t\tcodex: {\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Language Models', 'Root Nodes'],\n\t\t\t\t'Language Models': ['Chat Models (Recommended)'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatgooglevertex/',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\n\t\tinputs: [],\n\n\t\toutputs: [NodeConnectionTypes.AiLanguageModel],\n\t\toutputNames: ['Model'],\n\t\tcredentials: [\n\t\t\t{\n\t\t\t\tname: 'googleApi',\n\t\t\t\trequired: true,\n\t\t\t},\n\t\t],\n\t\tproperties: [\n\t\t\tgetConnectionHintNoticeField([NodeConnectionTypes.AiChain, NodeConnectionTypes.AiAgent]),\n\t\t\t{\n\t\t\t\tdisplayName: 'Project ID',\n\t\t\t\tname: 'projectId',\n\t\t\t\ttype: 'resourceLocator',\n\t\t\t\tdefault: { mode: 'list', value: '' },\n\t\t\t\trequired: true,\n\t\t\t\tdescription: 'Select or enter your Google Cloud project ID',\n\t\t\t\tmodes: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'From List',\n\t\t\t\t\t\tname: 'list',\n\t\t\t\t\t\ttype: 'list',\n\t\t\t\t\t\ttypeOptions: {\n\t\t\t\t\t\t\tsearchListMethod: 'gcpProjectsList',\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'ID',\n\t\t\t\t\t\tname: 'id',\n\t\t\t\t\t\ttype: 'string',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Model Name',\n\t\t\t\tname: 'modelName',\n\t\t\t\ttype: 'string',\n\t\t\t\tdescription:\n\t\t\t\t\t'The model which will generate the completion. 
<a href=\"https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models\">Learn more</a>.',\n\t\t\t\tdefault: 'gemini-1.5-flash',\n\t\t\t},\n\t\t\tadditionalOptions,\n\t\t],\n\t};\n\n\tmethods = {\n\t\tlistSearch: {\n\t\t\tasync gcpProjectsList(this: ILoadOptionsFunctions) {\n\t\t\t\tconst results: Array<{ name: string; value: string }> = [];\n\n\t\t\t\tconst credentials = await this.getCredentials('googleApi');\n\t\t\t\tconst privateKey = formatPrivateKey(credentials.privateKey as string);\n\t\t\t\tconst email = (credentials.email as string).trim();\n\n\t\t\t\tconst client = new ProjectsClient({\n\t\t\t\t\tcredentials: {\n\t\t\t\t\t\tclient_email: email,\n\t\t\t\t\t\tprivate_key: privateKey,\n\t\t\t\t\t},\n\t\t\t\t});\n\n\t\t\t\tconst [projects] = await client.searchProjects();\n\n\t\t\t\tfor (const project of projects) {\n\t\t\t\t\tif (project.projectId) {\n\t\t\t\t\t\tresults.push({\n\t\t\t\t\t\t\tname: project.displayName ?? project.projectId,\n\t\t\t\t\t\t\tvalue: project.projectId,\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn { results };\n\t\t\t},\n\t\t},\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tconst credentials = await this.getCredentials('googleApi');\n\t\tconst privateKey = formatPrivateKey(credentials.privateKey as string);\n\t\tconst email = (credentials.email as string).trim();\n\t\tconst region = credentials.region as string;\n\n\t\tconst modelName = this.getNodeParameter('modelName', itemIndex) as string;\n\n\t\tconst projectId = this.getNodeParameter('projectId', itemIndex, '', {\n\t\t\textractValue: true,\n\t\t}) as string;\n\n\t\tconst options = this.getNodeParameter('options', itemIndex, {\n\t\t\tmaxOutputTokens: 2048,\n\t\t\ttemperature: 0.4,\n\t\t\ttopK: 40,\n\t\t\ttopP: 0.9,\n\t\t}) as {\n\t\t\tmaxOutputTokens: number;\n\t\t\ttemperature: number;\n\t\t\ttopK: number;\n\t\t\ttopP: number;\n\t\t};\n\n\t\tconst safetySettings = this.getNodeParameter(\n\t\t\t'options.safetySettings.values',\n\t\t\titemIndex,\n\t\t\tnull,\n\t\t) as SafetySetting[];\n\n\t\ttry {\n\t\t\tconst model = new ChatVertexAI({\n\t\t\t\tauthOptions: {\n\t\t\t\t\tprojectId,\n\t\t\t\t\tcredentials: {\n\t\t\t\t\t\tclient_email: email,\n\t\t\t\t\t\tprivate_key: privateKey,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tlocation: region,\n\t\t\t\tmodel: modelName,\n\t\t\t\ttopK: options.topK,\n\t\t\t\ttopP: options.topP,\n\t\t\t\ttemperature: options.temperature,\n\t\t\t\tmaxOutputTokens: options.maxOutputTokens,\n\t\t\t\tsafetySettings,\n\t\t\t\tcallbacks: [new N8nLlmTracing(this)],\n\t\t\t\t// Handle ChatVertexAI invocation errors to provide better error messages\n\t\t\t\tonFailedAttempt: makeN8nLlmFailedAttemptHandler(this, (error: any) => {\n\t\t\t\t\t// eslint-disable-next-line @typescript-eslint/no-unsafe-member-access\n\t\t\t\t\tconst customError = makeErrorFromStatus(Number(error?.response?.status), {\n\t\t\t\t\t\tmodelName,\n\t\t\t\t\t});\n\n\t\t\t\t\tif (customError) {\n\t\t\t\t\t\tthrow new NodeOperationError(this.getNode(), error as JsonObject, customError);\n\t\t\t\t\t}\n\n\t\t\t\t\tthrow error;\n\t\t\t\t}),\n\t\t\t});\n\n\t\t\treturn {\n\t\t\t\tresponse: model,\n\t\t\t};\n\t\t} catch (e) {\n\t\t\t// Catch model name validation error from LangChain (https://github.com/langchain-ai/langchainjs/blob/ef201d0ee85ee4049078270a0cfd7a1767e624f8/libs/langchain-google-common/src/utils/common.ts#L124)\n\t\t\t// to show more helpful error message\n\t\t\tif (e?.message?.startsWith('Unable to verify model params')) {\n\t\t\t\tthrow new 
NodeOperationError(this.getNode(), e as JsonObject, {\n\t\t\t\t\tmessage: 'Unsupported model',\n\t\t\t\t\tdescription: \"Only models starting with 'gemini' are supported.\",\n\t\t\t\t});\n\t\t\t}\n\n\t\t\t// Assume all other exceptions while creating a new ChatVertexAI instance are parameter validation errors\n\t\t\tthrow new NodeOperationError(this.getNode(), e as JsonObject, {\n\t\t\t\tmessage: 'Invalid options',\n\t\t\t\tdescription: e.message,\n\t\t\t});\n\t\t}\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AACA,8BAA+B;AAC/B,6BAA6B;AAC7B,uBAAiC;AACjC,0BASO;AAEP,0BAA6C;AAE7C,4BAAoC;AACpC,gCAAkC;AAClC,wCAA+C;AAC/C,2BAA8B;AAEvB,MAAM,mBAAwC;AAAA,EAA9C;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA,MAEb,MAAM;AAAA,MACN,MAAM;AAAA,MACN,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS;AAAA,MACT,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,mBAAmB,YAAY;AAAA,UACpC,mBAAmB,CAAC,2BAA2B;AAAA,QAChD;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MAEA,QAAQ,CAAC;AAAA,MAET,SAAS,CAAC,wCAAoB,eAAe;AAAA,MAC7C,aAAa,CAAC,OAAO;AAAA,MACrB,aAAa;AAAA,QACZ;AAAA,UACC,MAAM;AAAA,UACN,UAAU;AAAA,QACX;AAAA,MACD;AAAA,MACA,YAAY;AAAA,YACX,kDAA6B,CAAC,wCAAoB,SAAS,wCAAoB,OAAO,CAAC;AAAA,QACvF;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS,EAAE,MAAM,QAAQ,OAAO,GAAG;AAAA,UACnC,UAAU;AAAA,UACV,aAAa;AAAA,UACb,OAAO;AAAA,YACN;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,MAAM;AAAA,cACN,aAAa;AAAA,gBACZ,kBAAkB;AAAA,cACnB;AAAA,YACD;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,MAAM;AAAA,YACP;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,aACC;AAAA,UACD,SAAS;AAAA,QACV;AAAA,QACA;AAAA,MACD;AAAA,IACD;AAEA,mBAAU;AAAA,MACT,YAAY;AAAA,QACX,MAAM,kBAA6C;AAClD,gBAAM,UAAkD,CAAC;AAEzD,gBAAM,cAAc,MAAM,KAAK,eAAe,WAAW;AACzD,gBAAM,iBAAa,mCAAiB,YAAY,UAAoB;AACpE,gBAAM,QAAS,YAAY,MAAiB,KAAK;AAEjD,gBAAM,SAAS,IAAI,uCAAe;AAAA,YACjC,aAAa;AAAA,cACZ,cAAc;AAAA,cACd,aAAa;AAAA,YACd;AAAA,UACD,CAAC;AAED,gBAAM,CAAC,QAAQ,IAAI,MAAM,OAAO,eAAe;AAE/C,qBAAW,WAAW,UAAU;AAC/B,gBAAI,QAAQ,WAAW;AACtB,sBAAQ,KAAK;AAAA,gBACZ,MAAM,QAAQ,eAAe,QAAQ;AAAA,gBACrC,OAAO,QAAQ;AAAA,cAChB,CAAC;AAAA,YACF;AAAA,UACD;AAEA,iBAAO,EAAE,QAAQ;AAAA,QAClB;AAAA,MACD;AAAA,IACD;AAAA;AAAA,EAEA,MAAM,WAAuC,WAAwC;AACpF,UAAM,cAAc,MAAM,KAAK,eAAe,WAAW;AACzD,UAAM,iBAAa,mCAAiB,YAAY,UAAoB;AACpE,UAAM,QAAS,YAAY,MAAiB,KAAK;AACjD,UAAM,SAAS,YAAY;AAE3B,UAAM,YAAY,KAAK,iBAAiB,aAAa,SAAS;AAE9D,UAAM,YAAY,KAAK,iBAAiB,aAAa,WAAW,IAAI;AAAA,MACnE,cAAc;AAAA,IACf,CAAC;AAED,UAAM,UAAU,KAAK,iBAAiB,WAAW,WAAW;AAAA,MAC3D,iBAAiB;AAAA,MACjB,aAAa;AAAA,MACb,MAAM;AAAA,MACN,MAAM;AAAA,IACP,CAAC;AAOD,UAAM,iBAAiB,KAAK;AAAA,MAC3B;AAAA,MACA;AAAA,MACA;AAAA,IACD;AAEA,QAAI;AACH,YAAM,QAAQ,IAAI,oCAAa;AAAA,QAC9B,aAAa;AAAA,UACZ;AAAA,UACA,aAAa;AAAA,YACZ,cAAc;AAAA,YACd,aAAa;AAAA,UACd;AAAA,QACD;AAAA,QACA,UAAU;AAAA,QACV,OAAO;AAAA,QACP,MAAM,QAAQ;AAAA,QACd,MAAM,QAAQ;AAAA,QACd,aAAa,QAAQ;AAAA,QACrB,iBAAiB,QAAQ;AAAA,QACzB;AAAA,QACA,WAAW,CAAC,IAAI,mCAAc,IAAI,CAAC;AAAA;AAAA,QAEnC,qBAAiB,kEAA+B,MAAM,CAAC,UAAe;AAErE,gBAAM,kBAAc,2CAAoB,OAAO,OAAO,UAAU,MAAM,GAAG;AAAA,YACxE;AAAA,UACD,CAAC;AAED,cAAI,aAAa;AAChB,kBAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,OAAqB,WAAW;AAAA,UAC9E;AAEA,gBAAM;AAAA,QACP,CAAC;AAAA,MACF,CAAC;AAED,aAAO;AAAA,QACN,UAAU;AAAA,MACX;AAAA,IACD,SAAS,GAAG;AAGX,UAAI,GAAG,SAAS,WAAW,+BAA+B,GAAG;AAC5D,cAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,GAAiB;AAAA,UAC7D,SAAS;AAAA,UACT,aAAa;AAAA,QACd,CAAC;AAAA,MACF;AAGA,YAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,GAAiB;AAAA,QAC7D,SAAS;AAAA,QACT,aAAa,EAAE;AAAA,MAChB,CAAC;AAAA,IACF;AAAA,EACD;AACD;","na
mes":[]}
@@ -31,7 +31,6 @@ class LmChatGroq {
   constructor() {
     this.description = {
       displayName: "Groq Chat Model",
-      // eslint-disable-next-line n8n-nodes-base/node-class-description-name-miscased
       name: "lmChatGroq",
       icon: "file:groq.svg",
       group: ["transform"],
@@ -54,9 +53,7 @@ class LmChatGroq {
           ]
         }
       },
-      // eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node
       inputs: [],
-      // eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong
       outputs: [import_n8n_workflow.NodeConnectionTypes.AiLanguageModel],
       outputNames: ["Model"],
       credentials: [
@@ -154,7 +151,7 @@ class LmChatGroq {
       maxTokens: options.maxTokensToSample,
       temperature: options.temperature,
       callbacks: [new import_N8nLlmTracing.N8nLlmTracing(this)],
-      httpAgent: (0, import_httpProxyAgent.
+      httpAgent: (0, import_httpProxyAgent.getProxyAgent)("https://api.groq.com/openai/v1"),
       onFailedAttempt: (0, import_n8nLlmFailedAttemptHandler.makeN8nLlmFailedAttemptHandler)(this)
     });
     return {
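The only functional change in these hunks is the proxy call: the truncated old line invoked a helper from `import_httpProxyAgent` with no URL, while the new code calls `getProxyAgent("https://api.groq.com/openai/v1")`, handing the helper the target base URL (presumably so per-host proxy rules such as `NO_PROXY` can be evaluated). An excerpt of the new TypeScript source as embedded in the map below, with the values the surrounding `supplyData()` provides declared so the snippet type-checks on its own:

```typescript
import { ChatGroq } from '@langchain/groq';
import { getProxyAgent } from '@utils/httpProxyAgent';

// Provided at runtime by the node's supplyData() context:
declare const credentials: { apiKey: string };
declare const modelName: string;

const model = new ChatGroq({
	apiKey: credentials.apiKey,
	model: modelName,
	// New in 1.101.0: the Groq base URL is now passed to the proxy helper
	httpAgent: getProxyAgent('https://api.groq.com/openai/v1'),
});
```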
@@ -1 +1 @@
-{"version":3,"sources":["../../../../nodes/llms/LmChatGroq/LmChatGroq.node.ts"],"sourcesContent":["
+
{"version":3,"sources":["../../../../nodes/llms/LmChatGroq/LmChatGroq.node.ts"],"sourcesContent":["import { ChatGroq } from '@langchain/groq';\nimport {\n\tNodeConnectionTypes,\n\ttype INodeType,\n\ttype INodeTypeDescription,\n\ttype ISupplyDataFunctions,\n\ttype SupplyData,\n} from 'n8n-workflow';\n\nimport { getProxyAgent } from '@utils/httpProxyAgent';\nimport { getConnectionHintNoticeField } from '@utils/sharedFields';\n\nimport { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';\nimport { N8nLlmTracing } from '../N8nLlmTracing';\n\nexport class LmChatGroq implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'Groq Chat Model',\n\n\t\tname: 'lmChatGroq',\n\t\ticon: 'file:groq.svg',\n\t\tgroup: ['transform'],\n\t\tversion: 1,\n\t\tdescription: 'Language Model Groq',\n\t\tdefaults: {\n\t\t\tname: 'Groq Chat Model',\n\t\t},\n\t\tcodex: {\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Language Models', 'Root Nodes'],\n\t\t\t\t'Language Models': ['Chat Models (Recommended)'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatgroq/',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\n\t\tinputs: [],\n\n\t\toutputs: [NodeConnectionTypes.AiLanguageModel],\n\t\toutputNames: ['Model'],\n\t\tcredentials: [\n\t\t\t{\n\t\t\t\tname: 'groqApi',\n\t\t\t\trequired: true,\n\t\t\t},\n\t\t],\n\t\trequestDefaults: {\n\t\t\tbaseURL: 'https://api.groq.com/openai/v1',\n\t\t},\n\t\tproperties: [\n\t\t\tgetConnectionHintNoticeField([NodeConnectionTypes.AiChain, NodeConnectionTypes.AiChain]),\n\t\t\t{\n\t\t\t\tdisplayName: 'Model',\n\t\t\t\tname: 'model',\n\t\t\t\ttype: 'options',\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tloadOptions: {\n\t\t\t\t\t\trouting: {\n\t\t\t\t\t\t\trequest: {\n\t\t\t\t\t\t\t\tmethod: 'GET',\n\t\t\t\t\t\t\t\turl: '/models',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\toutput: {\n\t\t\t\t\t\t\t\tpostReceive: [\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'rootProperty',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tproperty: 'data',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'filter',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tpass: '={{ $responseItem.active === true && $responseItem.object === \"model\" }}',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'setKeyValue',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tname: '={{$responseItem.id}}',\n\t\t\t\t\t\t\t\t\t\t\tvalue: '={{$responseItem.id}}',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trouting: {\n\t\t\t\t\tsend: {\n\t\t\t\t\t\ttype: 'body',\n\t\t\t\t\t\tproperty: 'model',\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tdescription:\n\t\t\t\t\t'The model which will generate the completion. 
<a href=\"https://console.groq.com/docs/models\">Learn more</a>.',\n\t\t\t\tdefault: 'llama3-8b-8192',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Options',\n\t\t\t\tname: 'options',\n\t\t\t\tplaceholder: 'Add Option',\n\t\t\t\tdescription: 'Additional options to add',\n\t\t\t\ttype: 'collection',\n\t\t\t\tdefault: {},\n\t\t\t\toptions: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Maximum Number of Tokens',\n\t\t\t\t\t\tname: 'maxTokensToSample',\n\t\t\t\t\t\tdefault: 4096,\n\t\t\t\t\t\tdescription: 'The maximum number of tokens to generate in the completion',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Sampling Temperature',\n\t\t\t\t\t\tname: 'temperature',\n\t\t\t\t\t\tdefault: 0.7,\n\t\t\t\t\t\ttypeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t],\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tconst credentials = await this.getCredentials('groqApi');\n\n\t\tconst modelName = this.getNodeParameter('model', itemIndex) as string;\n\t\tconst options = this.getNodeParameter('options', itemIndex, {}) as {\n\t\t\tmaxTokensToSample?: number;\n\t\t\ttemperature: number;\n\t\t};\n\n\t\tconst model = new ChatGroq({\n\t\t\tapiKey: credentials.apiKey as string,\n\t\t\tmodel: modelName,\n\t\t\tmaxTokens: options.maxTokensToSample,\n\t\t\ttemperature: options.temperature,\n\t\t\tcallbacks: [new N8nLlmTracing(this)],\n\t\t\thttpAgent: getProxyAgent('https://api.groq.com/openai/v1'),\n\t\t\tonFailedAttempt: makeN8nLlmFailedAttemptHandler(this),\n\t\t});\n\n\t\treturn {\n\t\t\tresponse: 
model,\n\t\t};\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,kBAAyB;AACzB,0BAMO;AAEP,4BAA8B;AAC9B,0BAA6C;AAE7C,wCAA+C;AAC/C,2BAA8B;AAEvB,MAAM,WAAgC;AAAA,EAAtC;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA,MAEb,MAAM;AAAA,MACN,MAAM;AAAA,MACN,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS;AAAA,MACT,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,mBAAmB,YAAY;AAAA,UACpC,mBAAmB,CAAC,2BAA2B;AAAA,QAChD;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MAEA,QAAQ,CAAC;AAAA,MAET,SAAS,CAAC,wCAAoB,eAAe;AAAA,MAC7C,aAAa,CAAC,OAAO;AAAA,MACrB,aAAa;AAAA,QACZ;AAAA,UACC,MAAM;AAAA,UACN,UAAU;AAAA,QACX;AAAA,MACD;AAAA,MACA,iBAAiB;AAAA,QAChB,SAAS;AAAA,MACV;AAAA,MACA,YAAY;AAAA,YACX,kDAA6B,CAAC,wCAAoB,SAAS,wCAAoB,OAAO,CAAC;AAAA,QACvF;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,aAAa;AAAA,YACZ,aAAa;AAAA,cACZ,SAAS;AAAA,gBACR,SAAS;AAAA,kBACR,QAAQ;AAAA,kBACR,KAAK;AAAA,gBACN;AAAA,gBACA,QAAQ;AAAA,kBACP,aAAa;AAAA,oBACZ;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,UAAU;AAAA,sBACX;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,MAAM;AAAA,sBACP;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,MAAM;AAAA,wBACN,OAAO;AAAA,sBACR;AAAA,oBACD;AAAA,kBACD;AAAA,gBACD;AAAA,cACD;AAAA,YACD;AAAA,UACD;AAAA,UACA,SAAS;AAAA,YACR,MAAM;AAAA,cACL,MAAM;AAAA,cACN,UAAU;AAAA,YACX;AAAA,UACD;AAAA,UACA,aACC;AAAA,UACD,SAAS;AAAA,QACV;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,aAAa;AAAA,UACb,aAAa;AAAA,UACb,MAAM;AAAA,UACN,SAAS,CAAC;AAAA,UACV,SAAS;AAAA,YACR;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa;AAAA,cACb,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,cAC5D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA;AAAA,EAEA,MAAM,WAAuC,WAAwC;AACpF,UAAM,cAAc,MAAM,KAAK,eAAe,SAAS;AAEvD,UAAM,YAAY,KAAK,iBAAiB,SAAS,SAAS;AAC1D,UAAM,UAAU,KAAK,iBAAiB,WAAW,WAAW,CAAC,CAAC;AAK9D,UAAM,QAAQ,IAAI,qBAAS;AAAA,MAC1B,QAAQ,YAAY;AAAA,MACpB,OAAO;AAAA,MACP,WAAW,QAAQ;AAAA,MACnB,aAAa,QAAQ;AAAA,MACrB,WAAW,CAAC,IAAI,mCAAc,IAAI,CAAC;AAAA,MACnC,eAAW,qCAAc,gCAAgC;AAAA,MACzD,qBAAiB,kEAA+B,IAAI;AAAA,IACrD,CAAC;AAED,WAAO;AAAA,MACN,UAAU;AAAA,IACX;AAAA,EACD;AACD;","names":[]}
@@ -30,7 +30,6 @@ class LmChatMistralCloud {
   constructor() {
     this.description = {
       displayName: "Mistral Cloud Chat Model",
-      // eslint-disable-next-line n8n-nodes-base/node-class-description-name-miscased
       name: "lmChatMistralCloud",
       icon: "file:mistral.svg",
       group: ["transform"],
@@ -53,9 +52,7 @@ class LmChatMistralCloud {
           ]
         }
       },
-      // eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node
       inputs: [],
-      // eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong
       outputs: [import_n8n_workflow.NodeConnectionTypes.AiLanguageModel],
       outputNames: ["Model"],
       credentials: [
@@ -1 +1 @@
-{"version":3,"sources":["../../../../nodes/llms/LmChatMistralCloud/LmChatMistralCloud.node.ts"],"sourcesContent":["
+
{"version":3,"sources":["../../../../nodes/llms/LmChatMistralCloud/LmChatMistralCloud.node.ts"],"sourcesContent":["import type { ChatMistralAIInput } from '@langchain/mistralai';\nimport { ChatMistralAI } from '@langchain/mistralai';\nimport {\n\tNodeConnectionTypes,\n\ttype INodeType,\n\ttype INodeTypeDescription,\n\ttype ISupplyDataFunctions,\n\ttype SupplyData,\n} from 'n8n-workflow';\n\nimport { getConnectionHintNoticeField } from '@utils/sharedFields';\n\nimport { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';\nimport { N8nLlmTracing } from '../N8nLlmTracing';\n\nexport class LmChatMistralCloud implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'Mistral Cloud Chat Model',\n\n\t\tname: 'lmChatMistralCloud',\n\t\ticon: 'file:mistral.svg',\n\t\tgroup: ['transform'],\n\t\tversion: 1,\n\t\tdescription: 'For advanced usage with an AI chain',\n\t\tdefaults: {\n\t\t\tname: 'Mistral Cloud Chat Model',\n\t\t},\n\t\tcodex: {\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Language Models', 'Root Nodes'],\n\t\t\t\t'Language Models': ['Chat Models (Recommended)'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatmistralcloud/',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\n\t\tinputs: [],\n\n\t\toutputs: [NodeConnectionTypes.AiLanguageModel],\n\t\toutputNames: ['Model'],\n\t\tcredentials: [\n\t\t\t{\n\t\t\t\tname: 'mistralCloudApi',\n\t\t\t\trequired: true,\n\t\t\t},\n\t\t],\n\t\trequestDefaults: {\n\t\t\tignoreHttpStatusErrors: true,\n\t\t\tbaseURL: 'https://api.mistral.ai/v1',\n\t\t},\n\t\tproperties: [\n\t\t\tgetConnectionHintNoticeField([NodeConnectionTypes.AiChain, NodeConnectionTypes.AiAgent]),\n\t\t\t{\n\t\t\t\tdisplayName: 'Model',\n\t\t\t\tname: 'model',\n\t\t\t\ttype: 'options',\n\t\t\t\tdescription:\n\t\t\t\t\t'The model which will generate the completion. 
<a href=\"https://docs.mistral.ai/platform/endpoints/\">Learn more</a>.',\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tloadOptions: {\n\t\t\t\t\t\trouting: {\n\t\t\t\t\t\t\trequest: {\n\t\t\t\t\t\t\t\tmethod: 'GET',\n\t\t\t\t\t\t\t\turl: '/models',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\toutput: {\n\t\t\t\t\t\t\t\tpostReceive: [\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'rootProperty',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tproperty: 'data',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'filter',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tpass: \"={{ !$responseItem.id.includes('embed') }}\",\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'setKeyValue',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tname: '={{ $responseItem.id }}',\n\t\t\t\t\t\t\t\t\t\t\tvalue: '={{ $responseItem.id }}',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'sort',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tkey: 'name',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trouting: {\n\t\t\t\t\tsend: {\n\t\t\t\t\t\ttype: 'body',\n\t\t\t\t\t\tproperty: 'model',\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tdefault: 'mistral-small',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Options',\n\t\t\t\tname: 'options',\n\t\t\t\tplaceholder: 'Add Option',\n\t\t\t\tdescription: 'Additional options to add',\n\t\t\t\ttype: 'collection',\n\t\t\t\tdefault: {},\n\t\t\t\toptions: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Maximum Number of Tokens',\n\t\t\t\t\t\tname: 'maxTokens',\n\t\t\t\t\t\tdefault: -1,\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t\ttypeOptions: {\n\t\t\t\t\t\t\tmaxValue: 32768,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Sampling Temperature',\n\t\t\t\t\t\tname: 'temperature',\n\t\t\t\t\t\tdefault: 0.7,\n\t\t\t\t\t\ttypeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Max Retries',\n\t\t\t\t\t\tname: 'maxRetries',\n\t\t\t\t\t\tdefault: 2,\n\t\t\t\t\t\tdescription: 'Maximum number of retries to attempt',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Top P',\n\t\t\t\t\t\tname: 'topP',\n\t\t\t\t\t\tdefault: 1,\n\t\t\t\t\t\ttypeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. 
We generally recommend altering this or temperature but not both.',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Enable Safe Mode',\n\t\t\t\t\t\tname: 'safeMode',\n\t\t\t\t\t\tdefault: false,\n\t\t\t\t\t\ttype: 'boolean',\n\t\t\t\t\t\tdescription: 'Whether to inject a safety prompt before all conversations',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Random Seed',\n\t\t\t\t\t\tname: 'randomSeed',\n\t\t\t\t\t\tdefault: undefined,\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'The seed to use for random sampling. If set, different calls will generate deterministic results.',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t],\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tconst credentials = await this.getCredentials('mistralCloudApi');\n\n\t\tconst modelName = this.getNodeParameter('model', itemIndex) as string;\n\t\tconst options = this.getNodeParameter('options', itemIndex, {\n\t\t\tmaxRetries: 2,\n\t\t\ttopP: 1,\n\t\t\ttemperature: 0.7,\n\t\t\tmaxTokens: -1,\n\t\t\tsafeMode: false,\n\t\t\trandomSeed: undefined,\n\t\t}) as Partial<ChatMistralAIInput>;\n\n\t\tconst model = new ChatMistralAI({\n\t\t\tapiKey: credentials.apiKey as string,\n\t\t\tmodel: modelName,\n\t\t\t...options,\n\t\t\tcallbacks: [new N8nLlmTracing(this)],\n\t\t\tonFailedAttempt: makeN8nLlmFailedAttemptHandler(this),\n\t\t});\n\n\t\treturn {\n\t\t\tresponse: model,\n\t\t};\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AACA,uBAA8B;AAC9B,0BAMO;AAEP,0BAA6C;AAE7C,wCAA+C;AAC/C,2BAA8B;AAEvB,MAAM,mBAAwC;AAAA,EAA9C;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA,MAEb,MAAM;AAAA,MACN,MAAM;AAAA,MACN,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS;AAAA,MACT,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,mBAAmB,YAAY;AAAA,UACpC,mBAAmB,CAAC,2BAA2B;AAAA,QAChD;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MAEA,QAAQ,CAAC;AAAA,MAET,SAAS,CAAC,wCAAoB,eAAe;AAAA,MAC7C,aAAa,CAAC,OAAO;AAAA,MACrB,aAAa;AAAA,QACZ;AAAA,UACC,MAAM;AAAA,UACN,UAAU;AAAA,QACX;AAAA,MACD;AAAA,MACA,iBAAiB;AAAA,QAChB,wBAAwB;AAAA,QACxB,SAAS;AAAA,MACV;AAAA,MACA,YAAY;AAAA,YACX,kDAA6B,CAAC,wCAAoB,SAAS,wCAAoB,OAAO,CAAC;AAAA,QACvF;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,aACC;AAAA,UACD,aAAa;AAAA,YACZ,aAAa;AAAA,cACZ,SAAS;AAAA,gBACR,SAAS;AAAA,kBACR,QAAQ;AAAA,kBACR,KAAK;AAAA,gBACN;AAAA,gBACA,QAAQ;AAAA,kBACP,aAAa;AAAA,oBACZ;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,UAAU;AAAA,sBACX;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,MAAM;AAAA,sBACP;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,MAAM;AAAA,wBACN,OAAO;AAAA,sBACR;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,KAAK;AAAA,sBACN;AAAA,oBACD;AAAA,kBACD;AAAA,gBACD;AAAA,cACD;AAAA,YACD;AAAA,UACD;AAAA,UACA,SAAS;AAAA,YACR,MAAM;AAAA,cACL,MAAM;AAAA,cACN,UAAU;AAAA,YACX;AAAA,UACD;AAAA,UACA,SAAS;AAAA,QACV;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,aAAa;AAAA,UACb,aAAa;AAAA,UACb,MAAM;AAAA,UACN,SAAS,CAAC;AAAA,UACV,SAAS;AAAA,YACR;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aACC;AAAA,cACD,MAAM;AAAA,cACN,aAAa;AAAA,gBACZ,UAAU;AAAA,cACX;AAAA,YACD;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,cAC5D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa;AAAA,cACb,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cAC
N,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,cAC5D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,MAAM;AAAA,cACN,aAAa;AAAA,YACd;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,MAAM;AAAA,cACN,aACC;AAAA,YACF;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA;AAAA,EAEA,MAAM,WAAuC,WAAwC;AACpF,UAAM,cAAc,MAAM,KAAK,eAAe,iBAAiB;AAE/D,UAAM,YAAY,KAAK,iBAAiB,SAAS,SAAS;AAC1D,UAAM,UAAU,KAAK,iBAAiB,WAAW,WAAW;AAAA,MAC3D,YAAY;AAAA,MACZ,MAAM;AAAA,MACN,aAAa;AAAA,MACb,WAAW;AAAA,MACX,UAAU;AAAA,MACV,YAAY;AAAA,IACb,CAAC;AAED,UAAM,QAAQ,IAAI,+BAAc;AAAA,MAC/B,QAAQ,YAAY;AAAA,MACpB,OAAO;AAAA,MACP,GAAG;AAAA,MACH,WAAW,CAAC,IAAI,mCAAc,IAAI,CAAC;AAAA,MACnC,qBAAiB,kEAA+B,IAAI;AAAA,IACrD,CAAC;AAED,WAAO;AAAA,MACN,UAAU;AAAA,IACX;AAAA,EACD;AACD;","names":[]}
@@ -54,9 +54,7 @@ class LmChatOpenRouter {
           ]
         }
       },
-      // eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node
       inputs: [],
-      // eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong
       outputs: [import_n8n_workflow.NodeConnectionTypes.AiLanguageModel],
       outputNames: ["Model"],
       credentials: [
@@ -221,7 +219,9 @@ class LmChatOpenRouter {
     const options = this.getNodeParameter("options", itemIndex, {});
     const configuration = {
       baseURL: credentials.url,
-
+      fetchOptions: {
+        dispatcher: (0, import_httpProxyAgent.getProxyAgent)(credentials.url)
+      }
     };
     const model = new import_openai.ChatOpenAI({
       openAIApiKey: credentials.apiKey,
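Here the proxy moves from the HTTP-agent field to the fetch layer: the removed source in the map above carried `httpAgent: getHttpProxyAgent()` in its `ClientOptions`, while the new source (map below) sets `fetchOptions.dispatcher` to `getProxyAgent(credentials.url)`, which appears to be an undici-style dispatcher accepted by the OpenAI SDK's fetch transport. An excerpt of the new source, with the runtime values declared so the snippet stands alone; `LmChatXAiGrok` below receives the identical change:

```typescript
import { ChatOpenAI, type ClientOptions } from '@langchain/openai';
import { getProxyAgent } from '@utils/httpProxyAgent';

// Provided at runtime by the node's supplyData() context:
declare const credentials: { url: string; apiKey: string };
declare const modelName: string;

// New in 1.101.0: the proxy is attached as a fetch dispatcher rather than an
// httpAgent, and it now receives the credential URL as the target host.
const configuration: ClientOptions = {
	baseURL: credentials.url,
	fetchOptions: {
		dispatcher: getProxyAgent(credentials.url),
	},
};

const model = new ChatOpenAI({
	openAIApiKey: credentials.apiKey,
	model: modelName,
	configuration,
});
```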
@@ -1 +1 @@
-
{"version":3,"sources":["../../../../nodes/llms/LmChatOpenRouter/LmChatOpenRouter.node.ts"],"sourcesContent":["/* eslint-disable n8n-nodes-base/node-dirname-against-convention */\n\nimport { ChatOpenAI, type ClientOptions } from '@langchain/openai';\nimport {\n\tNodeConnectionTypes,\n\ttype INodeType,\n\ttype INodeTypeDescription,\n\ttype ISupplyDataFunctions,\n\ttype SupplyData,\n} from 'n8n-workflow';\n\nimport { getHttpProxyAgent } from '@utils/httpProxyAgent';\nimport { getConnectionHintNoticeField } from '@utils/sharedFields';\n\nimport type { OpenAICompatibleCredential } from '../../../types/types';\nimport { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';\nimport { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';\nimport { N8nLlmTracing } from '../N8nLlmTracing';\n\nexport class LmChatOpenRouter implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'OpenRouter Chat Model',\n\t\tname: 'lmChatOpenRouter',\n\t\ticon: { light: 'file:openrouter.svg', dark: 'file:openrouter.dark.svg' },\n\t\tgroup: ['transform'],\n\t\tversion: [1],\n\t\tdescription: 'For advanced usage with an AI chain',\n\t\tdefaults: {\n\t\t\tname: 'OpenRouter Chat Model',\n\t\t},\n\t\tcodex: {\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Language Models', 'Root Nodes'],\n\t\t\t\t'Language Models': ['Chat Models (Recommended)'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatopenrouter/',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\t\t// eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node\n\t\tinputs: [],\n\t\t// eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong\n\t\toutputs: [NodeConnectionTypes.AiLanguageModel],\n\t\toutputNames: ['Model'],\n\t\tcredentials: [\n\t\t\t{\n\t\t\t\tname: 'openRouterApi',\n\t\t\t\trequired: true,\n\t\t\t},\n\t\t],\n\t\trequestDefaults: {\n\t\t\tignoreHttpStatusErrors: true,\n\t\t\tbaseURL: '={{ $credentials?.url }}',\n\t\t},\n\t\tproperties: [\n\t\t\tgetConnectionHintNoticeField([NodeConnectionTypes.AiChain, NodeConnectionTypes.AiAgent]),\n\t\t\t{\n\t\t\t\tdisplayName:\n\t\t\t\t\t'If using JSON response format, you must include word \"json\" in the prompt in your chain or agent. Also, make sure to select latest models released post November 2023.',\n\t\t\t\tname: 'notice',\n\t\t\t\ttype: 'notice',\n\t\t\t\tdefault: '',\n\t\t\t\tdisplayOptions: {\n\t\t\t\t\tshow: {\n\t\t\t\t\t\t'/options.responseFormat': ['json_object'],\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Model',\n\t\t\t\tname: 'model',\n\t\t\t\ttype: 'options',\n\t\t\t\tdescription:\n\t\t\t\t\t'The model which will generate the completion. 
<a href=\"https://openrouter.ai/docs/models\">Learn more</a>.',\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tloadOptions: {\n\t\t\t\t\t\trouting: {\n\t\t\t\t\t\t\trequest: {\n\t\t\t\t\t\t\t\tmethod: 'GET',\n\t\t\t\t\t\t\t\turl: '/models',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\toutput: {\n\t\t\t\t\t\t\t\tpostReceive: [\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'rootProperty',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tproperty: 'data',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'setKeyValue',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tname: '={{$responseItem.id}}',\n\t\t\t\t\t\t\t\t\t\t\tvalue: '={{$responseItem.id}}',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'sort',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tkey: 'name',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trouting: {\n\t\t\t\t\tsend: {\n\t\t\t\t\t\ttype: 'body',\n\t\t\t\t\t\tproperty: 'model',\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tdefault: 'openai/gpt-4.1-mini',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Options',\n\t\t\t\tname: 'options',\n\t\t\t\tplaceholder: 'Add Option',\n\t\t\t\tdescription: 'Additional options to add',\n\t\t\t\ttype: 'collection',\n\t\t\t\tdefault: {},\n\t\t\t\toptions: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Frequency Penalty',\n\t\t\t\t\t\tname: 'frequencyPenalty',\n\t\t\t\t\t\tdefault: 0,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim\",\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Maximum Number of Tokens',\n\t\t\t\t\t\tname: 'maxTokens',\n\t\t\t\t\t\tdefault: -1,\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'The maximum number of tokens to generate in the completion. 
Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t\ttypeOptions: {\n\t\t\t\t\t\t\tmaxValue: 32768,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Response Format',\n\t\t\t\t\t\tname: 'responseFormat',\n\t\t\t\t\t\tdefault: 'text',\n\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\toptions: [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Text',\n\t\t\t\t\t\t\t\tvalue: 'text',\n\t\t\t\t\t\t\t\tdescription: 'Regular text response',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'JSON',\n\t\t\t\t\t\t\t\tvalue: 'json_object',\n\t\t\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\t\t'Enables JSON mode, which should guarantee the message the model generates is valid JSON',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t],\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Presence Penalty',\n\t\t\t\t\t\tname: 'presencePenalty',\n\t\t\t\t\t\tdefault: 0,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics\",\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Sampling Temperature',\n\t\t\t\t\t\tname: 'temperature',\n\t\t\t\t\t\tdefault: 0.7,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: 0, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Timeout',\n\t\t\t\t\t\tname: 'timeout',\n\t\t\t\t\t\tdefault: 360000,\n\t\t\t\t\t\tdescription: 'Maximum amount of time a request is allowed to take in milliseconds',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Max Retries',\n\t\t\t\t\t\tname: 'maxRetries',\n\t\t\t\t\t\tdefault: 2,\n\t\t\t\t\t\tdescription: 'Maximum number of retries to attempt',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Top P',\n\t\t\t\t\t\tname: 'topP',\n\t\t\t\t\t\tdefault: 1,\n\t\t\t\t\t\ttypeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t],\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tconst credentials = await this.getCredentials<OpenAICompatibleCredential>('openRouterApi');\n\n\t\tconst modelName = this.getNodeParameter('model', itemIndex) as string;\n\n\t\tconst options = this.getNodeParameter('options', itemIndex, {}) as {\n\t\t\tfrequencyPenalty?: number;\n\t\t\tmaxTokens?: number;\n\t\t\tmaxRetries: number;\n\t\t\ttimeout: number;\n\t\t\tpresencePenalty?: number;\n\t\t\ttemperature?: number;\n\t\t\ttopP?: number;\n\t\t\tresponseFormat?: 'text' | 'json_object';\n\t\t};\n\n\t\tconst configuration: ClientOptions = {\n\t\t\tbaseURL: credentials.url,\n\t\t\thttpAgent: getHttpProxyAgent(),\n\t\t};\n\n\t\tconst model = new ChatOpenAI({\n\t\t\topenAIApiKey: credentials.apiKey,\n\t\t\tmodel: modelName,\n\t\t\t...options,\n\t\t\ttimeout: options.timeout ?? 
60000,\n\t\t\tmaxRetries: options.maxRetries ?? 2,\n\t\t\tconfiguration,\n\t\t\tcallbacks: [new N8nLlmTracing(this)],\n\t\t\tmodelKwargs: options.responseFormat\n\t\t\t\t? {\n\t\t\t\t\t\tresponse_format: { type: options.responseFormat },\n\t\t\t\t\t}\n\t\t\t\t: undefined,\n\t\t\tonFailedAttempt: makeN8nLlmFailedAttemptHandler(this, openAiFailedAttemptHandler),\n\t\t});\n\n\t\treturn {\n\t\t\tresponse: model,\n\t\t};\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAEA,oBAA+C;AAC/C,0BAMO;AAEP,4BAAkC;AAClC,0BAA6C;AAG7C,4BAA2C;AAC3C,wCAA+C;AAC/C,2BAA8B;AAEvB,MAAM,iBAAsC;AAAA,EAA5C;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA,MACb,MAAM;AAAA,MACN,MAAM,EAAE,OAAO,uBAAuB,MAAM,2BAA2B;AAAA,MACvE,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS,CAAC,CAAC;AAAA,MACX,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,mBAAmB,YAAY;AAAA,UACpC,mBAAmB,CAAC,2BAA2B;AAAA,QAChD;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA;AAAA,MAEA,QAAQ,CAAC;AAAA;AAAA,MAET,SAAS,CAAC,wCAAoB,eAAe;AAAA,MAC7C,aAAa,CAAC,OAAO;AAAA,MACrB,aAAa;AAAA,QACZ;AAAA,UACC,MAAM;AAAA,UACN,UAAU;AAAA,QACX;AAAA,MACD;AAAA,MACA,iBAAiB;AAAA,QAChB,wBAAwB;AAAA,QACxB,SAAS;AAAA,MACV;AAAA,MACA,YAAY;AAAA,YACX,kDAA6B,CAAC,wCAAoB,SAAS,wCAAoB,OAAO,CAAC;AAAA,QACvF;AAAA,UACC,aACC;AAAA,UACD,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,UACT,gBAAgB;AAAA,YACf,MAAM;AAAA,cACL,2BAA2B,CAAC,aAAa;AAAA,YAC1C;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,aACC;AAAA,UACD,aAAa;AAAA,YACZ,aAAa;AAAA,cACZ,SAAS;AAAA,gBACR,SAAS;AAAA,kBACR,QAAQ;AAAA,kBACR,KAAK;AAAA,gBACN;AAAA,gBACA,QAAQ;AAAA,kBACP,aAAa;AAAA,oBACZ;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,UAAU;AAAA,sBACX;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,MAAM;AAAA,wBACN,OAAO;AAAA,sBACR;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,KAAK;AAAA,sBACN;AAAA,oBACD;AAAA,kBACD;AAAA,gBACD;AAAA,cACD;AAAA,YACD;AAAA,UACD;AAAA,UACA,SAAS;AAAA,YACR,MAAM;AAAA,cACL,MAAM;AAAA,cACN,UAAU;AAAA,YACX;AAAA,UACD;AAAA,UACA,SAAS;AAAA,QACV;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,aAAa;AAAA,UACb,aAAa;AAAA,UACb,MAAM;AAAA,UACN,SAAS,CAAC;AAAA,UACV,SAAS;AAAA,YACR;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,IAAI,iBAAiB,EAAE;AAAA,cAC7D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aACC;AAAA,cACD,MAAM;AAAA,cACN,aAAa;AAAA,gBACZ,UAAU;AAAA,cACX;AAAA,YACD;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,MAAM;AAAA,cACN,SAAS;AAAA,gBACR;AAAA,kBACC,MAAM;AAAA,kBACN,OAAO;AAAA,kBACP,aAAa;AAAA,gBACd;AAAA,gBACA;AAAA,kBACC,MAAM;AAAA,kBACN,OAAO;AAAA,kBACP,aACC;AAAA,gBACF;AAAA,cACD;AAAA,YACD;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,IAAI,iBAAiB,EAAE;AAAA,cAC7D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,cAC5D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa;AAAA,cACb,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa;AAAA,cACb,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,cAC5D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA;AAAA,EAEA,MAAM,WAAuC,WAAwC;AACpF,UAAM,cAAc,MAAM,KAAK,eAA2C,eAAe;AAEzF,UAAM,YAAY,KAAK,iBAAiB,SAAS,SAAS;AAE1D,UAAM,UAAU,KAAK,iBAAi
B,WAAW,WAAW,CAAC,CAAC;AAW9D,UAAM,gBAA+B;AAAA,MACpC,SAAS,YAAY;AAAA,MACrB,eAAW,yCAAkB;AAAA,IAC9B;AAEA,UAAM,QAAQ,IAAI,yBAAW;AAAA,MAC5B,cAAc,YAAY;AAAA,MAC1B,OAAO;AAAA,MACP,GAAG;AAAA,MACH,SAAS,QAAQ,WAAW;AAAA,MAC5B,YAAY,QAAQ,cAAc;AAAA,MAClC;AAAA,MACA,WAAW,CAAC,IAAI,mCAAc,IAAI,CAAC;AAAA,MACnC,aAAa,QAAQ,iBAClB;AAAA,QACA,iBAAiB,EAAE,MAAM,QAAQ,eAAe;AAAA,MACjD,IACC;AAAA,MACH,qBAAiB,kEAA+B,MAAM,gDAA0B;AAAA,IACjF,CAAC;AAED,WAAO;AAAA,MACN,UAAU;AAAA,IACX;AAAA,EACD;AACD;","names":[]}
+
{"version":3,"sources":["../../../../nodes/llms/LmChatOpenRouter/LmChatOpenRouter.node.ts"],"sourcesContent":["import { ChatOpenAI, type ClientOptions } from '@langchain/openai';\nimport {\n\tNodeConnectionTypes,\n\ttype INodeType,\n\ttype INodeTypeDescription,\n\ttype ISupplyDataFunctions,\n\ttype SupplyData,\n} from 'n8n-workflow';\n\nimport { getProxyAgent } from '@utils/httpProxyAgent';\nimport { getConnectionHintNoticeField } from '@utils/sharedFields';\n\nimport type { OpenAICompatibleCredential } from '../../../types/types';\nimport { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';\nimport { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';\nimport { N8nLlmTracing } from '../N8nLlmTracing';\n\nexport class LmChatOpenRouter implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'OpenRouter Chat Model',\n\t\tname: 'lmChatOpenRouter',\n\t\ticon: { light: 'file:openrouter.svg', dark: 'file:openrouter.dark.svg' },\n\t\tgroup: ['transform'],\n\t\tversion: [1],\n\t\tdescription: 'For advanced usage with an AI chain',\n\t\tdefaults: {\n\t\t\tname: 'OpenRouter Chat Model',\n\t\t},\n\t\tcodex: {\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Language Models', 'Root Nodes'],\n\t\t\t\t'Language Models': ['Chat Models (Recommended)'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatopenrouter/',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\n\t\tinputs: [],\n\n\t\toutputs: [NodeConnectionTypes.AiLanguageModel],\n\t\toutputNames: ['Model'],\n\t\tcredentials: [\n\t\t\t{\n\t\t\t\tname: 'openRouterApi',\n\t\t\t\trequired: true,\n\t\t\t},\n\t\t],\n\t\trequestDefaults: {\n\t\t\tignoreHttpStatusErrors: true,\n\t\t\tbaseURL: '={{ $credentials?.url }}',\n\t\t},\n\t\tproperties: [\n\t\t\tgetConnectionHintNoticeField([NodeConnectionTypes.AiChain, NodeConnectionTypes.AiAgent]),\n\t\t\t{\n\t\t\t\tdisplayName:\n\t\t\t\t\t'If using JSON response format, you must include word \"json\" in the prompt in your chain or agent. Also, make sure to select latest models released post November 2023.',\n\t\t\t\tname: 'notice',\n\t\t\t\ttype: 'notice',\n\t\t\t\tdefault: '',\n\t\t\t\tdisplayOptions: {\n\t\t\t\t\tshow: {\n\t\t\t\t\t\t'/options.responseFormat': ['json_object'],\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Model',\n\t\t\t\tname: 'model',\n\t\t\t\ttype: 'options',\n\t\t\t\tdescription:\n\t\t\t\t\t'The model which will generate the completion. 
<a href=\"https://openrouter.ai/docs/models\">Learn more</a>.',\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tloadOptions: {\n\t\t\t\t\t\trouting: {\n\t\t\t\t\t\t\trequest: {\n\t\t\t\t\t\t\t\tmethod: 'GET',\n\t\t\t\t\t\t\t\turl: '/models',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\toutput: {\n\t\t\t\t\t\t\t\tpostReceive: [\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'rootProperty',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tproperty: 'data',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'setKeyValue',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tname: '={{$responseItem.id}}',\n\t\t\t\t\t\t\t\t\t\t\tvalue: '={{$responseItem.id}}',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'sort',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tkey: 'name',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trouting: {\n\t\t\t\t\tsend: {\n\t\t\t\t\t\ttype: 'body',\n\t\t\t\t\t\tproperty: 'model',\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tdefault: 'openai/gpt-4.1-mini',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Options',\n\t\t\t\tname: 'options',\n\t\t\t\tplaceholder: 'Add Option',\n\t\t\t\tdescription: 'Additional options to add',\n\t\t\t\ttype: 'collection',\n\t\t\t\tdefault: {},\n\t\t\t\toptions: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Frequency Penalty',\n\t\t\t\t\t\tname: 'frequencyPenalty',\n\t\t\t\t\t\tdefault: 0,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim\",\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Maximum Number of Tokens',\n\t\t\t\t\t\tname: 'maxTokens',\n\t\t\t\t\t\tdefault: -1,\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'The maximum number of tokens to generate in the completion. 
Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t\ttypeOptions: {\n\t\t\t\t\t\t\tmaxValue: 32768,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Response Format',\n\t\t\t\t\t\tname: 'responseFormat',\n\t\t\t\t\t\tdefault: 'text',\n\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\toptions: [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Text',\n\t\t\t\t\t\t\t\tvalue: 'text',\n\t\t\t\t\t\t\t\tdescription: 'Regular text response',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'JSON',\n\t\t\t\t\t\t\t\tvalue: 'json_object',\n\t\t\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\t\t'Enables JSON mode, which should guarantee the message the model generates is valid JSON',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t],\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Presence Penalty',\n\t\t\t\t\t\tname: 'presencePenalty',\n\t\t\t\t\t\tdefault: 0,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics\",\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Sampling Temperature',\n\t\t\t\t\t\tname: 'temperature',\n\t\t\t\t\t\tdefault: 0.7,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: 0, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Timeout',\n\t\t\t\t\t\tname: 'timeout',\n\t\t\t\t\t\tdefault: 360000,\n\t\t\t\t\t\tdescription: 'Maximum amount of time a request is allowed to take in milliseconds',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Max Retries',\n\t\t\t\t\t\tname: 'maxRetries',\n\t\t\t\t\t\tdefault: 2,\n\t\t\t\t\t\tdescription: 'Maximum number of retries to attempt',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Top P',\n\t\t\t\t\t\tname: 'topP',\n\t\t\t\t\t\tdefault: 1,\n\t\t\t\t\t\ttypeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. 
We generally recommend altering this or temperature but not both.',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t],\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tconst credentials = await this.getCredentials<OpenAICompatibleCredential>('openRouterApi');\n\n\t\tconst modelName = this.getNodeParameter('model', itemIndex) as string;\n\n\t\tconst options = this.getNodeParameter('options', itemIndex, {}) as {\n\t\t\tfrequencyPenalty?: number;\n\t\t\tmaxTokens?: number;\n\t\t\tmaxRetries: number;\n\t\t\ttimeout: number;\n\t\t\tpresencePenalty?: number;\n\t\t\ttemperature?: number;\n\t\t\ttopP?: number;\n\t\t\tresponseFormat?: 'text' | 'json_object';\n\t\t};\n\n\t\tconst configuration: ClientOptions = {\n\t\t\tbaseURL: credentials.url,\n\t\t\tfetchOptions: {\n\t\t\t\tdispatcher: getProxyAgent(credentials.url),\n\t\t\t},\n\t\t};\n\n\t\tconst model = new ChatOpenAI({\n\t\t\topenAIApiKey: credentials.apiKey,\n\t\t\tmodel: modelName,\n\t\t\t...options,\n\t\t\ttimeout: options.timeout ?? 60000,\n\t\t\tmaxRetries: options.maxRetries ?? 2,\n\t\t\tconfiguration,\n\t\t\tcallbacks: [new N8nLlmTracing(this)],\n\t\t\tmodelKwargs: options.responseFormat\n\t\t\t\t? {\n\t\t\t\t\t\tresponse_format: { type: options.responseFormat },\n\t\t\t\t\t}\n\t\t\t\t: undefined,\n\t\t\tonFailedAttempt: makeN8nLlmFailedAttemptHandler(this, openAiFailedAttemptHandler),\n\t\t});\n\n\t\treturn {\n\t\t\tresponse: model,\n\t\t};\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,oBAA+C;AAC/C,0BAMO;AAEP,4BAA8B;AAC9B,0BAA6C;AAG7C,4BAA2C;AAC3C,wCAA+C;AAC/C,2BAA8B;AAEvB,MAAM,iBAAsC;AAAA,EAA5C;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA,MACb,MAAM;AAAA,MACN,MAAM,EAAE,OAAO,uBAAuB,MAAM,2BAA2B;AAAA,MACvE,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS,CAAC,CAAC;AAAA,MACX,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,mBAAmB,YAAY;AAAA,UACpC,mBAAmB,CAAC,2BAA2B;AAAA,QAChD;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MAEA,QAAQ,CAAC;AAAA,MAET,SAAS,CAAC,wCAAoB,eAAe;AAAA,MAC7C,aAAa,CAAC,OAAO;AAAA,MACrB,aAAa;AAAA,QACZ;AAAA,UACC,MAAM;AAAA,UACN,UAAU;AAAA,QACX;AAAA,MACD;AAAA,MACA,iBAAiB;AAAA,QAChB,wBAAwB;AAAA,QACxB,SAAS;AAAA,MACV;AAAA,MACA,YAAY;AAAA,YACX,kDAA6B,CAAC,wCAAoB,SAAS,wCAAoB,OAAO,CAAC;AAAA,QACvF;AAAA,UACC,aACC;AAAA,UACD,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,UACT,gBAAgB;AAAA,YACf,MAAM;AAAA,cACL,2BAA2B,CAAC,aAAa;AAAA,YAC1C;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,aACC;AAAA,UACD,aAAa;AAAA,YACZ,aAAa;AAAA,cACZ,SAAS;AAAA,gBACR,SAAS;AAAA,kBACR,QAAQ;AAAA,kBACR,KAAK;AAAA,gBACN;AAAA,gBACA,QAAQ;AAAA,kBACP,aAAa;AAAA,oBACZ;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,UAAU;AAAA,sBACX;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,MAAM;AAAA,wBACN,OAAO;AAAA,sBACR;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,KAAK;AAAA,sBACN;AAAA,oBACD;AAAA,kBACD;AAAA,gBACD;AAAA,cACD;AAAA,YACD;AAAA,UACD;AAAA,UACA,SAAS;AAAA,YACR,MAAM;AAAA,cACL,MAAM;AAAA,cACN,UAAU;AAAA,YACX;AAAA,UACD;AAAA,UACA,SAAS;AAAA,QACV;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,aAAa;AAAA,UACb,aAAa;AAAA,UACb,MAAM;AAAA,UACN,SAAS,CAAC;AAAA,UACV,SAAS;AAAA,YACR;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,IAAI,iBAAiB,EAAE;AAAA,cAC7D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aACC;AAAA,cACD,MAAM;AAAA,cACN,aAAa;AAAA,gBACZ,UAAU;AAAA,cACX
;AAAA,YACD;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,MAAM;AAAA,cACN,SAAS;AAAA,gBACR;AAAA,kBACC,MAAM;AAAA,kBACN,OAAO;AAAA,kBACP,aAAa;AAAA,gBACd;AAAA,gBACA;AAAA,kBACC,MAAM;AAAA,kBACN,OAAO;AAAA,kBACP,aACC;AAAA,gBACF;AAAA,cACD;AAAA,YACD;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,IAAI,iBAAiB,EAAE;AAAA,cAC7D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,cAC5D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa;AAAA,cACb,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa;AAAA,cACb,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,cAC5D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA;AAAA,EAEA,MAAM,WAAuC,WAAwC;AACpF,UAAM,cAAc,MAAM,KAAK,eAA2C,eAAe;AAEzF,UAAM,YAAY,KAAK,iBAAiB,SAAS,SAAS;AAE1D,UAAM,UAAU,KAAK,iBAAiB,WAAW,WAAW,CAAC,CAAC;AAW9D,UAAM,gBAA+B;AAAA,MACpC,SAAS,YAAY;AAAA,MACrB,cAAc;AAAA,QACb,gBAAY,qCAAc,YAAY,GAAG;AAAA,MAC1C;AAAA,IACD;AAEA,UAAM,QAAQ,IAAI,yBAAW;AAAA,MAC5B,cAAc,YAAY;AAAA,MAC1B,OAAO;AAAA,MACP,GAAG;AAAA,MACH,SAAS,QAAQ,WAAW;AAAA,MAC5B,YAAY,QAAQ,cAAc;AAAA,MAClC;AAAA,MACA,WAAW,CAAC,IAAI,mCAAc,IAAI,CAAC;AAAA,MACnC,aAAa,QAAQ,iBAClB;AAAA,QACA,iBAAiB,EAAE,MAAM,QAAQ,eAAe;AAAA,MACjD,IACC;AAAA,MACH,qBAAiB,kEAA+B,MAAM,gDAA0B;AAAA,IACjF,CAAC;AAED,WAAO;AAAA,MACN,UAAU;AAAA,IACX;AAAA,EACD;AACD;","names":[]}
@@ -32,7 +32,6 @@ class LmChatXAiGrok {
   constructor() {
     this.description = {
       displayName: "xAI Grok Chat Model",
-      // eslint-disable-next-line n8n-nodes-base/node-class-description-name-miscased
       name: "lmChatXAiGrok",
       icon: { light: "file:logo.dark.svg", dark: "file:logo.svg" },
       group: ["transform"],
@@ -55,9 +54,7 @@ class LmChatXAiGrok {
           ]
         }
       },
-      // eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node
       inputs: [],
-      // eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong
       outputs: [import_n8n_workflow.NodeConnectionTypes.AiLanguageModel],
       outputNames: ["Model"],
       credentials: [
@@ -222,7 +219,9 @@ class LmChatXAiGrok {
     const options = this.getNodeParameter("options", itemIndex, {});
     const configuration = {
       baseURL: credentials.url,
-
+      fetchOptions: {
+        dispatcher: (0, import_httpProxyAgent.getProxyAgent)(credentials.url)
+      }
     };
     const model = new import_openai.ChatOpenAI({
       openAIApiKey: credentials.apiKey,
@@ -1 +1 @@
-
{"version":3,"sources":["../../../../nodes/llms/LmChatXAiGrok/LmChatXAiGrok.node.ts"],"sourcesContent":["/* eslint-disable n8n-nodes-base/node-dirname-against-convention */\n\nimport { ChatOpenAI, type ClientOptions } from '@langchain/openai';\nimport {\n\tNodeConnectionTypes,\n\ttype INodeType,\n\ttype INodeTypeDescription,\n\ttype ISupplyDataFunctions,\n\ttype SupplyData,\n} from 'n8n-workflow';\n\nimport { getHttpProxyAgent } from '@utils/httpProxyAgent';\nimport { getConnectionHintNoticeField } from '@utils/sharedFields';\n\nimport type { OpenAICompatibleCredential } from '../../../types/types';\nimport { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';\nimport { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';\nimport { N8nLlmTracing } from '../N8nLlmTracing';\n\nexport class LmChatXAiGrok implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'xAI Grok Chat Model',\n\t\t// eslint-disable-next-line n8n-nodes-base/node-class-description-name-miscased\n\t\tname: 'lmChatXAiGrok',\n\t\ticon: { light: 'file:logo.dark.svg', dark: 'file:logo.svg' },\n\t\tgroup: ['transform'],\n\t\tversion: [1],\n\t\tdescription: 'For advanced usage with an AI chain',\n\t\tdefaults: {\n\t\t\tname: 'xAI Grok Chat Model',\n\t\t},\n\t\tcodex: {\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Language Models', 'Root Nodes'],\n\t\t\t\t'Language Models': ['Chat Models (Recommended)'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatxaigrok/',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\t\t// eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node\n\t\tinputs: [],\n\t\t// eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong\n\t\toutputs: [NodeConnectionTypes.AiLanguageModel],\n\t\toutputNames: ['Model'],\n\t\tcredentials: [\n\t\t\t{\n\t\t\t\tname: 'xAiApi',\n\t\t\t\trequired: true,\n\t\t\t},\n\t\t],\n\t\trequestDefaults: {\n\t\t\tignoreHttpStatusErrors: true,\n\t\t\tbaseURL: '={{ $credentials?.url }}',\n\t\t},\n\t\tproperties: [\n\t\t\tgetConnectionHintNoticeField([NodeConnectionTypes.AiChain, NodeConnectionTypes.AiAgent]),\n\t\t\t{\n\t\t\t\tdisplayName:\n\t\t\t\t\t'If using JSON response format, you must include word \"json\" in the prompt in your chain or agent. Also, make sure to select latest models released post November 2023.',\n\t\t\t\tname: 'notice',\n\t\t\t\ttype: 'notice',\n\t\t\t\tdefault: '',\n\t\t\t\tdisplayOptions: {\n\t\t\t\t\tshow: {\n\t\t\t\t\t\t'/options.responseFormat': ['json_object'],\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Model',\n\t\t\t\tname: 'model',\n\t\t\t\ttype: 'options',\n\t\t\t\tdescription:\n\t\t\t\t\t'The model which will generate the completion. 
<a href=\"https://docs.x.ai/docs/models\">Learn more</a>.',\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tloadOptions: {\n\t\t\t\t\t\trouting: {\n\t\t\t\t\t\t\trequest: {\n\t\t\t\t\t\t\t\tmethod: 'GET',\n\t\t\t\t\t\t\t\turl: '/models',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\toutput: {\n\t\t\t\t\t\t\t\tpostReceive: [\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'rootProperty',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tproperty: 'data',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'setKeyValue',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tname: '={{$responseItem.id}}',\n\t\t\t\t\t\t\t\t\t\t\tvalue: '={{$responseItem.id}}',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'sort',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tkey: 'name',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trouting: {\n\t\t\t\t\tsend: {\n\t\t\t\t\t\ttype: 'body',\n\t\t\t\t\t\tproperty: 'model',\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tdefault: 'grok-2-vision-1212',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Options',\n\t\t\t\tname: 'options',\n\t\t\t\tplaceholder: 'Add Option',\n\t\t\t\tdescription: 'Additional options to add',\n\t\t\t\ttype: 'collection',\n\t\t\t\tdefault: {},\n\t\t\t\toptions: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Frequency Penalty',\n\t\t\t\t\t\tname: 'frequencyPenalty',\n\t\t\t\t\t\tdefault: 0,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim\",\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Maximum Number of Tokens',\n\t\t\t\t\t\tname: 'maxTokens',\n\t\t\t\t\t\tdefault: -1,\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'The maximum number of tokens to generate in the completion. 
Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t\ttypeOptions: {\n\t\t\t\t\t\t\tmaxValue: 32768,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Response Format',\n\t\t\t\t\t\tname: 'responseFormat',\n\t\t\t\t\t\tdefault: 'text',\n\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\toptions: [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Text',\n\t\t\t\t\t\t\t\tvalue: 'text',\n\t\t\t\t\t\t\t\tdescription: 'Regular text response',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'JSON',\n\t\t\t\t\t\t\t\tvalue: 'json_object',\n\t\t\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\t\t'Enables JSON mode, which should guarantee the message the model generates is valid JSON',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t],\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Presence Penalty',\n\t\t\t\t\t\tname: 'presencePenalty',\n\t\t\t\t\t\tdefault: 0,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics\",\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Sampling Temperature',\n\t\t\t\t\t\tname: 'temperature',\n\t\t\t\t\t\tdefault: 0.7,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: 0, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Timeout',\n\t\t\t\t\t\tname: 'timeout',\n\t\t\t\t\t\tdefault: 360000,\n\t\t\t\t\t\tdescription: 'Maximum amount of time a request is allowed to take in milliseconds',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Max Retries',\n\t\t\t\t\t\tname: 'maxRetries',\n\t\t\t\t\t\tdefault: 2,\n\t\t\t\t\t\tdescription: 'Maximum number of retries to attempt',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Top P',\n\t\t\t\t\t\tname: 'topP',\n\t\t\t\t\t\tdefault: 1,\n\t\t\t\t\t\ttypeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t],\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tconst credentials = await this.getCredentials<OpenAICompatibleCredential>('xAiApi');\n\n\t\tconst modelName = this.getNodeParameter('model', itemIndex) as string;\n\n\t\tconst options = this.getNodeParameter('options', itemIndex, {}) as {\n\t\t\tfrequencyPenalty?: number;\n\t\t\tmaxTokens?: number;\n\t\t\tmaxRetries: number;\n\t\t\ttimeout: number;\n\t\t\tpresencePenalty?: number;\n\t\t\ttemperature?: number;\n\t\t\ttopP?: number;\n\t\t\tresponseFormat?: 'text' | 'json_object';\n\t\t};\n\n\t\tconst configuration: ClientOptions = {\n\t\t\tbaseURL: credentials.url,\n\t\t\thttpAgent: getHttpProxyAgent(),\n\t\t};\n\n\t\tconst model = new ChatOpenAI({\n\t\t\topenAIApiKey: credentials.apiKey,\n\t\t\tmodel: modelName,\n\t\t\t...options,\n\t\t\ttimeout: options.timeout ?? 
60000,\n\t\t\tmaxRetries: options.maxRetries ?? 2,\n\t\t\tconfiguration,\n\t\t\tcallbacks: [new N8nLlmTracing(this)],\n\t\t\tmodelKwargs: {\n\t\t\t\tstream_options: undefined,\n\t\t\t\t...(options.responseFormat\n\t\t\t\t\t? {\n\t\t\t\t\t\t\tresponse_format: { type: options.responseFormat },\n\t\t\t\t\t\t}\n\t\t\t\t\t: undefined),\n\t\t\t},\n\t\t\tonFailedAttempt: makeN8nLlmFailedAttemptHandler(this, openAiFailedAttemptHandler),\n\t\t});\n\n\t\treturn {\n\t\t\tresponse: model,\n\t\t};\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAEA,oBAA+C;AAC/C,0BAMO;AAEP,4BAAkC;AAClC,0BAA6C;AAG7C,4BAA2C;AAC3C,wCAA+C;AAC/C,2BAA8B;AAEvB,MAAM,cAAmC;AAAA,EAAzC;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA;AAAA,MAEb,MAAM;AAAA,MACN,MAAM,EAAE,OAAO,sBAAsB,MAAM,gBAAgB;AAAA,MAC3D,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS,CAAC,CAAC;AAAA,MACX,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,mBAAmB,YAAY;AAAA,UACpC,mBAAmB,CAAC,2BAA2B;AAAA,QAChD;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA;AAAA,MAEA,QAAQ,CAAC;AAAA;AAAA,MAET,SAAS,CAAC,wCAAoB,eAAe;AAAA,MAC7C,aAAa,CAAC,OAAO;AAAA,MACrB,aAAa;AAAA,QACZ;AAAA,UACC,MAAM;AAAA,UACN,UAAU;AAAA,QACX;AAAA,MACD;AAAA,MACA,iBAAiB;AAAA,QAChB,wBAAwB;AAAA,QACxB,SAAS;AAAA,MACV;AAAA,MACA,YAAY;AAAA,YACX,kDAA6B,CAAC,wCAAoB,SAAS,wCAAoB,OAAO,CAAC;AAAA,QACvF;AAAA,UACC,aACC;AAAA,UACD,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,UACT,gBAAgB;AAAA,YACf,MAAM;AAAA,cACL,2BAA2B,CAAC,aAAa;AAAA,YAC1C;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,aACC;AAAA,UACD,aAAa;AAAA,YACZ,aAAa;AAAA,cACZ,SAAS;AAAA,gBACR,SAAS;AAAA,kBACR,QAAQ;AAAA,kBACR,KAAK;AAAA,gBACN;AAAA,gBACA,QAAQ;AAAA,kBACP,aAAa;AAAA,oBACZ;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,UAAU;AAAA,sBACX;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,MAAM;AAAA,wBACN,OAAO;AAAA,sBACR;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,KAAK;AAAA,sBACN;AAAA,oBACD;AAAA,kBACD;AAAA,gBACD;AAAA,cACD;AAAA,YACD;AAAA,UACD;AAAA,UACA,SAAS;AAAA,YACR,MAAM;AAAA,cACL,MAAM;AAAA,cACN,UAAU;AAAA,YACX;AAAA,UACD;AAAA,UACA,SAAS;AAAA,QACV;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,aAAa;AAAA,UACb,aAAa;AAAA,UACb,MAAM;AAAA,UACN,SAAS,CAAC;AAAA,UACV,SAAS;AAAA,YACR;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,IAAI,iBAAiB,EAAE;AAAA,cAC7D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aACC;AAAA,cACD,MAAM;AAAA,cACN,aAAa;AAAA,gBACZ,UAAU;AAAA,cACX;AAAA,YACD;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,MAAM;AAAA,cACN,SAAS;AAAA,gBACR;AAAA,kBACC,MAAM;AAAA,kBACN,OAAO;AAAA,kBACP,aAAa;AAAA,gBACd;AAAA,gBACA;AAAA,kBACC,MAAM;AAAA,kBACN,OAAO;AAAA,kBACP,aACC;AAAA,gBACF;AAAA,cACD;AAAA,YACD;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,IAAI,iBAAiB,EAAE;AAAA,cAC7D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,cAC5D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa;AAAA,cACb,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa;AAAA,cACb,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,cAC5D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA;AAAA,EAEA,MAAM,WAAuC,WAAwC;AACpF,UAAM,cAAc,MAAM,KAAK,e
AA2C,QAAQ;AAElF,UAAM,YAAY,KAAK,iBAAiB,SAAS,SAAS;AAE1D,UAAM,UAAU,KAAK,iBAAiB,WAAW,WAAW,CAAC,CAAC;AAW9D,UAAM,gBAA+B;AAAA,MACpC,SAAS,YAAY;AAAA,MACrB,eAAW,yCAAkB;AAAA,IAC9B;AAEA,UAAM,QAAQ,IAAI,yBAAW;AAAA,MAC5B,cAAc,YAAY;AAAA,MAC1B,OAAO;AAAA,MACP,GAAG;AAAA,MACH,SAAS,QAAQ,WAAW;AAAA,MAC5B,YAAY,QAAQ,cAAc;AAAA,MAClC;AAAA,MACA,WAAW,CAAC,IAAI,mCAAc,IAAI,CAAC;AAAA,MACnC,aAAa;AAAA,QACZ,gBAAgB;AAAA,QAChB,GAAI,QAAQ,iBACT;AAAA,UACA,iBAAiB,EAAE,MAAM,QAAQ,eAAe;AAAA,QACjD,IACC;AAAA,MACJ;AAAA,MACA,qBAAiB,kEAA+B,MAAM,gDAA0B;AAAA,IACjF,CAAC;AAED,WAAO;AAAA,MACN,UAAU;AAAA,IACX;AAAA,EACD;AACD;","names":[]}
+
{"version":3,"sources":["../../../../nodes/llms/LmChatXAiGrok/LmChatXAiGrok.node.ts"],"sourcesContent":["import { ChatOpenAI, type ClientOptions } from '@langchain/openai';\nimport {\n\tNodeConnectionTypes,\n\ttype INodeType,\n\ttype INodeTypeDescription,\n\ttype ISupplyDataFunctions,\n\ttype SupplyData,\n} from 'n8n-workflow';\n\nimport { getProxyAgent } from '@utils/httpProxyAgent';\nimport { getConnectionHintNoticeField } from '@utils/sharedFields';\n\nimport type { OpenAICompatibleCredential } from '../../../types/types';\nimport { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';\nimport { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';\nimport { N8nLlmTracing } from '../N8nLlmTracing';\n\nexport class LmChatXAiGrok implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'xAI Grok Chat Model',\n\n\t\tname: 'lmChatXAiGrok',\n\t\ticon: { light: 'file:logo.dark.svg', dark: 'file:logo.svg' },\n\t\tgroup: ['transform'],\n\t\tversion: [1],\n\t\tdescription: 'For advanced usage with an AI chain',\n\t\tdefaults: {\n\t\t\tname: 'xAI Grok Chat Model',\n\t\t},\n\t\tcodex: {\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Language Models', 'Root Nodes'],\n\t\t\t\t'Language Models': ['Chat Models (Recommended)'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatxaigrok/',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\n\t\tinputs: [],\n\n\t\toutputs: [NodeConnectionTypes.AiLanguageModel],\n\t\toutputNames: ['Model'],\n\t\tcredentials: [\n\t\t\t{\n\t\t\t\tname: 'xAiApi',\n\t\t\t\trequired: true,\n\t\t\t},\n\t\t],\n\t\trequestDefaults: {\n\t\t\tignoreHttpStatusErrors: true,\n\t\t\tbaseURL: '={{ $credentials?.url }}',\n\t\t},\n\t\tproperties: [\n\t\t\tgetConnectionHintNoticeField([NodeConnectionTypes.AiChain, NodeConnectionTypes.AiAgent]),\n\t\t\t{\n\t\t\t\tdisplayName:\n\t\t\t\t\t'If using JSON response format, you must include word \"json\" in the prompt in your chain or agent. Also, make sure to select latest models released post November 2023.',\n\t\t\t\tname: 'notice',\n\t\t\t\ttype: 'notice',\n\t\t\t\tdefault: '',\n\t\t\t\tdisplayOptions: {\n\t\t\t\t\tshow: {\n\t\t\t\t\t\t'/options.responseFormat': ['json_object'],\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Model',\n\t\t\t\tname: 'model',\n\t\t\t\ttype: 'options',\n\t\t\t\tdescription:\n\t\t\t\t\t'The model which will generate the completion. 
<a href=\"https://docs.x.ai/docs/models\">Learn more</a>.',\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tloadOptions: {\n\t\t\t\t\t\trouting: {\n\t\t\t\t\t\t\trequest: {\n\t\t\t\t\t\t\t\tmethod: 'GET',\n\t\t\t\t\t\t\t\turl: '/models',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\toutput: {\n\t\t\t\t\t\t\t\tpostReceive: [\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'rootProperty',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tproperty: 'data',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'setKeyValue',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tname: '={{$responseItem.id}}',\n\t\t\t\t\t\t\t\t\t\t\tvalue: '={{$responseItem.id}}',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'sort',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tkey: 'name',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trouting: {\n\t\t\t\t\tsend: {\n\t\t\t\t\t\ttype: 'body',\n\t\t\t\t\t\tproperty: 'model',\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tdefault: 'grok-2-vision-1212',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Options',\n\t\t\t\tname: 'options',\n\t\t\t\tplaceholder: 'Add Option',\n\t\t\t\tdescription: 'Additional options to add',\n\t\t\t\ttype: 'collection',\n\t\t\t\tdefault: {},\n\t\t\t\toptions: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Frequency Penalty',\n\t\t\t\t\t\tname: 'frequencyPenalty',\n\t\t\t\t\t\tdefault: 0,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim\",\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Maximum Number of Tokens',\n\t\t\t\t\t\tname: 'maxTokens',\n\t\t\t\t\t\tdefault: -1,\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'The maximum number of tokens to generate in the completion. 
Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t\ttypeOptions: {\n\t\t\t\t\t\t\tmaxValue: 32768,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Response Format',\n\t\t\t\t\t\tname: 'responseFormat',\n\t\t\t\t\t\tdefault: 'text',\n\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\toptions: [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Text',\n\t\t\t\t\t\t\t\tvalue: 'text',\n\t\t\t\t\t\t\t\tdescription: 'Regular text response',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'JSON',\n\t\t\t\t\t\t\t\tvalue: 'json_object',\n\t\t\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\t\t'Enables JSON mode, which should guarantee the message the model generates is valid JSON',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t],\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Presence Penalty',\n\t\t\t\t\t\tname: 'presencePenalty',\n\t\t\t\t\t\tdefault: 0,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics\",\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Sampling Temperature',\n\t\t\t\t\t\tname: 'temperature',\n\t\t\t\t\t\tdefault: 0.7,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: 0, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Timeout',\n\t\t\t\t\t\tname: 'timeout',\n\t\t\t\t\t\tdefault: 360000,\n\t\t\t\t\t\tdescription: 'Maximum amount of time a request is allowed to take in milliseconds',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Max Retries',\n\t\t\t\t\t\tname: 'maxRetries',\n\t\t\t\t\t\tdefault: 2,\n\t\t\t\t\t\tdescription: 'Maximum number of retries to attempt',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Top P',\n\t\t\t\t\t\tname: 'topP',\n\t\t\t\t\t\tdefault: 1,\n\t\t\t\t\t\ttypeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. 
We generally recommend altering this or temperature but not both.',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t],\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tconst credentials = await this.getCredentials<OpenAICompatibleCredential>('xAiApi');\n\n\t\tconst modelName = this.getNodeParameter('model', itemIndex) as string;\n\n\t\tconst options = this.getNodeParameter('options', itemIndex, {}) as {\n\t\t\tfrequencyPenalty?: number;\n\t\t\tmaxTokens?: number;\n\t\t\tmaxRetries: number;\n\t\t\ttimeout: number;\n\t\t\tpresencePenalty?: number;\n\t\t\ttemperature?: number;\n\t\t\ttopP?: number;\n\t\t\tresponseFormat?: 'text' | 'json_object';\n\t\t};\n\n\t\tconst configuration: ClientOptions = {\n\t\t\tbaseURL: credentials.url,\n\t\t\tfetchOptions: {\n\t\t\t\tdispatcher: getProxyAgent(credentials.url),\n\t\t\t},\n\t\t};\n\n\t\tconst model = new ChatOpenAI({\n\t\t\topenAIApiKey: credentials.apiKey,\n\t\t\tmodel: modelName,\n\t\t\t...options,\n\t\t\ttimeout: options.timeout ?? 60000,\n\t\t\tmaxRetries: options.maxRetries ?? 2,\n\t\t\tconfiguration,\n\t\t\tcallbacks: [new N8nLlmTracing(this)],\n\t\t\tmodelKwargs: {\n\t\t\t\tstream_options: undefined,\n\t\t\t\t...(options.responseFormat\n\t\t\t\t\t? {\n\t\t\t\t\t\t\tresponse_format: { type: options.responseFormat },\n\t\t\t\t\t\t}\n\t\t\t\t\t: undefined),\n\t\t\t},\n\t\t\tonFailedAttempt: makeN8nLlmFailedAttemptHandler(this, openAiFailedAttemptHandler),\n\t\t});\n\n\t\treturn {\n\t\t\tresponse: model,\n\t\t};\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,oBAA+C;AAC/C,0BAMO;AAEP,4BAA8B;AAC9B,0BAA6C;AAG7C,4BAA2C;AAC3C,wCAA+C;AAC/C,2BAA8B;AAEvB,MAAM,cAAmC;AAAA,EAAzC;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA,MAEb,MAAM;AAAA,MACN,MAAM,EAAE,OAAO,sBAAsB,MAAM,gBAAgB;AAAA,MAC3D,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS,CAAC,CAAC;AAAA,MACX,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,mBAAmB,YAAY;AAAA,UACpC,mBAAmB,CAAC,2BAA2B;AAAA,QAChD;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MAEA,QAAQ,CAAC;AAAA,MAET,SAAS,CAAC,wCAAoB,eAAe;AAAA,MAC7C,aAAa,CAAC,OAAO;AAAA,MACrB,aAAa;AAAA,QACZ;AAAA,UACC,MAAM;AAAA,UACN,UAAU;AAAA,QACX;AAAA,MACD;AAAA,MACA,iBAAiB;AAAA,QAChB,wBAAwB;AAAA,QACxB,SAAS;AAAA,MACV;AAAA,MACA,YAAY;AAAA,YACX,kDAA6B,CAAC,wCAAoB,SAAS,wCAAoB,OAAO,CAAC;AAAA,QACvF;AAAA,UACC,aACC;AAAA,UACD,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,UACT,gBAAgB;AAAA,YACf,MAAM;AAAA,cACL,2BAA2B,CAAC,aAAa;AAAA,YAC1C;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,aACC;AAAA,UACD,aAAa;AAAA,YACZ,aAAa;AAAA,cACZ,SAAS;AAAA,gBACR,SAAS;AAAA,kBACR,QAAQ;AAAA,kBACR,KAAK;AAAA,gBACN;AAAA,gBACA,QAAQ;AAAA,kBACP,aAAa;AAAA,oBACZ;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,UAAU;AAAA,sBACX;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,MAAM;AAAA,wBACN,OAAO;AAAA,sBACR;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,KAAK;AAAA,sBACN;AAAA,oBACD;AAAA,kBACD;AAAA,gBACD;AAAA,cACD;AAAA,YACD;AAAA,UACD;AAAA,UACA,SAAS;AAAA,YACR,MAAM;AAAA,cACL,MAAM;AAAA,cACN,UAAU;AAAA,YACX;AAAA,UACD;AAAA,UACA,SAAS;AAAA,QACV;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,aAAa;AAAA,UACb,aAAa;AAAA,UACb,MAAM;AAAA,UACN,SAAS,CAAC;AAAA,UACV,SAAS;AAAA,YACR;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,IAAI,iBAAiB,EAAE;AAAA,cAC7D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cAC
T,aACC;AAAA,cACD,MAAM;AAAA,cACN,aAAa;AAAA,gBACZ,UAAU;AAAA,cACX;AAAA,YACD;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,MAAM;AAAA,cACN,SAAS;AAAA,gBACR;AAAA,kBACC,MAAM;AAAA,kBACN,OAAO;AAAA,kBACP,aAAa;AAAA,gBACd;AAAA,gBACA;AAAA,kBACC,MAAM;AAAA,kBACN,OAAO;AAAA,kBACP,aACC;AAAA,gBACF;AAAA,cACD;AAAA,YACD;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,IAAI,iBAAiB,EAAE;AAAA,cAC7D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,cAC5D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa;AAAA,cACb,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa;AAAA,cACb,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,cAC5D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA;AAAA,EAEA,MAAM,WAAuC,WAAwC;AACpF,UAAM,cAAc,MAAM,KAAK,eAA2C,QAAQ;AAElF,UAAM,YAAY,KAAK,iBAAiB,SAAS,SAAS;AAE1D,UAAM,UAAU,KAAK,iBAAiB,WAAW,WAAW,CAAC,CAAC;AAW9D,UAAM,gBAA+B;AAAA,MACpC,SAAS,YAAY;AAAA,MACrB,cAAc;AAAA,QACb,gBAAY,qCAAc,YAAY,GAAG;AAAA,MAC1C;AAAA,IACD;AAEA,UAAM,QAAQ,IAAI,yBAAW;AAAA,MAC5B,cAAc,YAAY;AAAA,MAC1B,OAAO;AAAA,MACP,GAAG;AAAA,MACH,SAAS,QAAQ,WAAW;AAAA,MAC5B,YAAY,QAAQ,cAAc;AAAA,MAClC;AAAA,MACA,WAAW,CAAC,IAAI,mCAAc,IAAI,CAAC;AAAA,MACnC,aAAa;AAAA,QACZ,gBAAgB;AAAA,QAChB,GAAI,QAAQ,iBACT;AAAA,UACA,iBAAiB,EAAE,MAAM,QAAQ,eAAe;AAAA,QACjD,IACC;AAAA,MACJ;AAAA,MACA,qBAAiB,kEAA+B,MAAM,gDAA0B;AAAA,IACjF,CAAC;AAED,WAAO;AAAA,MACN,UAAU;AAAA,IACX;AAAA,EACD;AACD;","names":[]}
package/dist/nodes/llms/N8nLlmTracing.js.map
@@ -1 +1 @@
-
{"version":3,"sources":["../../../nodes/llms/N8nLlmTracing.ts"],"sourcesContent":["import { BaseCallbackHandler } from '@langchain/core/callbacks/base';\nimport type { SerializedFields } from '@langchain/core/dist/load/map_keys';\nimport { getModelNameForTiktoken } from '@langchain/core/language_models/base';\nimport type {\n\tSerialized,\n\tSerializedNotImplemented,\n\tSerializedSecret,\n} from '@langchain/core/load/serializable';\nimport type { BaseMessage } from '@langchain/core/messages';\nimport type { LLMResult } from '@langchain/core/outputs';\nimport pick from 'lodash/pick';\nimport type { IDataObject, ISupplyDataFunctions, JsonObject } from 'n8n-workflow';\nimport { NodeConnectionTypes, NodeError, NodeOperationError } from 'n8n-workflow';\n\nimport { logAiEvent } from '@utils/helpers';\nimport { estimateTokensFromStringList } from '@utils/tokenizer/token-estimator';\n\ntype TokensUsageParser = (llmOutput: LLMResult['llmOutput']) => {\n\tcompletionTokens: number;\n\tpromptTokens: number;\n\ttotalTokens: number;\n};\n\ntype RunDetail = {\n\tindex: number;\n\tmessages: BaseMessage[] | string[] | string;\n\toptions: SerializedSecret | SerializedNotImplemented | SerializedFields;\n};\n\nconst TIKTOKEN_ESTIMATE_MODEL = 'gpt-4o';\nexport class N8nLlmTracing extends BaseCallbackHandler {\n\tname = 'N8nLlmTracing';\n\n\t// This flag makes sure that LangChain will wait for the handlers to finish before continuing\n\t// This is crucial for the handleLLMError handler to work correctly (it should be called before the error is propagated to the root node)\n\tawaitHandlers = true;\n\n\tconnectionType = NodeConnectionTypes.AiLanguageModel;\n\n\tpromptTokensEstimate = 0;\n\n\tcompletionTokensEstimate = 0;\n\n\t#parentRunIndex?: number;\n\n\t/**\n\t * A map to associate LLM run IDs to run details.\n\t * Key: Unique identifier for each LLM run (run ID)\n\t * Value: RunDetails object\n\t *\n\t */\n\trunsMap: Record<string, RunDetail> = {};\n\n\toptions = {\n\t\t// Default(OpenAI format) parser\n\t\ttokensUsageParser: (llmOutput: LLMResult['llmOutput']) => {\n\t\t\tconst completionTokens = (llmOutput?.tokenUsage?.completionTokens as number) ?? 0;\n\t\t\tconst promptTokens = (llmOutput?.tokenUsage?.promptTokens as number) ?? 0;\n\n\t\t\treturn {\n\t\t\t\tcompletionTokens,\n\t\t\t\tpromptTokens,\n\t\t\t\ttotalTokens: completionTokens + promptTokens,\n\t\t\t};\n\t\t},\n\t\terrorDescriptionMapper: (error: NodeError) => error.description,\n\t};\n\n\tconstructor(\n\t\tprivate executionFunctions: ISupplyDataFunctions,\n\t\toptions?: {\n\t\t\ttokensUsageParser?: TokensUsageParser;\n\t\t\terrorDescriptionMapper?: (error: NodeError) => string;\n\t\t},\n\t) {\n\t\tsuper();\n\t\tthis.options = { ...this.options, ...options };\n\t}\n\n\tasync estimateTokensFromGeneration(generations: LLMResult['generations']) {\n\t\tconst messages = generations.flatMap((gen) => gen.map((g) => g.text));\n\t\treturn await this.estimateTokensFromStringList(messages);\n\t}\n\n\tasync estimateTokensFromStringList(list: string[]) {\n\t\tconst embeddingModel = getModelNameForTiktoken(TIKTOKEN_ESTIMATE_MODEL);\n\t\treturn await estimateTokensFromStringList(list, embeddingModel);\n\t}\n\n\tasync handleLLMEnd(output: LLMResult, runId: string) {\n\t\t// The fallback should never happen since handleLLMStart should always set the run details\n\t\t// but just in case, we set the index to the length of the runsMap\n\t\tconst runDetails = this.runsMap[runId] ?? 
{ index: Object.keys(this.runsMap).length };\n\n\t\toutput.generations = output.generations.map((gen) =>\n\t\t\tgen.map((g) => pick(g, ['text', 'generationInfo'])),\n\t\t);\n\n\t\tconst tokenUsageEstimate = {\n\t\t\tcompletionTokens: 0,\n\t\t\tpromptTokens: 0,\n\t\t\ttotalTokens: 0,\n\t\t};\n\t\tconst tokenUsage = this.options.tokensUsageParser(output.llmOutput);\n\n\t\tif (output.generations.length > 0) {\n\t\t\ttokenUsageEstimate.completionTokens = await this.estimateTokensFromGeneration(\n\t\t\t\toutput.generations,\n\t\t\t);\n\n\t\t\ttokenUsageEstimate.promptTokens = this.promptTokensEstimate;\n\t\t\ttokenUsageEstimate.totalTokens =\n\t\t\t\ttokenUsageEstimate.completionTokens + this.promptTokensEstimate;\n\t\t}\n\t\tconst response: {\n\t\t\tresponse: { generations: LLMResult['generations'] };\n\t\t\ttokenUsageEstimate?: typeof tokenUsageEstimate;\n\t\t\ttokenUsage?: typeof tokenUsage;\n\t\t} = {\n\t\t\tresponse: { generations: output.generations },\n\t\t};\n\n\t\t// If the LLM response contains actual tokens usage, otherwise fallback to the estimate\n\t\tif (tokenUsage.completionTokens > 0) {\n\t\t\tresponse.tokenUsage = tokenUsage;\n\t\t} else {\n\t\t\tresponse.tokenUsageEstimate = tokenUsageEstimate;\n\t\t}\n\n\t\tconst parsedMessages =\n\t\t\ttypeof runDetails.messages === 'string'\n\t\t\t\t? runDetails.messages\n\t\t\t\t: runDetails.messages.map((message) => {\n\t\t\t\t\t\tif (typeof message === 'string') return message;\n\t\t\t\t\t\tif (typeof message?.toJSON === 'function') return message.toJSON();\n\n\t\t\t\t\t\treturn message;\n\t\t\t\t\t});\n\n\t\tconst sourceNodeRunIndex =\n\t\t\tthis.#parentRunIndex !== undefined ? this.#parentRunIndex + runDetails.index : undefined;\n\n\t\tthis.executionFunctions.addOutputData(\n\t\t\tthis.connectionType,\n\t\t\trunDetails.index,\n\t\t\t[[{ json: { ...response } }]],\n\t\t\tundefined,\n\t\t\tsourceNodeRunIndex,\n\t\t);\n\n\t\tlogAiEvent(this.executionFunctions, 'ai-llm-generated-output', {\n\t\t\tmessages: parsedMessages,\n\t\t\toptions: runDetails.options,\n\t\t\tresponse,\n\t\t});\n\t}\n\n\tasync handleLLMStart(llm: Serialized, prompts: string[], runId: string) {\n\t\tconst estimatedTokens = await this.estimateTokensFromStringList(prompts);\n\t\tconst sourceNodeRunIndex =\n\t\t\tthis.#parentRunIndex !== undefined\n\t\t\t\t? this.#parentRunIndex + this.executionFunctions.getNextRunIndex()\n\t\t\t\t: undefined;\n\n\t\tconst options = llm.type === 'constructor' ? llm.kwargs : llm;\n\t\tconst { index } = this.executionFunctions.addInputData(\n\t\t\tthis.connectionType,\n\t\t\t[\n\t\t\t\t[\n\t\t\t\t\t{\n\t\t\t\t\t\tjson: {\n\t\t\t\t\t\t\tmessages: prompts,\n\t\t\t\t\t\t\testimatedTokens,\n\t\t\t\t\t\t\toptions,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t],\n\t\t\tsourceNodeRunIndex,\n\t\t);\n\n\t\t// Save the run details for later use when processing `handleLLMEnd` event\n\t\tthis.runsMap[runId] = {\n\t\t\tindex,\n\t\t\toptions,\n\t\t\tmessages: prompts,\n\t\t};\n\t\tthis.promptTokensEstimate = estimatedTokens;\n\t}\n\n\tasync handleLLMError(\n\t\terror: IDataObject | Error,\n\t\trunId: string,\n\t\tparentRunId?: string | undefined,\n\t) {\n\t\tconst runDetails = this.runsMap[runId] ?? 
{ index: Object.keys(this.runsMap).length };\n\n\t\t// Filter out non-x- headers to avoid leaking sensitive information in logs\n\t\tif (typeof error === 'object' && error?.hasOwnProperty('headers')) {\n\t\t\tconst errorWithHeaders = error as { headers: Record<string, unknown> };\n\n\t\t\tObject.keys(errorWithHeaders.headers).forEach((key) => {\n\t\t\t\tif (!key.startsWith('x-')) {\n\t\t\t\t\tdelete errorWithHeaders.headers[key];\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\n\t\tif (error instanceof NodeError) {\n\t\t\tif (this.options.errorDescriptionMapper) {\n\t\t\t\terror.description = this.options.errorDescriptionMapper(error);\n\t\t\t}\n\n\t\t\tthis.executionFunctions.addOutputData(this.connectionType, runDetails.index, error);\n\t\t} else {\n\t\t\t// If the error is not a NodeError, we wrap it in a NodeOperationError\n\t\t\tthis.executionFunctions.addOutputData(\n\t\t\t\tthis.connectionType,\n\t\t\t\trunDetails.index,\n\t\t\t\tnew NodeOperationError(this.executionFunctions.getNode(), error as JsonObject, {\n\t\t\t\t\tfunctionality: 'configuration-node',\n\t\t\t\t}),\n\t\t\t);\n\t\t}\n\n\t\tlogAiEvent(this.executionFunctions, 'ai-llm-errored', {\n\t\t\terror: Object.keys(error).length === 0 ? error.toString() : error,\n\t\t\trunId,\n\t\t\tparentRunId,\n\t\t});\n\t}\n\n\t// Used to associate subsequent runs with the correct parent run in subnodes of subnodes\n\tsetParentRunIndex(runIndex: number) {\n\t\tthis.#parentRunIndex = runIndex;\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,kBAAoC;AAEpC,IAAAA,eAAwC;AAQxC,kBAAiB;AAEjB,0BAAmE;AAEnE,qBAA2B;AAC3B,6BAA6C;AAf7C;AA6BA,MAAM,0BAA0B;AACzB,MAAM,sBAAsB,gCAAoB;AAAA,EAsCtD,YACS,oBACR,SAIC;AACD,UAAM;AANE;AAtCT,gBAAO;AAIP;AAAA;AAAA,yBAAgB;AAEhB,0BAAiB,wCAAoB;AAErC,gCAAuB;AAEvB,oCAA2B;AAE3B;AAQA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,mBAAqC,CAAC;AAEtC,mBAAU;AAAA;AAAA,MAET,mBAAmB,CAAC,cAAsC;AACzD,cAAM,mBAAoB,WAAW,YAAY,oBAA+B;AAChF,cAAM,eAAgB,WAAW,YAAY,gBAA2B;AAExE,eAAO;AAAA,UACN;AAAA,UACA;AAAA,UACA,aAAa,mBAAmB;AAAA,QACjC;AAAA,MACD;AAAA,MACA,wBAAwB,CAAC,UAAqB,MAAM;AAAA,IACrD;AAUC,SAAK,UAAU,EAAE,GAAG,KAAK,SAAS,GAAG,QAAQ;AAAA,EAC9C;AAAA,EAEA,MAAM,6BAA6B,aAAuC;AACzE,UAAM,WAAW,YAAY,QAAQ,CAAC,QAAQ,IAAI,IAAI,CAAC,MAAM,EAAE,IAAI,CAAC;AACpE,WAAO,MAAM,KAAK,6BAA6B,QAAQ;AAAA,EACxD;AAAA,EAEA,MAAM,6BAA6B,MAAgB;AAClD,UAAM,qBAAiB,sCAAwB,uBAAuB;AACtE,WAAO,UAAM,qDAA6B,MAAM,cAAc;AAAA,EAC/D;AAAA,EAEA,MAAM,aAAa,QAAmB,OAAe;AAGpD,UAAM,aAAa,KAAK,QAAQ,KAAK,KAAK,EAAE,OAAO,OAAO,KAAK,KAAK,OAAO,EAAE,OAAO;AAEpF,WAAO,cAAc,OAAO,YAAY;AAAA,MAAI,CAAC,QAC5C,IAAI,IAAI,CAAC,UAAM,YAAAC,SAAK,GAAG,CAAC,QAAQ,gBAAgB,CAAC,CAAC;AAAA,IACnD;AAEA,UAAM,qBAAqB;AAAA,MAC1B,kBAAkB;AAAA,MAClB,cAAc;AAAA,MACd,aAAa;AAAA,IACd;AACA,UAAM,aAAa,KAAK,QAAQ,kBAAkB,OAAO,SAAS;AAElE,QAAI,OAAO,YAAY,SAAS,GAAG;AAClC,yBAAmB,mBAAmB,MAAM,KAAK;AAAA,QAChD,OAAO;AAAA,MACR;AAEA,yBAAmB,eAAe,KAAK;AACvC,yBAAmB,cAClB,mBAAmB,mBAAmB,KAAK;AAAA,IAC7C;AACA,UAAM,WAIF;AAAA,MACH,UAAU,EAAE,aAAa,OAAO,YAAY;AAAA,IAC7C;AAGA,QAAI,WAAW,mBAAmB,GAAG;AACpC,eAAS,aAAa;AAAA,IACvB,OAAO;AACN,eAAS,qBAAqB;AAAA,IAC/B;AAEA,UAAM,iBACL,OAAO,WAAW,aAAa,WAC5B,WAAW,WACX,WAAW,SAAS,IAAI,CAAC,YAAY;AACrC,UAAI,OAAO,YAAY,SAAU,QAAO;AACxC,UAAI,OAAO,SAAS,WAAW,WAAY,QAAO,QAAQ,OAAO;AAEjE,aAAO;AAAA,IACR,CAAC;AAEJ,UAAM,qBACL,mBAAK,qBAAoB,SAAY,mBAAK,mBAAkB,WAAW,QAAQ;AAEhF,SAAK,mBAAmB;AAAA,MACvB,KAAK;AAAA,MACL,WAAW;AAAA,MACX,CAAC,CAAC,EAAE,MAAM,EAAE,GAAG,SAAS,EAAE,CAAC,CAAC;AAAA,MAC5B;AAAA,MACA;AAAA,IACD;AAEA,mCAAW,KAAK,oBAAoB,2BAA2B;AAAA,MAC9D,UAAU;AAAA,MACV,SAAS,WAAW;AAAA,MACpB;AAAA,IACD,CAAC;AAAA,EACF;AAAA,EAEA,MAAM,eAAe,KAAiB,SAAmB,OAAe;AACvE,UAAM,kBAA
kB,MAAM,KAAK,6BAA6B,OAAO;AACvE,UAAM,qBACL,mBAAK,qBAAoB,SACtB,mBAAK,mBAAkB,KAAK,mBAAmB,gBAAgB,IAC/D;AAEJ,UAAM,UAAU,IAAI,SAAS,gBAAgB,IAAI,SAAS;AAC1D,UAAM,EAAE,MAAM,IAAI,KAAK,mBAAmB;AAAA,MACzC,KAAK;AAAA,MACL;AAAA,QACC;AAAA,UACC;AAAA,YACC,MAAM;AAAA,cACL,UAAU;AAAA,cACV;AAAA,cACA;AAAA,YACD;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MACA;AAAA,IACD;AAGA,SAAK,QAAQ,KAAK,IAAI;AAAA,MACrB;AAAA,MACA;AAAA,MACA,UAAU;AAAA,IACX;AACA,SAAK,uBAAuB;AAAA,EAC7B;AAAA,EAEA,MAAM,eACL,OACA,OACA,aACC;AACD,UAAM,aAAa,KAAK,QAAQ,KAAK,KAAK,EAAE,OAAO,OAAO,KAAK,KAAK,OAAO,EAAE,OAAO;AAGpF,QAAI,OAAO,UAAU,YAAY,OAAO,eAAe,SAAS,GAAG;AAClE,YAAM,mBAAmB;AAEzB,aAAO,KAAK,iBAAiB,OAAO,EAAE,QAAQ,CAAC,QAAQ;AACtD,YAAI,CAAC,IAAI,WAAW,IAAI,GAAG;AAC1B,iBAAO,iBAAiB,QAAQ,GAAG;AAAA,QACpC;AAAA,MACD,CAAC;AAAA,IACF;AAEA,QAAI,iBAAiB,+BAAW;AAC/B,UAAI,KAAK,QAAQ,wBAAwB;AACxC,cAAM,cAAc,KAAK,QAAQ,uBAAuB,KAAK;AAAA,MAC9D;AAEA,WAAK,mBAAmB,cAAc,KAAK,gBAAgB,WAAW,OAAO,KAAK;AAAA,IACnF,OAAO;AAEN,WAAK,mBAAmB;AAAA,QACvB,KAAK;AAAA,QACL,WAAW;AAAA,QACX,IAAI,uCAAmB,KAAK,mBAAmB,QAAQ,GAAG,OAAqB;AAAA,UAC9E,eAAe;AAAA,QAChB,CAAC;AAAA,MACF;AAAA,IACD;AAEA,mCAAW,KAAK,oBAAoB,kBAAkB;AAAA,MACrD,OAAO,OAAO,KAAK,KAAK,EAAE,WAAW,IAAI,MAAM,SAAS,IAAI;AAAA,MAC5D;AAAA,MACA;AAAA,IACD,CAAC;AAAA,EACF;AAAA;AAAA,EAGA,kBAAkB,UAAkB;AACnC,uBAAK,iBAAkB;AAAA,EACxB;AACD;AAjMC;","names":["import_base","pick"]}
+
{"version":3,"sources":["../../../nodes/llms/N8nLlmTracing.ts"],"sourcesContent":["import { BaseCallbackHandler } from '@langchain/core/callbacks/base';\nimport type { SerializedFields } from '@langchain/core/dist/load/map_keys';\nimport { getModelNameForTiktoken } from '@langchain/core/language_models/base';\nimport type {\n\tSerialized,\n\tSerializedNotImplemented,\n\tSerializedSecret,\n} from '@langchain/core/load/serializable';\nimport type { BaseMessage } from '@langchain/core/messages';\nimport type { LLMResult } from '@langchain/core/outputs';\nimport pick from 'lodash/pick';\nimport type { IDataObject, ISupplyDataFunctions, JsonObject } from 'n8n-workflow';\nimport { NodeConnectionTypes, NodeError, NodeOperationError } from 'n8n-workflow';\n\nimport { logAiEvent } from '@utils/helpers';\nimport { estimateTokensFromStringList } from '@utils/tokenizer/token-estimator';\n\ntype TokensUsageParser = (llmOutput: LLMResult['llmOutput']) => {\n\tcompletionTokens: number;\n\tpromptTokens: number;\n\ttotalTokens: number;\n};\n\ntype RunDetail = {\n\tindex: number;\n\tmessages: BaseMessage[] | string[] | string;\n\toptions: SerializedSecret | SerializedNotImplemented | SerializedFields;\n};\n\nconst TIKTOKEN_ESTIMATE_MODEL = 'gpt-4o';\nexport class N8nLlmTracing extends BaseCallbackHandler {\n\tname = 'N8nLlmTracing';\n\n\t// This flag makes sure that LangChain will wait for the handlers to finish before continuing\n\t// This is crucial for the handleLLMError handler to work correctly (it should be called before the error is propagated to the root node)\n\tawaitHandlers = true;\n\n\tconnectionType = NodeConnectionTypes.AiLanguageModel;\n\n\tpromptTokensEstimate = 0;\n\n\tcompletionTokensEstimate = 0;\n\n\t#parentRunIndex?: number;\n\n\t/**\n\t * A map to associate LLM run IDs to run details.\n\t * Key: Unique identifier for each LLM run (run ID)\n\t * Value: RunDetails object\n\t *\n\t */\n\trunsMap: Record<string, RunDetail> = {};\n\n\toptions = {\n\t\t// Default(OpenAI format) parser\n\t\ttokensUsageParser: (llmOutput: LLMResult['llmOutput']) => {\n\t\t\tconst completionTokens = (llmOutput?.tokenUsage?.completionTokens as number) ?? 0;\n\t\t\tconst promptTokens = (llmOutput?.tokenUsage?.promptTokens as number) ?? 0;\n\n\t\t\treturn {\n\t\t\t\tcompletionTokens,\n\t\t\t\tpromptTokens,\n\t\t\t\ttotalTokens: completionTokens + promptTokens,\n\t\t\t};\n\t\t},\n\t\terrorDescriptionMapper: (error: NodeError) => error.description,\n\t};\n\n\tconstructor(\n\t\tprivate executionFunctions: ISupplyDataFunctions,\n\t\toptions?: {\n\t\t\ttokensUsageParser?: TokensUsageParser;\n\t\t\terrorDescriptionMapper?: (error: NodeError) => string;\n\t\t},\n\t) {\n\t\tsuper();\n\t\tthis.options = { ...this.options, ...options };\n\t}\n\n\tasync estimateTokensFromGeneration(generations: LLMResult['generations']) {\n\t\tconst messages = generations.flatMap((gen) => gen.map((g) => g.text));\n\t\treturn await this.estimateTokensFromStringList(messages);\n\t}\n\n\tasync estimateTokensFromStringList(list: string[]) {\n\t\tconst embeddingModel = getModelNameForTiktoken(TIKTOKEN_ESTIMATE_MODEL);\n\t\treturn await estimateTokensFromStringList(list, embeddingModel);\n\t}\n\n\tasync handleLLMEnd(output: LLMResult, runId: string) {\n\t\t// The fallback should never happen since handleLLMStart should always set the run details\n\t\t// but just in case, we set the index to the length of the runsMap\n\t\tconst runDetails = this.runsMap[runId] ?? 
{ index: Object.keys(this.runsMap).length };\n\n\t\toutput.generations = output.generations.map((gen) =>\n\t\t\tgen.map((g) => pick(g, ['text', 'generationInfo'])),\n\t\t);\n\n\t\tconst tokenUsageEstimate = {\n\t\t\tcompletionTokens: 0,\n\t\t\tpromptTokens: 0,\n\t\t\ttotalTokens: 0,\n\t\t};\n\t\tconst tokenUsage = this.options.tokensUsageParser(output.llmOutput);\n\n\t\tif (output.generations.length > 0) {\n\t\t\ttokenUsageEstimate.completionTokens = await this.estimateTokensFromGeneration(\n\t\t\t\toutput.generations,\n\t\t\t);\n\n\t\t\ttokenUsageEstimate.promptTokens = this.promptTokensEstimate;\n\t\t\ttokenUsageEstimate.totalTokens =\n\t\t\t\ttokenUsageEstimate.completionTokens + this.promptTokensEstimate;\n\t\t}\n\t\tconst response: {\n\t\t\tresponse: { generations: LLMResult['generations'] };\n\t\t\ttokenUsageEstimate?: typeof tokenUsageEstimate;\n\t\t\ttokenUsage?: typeof tokenUsage;\n\t\t} = {\n\t\t\tresponse: { generations: output.generations },\n\t\t};\n\n\t\t// If the LLM response contains actual tokens usage, otherwise fallback to the estimate\n\t\tif (tokenUsage.completionTokens > 0) {\n\t\t\tresponse.tokenUsage = tokenUsage;\n\t\t} else {\n\t\t\tresponse.tokenUsageEstimate = tokenUsageEstimate;\n\t\t}\n\n\t\tconst parsedMessages =\n\t\t\ttypeof runDetails.messages === 'string'\n\t\t\t\t? runDetails.messages\n\t\t\t\t: runDetails.messages.map((message) => {\n\t\t\t\t\t\tif (typeof message === 'string') return message;\n\t\t\t\t\t\tif (typeof message?.toJSON === 'function') return message.toJSON();\n\n\t\t\t\t\t\treturn message;\n\t\t\t\t\t});\n\n\t\tconst sourceNodeRunIndex =\n\t\t\tthis.#parentRunIndex !== undefined ? this.#parentRunIndex + runDetails.index : undefined;\n\n\t\tthis.executionFunctions.addOutputData(\n\t\t\tthis.connectionType,\n\t\t\trunDetails.index,\n\t\t\t[[{ json: { ...response } }]],\n\t\t\tundefined,\n\t\t\tsourceNodeRunIndex,\n\t\t);\n\n\t\tlogAiEvent(this.executionFunctions, 'ai-llm-generated-output', {\n\t\t\tmessages: parsedMessages,\n\t\t\toptions: runDetails.options,\n\t\t\tresponse,\n\t\t});\n\t}\n\n\tasync handleLLMStart(llm: Serialized, prompts: string[], runId: string) {\n\t\tconst estimatedTokens = await this.estimateTokensFromStringList(prompts);\n\t\tconst sourceNodeRunIndex =\n\t\t\tthis.#parentRunIndex !== undefined\n\t\t\t\t? this.#parentRunIndex + this.executionFunctions.getNextRunIndex()\n\t\t\t\t: undefined;\n\n\t\tconst options = llm.type === 'constructor' ? llm.kwargs : llm;\n\t\tconst { index } = this.executionFunctions.addInputData(\n\t\t\tthis.connectionType,\n\t\t\t[\n\t\t\t\t[\n\t\t\t\t\t{\n\t\t\t\t\t\tjson: {\n\t\t\t\t\t\t\tmessages: prompts,\n\t\t\t\t\t\t\testimatedTokens,\n\t\t\t\t\t\t\toptions,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t],\n\t\t\tsourceNodeRunIndex,\n\t\t);\n\n\t\t// Save the run details for later use when processing `handleLLMEnd` event\n\t\tthis.runsMap[runId] = {\n\t\t\tindex,\n\t\t\toptions,\n\t\t\tmessages: prompts,\n\t\t};\n\t\tthis.promptTokensEstimate = estimatedTokens;\n\t}\n\n\tasync handleLLMError(error: IDataObject | Error, runId: string, parentRunId?: string) {\n\t\tconst runDetails = this.runsMap[runId] ?? 
{ index: Object.keys(this.runsMap).length };\n\n\t\t// Filter out non-x- headers to avoid leaking sensitive information in logs\n\t\tif (typeof error === 'object' && error?.hasOwnProperty('headers')) {\n\t\t\tconst errorWithHeaders = error as { headers: Record<string, unknown> };\n\n\t\t\tObject.keys(errorWithHeaders.headers).forEach((key) => {\n\t\t\t\tif (!key.startsWith('x-')) {\n\t\t\t\t\tdelete errorWithHeaders.headers[key];\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\n\t\tif (error instanceof NodeError) {\n\t\t\tif (this.options.errorDescriptionMapper) {\n\t\t\t\terror.description = this.options.errorDescriptionMapper(error);\n\t\t\t}\n\n\t\t\tthis.executionFunctions.addOutputData(this.connectionType, runDetails.index, error);\n\t\t} else {\n\t\t\t// If the error is not a NodeError, we wrap it in a NodeOperationError\n\t\t\tthis.executionFunctions.addOutputData(\n\t\t\t\tthis.connectionType,\n\t\t\t\trunDetails.index,\n\t\t\t\tnew NodeOperationError(this.executionFunctions.getNode(), error as JsonObject, {\n\t\t\t\t\tfunctionality: 'configuration-node',\n\t\t\t\t}),\n\t\t\t);\n\t\t}\n\n\t\tlogAiEvent(this.executionFunctions, 'ai-llm-errored', {\n\t\t\terror: Object.keys(error).length === 0 ? error.toString() : error,\n\t\t\trunId,\n\t\t\tparentRunId,\n\t\t});\n\t}\n\n\t// Used to associate subsequent runs with the correct parent run in subnodes of subnodes\n\tsetParentRunIndex(runIndex: number) {\n\t\tthis.#parentRunIndex = runIndex;\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,kBAAoC;AAEpC,IAAAA,eAAwC;AAQxC,kBAAiB;AAEjB,0BAAmE;AAEnE,qBAA2B;AAC3B,6BAA6C;AAf7C;AA6BA,MAAM,0BAA0B;AACzB,MAAM,sBAAsB,gCAAoB;AAAA,EAsCtD,YACS,oBACR,SAIC;AACD,UAAM;AANE;AAtCT,gBAAO;AAIP;AAAA;AAAA,yBAAgB;AAEhB,0BAAiB,wCAAoB;AAErC,gCAAuB;AAEvB,oCAA2B;AAE3B;AAQA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,mBAAqC,CAAC;AAEtC,mBAAU;AAAA;AAAA,MAET,mBAAmB,CAAC,cAAsC;AACzD,cAAM,mBAAoB,WAAW,YAAY,oBAA+B;AAChF,cAAM,eAAgB,WAAW,YAAY,gBAA2B;AAExE,eAAO;AAAA,UACN;AAAA,UACA;AAAA,UACA,aAAa,mBAAmB;AAAA,QACjC;AAAA,MACD;AAAA,MACA,wBAAwB,CAAC,UAAqB,MAAM;AAAA,IACrD;AAUC,SAAK,UAAU,EAAE,GAAG,KAAK,SAAS,GAAG,QAAQ;AAAA,EAC9C;AAAA,EAEA,MAAM,6BAA6B,aAAuC;AACzE,UAAM,WAAW,YAAY,QAAQ,CAAC,QAAQ,IAAI,IAAI,CAAC,MAAM,EAAE,IAAI,CAAC;AACpE,WAAO,MAAM,KAAK,6BAA6B,QAAQ;AAAA,EACxD;AAAA,EAEA,MAAM,6BAA6B,MAAgB;AAClD,UAAM,qBAAiB,sCAAwB,uBAAuB;AACtE,WAAO,UAAM,qDAA6B,MAAM,cAAc;AAAA,EAC/D;AAAA,EAEA,MAAM,aAAa,QAAmB,OAAe;AAGpD,UAAM,aAAa,KAAK,QAAQ,KAAK,KAAK,EAAE,OAAO,OAAO,KAAK,KAAK,OAAO,EAAE,OAAO;AAEpF,WAAO,cAAc,OAAO,YAAY;AAAA,MAAI,CAAC,QAC5C,IAAI,IAAI,CAAC,UAAM,YAAAC,SAAK,GAAG,CAAC,QAAQ,gBAAgB,CAAC,CAAC;AAAA,IACnD;AAEA,UAAM,qBAAqB;AAAA,MAC1B,kBAAkB;AAAA,MAClB,cAAc;AAAA,MACd,aAAa;AAAA,IACd;AACA,UAAM,aAAa,KAAK,QAAQ,kBAAkB,OAAO,SAAS;AAElE,QAAI,OAAO,YAAY,SAAS,GAAG;AAClC,yBAAmB,mBAAmB,MAAM,KAAK;AAAA,QAChD,OAAO;AAAA,MACR;AAEA,yBAAmB,eAAe,KAAK;AACvC,yBAAmB,cAClB,mBAAmB,mBAAmB,KAAK;AAAA,IAC7C;AACA,UAAM,WAIF;AAAA,MACH,UAAU,EAAE,aAAa,OAAO,YAAY;AAAA,IAC7C;AAGA,QAAI,WAAW,mBAAmB,GAAG;AACpC,eAAS,aAAa;AAAA,IACvB,OAAO;AACN,eAAS,qBAAqB;AAAA,IAC/B;AAEA,UAAM,iBACL,OAAO,WAAW,aAAa,WAC5B,WAAW,WACX,WAAW,SAAS,IAAI,CAAC,YAAY;AACrC,UAAI,OAAO,YAAY,SAAU,QAAO;AACxC,UAAI,OAAO,SAAS,WAAW,WAAY,QAAO,QAAQ,OAAO;AAEjE,aAAO;AAAA,IACR,CAAC;AAEJ,UAAM,qBACL,mBAAK,qBAAoB,SAAY,mBAAK,mBAAkB,WAAW,QAAQ;AAEhF,SAAK,mBAAmB;AAAA,MACvB,KAAK;AAAA,MACL,WAAW;AAAA,MACX,CAAC,CAAC,EAAE,MAAM,EAAE,GAAG,SAAS,EAAE,CAAC,CAAC;AAAA,MAC5B;AAAA,MACA;AAAA,IACD;AAEA,mCAAW,KAAK,oBAAoB,2BAA2B;AAAA,MAC9D,UAAU;AAAA,MACV,SAAS,WAAW;AAAA,MACpB;AAAA,IACD,CAAC;AAAA,EACF;AAAA,EAEA,MAAM,eAAe,KAAiB,SAAmB,OAAe;AACvE,UAAM,kBAA
kB,MAAM,KAAK,6BAA6B,OAAO;AACvE,UAAM,qBACL,mBAAK,qBAAoB,SACtB,mBAAK,mBAAkB,KAAK,mBAAmB,gBAAgB,IAC/D;AAEJ,UAAM,UAAU,IAAI,SAAS,gBAAgB,IAAI,SAAS;AAC1D,UAAM,EAAE,MAAM,IAAI,KAAK,mBAAmB;AAAA,MACzC,KAAK;AAAA,MACL;AAAA,QACC;AAAA,UACC;AAAA,YACC,MAAM;AAAA,cACL,UAAU;AAAA,cACV;AAAA,cACA;AAAA,YACD;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MACA;AAAA,IACD;AAGA,SAAK,QAAQ,KAAK,IAAI;AAAA,MACrB;AAAA,MACA;AAAA,MACA,UAAU;AAAA,IACX;AACA,SAAK,uBAAuB;AAAA,EAC7B;AAAA,EAEA,MAAM,eAAe,OAA4B,OAAe,aAAsB;AACrF,UAAM,aAAa,KAAK,QAAQ,KAAK,KAAK,EAAE,OAAO,OAAO,KAAK,KAAK,OAAO,EAAE,OAAO;AAGpF,QAAI,OAAO,UAAU,YAAY,OAAO,eAAe,SAAS,GAAG;AAClE,YAAM,mBAAmB;AAEzB,aAAO,KAAK,iBAAiB,OAAO,EAAE,QAAQ,CAAC,QAAQ;AACtD,YAAI,CAAC,IAAI,WAAW,IAAI,GAAG;AAC1B,iBAAO,iBAAiB,QAAQ,GAAG;AAAA,QACpC;AAAA,MACD,CAAC;AAAA,IACF;AAEA,QAAI,iBAAiB,+BAAW;AAC/B,UAAI,KAAK,QAAQ,wBAAwB;AACxC,cAAM,cAAc,KAAK,QAAQ,uBAAuB,KAAK;AAAA,MAC9D;AAEA,WAAK,mBAAmB,cAAc,KAAK,gBAAgB,WAAW,OAAO,KAAK;AAAA,IACnF,OAAO;AAEN,WAAK,mBAAmB;AAAA,QACvB,KAAK;AAAA,QACL,WAAW;AAAA,QACX,IAAI,uCAAmB,KAAK,mBAAmB,QAAQ,GAAG,OAAqB;AAAA,UAC9E,eAAe;AAAA,QAChB,CAAC;AAAA,MACF;AAAA,IACD;AAEA,mCAAW,KAAK,oBAAoB,kBAAkB;AAAA,MACrD,OAAO,OAAO,KAAK,KAAK,EAAE,WAAW,IAAI,MAAM,SAAS,IAAI;AAAA,MAC5D;AAAA,MACA;AAAA,IACD,CAAC;AAAA,EACF;AAAA;AAAA,EAGA,kBAAkB,UAAkB;AACnC,uBAAK,iBAAkB;AAAA,EACxB;AACD;AA7LC;","names":["import_base","pick"]}
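
The only source-level change visible in this map is `handleLLMError` collapsing its parameter list onto one line (`parentRunId?: string` rather than `parentRunId?: string | undefined`); runtime behavior is unchanged. For reference, the header-scrubbing step that this handler performs, per the embedded sourcesContent, can be restated as a standalone function. This is an illustrative restatement, not the shipped code.

// Restatement of the handleLLMError header filter: every header that does
// not start with "x-" is deleted before the error is logged, so credentials
// such as authorization headers or cookies never reach execution logs.
type ErrorWithHeaders = { headers?: Record<string, unknown> };

function scrubErrorHeaders(error: ErrorWithHeaders): void {
  if (!error.headers) return;
  for (const key of Object.keys(error.headers)) {
    if (!key.startsWith("x-")) {
      delete error.headers[key]; // keep only x-* diagnostic headers
    }
  }
}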